<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void jpc_qmfb_split_row(jpc_fix_t *a, int numcols, int parity) { int bufsize = JPC_CEILDIVPOW2(numcols, 1); #if !defined(HAVE_VLA) jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; #else jpc_fix_t splitbuf[bufsize]; #endif jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartcol; #if !defined(HAVE_VLA) /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } #endif if (numcols >= 2) { hstartcol = (numcols + 1 - parity) >> 1; m = (parity) ? hstartcol : (numcols - hstartcol); /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[1 - parity]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += 2; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[1 - parity]; srcptr = &a[2 - parity]; n = numcols - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += 2; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; ++srcptr; } } #if !defined(HAVE_VLA) /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } #endif } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'CVE-2014-8158'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void jpc_qmfb_join_row(jpc_fix_t *a, int numcols, int parity) { int bufsize = JPC_CEILDIVPOW2(numcols, 1); #if !defined(HAVE_VLA) jpc_fix_t joinbuf[QMFB_JOINBUFSIZE]; #else jpc_fix_t joinbuf[bufsize]; #endif jpc_fix_t *buf = joinbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; int hstartcol; #if !defined(HAVE_VLA) /* Allocate memory for the join buffer from the heap. */ if (bufsize > QMFB_JOINBUFSIZE) { if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide. */ abort(); } } #endif hstartcol = (numcols + 1 - parity) >> 1; /* Save the samples from the lowpass channel. */ n = hstartcol; srcptr = &a[0]; dstptr = buf; while (n-- > 0) { *dstptr = *srcptr; ++srcptr; ++dstptr; } /* Copy the samples from the highpass channel into place. */ srcptr = &a[hstartcol]; dstptr = &a[1 - parity]; n = numcols - hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } /* Copy the samples from the lowpass channel into place. */ srcptr = buf; dstptr = &a[parity]; n = hstartcol; while (n-- > 0) { *dstptr = *srcptr; dstptr += 2; ++srcptr; } #if !defined(HAVE_VLA) /* If the join buffer was allocated on the heap, free this memory. */ if (buf != joinbuf) { jas_free(buf); } #endif } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'CVE-2014-8158'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void jpc_qmfb_split_colres(jpc_fix_t *a, int numrows, int numcols, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); #if !defined(HAVE_VLA) jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE * JPC_QMFB_COLGRPSIZE]; #else jpc_fix_t splitbuf[bufsize * numcols]; #endif jpc_fix_t *buf = splitbuf; jpc_fix_t *srcptr; jpc_fix_t *dstptr; register jpc_fix_t *srcptr2; register jpc_fix_t *dstptr2; register int n; register int i; int m; int hstartcol; #if !defined(HAVE_VLA) /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } #endif if (numrows >= 2) { hstartcol = (numrows + 1 - parity) >> 1; m = (parity) ? hstartcol : (numrows - hstartcol); /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += numcols; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol * stride]; srcptr = buf; n = m; while (n-- > 0) { dstptr2 = dstptr; srcptr2 = srcptr; for (i = 0; i < numcols; ++i) { *dstptr2 = *srcptr2; ++dstptr2; ++srcptr2; } dstptr += stride; srcptr += numcols; } } #if !defined(HAVE_VLA) /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } #endif } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'CVE-2014-8158'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static noinline void key_gc_unused_keys(struct list_head *keys) { while (!list_empty(keys)) { struct key *key = list_entry(keys->next, struct key, graveyard_link); list_del(&key->graveyard_link); kdebug("- %u", key->serial); key_check(key); security_key_free(key); /* deal with the user's key tracking and quota */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { spin_lock(&key->user->lock); key->user->qnkeys--; key->user->qnbytes -= key->quotalen; spin_unlock(&key->user->lock); } atomic_dec(&key->user->nkeys); if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); key_user_put(key->user); /* now throw away the key memory */ if (key->type->destroy) key->type->destroy(key); kfree(key->description); #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC_X; #endif kmem_cache_free(key_jar, key); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'KEYS: close race between key lookup and freeing When a key is being garbage collected, it's key->user would get put before the ->destroy() callback is called, where the key is removed from it's respective tracking structures. This leaves a key hanging in a semi-invalid state which leaves a window open for a different task to try an access key->user. An example is find_keyring_by_name() which would dereference key->user for a key that is in the process of being garbage collected (where key->user was freed but ->destroy() wasn't called yet - so it's still present in the linked list). This would cause either a panic, or corrupt memory. Fixes CVE-2014-9529. Signed-off-by: Sasha Levin <[email protected]> Signed-off-by: David Howells <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status, const gss_ctx_id_t context_handle, const gss_OID desired_object, gss_buffer_set_t *data_set) { krb5_gss_ctx_id_rec *ctx; size_t i; if (minor_status == NULL) return GSS_S_CALL_INACCESSIBLE_WRITE; *minor_status = 0; if (desired_object == GSS_C_NO_OID) return GSS_S_CALL_INACCESSIBLE_READ; if (data_set == NULL) return GSS_S_CALL_INACCESSIBLE_WRITE; *data_set = GSS_C_NO_BUFFER_SET; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (!ctx->established) return GSS_S_NO_CONTEXT; for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/ sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) { if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) { return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status, context_handle, desired_object, data_set); } } *minor_status = EINVAL; return GSS_S_UNAVAILABLE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: gss_krb5int_export_lucid_sec_context( OM_uint32 *minor_status, const gss_ctx_id_t context_handle, const gss_OID desired_object, gss_buffer_set_t *data_set) { krb5_error_code kret = 0; OM_uint32 retval; krb5_gss_ctx_id_t ctx = (krb5_gss_ctx_id_t)context_handle; void *lctx = NULL; int version = 0; gss_buffer_desc rep; /* Assume failure */ retval = GSS_S_FAILURE; *minor_status = 0; *data_set = GSS_C_NO_BUFFER_SET; retval = generic_gss_oid_decompose(minor_status, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID, GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH, desired_object, &version); if (GSS_ERROR(retval)) return retval; /* Externalize a structure of the right version */ switch (version) { case 1: kret = make_external_lucid_ctx_v1((krb5_pointer)ctx, version, &lctx); break; default: kret = (OM_uint32) KG_LUCID_VERSION; break; } if (kret) goto error_out; rep.value = &lctx; rep.length = sizeof(lctx); retval = generic_gss_add_buffer_set_member(minor_status, &rep, data_set); if (GSS_ERROR(retval)) goto error_out; error_out: if (*minor_status == 0) *minor_status = (OM_uint32) kret; return(retval); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: kg_seal(minor_status, context_handle, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, toktype) OM_uint32 *minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_message_buffer; int *conf_state; gss_buffer_t output_message_buffer; int toktype; { krb5_gss_ctx_id_rec *ctx; krb5_error_code code; krb5_context context; output_message_buffer->length = 0; output_message_buffer->value = NULL; /* Only default qop or matching established cryptosystem is allowed. There are NO EXTENSIONS to this set for AES and friends! The new spec says "just use 0". The old spec plus extensions would actually allow for certain non-zero values. Fix this to handle them later. */ if (qop_req != 0) { *minor_status = (OM_uint32) G_UNKNOWN_QOP; return GSS_S_FAILURE; } ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } context = ctx->k5_context; switch (ctx->proto) { case 0: code = make_seal_token_v1(context, ctx->enc, ctx->seq, &ctx->seq_send, ctx->initiate, input_message_buffer, output_message_buffer, ctx->signalg, ctx->cksum_size, ctx->sealalg, conf_req_flag, toktype, ctx->mech_used); break; case 1: code = gss_krb5int_make_seal_token_v3(context, ctx, input_message_buffer, output_message_buffer, conf_req_flag, toktype); break; default: code = G_UNKNOWN_QOP; /* XXX */ break; } if (code) { *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } if (conf_state) *conf_state = conf_req_flag; *minor_status = 0; return(GSS_S_COMPLETE); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: krb5_gss_export_sec_context(minor_status, context_handle, interprocess_token) OM_uint32 *minor_status; gss_ctx_id_t *context_handle; gss_buffer_t interprocess_token; { krb5_context context = NULL; krb5_error_code kret; OM_uint32 retval; size_t bufsize, blen; krb5_gss_ctx_id_t ctx; krb5_octet *obuffer, *obp; /* Assume a tragic failure */ obuffer = (krb5_octet *) NULL; retval = GSS_S_FAILURE; *minor_status = 0; ctx = (krb5_gss_ctx_id_t) *context_handle; context = ctx->k5_context; kret = krb5_gss_ser_init(context); if (kret) goto error_out; /* Determine size needed for externalization of context */ bufsize = 0; if ((kret = kg_ctx_size(context, (krb5_pointer) ctx, &bufsize))) goto error_out; /* Allocate the buffer */ if ((obuffer = gssalloc_malloc(bufsize)) == NULL) { kret = ENOMEM; goto error_out; } obp = obuffer; blen = bufsize; /* Externalize the context */ if ((kret = kg_ctx_externalize(context, (krb5_pointer) ctx, &obp, &blen))) goto error_out; /* Success! Return the buffer */ interprocess_token->length = bufsize - blen; interprocess_token->value = obuffer; *minor_status = 0; retval = GSS_S_COMPLETE; /* Now, clean up the context state */ (void)krb5_gss_delete_sec_context(minor_status, context_handle, NULL); *context_handle = GSS_C_NO_CONTEXT; return (GSS_S_COMPLETE); error_out: if (retval != GSS_S_COMPLETE) if (kret != 0 && context != 0) save_error_info((OM_uint32)kret, context); if (obuffer && bufsize) { memset(obuffer, 0, bufsize); xfree(obuffer); } if (*minor_status == 0) *minor_status = (OM_uint32) kret; return(retval); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: kg_unseal_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int *conf_state, gss_qop_t *qop_state, gss_iov_buffer_desc *iov, int iov_count, int toktype) { krb5_gss_ctx_id_rec *ctx; OM_uint32 code; ctx = (krb5_gss_ctx_id_rec *)context_handle; if (!ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return GSS_S_NO_CONTEXT; } if (kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM) != NULL) { code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } else { code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } return code; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: krb5_gss_inquire_context(minor_status, context_handle, initiator_name, acceptor_name, lifetime_rec, mech_type, ret_flags, locally_initiated, opened) OM_uint32 *minor_status; gss_ctx_id_t context_handle; gss_name_t *initiator_name; gss_name_t *acceptor_name; OM_uint32 *lifetime_rec; gss_OID *mech_type; OM_uint32 *ret_flags; int *locally_initiated; int *opened; { krb5_context context; krb5_error_code code; krb5_gss_ctx_id_rec *ctx; krb5_gss_name_t initiator, acceptor; krb5_timestamp now; krb5_deltat lifetime; if (initiator_name) *initiator_name = (gss_name_t) NULL; if (acceptor_name) *acceptor_name = (gss_name_t) NULL; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } initiator = NULL; acceptor = NULL; context = ctx->k5_context; if ((code = krb5_timeofday(context, &now))) { *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } if ((lifetime = ctx->krb_times.endtime - now) < 0) lifetime = 0; if (initiator_name) { if ((code = kg_duplicate_name(context, ctx->initiate ? ctx->here : ctx->there, &initiator))) { *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } } if (acceptor_name) { if ((code = kg_duplicate_name(context, ctx->initiate ? ctx->there : ctx->here, &acceptor))) { if (initiator) kg_release_name(context, &initiator); *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } } if (initiator_name) *initiator_name = (gss_name_t) initiator; if (acceptor_name) *acceptor_name = (gss_name_t) acceptor; if (lifetime_rec) *lifetime_rec = lifetime; if (mech_type) *mech_type = (gss_OID) ctx->mech_used; if (ret_flags) *ret_flags = ctx->gss_flags; if (locally_initiated) *locally_initiated = ctx->initiate; if (opened) *opened = ctx->established; *minor_status = 0; return((lifetime == 0)?GSS_S_CONTEXT_EXPIRED:GSS_S_COMPLETE); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix gss_process_context_token() [CVE-2014-5352] [MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not actually delete the context; that leaves the caller with a dangling pointer and no way to know that it is invalid. Instead, mark the context as terminated, and check for terminated contexts in the GSS functions which expect established contexts. Also add checks in export_sec_context and pseudo_random, and adjust t_prf.c for the pseudo_random check. ticket: 8055 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool_t xdr_krb5_tl_data(XDR *xdrs, krb5_tl_data **tl_data_head) { krb5_tl_data *tl, *tl2; bool_t more; unsigned int len; switch (xdrs->x_op) { case XDR_FREE: tl = tl2 = *tl_data_head; while (tl) { tl2 = tl->tl_data_next; free(tl->tl_data_contents); free(tl); tl = tl2; } break; case XDR_ENCODE: tl = *tl_data_head; while (1) { more = (tl != NULL); if (!xdr_bool(xdrs, &more)) return FALSE; if (tl == NULL) break; if (!xdr_krb5_int16(xdrs, &tl->tl_data_type)) return FALSE; len = tl->tl_data_length; if (!xdr_bytes(xdrs, (char **) &tl->tl_data_contents, &len, ~0)) return FALSE; tl = tl->tl_data_next; } break; case XDR_DECODE: tl = NULL; while (1) { if (!xdr_bool(xdrs, &more)) return FALSE; if (more == FALSE) break; tl2 = (krb5_tl_data *) malloc(sizeof(krb5_tl_data)); if (tl2 == NULL) return FALSE; memset(tl2, 0, sizeof(krb5_tl_data)); if (!xdr_krb5_int16(xdrs, &tl2->tl_data_type)) return FALSE; if (!xdr_bytes(xdrs, (char **)&tl2->tl_data_contents, &len, ~0)) return FALSE; tl2->tl_data_length = len; tl2->tl_data_next = tl; tl = tl2; } *tl_data_head = tl; break; } return TRUE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix kadm5/gssrpc XDR double free [CVE-2014-9421] [MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free partial deserialization results upon failure to deserialize. This responsibility belongs to the callers, svctcp_getargs() and svcudp_getargs(); doing it in the unwrap function results in freeing the results twice. In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers we are freeing, as other XDR functions such as xdr_bytes() and xdr_string(). ticket: 8056 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool_t auth_gssapi_unwrap_data( OM_uint32 *major, OM_uint32 *minor, gss_ctx_id_t context, uint32_t seq_num, XDR *in_xdrs, bool_t (*xdr_func)(), caddr_t xdr_ptr) { gss_buffer_desc in_buf, out_buf; XDR temp_xdrs; uint32_t verf_seq_num; int conf, qop; unsigned int length; PRINTF(("gssapi_unwrap_data: starting\n")); *major = GSS_S_COMPLETE; *minor = 0; /* assumption */ in_buf.value = NULL; out_buf.value = NULL; if (! xdr_bytes(in_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1)) { PRINTF(("gssapi_unwrap_data: deserializing encrypted data failed\n")); temp_xdrs.x_op = XDR_FREE; (void)xdr_bytes(&temp_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1); return FALSE; } in_buf.length = length; *major = gss_unseal(minor, context, &in_buf, &out_buf, &conf, &qop); free(in_buf.value); if (*major != GSS_S_COMPLETE) return FALSE; PRINTF(("gssapi_unwrap_data: %llu bytes data, %llu bytes sealed\n", (unsigned long long)out_buf.length, (unsigned long long)in_buf.length)); xdrmem_create(&temp_xdrs, out_buf.value, out_buf.length, XDR_DECODE); /* deserialize the sequence number */ if (! xdr_u_int32(&temp_xdrs, &verf_seq_num)) { PRINTF(("gssapi_unwrap_data: deserializing verf_seq_num failed\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } if (verf_seq_num != seq_num) { PRINTF(("gssapi_unwrap_data: seq %d specified, read %d\n", seq_num, verf_seq_num)); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: unwrap seq_num %d okay\n", verf_seq_num)); /* deserialize the arguments into xdr_ptr */ if (! (*xdr_func)(&temp_xdrs, xdr_ptr)) { PRINTF(("gssapi_unwrap_data: deserializing arguments failed\n")); gss_release_buffer(minor, &out_buf); xdr_free(xdr_func, xdr_ptr); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: succeeding\n\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return TRUE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix kadm5/gssrpc XDR double free [CVE-2014-9421] [MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free partial deserialization results upon failure to deserialize. This responsibility belongs to the callers, svctcp_getargs() and svcudp_getargs(); doing it in the unwrap function results in freeing the results twice. In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers we are freeing, as other XDR functions such as xdr_bytes() and xdr_string(). ticket: 8056 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg, bool_t *no_dispatch) { enum auth_stat retstat; XDR xdrs; SVCAUTH *auth; struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; struct rpc_gss_init_res gr; int call_stat, offset; OM_uint32 min_stat; log_debug("in svcauth_gss()"); /* Initialize reply. */ rqst->rq_xprt->xp_verf = gssrpc__null_auth; /* Allocate and set up server auth handle. */ if (rqst->rq_xprt->xp_auth == NULL || rqst->rq_xprt->xp_auth == &svc_auth_none) { if ((auth = calloc(sizeof(*auth), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } if ((gd = calloc(sizeof(*gd), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } auth->svc_ah_ops = &svc_auth_gss_ops; SVCAUTH_PRIVATE(auth) = gd; rqst->rq_xprt->xp_auth = auth; } else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd); /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) return (AUTH_BADCRED); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gc, 0, sizeof(*gc)); log_debug("calling xdrmem_create()"); log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base, rqst->rq_cred.oa_length); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); log_debug("xdrmem_create() returned"); if (!xdr_rpc_gss_cred(&xdrs, gc)) { log_debug("xdr_rpc_gss_cred() failed"); XDR_DESTROY(&xdrs); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); retstat = AUTH_FAILED; #define ret_freegc(code) do { retstat = code; goto freegc; } while (0) /* Check version. */ if (gc->gc_v != RPCSEC_GSS_VERSION) ret_freegc (AUTH_BADCRED); /* Check RPCSEC_GSS service. */ if (gc->gc_svc != RPCSEC_GSS_SVC_NONE && gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY && gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY) ret_freegc (AUTH_BADCRED); /* Check sequence number. */ if (gd->established) { if (gc->gc_seq > MAXSEQ) ret_freegc (RPCSEC_GSS_CTXPROBLEM); if ((offset = gd->seqlast - gc->gc_seq) < 0) { gd->seqlast = gc->gc_seq; offset = 0 - offset; gd->seqmask <<= offset; offset = 0; } else if ((u_int)offset >= gd->win || (gd->seqmask & (1 << offset))) { *no_dispatch = 1; ret_freegc (RPCSEC_GSS_CTXPROBLEM); } gd->seq = gc->gc_seq; gd->seqmask |= (1 << offset); } if (gd->established) { rqst->rq_clntname = (char *)gd->client_name; rqst->rq_svccred = (char *)gd->ctx; } /* Handle RPCSEC_GSS control procedure. */ switch (gc->gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); /* XXX ? 
*/ if (!svcauth_gss_acquire_cred()) ret_freegc (AUTH_FAILED); if (!svcauth_gss_accept_sec_context(rqst, &gr)) ret_freegc (AUTH_REJECTEDCRED); if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) { gss_release_buffer(&min_stat, &gr.gr_token); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); ret_freegc (AUTH_FAILED); } *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res, (caddr_t)&gr); gss_release_buffer(&min_stat, &gr.gr_token); gss_release_buffer(&min_stat, &gd->checksum); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); if (!call_stat) ret_freegc (AUTH_FAILED); if (gr.gr_major == GSS_S_COMPLETE) gd->established = TRUE; break; case RPCSEC_GSS_DATA: if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); break; case RPCSEC_GSS_DESTROY: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); /* XXX ? */ if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_void, (caddr_t)NULL); log_debug("sendreply in destroy: %d", call_stat); if (!svcauth_gss_release_cred()) ret_freegc (AUTH_FAILED); SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth); rqst->rq_xprt->xp_auth = &svc_auth_none; break; default: ret_freegc (AUTH_REJECTEDCRED); break; } retstat = AUTH_OK; freegc: xdr_free(xdr_rpc_gss_cred, gc); log_debug("returning %d from svcauth_gss()", retstat); return (retstat); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'Fix gssrpc data leakage [CVE-2014-9423] [MITKRB5-SA-2015-001] In svcauth_gss_accept_sec_context(), do not copy bytes from the union context into the handle field we send to the client. We do not use this handle field, so just supply a fixed string of "xxxx". In gss_union_ctx_id_struct, remove the unused "interposer" field which was causing part of the union context to remain uninitialized. ticket: 8058 (new) target_version: 1.13.1 tags: pullup'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo, ItemPointer tupleid, Datum *values, bool *isnull, EState *estate, bool newIndex, bool errorOK) { Oid *constr_procs = indexInfo->ii_ExclusionProcs; uint16 *constr_strats = indexInfo->ii_ExclusionStrats; Oid *index_collations = index->rd_indcollation; int index_natts = index->rd_index->indnatts; IndexScanDesc index_scan; HeapTuple tup; ScanKeyData scankeys[INDEX_MAX_KEYS]; SnapshotData DirtySnapshot; int i; bool conflict; bool found_self; ExprContext *econtext; TupleTableSlot *existing_slot; TupleTableSlot *save_scantuple; /* * If any of the input values are NULL, the constraint check is assumed to * pass (i.e., we assume the operators are strict). */ for (i = 0; i < index_natts; i++) { if (isnull[i]) return true; } /* * Search the tuples that are in the index for any violations, including * tuples that aren't visible yet. */ InitDirtySnapshot(DirtySnapshot); for (i = 0; i < index_natts; i++) { ScanKeyEntryInitialize(&scankeys[i], 0, i + 1, constr_strats[i], InvalidOid, index_collations[i], constr_procs[i], values[i]); } /* * Need a TupleTableSlot to put existing tuples in. * * To use FormIndexDatum, we have to make the econtext's scantuple point * to this slot. Be sure to save and restore caller's value for * scantuple. */ existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap)); econtext = GetPerTupleExprContext(estate); save_scantuple = econtext->ecxt_scantuple; econtext->ecxt_scantuple = existing_slot; /* * May have to restart scan from this point if a potential conflict is * found. */ retry: conflict = false; found_self = false; index_scan = index_beginscan(heap, index, &DirtySnapshot, index_natts, 0); index_rescan(index_scan, scankeys, index_natts, NULL, 0); while ((tup = index_getnext(index_scan, ForwardScanDirection)) != NULL) { TransactionId xwait; Datum existing_values[INDEX_MAX_KEYS]; bool existing_isnull[INDEX_MAX_KEYS]; char *error_new; char *error_existing; /* * Ignore the entry for the tuple we're trying to check. */ if (ItemPointerEquals(tupleid, &tup->t_self)) { if (found_self) /* should not happen */ elog(ERROR, "found self tuple multiple times in index \"%s\"", RelationGetRelationName(index)); found_self = true; continue; } /* * Extract the index column values and isnull flags from the existing * tuple. */ ExecStoreTuple(tup, existing_slot, InvalidBuffer, false); FormIndexDatum(indexInfo, existing_slot, estate, existing_values, existing_isnull); /* If lossy indexscan, must recheck the condition */ if (index_scan->xs_recheck) { if (!index_recheck_constraint(index, constr_procs, existing_values, existing_isnull, values)) continue; /* tuple doesn't actually match, so no * conflict */ } /* * At this point we have either a conflict or a potential conflict. If * we're not supposed to raise error, just return the fact of the * potential conflict without waiting to see if it's real. */ if (errorOK) { conflict = true; break; } /* * If an in-progress transaction is affecting the visibility of this * tuple, we need to wait for it to complete and then recheck. 
For * simplicity we do rechecking by just restarting the whole scan --- * this case probably doesn't happen often enough to be worth trying * harder, and anyway we don't want to hold any index internal locks * while waiting. */ xwait = TransactionIdIsValid(DirtySnapshot.xmin) ? DirtySnapshot.xmin : DirtySnapshot.xmax; if (TransactionIdIsValid(xwait)) { index_endscan(index_scan); XactLockTableWait(xwait, heap, &tup->t_data->t_ctid, XLTW_RecheckExclusionConstr); goto retry; } /* * We have a definite conflict. Report it. */ error_new = BuildIndexValueDescription(index, values, isnull); error_existing = BuildIndexValueDescription(index, existing_values, existing_isnull); if (newIndex) ereport(ERROR, (errcode(ERRCODE_EXCLUSION_VIOLATION), errmsg("could not create exclusion constraint \"%s\"", RelationGetRelationName(index)), errdetail("Key %s conflicts with key %s.", error_new, error_existing), errtableconstraint(heap, RelationGetRelationName(index)))); else ereport(ERROR, (errcode(ERRCODE_EXCLUSION_VIOLATION), errmsg("conflicting key value violates exclusion constraint \"%s\"", RelationGetRelationName(index)), errdetail("Key %s conflicts with existing key %s.", error_new, error_existing), errtableconstraint(heap, RelationGetRelationName(index)))); } index_endscan(index_scan); /* * Ordinarily, at this point the search should have found the originally * inserted tuple, unless we exited the loop early because of conflict. * However, it is possible to define exclusion constraints for which that * wouldn't be true --- for instance, if the operator is <>. So we no * longer complain if found_self is still false. */ econtext->ecxt_scantuple = save_scantuple; ExecDropSingleTupleTableSlot(existing_slot); return !conflict; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ExecConstraints(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { Relation rel = resultRelInfo->ri_RelationDesc; TupleDesc tupdesc = RelationGetDescr(rel); TupleConstr *constr = tupdesc->constr; Assert(constr); if (constr->has_not_null) { int natts = tupdesc->natts; int attrChk; for (attrChk = 1; attrChk <= natts; attrChk++) { if (tupdesc->attrs[attrChk - 1]->attnotnull && slot_attisnull(slot, attrChk)) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("null value in column \"%s\" violates not-null constraint", NameStr(tupdesc->attrs[attrChk - 1]->attname)), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, tupdesc, 64)), errtablecol(rel, attrChk))); } } if (constr->num_check > 0) { const char *failed; if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("new row for relation \"%s\" violates check constraint \"%s\"", RelationGetRelationName(rel), failed), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, tupdesc, 64)), errtableconstraint(rel, failed))); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ExecBuildSlotValueDescription(TupleTableSlot *slot, TupleDesc tupdesc, int maxfieldlen) { StringInfoData buf; bool write_comma = false; int i; /* Make sure the tuple is fully deconstructed */ slot_getallattrs(slot); initStringInfo(&buf); appendStringInfoChar(&buf, '('); for (i = 0; i < tupdesc->natts; i++) { char *val; int vallen; /* ignore dropped columns */ if (tupdesc->attrs[i]->attisdropped) continue; if (slot->tts_isnull[i]) val = "null"; else { Oid foutoid; bool typisvarlena; getTypeOutputInfo(tupdesc->attrs[i]->atttypid, &foutoid, &typisvarlena); val = OidOutputFunctionCall(foutoid, slot->tts_values[i]); } if (write_comma) appendStringInfoString(&buf, ", "); else write_comma = true; /* truncate if needed */ vallen = strlen(val); if (vallen <= maxfieldlen) appendStringInfoString(&buf, val); else { vallen = pg_mbcliplen(val, vallen, maxfieldlen); appendBinaryStringInfo(&buf, val, vallen); appendStringInfoString(&buf, "..."); } } appendStringInfoChar(&buf, ')'); return buf.data; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) { DR_intorel *myState = (DR_intorel *) self; IntoClause *into = myState->into; bool is_matview; char relkind; CreateStmt *create; Oid intoRelationId; Relation intoRelationDesc; RangeTblEntry *rte; Datum toast_options; ListCell *lc; int attnum; static char *validnsps[] = HEAP_RELOPT_NAMESPACES; Assert(into != NULL); /* else somebody forgot to set it */ /* This code supports both CREATE TABLE AS and CREATE MATERIALIZED VIEW */ is_matview = (into->viewQuery != NULL); relkind = is_matview ? RELKIND_MATVIEW : RELKIND_RELATION; /* * Create the target relation by faking up a CREATE TABLE parsetree and * passing it to DefineRelation. */ create = makeNode(CreateStmt); create->relation = into->rel; create->tableElts = NIL; /* will fill below */ create->inhRelations = NIL; create->ofTypename = NULL; create->constraints = NIL; create->options = into->options; create->oncommit = into->onCommit; create->tablespacename = into->tableSpaceName; create->if_not_exists = false; /* * Build column definitions using "pre-cooked" type and collation info. If * a column name list was specified in CREATE TABLE AS, override the * column names derived from the query. (Too few column names are OK, too * many are not.) */ lc = list_head(into->colNames); for (attnum = 0; attnum < typeinfo->natts; attnum++) { Form_pg_attribute attribute = typeinfo->attrs[attnum]; ColumnDef *col = makeNode(ColumnDef); TypeName *coltype = makeNode(TypeName); if (lc) { col->colname = strVal(lfirst(lc)); lc = lnext(lc); } else col->colname = NameStr(attribute->attname); col->typeName = coltype; col->inhcount = 0; col->is_local = true; col->is_not_null = false; col->is_from_type = false; col->storage = 0; col->raw_default = NULL; col->cooked_default = NULL; col->collClause = NULL; col->collOid = attribute->attcollation; col->constraints = NIL; col->fdwoptions = NIL; col->location = -1; coltype->names = NIL; coltype->typeOid = attribute->atttypid; coltype->setof = false; coltype->pct_type = false; coltype->typmods = NIL; coltype->typemod = attribute->atttypmod; coltype->arrayBounds = NIL; coltype->location = -1; /* * It's possible that the column is of a collatable type but the * collation could not be resolved, so double-check. (We must check * this here because DefineRelation would adopt the type's default * collation rather than complaining.) */ if (!OidIsValid(col->collOid) && type_is_collatable(coltype->typeOid)) ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), errmsg("no collation was derived for column \"%s\" with collatable type %s", col->colname, format_type_be(coltype->typeOid)), errhint("Use the COLLATE clause to set the collation explicitly."))); create->tableElts = lappend(create->tableElts, col); } if (lc != NULL) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("too many column names were specified"))); /* * Actually create the target table */ intoRelationId = DefineRelation(create, relkind, InvalidOid); /* * If necessary, create a TOAST table for the target table. Note that * NewRelationCreateToastTable ends with CommandCounterIncrement(), so * that the TOAST table will be visible for insertion. 
*/ CommandCounterIncrement(); /* parse and validate reloptions for the toast table */ toast_options = transformRelOptions((Datum) 0, create->options, "toast", validnsps, true, false); (void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true); NewRelationCreateToastTable(intoRelationId, toast_options); /* Create the "view" part of a materialized view. */ if (is_matview) { /* StoreViewQuery scribbles on tree, so make a copy */ Query *query = (Query *) copyObject(into->viewQuery); StoreViewQuery(intoRelationId, query, false); CommandCounterIncrement(); } /* * Finally we can open the target table */ intoRelationDesc = heap_open(intoRelationId, AccessExclusiveLock); /* * Check INSERT permission on the constructed table. * * XXX: It would arguably make sense to skip this check if into->skipData * is true. */ rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = intoRelationId; rte->relkind = relkind; rte->requiredPerms = ACL_INSERT; for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++) rte->modifiedCols = bms_add_member(rte->modifiedCols, attnum - FirstLowInvalidHeapAttributeNumber); ExecCheckRTPerms(list_make1(rte), true); /* * Make sure the constructed table does not have RLS enabled. * * check_enable_rls() will ereport(ERROR) itself if the user has requested * something invalid, and otherwise will return RLS_ENABLED if RLS should * be enabled here. We don't actually support that currently, so throw * our own ereport(ERROR) if that happens. */ if (check_enable_rls(intoRelationId, InvalidOid) == RLS_ENABLED) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errmsg("policies not yet implemented for this command")))); /* * Tentatively mark the target as populated, if it's a matview and we're * going to fill it; otherwise, no change needed. */ if (is_matview && !into->skipData) SetMatViewPopulatedState(intoRelationDesc, true); /* * Fill private fields of myState for use by later routines */ myState->rel = intoRelationDesc; myState->output_cid = GetCurrentCommandId(true); /* and remember the new relation's OID for ExecCreateTableAs */ CreateAsRelid = RelationGetRelid(myState->rel); /* * We can skip WAL-logging the insertions, unless PITR or streaming * replication is in use. We can skip the FSM in any case. */ myState->hi_options = HEAP_INSERT_SKIP_FSM | (XLogIsNeeded() ? 0 : HEAP_INSERT_SKIP_WAL); myState->bistate = GetBulkInsertState(); /* Not using WAL requires smgr_targblock be initially invalid */ Assert(RelationGetTargetBlock(intoRelationDesc) == InvalidBlockNumber); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. 
Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) { CopyState cstate; bool is_from = stmt->is_from; bool pipe = (stmt->filename == NULL); Relation rel; Oid relid; Node *query = NULL; /* Disallow COPY to/from file or program except to superusers. */ if (!pipe && !superuser()) { if (stmt->is_program) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to COPY to or from an external program"), errhint("Anyone can COPY to stdout or from stdin. " "psql's \\copy command also works for anyone."))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to COPY to or from a file"), errhint("Anyone can COPY to stdout or from stdin. " "psql's \\copy command also works for anyone."))); } if (stmt->relation) { TupleDesc tupDesc; AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT); RangeTblEntry *rte; List *attnums; ListCell *cur; Assert(!stmt->query); /* Open and lock the relation, using the appropriate lock type. */ rel = heap_openrv(stmt->relation, (is_from ? RowExclusiveLock : AccessShareLock)); relid = RelationGetRelid(rel); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = RelationGetRelid(rel); rte->relkind = rel->rd_rel->relkind; rte->requiredPerms = required_access; tupDesc = RelationGetDescr(rel); attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist); foreach(cur, attnums) { int attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; if (is_from) rte->modifiedCols = bms_add_member(rte->modifiedCols, attno); else rte->selectedCols = bms_add_member(rte->selectedCols, attno); } ExecCheckRTPerms(list_make1(rte), true); /* * Permission check for row security policies. * * check_enable_rls will ereport(ERROR) if the user has requested * something invalid and will otherwise indicate if we should enable * RLS (returns RLS_ENABLED) or not for this COPY statement. * * If the relation has a row security policy and we are to apply it * then perform a "query" copy and allow the normal query processing to * handle the policies. * * If RLS is not enabled for this, then just fall through to the * normal non-filtering relation handling. */ if (check_enable_rls(rte->relid, InvalidOid) == RLS_ENABLED) { SelectStmt *select; ColumnRef *cr; ResTarget *target; RangeVar *from; if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY FROM not supported with row level security."), errhint("Use direct INSERT statements instead."))); /* Build target list */ cr = makeNode(ColumnRef); if (!stmt->attlist) cr->fields = list_make1(makeNode(A_Star)); else cr->fields = stmt->attlist; cr->location = 1; target = makeNode(ResTarget); target->name = NULL; target->indirection = NIL; target->val = (Node *) cr; target->location = 1; /* Build FROM clause */ from = stmt->relation; /* Build query */ select = makeNode(SelectStmt); select->targetList = list_make1(target); select->fromClause = list_make1(from); query = (Node*) select; /* Close the handle to the relation as it is no longer needed. */ heap_close(rel, (is_from ? 
RowExclusiveLock : AccessShareLock)); rel = NULL; } } else { Assert(stmt->query); query = stmt->query; relid = InvalidOid; rel = NULL; } if (is_from) { Assert(rel); /* check read-only transaction */ if (XactReadOnly && !rel->rd_islocaltemp) PreventCommandIfReadOnly("COPY FROM"); cstate = BeginCopyFrom(rel, stmt->filename, stmt->is_program, stmt->attlist, stmt->options); *processed = CopyFrom(cstate); /* copy from file to database */ EndCopyFrom(cstate); } else { cstate = BeginCopyTo(rel, query, queryString, relid, stmt->filename, stmt->is_program, stmt->attlist, stmt->options); *processed = DoCopyTo(cstate); /* copy from database to file */ EndCopyTo(cstate); } /* * Close the relation. If reading, we can release the AccessShareLock we * got; if writing, we should hold the lock until end of transaction to * ensure that updates will be committed before lock is released. */ if (rel != NULL) heap_close(rel, (is_from ? NoLock : AccessShareLock)); return relid; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ExecWithCheckOptions(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { ExprContext *econtext; ListCell *l1, *l2; /* * We will use the EState's per-tuple context for evaluating constraint * expressions (creating it if it's not already there). */ econtext = GetPerTupleExprContext(estate); /* Arrange for econtext's scan tuple to be the tuple under test */ econtext->ecxt_scantuple = slot; /* Check each of the constraints */ forboth(l1, resultRelInfo->ri_WithCheckOptions, l2, resultRelInfo->ri_WithCheckOptionExprs) { WithCheckOption *wco = (WithCheckOption *) lfirst(l1); ExprState *wcoExpr = (ExprState *) lfirst(l2); /* * WITH CHECK OPTION checks are intended to ensure that the new tuple * is visible (in the case of a view) or that it passes the * 'with-check' policy (in the case of row security). * If the qual evaluates to NULL or FALSE, then the new tuple won't be * included in the view or doesn't pass the 'with-check' policy for the * table. We need ExecQual to return FALSE for NULL to handle the view * case (the opposite of what we do above for CHECK constraints). */ if (!ExecQual((List *) wcoExpr, econtext, false)) ereport(ERROR, (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION), errmsg("new row violates WITH CHECK OPTION for \"%s\"", wco->viewname), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, RelationGetDescr(resultRelInfo->ri_RelationDesc), 64)))); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: prepend_row_security_policies(Query* root, RangeTblEntry* rte, int rt_index) { Expr *rowsec_expr = NULL; Expr *rowsec_with_check_expr = NULL; Expr *hook_expr = NULL; Expr *hook_with_check_expr = NULL; List *rowsec_policies; List *hook_policies = NIL; Relation rel; Oid user_id; int sec_context; int rls_status; bool defaultDeny = true; bool hassublinks = false; /* This is just to get the security context */ GetUserIdAndSecContext(&user_id, &sec_context); /* Switch to checkAsUser if it's set */ user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId(); /* * If this is not a normal relation, or we have been told * to explicitly skip RLS (perhaps because this is an FK check) * then just return immediately. */ if (rte->relid < FirstNormalObjectId || rte->relkind != RELKIND_RELATION || (sec_context & SECURITY_ROW_LEVEL_DISABLED)) return false; /* Determine the state of RLS for this, pass checkAsUser explicitly */ rls_status = check_enable_rls(rte->relid, rte->checkAsUser); /* If there is no RLS on this table at all, nothing to do */ if (rls_status == RLS_NONE) return false; /* * RLS_NONE_ENV means we are not doing any RLS now, but that may change * with changes to the environment, so we mark it as hasRowSecurity to * force a re-plan when the environment changes. */ if (rls_status == RLS_NONE_ENV) { /* * Indicate that this query may involve RLS and must therefore * be replanned if the environment changes (GUCs, role), but we * are not adding anything here. */ root->hasRowSecurity = true; return false; } /* * We may end up getting called multiple times for the same RTE, so check * to make sure we aren't doing double-work. */ if (rte->securityQuals != NIL) return false; /* Grab the built-in policies which should be applied to this relation. */ rel = heap_open(rte->relid, NoLock); rowsec_policies = pull_row_security_policies(root->commandType, rel, user_id); /* * Check if this is only the default-deny policy. * * Normally, if the table has row security enabled but there are * no policies, we use a default-deny policy and not allow anything. * However, when an extension uses the hook to add their own * policies, we don't want to include the default deny policy or * there won't be any way for a user to use an extension exclusively * for the policies to be used. */ if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id == InvalidOid) defaultDeny = true; /* Now that we have our policies, build the expressions from them. */ process_policies(rowsec_policies, rt_index, &rowsec_expr, &rowsec_with_check_expr, &hassublinks); /* * Also, allow extensions to add their own policies. * * Note that, as with the internal policies, if multiple policies are * returned then they will be combined into a single expression with * all of them OR'd together. However, to avoid the situation of an * extension granting more access to a table than the internal policies * would allow, the extension's policies are AND'd with the internal * policies. In other words - extensions can only provide further * filtering of the result set (or further reduce the set of records * allowed to be added). * * If only a USING policy is returned by the extension then it will be * used for WITH CHECK as well, similar to how internal policies are * handled. 
* * The only caveat to this is that if there are NO internal policies * defined, there ARE policies returned by the extension, and RLS is * enabled on the table, then we will ignore the internally-generated * default-deny policy and use only the policies returned by the * extension. */ if (row_security_policy_hook) { hook_policies = (*row_security_policy_hook)(root->commandType, rel); /* Build the expression from any policies returned. */ process_policies(hook_policies, rt_index, &hook_expr, &hook_with_check_expr, &hassublinks); } /* * If the only built-in policy is the default-deny one, and hook * policies exist, then use the hook policies only and do not apply * the default-deny policy. Otherwise, apply both sets (AND'd * together). */ if (defaultDeny && hook_policies != NIL) rowsec_expr = NULL; /* * For INSERT or UPDATE, we need to add the WITH CHECK quals to * Query's withCheckOptions to verify that any new records pass the * WITH CHECK policy (this will be a copy of the USING policy, if no * explicit WITH CHECK policy exists). */ if (root->commandType == CMD_INSERT || root->commandType == CMD_UPDATE) { /* * WITH CHECK OPTIONS wants a WCO node which wraps each Expr, so * create them as necessary. */ if (rowsec_with_check_expr) { WithCheckOption *wco; wco = (WithCheckOption *) makeNode(WithCheckOption); wco->viewname = RelationGetRelationName(rel); wco->qual = (Node *) rowsec_with_check_expr; wco->cascaded = false; root->withCheckOptions = lcons(wco, root->withCheckOptions); } /* * Ditto for the expression, if any, returned from the extension. */ if (hook_with_check_expr) { WithCheckOption *wco; wco = (WithCheckOption *) makeNode(WithCheckOption); wco->viewname = RelationGetRelationName(rel); wco->qual = (Node *) hook_with_check_expr; wco->cascaded = false; root->withCheckOptions = lcons(wco, root->withCheckOptions); } } /* For SELECT, UPDATE, and DELETE, set the security quals */ if (root->commandType == CMD_SELECT || root->commandType == CMD_UPDATE || root->commandType == CMD_DELETE) { if (rowsec_expr) rte->securityQuals = lcons(rowsec_expr, rte->securityQuals); if (hook_expr) rte->securityQuals = lcons(hook_expr, rte->securityQuals); } heap_close(rel, NoLock); /* * Mark this query as having row security, so plancache can invalidate * it when necessary (eg: role changes) */ root->hasRowSecurity = true; /* * If we have sublinks added because of the policies being added to the * query, then set hasSubLinks on the Query to force subLinks to be * properly expanded. */ root->hasSubLinks |= hassublinks; /* If we got this far, we must have added quals */ return true; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-209'], 'message': 'Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. 
Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.'</s>
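The three CWE-209 entries above share one remediation idea: an error detail may only echo column values that the reporting user is allowed to read, and must be suppressed entirely otherwise. A minimal, self-contained C sketch of that gating logic follows; has_column_select_priv() and column_value_text() are hypothetical stand-ins, not PostgreSQL's real routines.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical helpers standing in for the real ACL and datum lookups. */
    bool has_column_select_priv(int user, int relid, int attnum);
    const char *column_value_text(const void *row, int attnum);

    /* Build "v1, v2, ..." from the failing row, but return NULL (no detail
     * at all) as soon as one involved column is not readable by the user. */
    const char *build_row_detail(int user, int relid, const void *row,
                                 const int *attnums, int natts,
                                 char *out, size_t outsz)
    {
        size_t used = 0;
        for (int i = 0; i < natts; i++) {
            if (!has_column_select_priv(user, relid, attnums[i]))
                return NULL;                 /* suppress detail: no data leak */
            int n = snprintf(out + used, outsz - used, "%s%s",
                             i ? ", " : "", column_value_text(row, attnums[i]));
            if (n < 0 || (size_t) n >= outsz - used)
                break;                       /* output truncated; stop appending */
            used += (size_t) n;
        }
        return out;
    }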
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: another_hunk (enum diff difftype, bool rev) { char *s; lin context = 0; size_t chars_read; char numbuf0[LINENUM_LENGTH_BOUND + 1]; char numbuf1[LINENUM_LENGTH_BOUND + 1]; char numbuf2[LINENUM_LENGTH_BOUND + 1]; char numbuf3[LINENUM_LENGTH_BOUND + 1]; while (p_end >= 0) { if (p_end == p_efake) p_end = p_bfake; /* don't free twice */ else free(p_line[p_end]); p_end--; } assert(p_end == -1); p_efake = -1; if (p_c_function) { free (p_c_function); p_c_function = NULL; } p_max = hunkmax; /* gets reduced when --- found */ if (difftype == CONTEXT_DIFF || difftype == NEW_CONTEXT_DIFF) { file_offset line_beginning = file_tell (pfp); /* file pos of the current line */ lin repl_beginning = 0; /* index of --- line */ lin fillcnt = 0; /* #lines of missing ptrn or repl */ lin fillsrc; /* index of first line to copy */ lin filldst; /* index of first missing line */ bool ptrn_spaces_eaten = false; /* ptrn was slightly misformed */ bool some_context = false; /* (perhaps internal) context seen */ bool repl_could_be_missing = true; bool ptrn_missing = false; /* The pattern was missing. */ bool repl_missing = false; /* Likewise for replacement. */ file_offset repl_backtrack_position = 0; /* file pos of first repl line */ lin repl_patch_line; /* input line number for same */ lin repl_context; /* context for same */ lin ptrn_prefix_context = -1; /* lines in pattern prefix context */ lin ptrn_suffix_context = -1; /* lines in pattern suffix context */ lin repl_prefix_context = -1; /* lines in replac. prefix context */ lin ptrn_copiable = 0; /* # of copiable lines in ptrn */ lin repl_copiable = 0; /* Likewise for replacement. */ /* Pacify 'gcc -Wall'. */ fillsrc = filldst = repl_patch_line = repl_context = 0; chars_read = get_line (); if (chars_read == (size_t) -1 || chars_read <= 8 || strncmp (buf, "********", 8) != 0) { next_intuit_at(line_beginning,p_input_line); return chars_read == (size_t) -1 ? -1 : 0; } s = buf; while (*s == '*') s++; if (*s == ' ') { p_c_function = s; while (*s != '\n') s++; *s = '\0'; p_c_function = savestr (p_c_function); if (! p_c_function) return -1; } p_hunk_beg = p_input_line + 1; while (p_end < p_max) { chars_read = get_line (); if (chars_read == (size_t) -1) return -1; if (!chars_read) { if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } if (p_max - p_end < 4) { strcpy (buf, " \n"); /* assume blank lines got chopped */ chars_read = 3; } else { fatal ("unexpected end of file in patch"); } } p_end++; if (p_end == hunkmax) fatal ("unterminated hunk starting at line %s; giving up at line %s: %s", format_linenum (numbuf0, pch_hunk_beg ()), format_linenum (numbuf1, p_input_line), buf); assert(p_end < hunkmax); p_Char[p_end] = *buf; p_len[p_end] = 0; p_line[p_end] = 0; switch (*buf) { case '*': if (strnEQ(buf, "********", 8)) { if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } else fatal ("unexpected end of hunk at line %s", format_linenum (numbuf0, p_input_line)); } if (p_end != 0) { if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } fatal ("unexpected '***' at line %s: %s", format_linenum (numbuf0, p_input_line), buf); } context = 0; p_len[p_end] = strlen (buf); if (! 
(p_line[p_end] = savestr (buf))) { p_end--; return -1; } for (s = buf; *s && !ISDIGIT (*s); s++) /* do nothing */ ; if (strnEQ(s,"0,0",3)) remove_prefix (s, 2); s = scan_linenum (s, &p_first); if (*s == ',') { while (*s && !ISDIGIT (*s)) s++; scan_linenum (s, &p_ptrn_lines); p_ptrn_lines += 1 - p_first; } else if (p_first) p_ptrn_lines = 1; else { p_ptrn_lines = 0; p_first = 1; } p_max = p_ptrn_lines + 6; /* we need this much at least */ while (p_max + 1 >= hunkmax) if (! grow_hunkmax ()) return -1; p_max = hunkmax; break; case '-': if (buf[1] != '-') goto change_line; if (ptrn_prefix_context == -1) ptrn_prefix_context = context; ptrn_suffix_context = context; if (repl_beginning || (p_end != p_ptrn_lines + 1 + (p_Char[p_end - 1] == '\n'))) { if (p_end == 1) { /* 'Old' lines were omitted. Set up to fill them in from 'new' context lines. */ ptrn_missing = true; p_end = p_ptrn_lines + 1; ptrn_prefix_context = ptrn_suffix_context = -1; fillsrc = p_end + 1; filldst = 1; fillcnt = p_ptrn_lines; } else if (! repl_beginning) fatal ("%s '---' at line %s; check line numbers at line %s", (p_end <= p_ptrn_lines ? "Premature" : "Overdue"), format_linenum (numbuf0, p_input_line), format_linenum (numbuf1, p_hunk_beg)); else if (! repl_could_be_missing) fatal ("duplicate '---' at line %s; check line numbers at line %s", format_linenum (numbuf0, p_input_line), format_linenum (numbuf1, p_hunk_beg + repl_beginning)); else { repl_missing = true; goto hunk_done; } } repl_beginning = p_end; repl_backtrack_position = file_tell (pfp); repl_patch_line = p_input_line; repl_context = context; p_len[p_end] = strlen (buf); if (! (p_line[p_end] = savestr (buf))) { p_end--; return -1; } p_Char[p_end] = '='; for (s = buf; *s && ! ISDIGIT (*s); s++) /* do nothing */ ; s = scan_linenum (s, &p_newfirst); if (*s == ',') { do { if (!*++s) malformed (); } while (! ISDIGIT (*s)); scan_linenum (s, &p_repl_lines); p_repl_lines += 1 - p_newfirst; } else if (p_newfirst) p_repl_lines = 1; else { p_repl_lines = 0; p_newfirst = 1; } p_max = p_repl_lines + p_end; while (p_max + 1 >= hunkmax) if (! grow_hunkmax ()) return -1; if (p_repl_lines != ptrn_copiable && (p_prefix_context != 0 || context != 0 || p_repl_lines != 1)) repl_could_be_missing = false; context = 0; break; case '+': case '!': repl_could_be_missing = false; change_line: s = buf + 1; chars_read--; if (*s == '\n' && canonicalize) { strcpy (s, " \n"); chars_read = 2; } if (*s == ' ' || *s == '\t') { s++; chars_read--; } else if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } if (! repl_beginning) { if (ptrn_prefix_context == -1) ptrn_prefix_context = context; } else { if (repl_prefix_context == -1) repl_prefix_context = context; } chars_read -= (1 < chars_read && p_end == (repl_beginning ? p_max : p_ptrn_lines) && incomplete_line ()); p_len[p_end] = chars_read; p_line[p_end] = savebuf (s, chars_read); if (chars_read && ! p_line[p_end]) { p_end--; return -1; } context = 0; break; case '\t': case '\n': /* assume spaces got eaten */ s = buf; if (*buf == '\t') { s++; chars_read--; } if (repl_beginning && repl_could_be_missing && (!ptrn_spaces_eaten || difftype == NEW_CONTEXT_DIFF) ) { repl_missing = true; goto hunk_done; } chars_read -= (1 < chars_read && p_end == (repl_beginning ? p_max : p_ptrn_lines) && incomplete_line ()); p_len[p_end] = chars_read; p_line[p_end] = savebuf (buf, chars_read); if (chars_read && ! 
p_line[p_end]) { p_end--; return -1; } if (p_end != p_ptrn_lines + 1) { ptrn_spaces_eaten |= (repl_beginning != 0); some_context = true; context++; if (repl_beginning) repl_copiable++; else ptrn_copiable++; p_Char[p_end] = ' '; } break; case ' ': s = buf + 1; chars_read--; if (*s == '\n' && canonicalize) { strcpy (s, "\n"); chars_read = 2; } if (*s == ' ' || *s == '\t') { s++; chars_read--; } else if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } some_context = true; context++; if (repl_beginning) repl_copiable++; else ptrn_copiable++; chars_read -= (1 < chars_read && p_end == (repl_beginning ? p_max : p_ptrn_lines) && incomplete_line ()); p_len[p_end] = chars_read; p_line[p_end] = savebuf (s, chars_read); if (chars_read && ! p_line[p_end]) { p_end--; return -1; } break; default: if (repl_beginning && repl_could_be_missing) { repl_missing = true; goto hunk_done; } malformed (); } } hunk_done: if (p_end >=0 && !repl_beginning) fatal ("no '---' found in patch at line %s", format_linenum (numbuf0, pch_hunk_beg ())); if (repl_missing) { /* reset state back to just after --- */ p_input_line = repl_patch_line; context = repl_context; for (p_end--; p_end > repl_beginning; p_end--) free(p_line[p_end]); Fseek (pfp, repl_backtrack_position, SEEK_SET); /* redundant 'new' context lines were omitted - set */ /* up to fill them in from the old file context */ fillsrc = 1; filldst = repl_beginning+1; fillcnt = p_repl_lines; p_end = p_max; } else if (! ptrn_missing && ptrn_copiable != repl_copiable) fatal ("context mangled in hunk at line %s", format_linenum (numbuf0, p_hunk_beg)); else if (!some_context && fillcnt == 1) { /* the first hunk was a null hunk with no context */ /* and we were expecting one line -- fix it up. */ while (filldst < p_end) { p_line[filldst] = p_line[filldst+1]; p_Char[filldst] = p_Char[filldst+1]; p_len[filldst] = p_len[filldst+1]; filldst++; } #if 0 repl_beginning--; /* this doesn't need to be fixed */ #endif p_end--; p_first++; /* do append rather than insert */ fillcnt = 0; p_ptrn_lines = 0; } p_prefix_context = ((repl_prefix_context == -1 || (ptrn_prefix_context != -1 && ptrn_prefix_context < repl_prefix_context)) ? ptrn_prefix_context : repl_prefix_context); p_suffix_context = ((ptrn_suffix_context != -1 && ptrn_suffix_context < context) ? 
ptrn_suffix_context : context); if (p_prefix_context == -1 || p_suffix_context == -1) fatal ("replacement text or line numbers mangled in hunk at line %s", format_linenum (numbuf0, p_hunk_beg)); if (difftype == CONTEXT_DIFF && (fillcnt || (p_first > 1 && p_prefix_context + p_suffix_context < ptrn_copiable))) { if (verbosity == VERBOSE) say ("%s\n%s\n%s\n", "(Fascinating -- this is really a new-style context diff but without", "the telltale extra asterisks on the *** line that usually indicate", "the new style...)"); diff_type = difftype = NEW_CONTEXT_DIFF; } /* if there were omitted context lines, fill them in now */ if (fillcnt) { p_bfake = filldst; /* remember where not to free() */ p_efake = filldst + fillcnt - 1; while (fillcnt-- > 0) { while (fillsrc <= p_end && fillsrc != repl_beginning && p_Char[fillsrc] != ' ') fillsrc++; if (p_end < fillsrc || fillsrc == repl_beginning) { fatal ("replacement text or line numbers mangled in hunk at line %s", format_linenum (numbuf0, p_hunk_beg)); } p_line[filldst] = p_line[fillsrc]; p_Char[filldst] = p_Char[fillsrc]; p_len[filldst] = p_len[fillsrc]; fillsrc++; filldst++; } while (fillsrc <= p_end && fillsrc != repl_beginning) { if (p_Char[fillsrc] == ' ') fatal ("replacement text or line numbers mangled in hunk at line %s", format_linenum (numbuf0, p_hunk_beg)); fillsrc++; } if (debug & 64) printf ("fillsrc %s, filldst %s, rb %s, e+1 %s\n", format_linenum (numbuf0, fillsrc), format_linenum (numbuf1, filldst), format_linenum (numbuf2, repl_beginning), format_linenum (numbuf3, p_end + 1)); assert(fillsrc==p_end+1 || fillsrc==repl_beginning); assert(filldst==p_end+1 || filldst==repl_beginning); } } else if (difftype == UNI_DIFF) { file_offset line_beginning = file_tell (pfp); /* file pos of the current line */ lin fillsrc; /* index of old lines */ lin filldst; /* index of new lines */ char ch = '\0'; chars_read = get_line (); if (chars_read == (size_t) -1 || chars_read <= 4 || strncmp (buf, "@@ -", 4) != 0) { next_intuit_at(line_beginning,p_input_line); return chars_read == (size_t) -1 ? -1 : 0; } s = scan_linenum (buf + 4, &p_first); if (*s == ',') s = scan_linenum (s + 1, &p_ptrn_lines); else p_ptrn_lines = 1; if (*s == ' ') s++; if (*s != '+') malformed (); s = scan_linenum (s + 1, &p_newfirst); if (*s == ',') s = scan_linenum (s + 1, &p_repl_lines); else p_repl_lines = 1; if (*s == ' ') s++; if (*s++ != '@') malformed (); if (*s++ == '@' && *s == ' ') { p_c_function = s; while (*s != '\n') s++; *s = '\0'; p_c_function = savestr (p_c_function); if (! p_c_function) return -1; } if (!p_ptrn_lines) p_first++; /* do append rather than insert */ if (!p_repl_lines) p_newfirst++; p_max = p_ptrn_lines + p_repl_lines + 1; while (p_max + 1 >= hunkmax) if (! grow_hunkmax ()) return -1; fillsrc = 1; filldst = fillsrc + p_ptrn_lines; p_end = filldst + p_repl_lines; sprintf (buf, "*** %s,%s ****\n", format_linenum (numbuf0, p_first), format_linenum (numbuf1, p_first + p_ptrn_lines - 1)); p_len[0] = strlen (buf); if (! (p_line[0] = savestr (buf))) { p_end = -1; return -1; } p_Char[0] = '*'; sprintf (buf, "--- %s,%s ----\n", format_linenum (numbuf0, p_newfirst), format_linenum (numbuf1, p_newfirst + p_repl_lines - 1)); p_len[filldst] = strlen (buf); if (! 
(p_line[filldst] = savestr (buf))) { p_end = 0; return -1; } p_Char[filldst++] = '='; p_prefix_context = -1; p_hunk_beg = p_input_line + 1; while (fillsrc <= p_ptrn_lines || filldst <= p_end) { chars_read = get_line (); if (!chars_read) { if (p_max - filldst < 3) { strcpy (buf, " \n"); /* assume blank lines got chopped */ chars_read = 2; } else { fatal ("unexpected end of file in patch"); } } if (chars_read == (size_t) -1) s = 0; else if (*buf == '\t' || *buf == '\n') { ch = ' '; /* assume the space got eaten */ s = savebuf (buf, chars_read); } else { ch = *buf; s = savebuf (buf+1, --chars_read); } if (chars_read && ! s) { while (--filldst > p_ptrn_lines) free(p_line[filldst]); p_end = fillsrc-1; return -1; } switch (ch) { case '-': if (fillsrc > p_ptrn_lines) { free(s); p_end = filldst-1; malformed (); } chars_read -= fillsrc == p_ptrn_lines && incomplete_line (); p_Char[fillsrc] = ch; p_line[fillsrc] = s; p_len[fillsrc++] = chars_read; break; case '=': ch = ' '; /* FALL THROUGH */ case ' ': if (fillsrc > p_ptrn_lines) { free(s); while (--filldst > p_ptrn_lines) free(p_line[filldst]); p_end = fillsrc-1; malformed (); } context++; chars_read -= fillsrc == p_ptrn_lines && incomplete_line (); p_Char[fillsrc] = ch; p_line[fillsrc] = s; p_len[fillsrc++] = chars_read; s = savebuf (s, chars_read); if (chars_read && ! s) { while (--filldst > p_ptrn_lines) free(p_line[filldst]); p_end = fillsrc-1; return -1; } /* FALL THROUGH */ case '+': if (filldst > p_end) { free(s); while (--filldst > p_ptrn_lines) free(p_line[filldst]); p_end = fillsrc-1; malformed (); } chars_read -= filldst == p_end && incomplete_line (); p_Char[filldst] = ch; p_line[filldst] = s; p_len[filldst++] = chars_read; break; default: p_end = filldst; malformed (); } if (ch != ' ') { if (p_prefix_context == -1) p_prefix_context = context; context = 0; } }/* while */ if (p_prefix_context == -1) malformed (); p_suffix_context = context; } else { /* normal diff--fake it up */ char hunk_type; int i; lin min, max; file_offset line_beginning = file_tell (pfp); p_prefix_context = p_suffix_context = 0; chars_read = get_line (); if (chars_read == (size_t) -1 || !chars_read || !ISDIGIT (*buf)) { next_intuit_at(line_beginning,p_input_line); return chars_read == (size_t) -1 ? -1 : 0; } s = scan_linenum (buf, &p_first); if (*s == ',') { s = scan_linenum (s + 1, &p_ptrn_lines); p_ptrn_lines += 1 - p_first; } else p_ptrn_lines = (*s != 'a'); hunk_type = *s; if (hunk_type == 'a') p_first++; /* do append rather than insert */ s = scan_linenum (s + 1, &min); if (*s == ',') scan_linenum (s + 1, &max); else max = min; if (hunk_type == 'd') min++; p_end = p_ptrn_lines + 1 + max - min + 1; while (p_end + 1 >= hunkmax) if (! grow_hunkmax ()) { p_end = -1; return -1; } p_newfirst = min; p_repl_lines = max - min + 1; sprintf (buf, "*** %s,%s\n", format_linenum (numbuf0, p_first), format_linenum (numbuf1, p_first + p_ptrn_lines - 1)); p_len[0] = strlen (buf); if (! (p_line[0] = savestr (buf))) { p_end = -1; return -1; } p_Char[0] = '*'; for (i=1; i<=p_ptrn_lines; i++) { chars_read = get_line (); if (chars_read == (size_t) -1) { p_end = i - 1; return -1; } if (!chars_read) fatal ("unexpected end of file in patch at line %s", format_linenum (numbuf0, p_input_line)); if (buf[0] != '<' || (buf[1] != ' ' && buf[1] != '\t')) fatal ("'<' expected at line %s of patch", format_linenum (numbuf0, p_input_line)); chars_read -= 2 + (i == p_ptrn_lines && incomplete_line ()); p_len[i] = chars_read; p_line[i] = savebuf (buf + 2, chars_read); if (chars_read && ! 
p_line[i]) { p_end = i-1; return -1; } p_Char[i] = '-'; } if (hunk_type == 'c') { chars_read = get_line (); if (chars_read == (size_t) -1) { p_end = i - 1; return -1; } if (! chars_read) fatal ("unexpected end of file in patch at line %s", format_linenum (numbuf0, p_input_line)); if (*buf != '-') fatal ("'---' expected at line %s of patch", format_linenum (numbuf0, p_input_line)); } sprintf (buf, "--- %s,%s\n", format_linenum (numbuf0, min), format_linenum (numbuf1, max)); p_len[i] = strlen (buf); if (! (p_line[i] = savestr (buf))) { p_end = i-1; return -1; } p_Char[i] = '='; for (i++; i<=p_end; i++) { chars_read = get_line (); if (chars_read == (size_t) -1) { p_end = i - 1; return -1; } if (!chars_read) fatal ("unexpected end of file in patch at line %s", format_linenum (numbuf0, p_input_line)); if (buf[0] != '>' || (buf[1] != ' ' && buf[1] != '\t')) fatal ("'>' expected at line %s of patch", format_linenum (numbuf0, p_input_line)); chars_read -= 2 + (i == p_end && incomplete_line ()); p_len[i] = chars_read; p_line[i] = savebuf (buf + 2, chars_read); if (chars_read && ! p_line[i]) { p_end = i-1; return -1; } p_Char[i] = '+'; } } if (rev) /* backwards patch? */ if (!pch_swap()) say ("Not enough memory to swap next hunk!\n"); assert (p_end + 1 < hunkmax); p_Char[p_end + 1] = '^'; /* add a stopper for apply_hunk */ if (debug & 2) { lin i; for (i = 0; i <= p_end + 1; i++) { fprintf (stderr, "%s %c", format_linenum (numbuf0, i), p_Char[i]); if (p_Char[i] == '*') fprintf (stderr, " %s,%s\n", format_linenum (numbuf0, p_first), format_linenum (numbuf1, p_ptrn_lines)); else if (p_Char[i] == '=') fprintf (stderr, " %s,%s\n", format_linenum (numbuf0, p_newfirst), format_linenum (numbuf1, p_repl_lines)); else if (p_Char[i] != '^') { fputs(" |", stderr); pch_write_line (i, stderr); } else fputc('\n', stderr); } fflush (stderr); } return 1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-59'], 'message': 'Add line number overflow checking * bootstrap.conf: use intprops module. * src/common.h: Define LINENUM_MIN and LINENUM_MAX macros. * src/pch.c (another_hunk): Add line number overflow checking. Based on Robert C. Seacord's INT32-C document for integer overflow checking and Tobias Stoeckmann's "integer overflows and oob memory access" patch for FreeBSD.'</s>
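The remediation described in the message above is classic INT32-C style overflow checking on line-number arithmetic. A minimal sketch of that check follows; the lin typedef and LINENUM_MIN/LINENUM_MAX macros mirror names mentioned in the message, but the helper itself is illustrative rather than the actual patch code.

    #include <limits.h>
    #include <stdbool.h>

    typedef long lin;                 /* line-number type, as in the message */
    #define LINENUM_MAX LONG_MAX
    #define LINENUM_MIN LONG_MIN

    /* Store a + b in *r and return true only when the sum cannot overflow. */
    static bool linenum_add_ok(lin a, lin b, lin *r)
    {
        if ((b > 0 && a > LINENUM_MAX - b) ||
            (b < 0 && a < LINENUM_MIN - b))
            return false;             /* caller should report a malformed hunk */
        *r = a + b;
        return true;
    }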
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int ip_forward(struct sk_buff *skb) { u32 mtu; struct iphdr *iph; /* Our header */ struct rtable *rt; /* Route we use */ struct ip_options *opt = &(IPCB(skb)->opt); /* that should never happen */ if (skb->pkt_type != PACKET_HOST) goto drop; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) goto drop; if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb)) return NET_RX_SUCCESS; skb_forward_csum(skb); /* * According to the RFC, we must first decrease the TTL field. If * that reaches zero, we must reply an ICMP control message telling * that the packet's lifetime expired. */ if (ip_hdr(skb)->ttl <= 1) goto too_many_hops; if (!xfrm4_route_forward(skb)) goto drop; rt = skb_rtable(skb); if (opt->is_strictroute && rt->rt_uses_gateway) goto sr_failed; IPCB(skb)->flags |= IPSKB_FORWARDED; mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) { IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); goto drop; } /* We are about to mangle packet. Copy it! */ if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len)) goto drop; iph = ip_hdr(skb); /* Decrease ttl after skb cow done */ ip_decrease_ttl(iph); /* * We now generate an ICMP HOST REDIRECT giving the route * we calculated. */ if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb)) ip_rt_send_redirect(skb); skb->priority = rt_tos2priority(iph->tos); return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, rt->dst.dev, ip_forward_finish); sr_failed: /* * Strict routing permits no gatewaying */ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0); goto drop; too_many_hops: /* Tell the sender its packet died... */ IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS); icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); drop: kfree_skb(skb); return NET_RX_DROP; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'ipv4: try to cache dst_entries which would cause a redirect Not caching dst_entries which cause redirects could be exploited by hosts on the same subnet, causing a severe DoS attack. This effect aggravated since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()"). Lookups causing redirects will be allocated with DST_NOCACHE set which will force dst_release to free them via RCU. Unfortunately waiting for RCU grace period just takes too long, we can end up with >1M dst_entries waiting to be released and the system will run OOM. rcuos threads cannot catch up under high softirq load. Attaching the flag to emit a redirect later on to the specific skb allows us to cache those dst_entries thus reducing the pressure on allocation and deallocation. This issue was discovered by Marcelo Leitner. Cc: Julian Anastasov <[email protected]> Signed-off-by: Marcelo Leitner <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Julian Anastasov <[email protected]> Signed-off-by: David S. Miller <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int rt_fill_info(struct net *net, __be32 dst, __be32 src, struct flowi4 *fl4, struct sk_buff *skb, u32 portid, u32 seq, int event, int nowait, unsigned int flags) { struct rtable *rt = skb_rtable(skb); struct rtmsg *r; struct nlmsghdr *nlh; unsigned long expires = 0; u32 error; u32 metrics[RTAX_MAX]; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); if (nlh == NULL) return -EMSGSIZE; r = nlmsg_data(nlh); r->rtm_family = AF_INET; r->rtm_dst_len = 32; r->rtm_src_len = 0; r->rtm_tos = fl4->flowi4_tos; r->rtm_table = RT_TABLE_MAIN; if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN)) goto nla_put_failure; r->rtm_type = rt->rt_type; r->rtm_scope = RT_SCOPE_UNIVERSE; r->rtm_protocol = RTPROT_UNSPEC; r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; if (rt->rt_flags & RTCF_NOTIFY) r->rtm_flags |= RTM_F_NOTIFY; if (nla_put_be32(skb, RTA_DST, dst)) goto nla_put_failure; if (src) { r->rtm_src_len = 32; if (nla_put_be32(skb, RTA_SRC, src)) goto nla_put_failure; } if (rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) goto nla_put_failure; #ifdef CONFIG_IP_ROUTE_CLASSID if (rt->dst.tclassid && nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) goto nla_put_failure; #endif if (!rt_is_input_route(rt) && fl4->saddr != src) { if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr)) goto nla_put_failure; } if (rt->rt_uses_gateway && nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway)) goto nla_put_failure; expires = rt->dst.expires; if (expires) { unsigned long now = jiffies; if (time_before(now, expires)) expires -= now; else expires = 0; } memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); if (rt->rt_pmtu && expires) metrics[RTAX_MTU - 1] = rt->rt_pmtu; if (rtnetlink_put_metrics(skb, metrics) < 0) goto nla_put_failure; if (fl4->flowi4_mark && nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) goto nla_put_failure; error = rt->dst.error; if (rt_is_input_route(rt)) { #ifdef CONFIG_IP_MROUTE if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, r, nowait); if (err <= 0) { if (!nowait) { if (err == 0) return 0; goto nla_put_failure; } else { if (err == -EMSGSIZE) goto nla_put_failure; error = err; } } } else #endif if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex)) goto nla_put_failure; } if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'ipv4: try to cache dst_entries which would cause a redirect Not caching dst_entries which cause redirects could be exploited by hosts on the same subnet, causing a severe DoS attack. This effect aggravated since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()"). Lookups causing redirects will be allocated with DST_NOCACHE set which will force dst_release to free them via RCU. Unfortunately waiting for RCU grace period just takes too long, we can end up with >1M dst_entries waiting to be released and the system will run OOM. rcuos threads cannot catch up under high softirq load. 
Attaching the flag to emit a redirect later on to the specific skb allows us to cache those dst_entries thus reducing the pressure on allocation and deallocation. This issue was discovered by Marcelo Leitner. Cc: Julian Anastasov <[email protected]> Signed-off-by: Marcelo Leitner <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Julian Anastasov <[email protected]> Signed-off-by: David S. Miller <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: open_patch_file (char const *filename) { file_offset file_pos = 0; file_offset pos; struct stat st; if (!filename || !*filename || strEQ (filename, "-")) pfp = stdin; else { pfp = fopen (filename, binary_transput ? "rb" : "r"); if (!pfp) pfatal ("Can't open patch file %s", quotearg (filename)); } #if HAVE_SETMODE_DOS if (binary_transput) { if (isatty (fileno (pfp))) fatal ("cannot read binary data from tty on this platform"); setmode (fileno (pfp), O_BINARY); } #endif if (fstat (fileno (pfp), &st) != 0) pfatal ("fstat"); if (S_ISREG (st.st_mode) && (pos = file_tell (pfp)) != -1) file_pos = pos; else { size_t charsread; int fd = make_tempfile (&TMPPATNAME, 'p', NULL, O_RDWR | O_BINARY, 0); FILE *read_pfp = pfp; TMPPATNAME_needs_removal = true; pfp = fdopen (fd, "w+b"); if (! pfp) pfatal ("Can't open stream for file %s", quotearg (TMPPATNAME)); for (st.st_size = 0; (charsread = fread (buf, 1, bufsize, read_pfp)) != 0; st.st_size += charsread) if (fwrite (buf, 1, charsread, pfp) != charsread) write_fatal (); if (ferror (read_pfp) || fclose (read_pfp) != 0) read_fatal (); if (fflush (pfp) != 0 || file_seek (pfp, (file_offset) 0, SEEK_SET) != 0) write_fatal (); } p_filesize = st.st_size; if (p_filesize != (file_offset) p_filesize) fatal ("patch file is too long"); next_intuit_at (file_pos, 1); set_hunkmax(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Fail when out of memory in set_hunkmax() src/pch.c (another_hunk): Call set_hunkmax() from here to make sure it is called even when falling back from plan A to plan B. (open_patch_file): No need to call set_hunkmax() anymore. src/pch.c (set_hunkmax): Fail when out of memory. Make static. src/pch.h: Remove set_hunkmax() prototype.'</s>
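The CWE-399 fix above comes down to refusing to continue after a failed allocation instead of silently proceeding with a NULL hunk table. A generic fail-fast allocation wrapper sketching that behavior (not the actual GNU patch code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate or terminate: never hand a NULL hunk table back to the parser. */
    static void *xmalloc_or_die(size_t n)
    {
        void *p = malloc(n);
        if (p == NULL) {
            fprintf(stderr, "out of memory\n");
            exit(EXIT_FAILURE);
        }
        return p;
    }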
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static inline int process_nested_data(UNSERIALIZE_PARAMETER, HashTable *ht, long elements, int objprops) { while (elements-- > 0) { zval *key, *data, **old_data; ALLOC_INIT_ZVAL(key); if (!php_var_unserialize(&key, p, max, NULL TSRMLS_CC)) { zval_dtor(key); FREE_ZVAL(key); return 0; } if (Z_TYPE_P(key) != IS_LONG && Z_TYPE_P(key) != IS_STRING) { zval_dtor(key); FREE_ZVAL(key); return 0; } ALLOC_INIT_ZVAL(data); if (!php_var_unserialize(&data, p, max, var_hash TSRMLS_CC)) { zval_dtor(key); FREE_ZVAL(key); zval_dtor(data); FREE_ZVAL(data); return 0; } if (!objprops) { switch (Z_TYPE_P(key)) { case IS_LONG: if (zend_hash_index_find(ht, Z_LVAL_P(key), (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_index_update(ht, Z_LVAL_P(key), &data, sizeof(data), NULL); break; case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_symtable_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof(data), NULL); break; } } else { /* object properties should include no integers */ convert_to_string(key); if (zend_symtable_find(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, (void **)&old_data)==SUCCESS) { var_push_dtor(var_hash, old_data); } zend_hash_update(ht, Z_STRVAL_P(key), Z_STRLEN_P(key) + 1, &data, sizeof data, NULL); } zval_dtor(key); FREE_ZVAL(key); if (elements && *(*p-1) != ';' && *(*p-1) != '}') { (*p)--; return 0; } } return 1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-94'], 'message': 'Fix for bug #68710 (Use After Free Vulnerability in PHP's unserialize())'</s>
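The use-after-free fix referenced above defers destruction of overwritten hash entries until deserialization has finished, because earlier back-references may still point at them. A small illustrative deferred-destruction list in plain C; the types and names are assumptions, not the PHP engine's API.

    #include <stdlib.h>

    struct defer_list { void **items; size_t count, cap; };

    /* Queue an overwritten value instead of freeing it immediately. */
    static void defer_free(struct defer_list *d, void *old)
    {
        if (d->count == d->cap) {
            size_t cap = d->cap ? d->cap * 2 : 8;
            void **items = realloc(d->items, cap * sizeof(*items));
            if (items == NULL)
                abort();               /* out of memory */
            d->items = items;
            d->cap = cap;
        }
        d->items[d->count++] = old;    /* keep the old value alive for now */
    }

    /* Only after decoding is complete is it safe to destroy everything. */
    static void defer_release_all(struct defer_list *d)
    {
        for (size_t i = 0; i < d->count; i++)
            free(d->items[i]);
        free(d->items);
        d->items = NULL;
        d->count = d->cap = 0;
    }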
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void *_zend_shared_memdup(void *source, size_t size, zend_bool free_source TSRMLS_DC) { void **old_p, *retval; if (zend_hash_index_find(&xlat_table, (ulong)source, (void **)&old_p) == SUCCESS) { /* we already duplicated this pointer */ return *old_p; } retval = ZCG(mem);; ZCG(mem) = (void*)(((char*)ZCG(mem)) + ZEND_ALIGNED_SIZE(size)); memcpy(retval, source, size); if (free_source) { interned_efree((char*)source); } zend_shared_alloc_register_xlat_entry(source, retval); return retval; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'Fixed bug #68677 (Use After Free in OPcache) (cherry picked from commit 777c39f4042327eac4b63c7ee87dc1c7a09a3115)'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: PHPAPI void php_pcre_match_impl(pcre_cache_entry *pce, char *subject, int subject_len, zval *return_value, zval *subpats, int global, int use_flags, long flags, long start_offset TSRMLS_DC) { zval *result_set, /* Holds a set of subpatterns after a global match */ **match_sets = NULL; /* An array of sets of matches for each subpattern after a global match */ pcre_extra *extra = pce->extra;/* Holds results of studying */ pcre_extra extra_data; /* Used locally for exec options */ int exoptions = 0; /* Execution options */ int count = 0; /* Count of matched subpatterns */ int *offsets; /* Array of subpattern offsets */ int num_subpats; /* Number of captured subpatterns */ int size_offsets; /* Size of the offsets array */ int matched; /* Has anything matched */ int g_notempty = 0; /* If the match should not be empty */ const char **stringlist; /* Holds list of subpatterns */ char **subpat_names; /* Array for named subpatterns */ int i, rc; int subpats_order; /* Order of subpattern matches */ int offset_capture; /* Capture match offsets: yes/no */ /* Overwrite the passed-in value for subpatterns with an empty array. */ if (subpats != NULL) { zval_dtor(subpats); array_init(subpats); } subpats_order = global ? PREG_PATTERN_ORDER : 0; if (use_flags) { offset_capture = flags & PREG_OFFSET_CAPTURE; /* * subpats_order is pre-set to pattern mode so we change it only if * necessary. */ if (flags & 0xff) { subpats_order = flags & 0xff; } if ((global && (subpats_order < PREG_PATTERN_ORDER || subpats_order > PREG_SET_ORDER)) || (!global && subpats_order != 0)) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid flags specified"); return; } } else { offset_capture = 0; } /* Negative offset counts from the end of the string. */ if (start_offset < 0) { start_offset = subject_len + start_offset; if (start_offset < 0) { start_offset = 0; } } if (extra == NULL) { extra_data.flags = PCRE_EXTRA_MATCH_LIMIT | PCRE_EXTRA_MATCH_LIMIT_RECURSION; extra = &extra_data; } extra->match_limit = PCRE_G(backtrack_limit); extra->match_limit_recursion = PCRE_G(recursion_limit); /* Calculate the size of the offsets array, and allocate memory for it. */ rc = pcre_fullinfo(pce->re, extra, PCRE_INFO_CAPTURECOUNT, &num_subpats); if (rc < 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Internal pcre_fullinfo() error %d", rc); RETURN_FALSE; } num_subpats++; size_offsets = num_subpats * 3; /* * Build a mapping from subpattern numbers to their names. We will always * allocate the table, even though there may be no named subpatterns. This * avoids somewhat more complicated logic in the inner loops. */ subpat_names = make_subpats_table(num_subpats, pce TSRMLS_CC); if (!subpat_names) { RETURN_FALSE; } offsets = (int *)safe_emalloc(size_offsets, sizeof(int), 0); /* Allocate match sets array and initialize the values. */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { match_sets = (zval **)safe_emalloc(num_subpats, sizeof(zval *), 0); for (i=0; i<num_subpats; i++) { ALLOC_ZVAL(match_sets[i]); array_init(match_sets[i]); INIT_PZVAL(match_sets[i]); } } matched = 0; PCRE_G(error_code) = PHP_PCRE_NO_ERROR; do { /* Execute the regular expression. 
*/ count = pcre_exec(pce->re, extra, subject, subject_len, start_offset, exoptions|g_notempty, offsets, size_offsets); /* the string was already proved to be valid UTF-8 */ exoptions |= PCRE_NO_UTF8_CHECK; /* Check for too many substrings condition. */ if (count == 0) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Matched, but too many substrings"); count = size_offsets/3; } /* If something has matched */ if (count > 0) { matched++; /* If subpatterns array has been passed, fill it in with values. */ if (subpats != NULL) { /* Try to get the list of substrings and display a warning if failed. */ if (pcre_get_substring_list(subject, offsets, count, &stringlist) < 0) { efree(subpat_names); efree(offsets); if (match_sets) efree(match_sets); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Get subpatterns list failed"); RETURN_FALSE; } if (global) { /* global pattern matching */ if (subpats && subpats_order == PREG_PATTERN_ORDER) { /* For each subpattern, insert it into the appropriate array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], NULL); } else { add_next_index_stringl(match_sets[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* * If the number of captured subpatterns on this run is * less than the total possible number, pad the result * arrays with empty strings. */ if (count < num_subpats) { for (; i < num_subpats; i++) { add_next_index_string(match_sets[i], "", 1); } } } else { /* Allocate the result set array */ ALLOC_ZVAL(result_set); array_init(result_set); INIT_PZVAL(result_set); /* Add all the subpatterns to it */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(result_set, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(result_set, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } /* And add it to the output array */ zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &result_set, sizeof(zval *), NULL); } } else { /* single pattern matching */ /* For each subpattern, insert it into the subpatterns array. */ for (i = 0; i < count; i++) { if (offset_capture) { add_offset_pair(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], offsets[i<<1], subpat_names[i]); } else { if (subpat_names[i]) { add_assoc_stringl(subpats, subpat_names[i], (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } add_next_index_stringl(subpats, (char *)stringlist[i], offsets[(i<<1)+1] - offsets[i<<1], 1); } } } pcre_free((void *) stringlist); } } else if (count == PCRE_ERROR_NOMATCH) { /* If we previously set PCRE_NOTEMPTY after a null match, this is not necessarily the end. We need to advance the start offset, and continue. Fudge the offset values to achieve this, unless we're already at the end of the string. */ if (g_notempty != 0 && start_offset < subject_len) { offsets[0] = start_offset; offsets[1] = start_offset + 1; } else break; } else { pcre_handle_exec_error(count TSRMLS_CC); break; } /* If we have matched an empty string, mimic what Perl's /g options does. This turns out to be rather cunning. First we set PCRE_NOTEMPTY and try the match again at the same point. If this fails (picked up above) we advance to the next character. */ g_notempty = (offsets[1] == offsets[0])? 
PCRE_NOTEMPTY | PCRE_ANCHORED : 0; /* Advance to the position right after the last full match */ start_offset = offsets[1]; } while (global); /* Add the match sets to the output array and clean up */ if (global && subpats && subpats_order == PREG_PATTERN_ORDER) { for (i = 0; i < num_subpats; i++) { if (subpat_names[i]) { zend_hash_update(Z_ARRVAL_P(subpats), subpat_names[i], strlen(subpat_names[i])+1, &match_sets[i], sizeof(zval *), NULL); Z_ADDREF_P(match_sets[i]); } zend_hash_next_index_insert(Z_ARRVAL_P(subpats), &match_sets[i], sizeof(zval *), NULL); } efree(match_sets); } efree(offsets); efree(subpat_names); /* Did we encounter an error? */ if (PCRE_G(error_code) == PHP_PCRE_NO_ERROR) { RETVAL_LONG(matched); } else { RETVAL_FALSE; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'Initialize the offset table - PCRE may sometimes miss offsets'</s>
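The fix message above ("Initialize the offset table") reduces to making sure the offsets vector handed to pcre_exec() never contains uninitialized values, since some paths leave entries untouched. A self-contained sketch of that allocation, using plain calloc here rather than PHP's allocator:

    #include <stdlib.h>

    /* Allocate a zeroed offsets vector: 3 ints per capturing subpattern. */
    static int *alloc_offsets(int num_subpats)
    {
        return calloc((size_t) num_subpats * 3, sizeof(int));
    }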
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: */ private int mconvert(struct magic_set *ms, struct magic *m, int flip) { union VALUETYPE *p = &ms->ms_value; uint8_t type; switch (type = cvt_flip(m->type, flip)) { case FILE_BYTE: cvt_8(p, m); return 1; case FILE_SHORT: cvt_16(p, m); return 1; case FILE_LONG: case FILE_DATE: case FILE_LDATE: cvt_32(p, m); return 1; case FILE_QUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: cvt_64(p, m); return 1; case FILE_STRING: case FILE_BESTRING16: case FILE_LESTRING16: { /* Null terminate and eat *trailing* return */ p->s[sizeof(p->s) - 1] = '\0'; return 1; } case FILE_PSTRING: { size_t sz = file_pstring_length_size(m); char *ptr1 = p->s, *ptr2 = ptr1 + sz; size_t len = file_pstring_get_length(m, ptr1); if (len >= sizeof(p->s)) { /* * The size of the pascal string length (sz) * is 1, 2, or 4. We need at least 1 byte for NUL * termination, but we've already truncated the * string by p->s, so we need to deduct sz. */ len = sizeof(p->s) - sz; } while (len--) *ptr1++ = *ptr2++; *ptr1 = '\0'; return 1; } case FILE_BESHORT: p->h = (short)((p->hs[0]<<8)|(p->hs[1])); cvt_16(p, m); return 1; case FILE_BELONG: case FILE_BEDATE: case FILE_BELDATE: p->l = (int32_t) ((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3])); if (type == FILE_BELONG) cvt_32(p, m); return 1; case FILE_BEQUAD: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7])); if (type == FILE_BEQUAD) cvt_64(p, m); return 1; case FILE_LESHORT: p->h = (short)((p->hs[1]<<8)|(p->hs[0])); cvt_16(p, m); return 1; case FILE_LELONG: case FILE_LEDATE: case FILE_LELDATE: p->l = (int32_t) ((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0])); if (type == FILE_LELONG) cvt_32(p, m); return 1; case FILE_LEQUAD: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0])); if (type == FILE_LEQUAD) cvt_64(p, m); return 1; case FILE_MELONG: case FILE_MEDATE: case FILE_MELDATE: p->l = (int32_t) ((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2])); if (type == FILE_MELONG) cvt_32(p, m); return 1; case FILE_FLOAT: cvt_float(p, m); return 1; case FILE_BEFLOAT: p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)| ((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]); cvt_float(p, m); return 1; case FILE_LEFLOAT: p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)| ((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]); cvt_float(p, m); return 1; case FILE_DOUBLE: cvt_double(p, m); return 1; case FILE_BEDOUBLE: p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]); cvt_double(p, m); return 1; case FILE_LEDOUBLE: p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]); cvt_double(p, m); return 1; case 
FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_CLEAR: case FILE_NAME: case FILE_USE: return 1; default: file_magerror(ms, "invalid type %d in mconvert()", m->type); return 0; ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'PR/398: Correctly truncate pascal strings (fixes out of bounds read of 1, 2, or 4 bytes).'</s>
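The CWE-119/787 fix above truncates a Pascal-style string so that its declared length can never drive a read or write past the destination buffer. A simplified, self-contained version of that clamping (assuming bufsz > sz; this is an illustration, not the file(1) source itself):

    #include <stddef.h>
    #include <string.h>

    /* Copy a Pascal-style string whose length prefix occupies sz bytes,
     * working in place inside a buffer of bufsz bytes, clamping len so
     * neither the read nor the NUL-terminating write can go out of bounds. */
    static void pstring_copy(char *buf, size_t bufsz, size_t sz, size_t len)
    {
        if (len > bufsz - sz - 1)
            len = bufsz - sz - 1;          /* truncate, as the fix above does */
        memmove(buf, buf + sz, len);       /* payload starts after the prefix */
        buf[len] = '\0';
    }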
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GetCode_(gdIOCtx *fd, CODE_STATIC_DATA *scd, int code_size, int flag, int *ZeroDataBlockP) { int i, j, ret; unsigned char count; if (flag) { scd->curbit = 0; scd->lastbit = 0; scd->last_byte = 0; scd->done = FALSE; return 0; } if ( (scd->curbit + code_size) >= scd->lastbit) { if (scd->done) { if (scd->curbit >= scd->lastbit) { /* Oh well */ } return -1; } scd->buf[0] = scd->buf[scd->last_byte-2]; scd->buf[1] = scd->buf[scd->last_byte-1]; if ((count = GetDataBlock(fd, &scd->buf[2], ZeroDataBlockP)) <= 0) scd->done = TRUE; scd->last_byte = 2 + count; scd->curbit = (scd->curbit - scd->lastbit) + 16; scd->lastbit = (2+count)*8 ; } ret = 0; for (i = scd->curbit, j = 0; j < code_size; ++i, ++j) ret |= ((scd->buf[ i / 8 ] & (1 << (i % 8))) != 0) << j; scd->curbit += code_size; return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'Fix bug #68601 buffer read overflow in gd_gif_in.c'</s>
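The gd GIF flaw in the example above is an out-of-bounds read in the bit-extraction loop when fewer bytes were buffered than the requested code size needs. A bounds-checked variant of that loop, written as an independent sketch rather than the library's actual fix:

    /* Extract code_size bits starting at curbit, refusing to read past the
     * last_byte bytes that are actually present in buf. */
    static int get_bits_checked(const unsigned char *buf, int last_byte,
                                int curbit, int code_size)
    {
        if (curbit < 0 || code_size <= 0 || curbit + code_size > last_byte * 8)
            return -1;                       /* not enough buffered data */
        int ret = 0;
        for (int j = 0; j < code_size; ++j) {
            int i = curbit + j;
            ret |= ((buf[i / 8] >> (i % 8)) & 1) << j;
        }
        return ret;
    }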
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: NUM_numpart_to_char(NUMProc *Np, int id) { int end; if (IS_ROMAN(Np->Num)) return; /* Note: in this elog() output not set '\0' in 'inout' */ #ifdef DEBUG_TO_FROM_CHAR /* * Np->num_curr is number of current item in format-picture, it is not * current position in inout! */ elog(DEBUG_elog_output, "SIGN_WROTE: %d, CURRENT: %d, NUMBER_P: \"%s\", INOUT: \"%s\"", Np->sign_wrote, Np->num_curr, Np->number_p, Np->inout); #endif Np->num_in = FALSE; /* * Write sign if real number will write to output Note: IS_PREDEC_SPACE() * handle "9.9" --> " .1" */ if (Np->sign_wrote == FALSE && (Np->num_curr >= Np->out_pre_spaces || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr)) && (IS_PREDEC_SPACE(Np) == FALSE || (Np->last_relevant && *Np->last_relevant == '.'))) { if (IS_LSIGN(Np->Num)) { if (Np->Num->lsign == NUM_LSIGN_PRE) { if (Np->sign == '-') strcpy(Np->inout_p, Np->L_negative_sign); else strcpy(Np->inout_p, Np->L_positive_sign); Np->inout_p += strlen(Np->inout_p); Np->sign_wrote = TRUE; } } else if (IS_BRACKET(Np->Num)) { *Np->inout_p = Np->sign == '+' ? ' ' : '<'; ++Np->inout_p; Np->sign_wrote = TRUE; } else if (Np->sign == '+') { if (!IS_FILLMODE(Np->Num)) { *Np->inout_p = ' '; /* Write + */ ++Np->inout_p; } Np->sign_wrote = TRUE; } else if (Np->sign == '-') { /* Write - */ *Np->inout_p = '-'; ++Np->inout_p; Np->sign_wrote = TRUE; } } /* * digits / FM / Zero / Dec. point */ if (id == NUM_9 || id == NUM_0 || id == NUM_D || id == NUM_DEC) { if (Np->num_curr < Np->out_pre_spaces && (Np->Num->zero_start > Np->num_curr || !IS_ZERO(Np->Num))) { /* * Write blank space */ if (!IS_FILLMODE(Np->Num)) { *Np->inout_p = ' '; /* Write ' ' */ ++Np->inout_p; } } else if (IS_ZERO(Np->Num) && Np->num_curr < Np->out_pre_spaces && Np->Num->zero_start <= Np->num_curr) { /* * Write ZERO */ *Np->inout_p = '0'; /* Write '0' */ ++Np->inout_p; Np->num_in = TRUE; } else { /* * Write Decimal point */ if (*Np->number_p == '.') { if (!Np->last_relevant || *Np->last_relevant != '.') { strcpy(Np->inout_p, Np->decimal); /* Write DEC/D */ Np->inout_p += strlen(Np->inout_p); } /* * Ora 'n' -- FM9.9 --> 'n.' */ else if (IS_FILLMODE(Np->Num) && Np->last_relevant && *Np->last_relevant == '.') { strcpy(Np->inout_p, Np->decimal); /* Write DEC/D */ Np->inout_p += strlen(Np->inout_p); } } else { /* * Write Digits */ if (Np->last_relevant && Np->number_p > Np->last_relevant && id != NUM_0) ; /* * '0.1' -- 9.9 --> ' .1' */ else if (IS_PREDEC_SPACE(Np)) { if (!IS_FILLMODE(Np->Num)) { *Np->inout_p = ' '; ++Np->inout_p; } /* * '0' -- FM9.9 --> '0.' */ else if (Np->last_relevant && *Np->last_relevant == '.') { *Np->inout_p = '0'; ++Np->inout_p; } } else { *Np->inout_p = *Np->number_p; /* Write DIGIT */ ++Np->inout_p; Np->num_in = TRUE; } } ++Np->number_p; } end = Np->num_count + (Np->out_pre_spaces ? 1 : 0) + (IS_DECIMAL(Np->Num) ? 1 : 0); if (Np->last_relevant && Np->last_relevant == Np->number_p) end = Np->num_curr; if (Np->num_curr + 1 == end) { if (Np->sign_wrote == TRUE && IS_BRACKET(Np->Num)) { *Np->inout_p = Np->sign == '+' ? 
' ' : '>'; ++Np->inout_p; } else if (IS_LSIGN(Np->Num) && Np->Num->lsign == NUM_LSIGN_POST) { if (Np->sign == '-') strcpy(Np->inout_p, Np->L_negative_sign); else strcpy(Np->inout_p, Np->L_positive_sign); Np->inout_p += strlen(Np->inout_p); } } } ++Np->num_curr; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-120'], 'message': 'to_char(): prevent accesses beyond the allocated buffer Previously very long field masks for floats could access memory beyond the existing buffer allocated to hold the result. Reported by Andres Freund and Peter Geoghegan. Backpatch to all supported versions. Security: CVE-2015-0241'</s>
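The CVE-2015-0241 message above concerns a formatting routine writing past its allocated output when the format picture is very long. One generic way to make every append bounded, shown as an illustration of the pattern rather than PostgreSQL's actual change (the dst/end pair is an assumed convention):

    #include <stddef.h>
    #include <string.h>

    /* Append src at *dst but never write at or beyond end (one past the
     * allocated output); assumes *dst <= end on entry.  Returns bytes copied. */
    static size_t append_bounded(char **dst, char *end, const char *src)
    {
        size_t avail = (size_t) (end - *dst);
        size_t len = strlen(src);
        if (len > avail)
            len = avail;                   /* drop what would not fit */
        memcpy(*dst, src, len);
        *dst += len;
        return len;
    }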
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf) { int res, total; uint8 *tmp; res = pullf_read(pf, len, data_p); if (res <= 0 || res == len) return res; /* read was shorter, use tmpbuf */ memcpy(tmpbuf, *data_p, res); *data_p = tmpbuf; len -= res; total = res; while (len > 0) { res = pullf_read(pf, len, &tmp); if (res < 0) { /* so the caller must clear only on success */ px_memset(tmpbuf, 0, total); return res; } if (res == 0) break; memcpy(tmpbuf + total, tmp, res); total += res; } return total; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-120'], 'message': 'Fix buffer overrun after incomplete read in pullf_read_max(). Most callers pass a stack buffer. The ensuing stack smash can crash the server, and we have not ruled out the viability of attacks that lead to privilege escalation. Back-patch to 9.0 (all supported versions). Marko Tiikkaja Security: CVE-2015-0243'</s>
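The pullf_read_max() overrun happened because the accumulation loop could copy more bytes than the caller's (often stack-resident) buffer holds. The general safe pattern is to cap the running total by the buffer capacity; a self-contained sketch with a hypothetical read callback standing in for the real pull filter:

    #include <string.h>

    /* read_some() yields up to `want` bytes via *chunk and returns the count,
     * 0 on end of input, or a negative value on error (an assumed contract). */
    typedef int (*read_some_fn)(void *src, unsigned char **chunk, size_t want);

    /* Accumulate partial reads into dst without ever exceeding its capacity. */
    static int read_up_to(void *src, read_some_fn read_some,
                          unsigned char *dst, size_t cap)
    {
        size_t total = 0;
        while (total < cap) {
            unsigned char *chunk;
            int n = read_some(src, &chunk, cap - total); /* ask only for what fits */
            if (n < 0)
                return n;
            if (n == 0)
                break;
            if ((size_t) n > cap - total)
                n = (int) (cap - total);                 /* defensive clamp */
            memcpy(dst + total, chunk, (size_t) n);
            total += (size_t) n;
        }
        return (int) total;
    }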
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: StatementCancelHandler(SIGNAL_ARGS) { int save_errno = errno; /* * Don't joggle the elbow of proc_exit */ if (!proc_exit_inprogress) { InterruptPending = true; QueryCancelPending = true; /* * If it's safe to interrupt, and we're waiting for input or a lock, * service the interrupt immediately */ if (ImmediateInterruptOK && InterruptHoldoffCount == 0 && CritSectionCount == 0) { /* bump holdoff count to make ProcessInterrupts() a no-op */ /* until we are done getting ready for it */ InterruptHoldoffCount++; LockErrorCleanup(); /* prevent CheckDeadLock from running */ DisableNotifyInterrupt(); DisableCatchupInterrupt(); InterruptHoldoffCount--; ProcessInterrupts(); } } /* If we're still here, waken anything waiting on the process latch */ SetLatch(MyLatch); errno = save_errno; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
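The protocol-desynchronization fix described above introduces a holdoff so that a cancel interrupt received while a message is being read is only acted on once the full message has been consumed. A minimal sketch of that counter-plus-flag pattern, with illustrative names rather than the actual PostgreSQL symbols:

    #include <signal.h>

    static volatile sig_atomic_t cancel_pending = 0;
    static int reading_msg = 0;      /* >0 while inside a protocol message */

    static void cancel_handler(int signo)
    {
        (void) signo;
        cancel_pending = 1;          /* note it; never abort mid-message here */
    }

    static void begin_reading_message(void) { reading_msg++; }

    static void end_reading_message(void)
    {
        if (--reading_msg == 0 && cancel_pending) {
            cancel_pending = 0;
            /* safe point: the whole message was consumed, act on the cancel */
        }
    }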
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: CopyFrom(CopyState cstate) { HeapTuple tuple; TupleDesc tupDesc; Datum *values; bool *nulls; ResultRelInfo *resultRelInfo; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ ExprContext *econtext; TupleTableSlot *myslot; MemoryContext oldcontext = CurrentMemoryContext; ErrorContextCallback errcallback; CommandId mycid = GetCurrentCommandId(true); int hi_options = 0; /* start with default heap_insert options */ BulkInsertState bistate; uint64 processed = 0; bool useHeapMultiInsert; int nBufferedTuples = 0; #define MAX_BUFFERED_TUPLES 1000 HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */ Size bufferedTuplesSize = 0; int firstBufferedLineNo = 0; Assert(cstate->rel); if (cstate->rel->rd_rel->relkind != RELKIND_RELATION) { if (cstate->rel->rd_rel->relkind == RELKIND_VIEW) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to view \"%s\"", RelationGetRelationName(cstate->rel)))); else if (cstate->rel->rd_rel->relkind == RELKIND_MATVIEW) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to materialized view \"%s\"", RelationGetRelationName(cstate->rel)))); else if (cstate->rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to foreign table \"%s\"", RelationGetRelationName(cstate->rel)))); else if (cstate->rel->rd_rel->relkind == RELKIND_SEQUENCE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to sequence \"%s\"", RelationGetRelationName(cstate->rel)))); else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cannot copy to non-table relation \"%s\"", RelationGetRelationName(cstate->rel)))); } tupDesc = RelationGetDescr(cstate->rel); /*---------- * Check to see if we can avoid writing WAL * * If archive logging/streaming is not enabled *and* either * - table was created in same transaction as this COPY * - data is being written to relfilenode created in this transaction * then we can skip writing WAL. It's safe because if the transaction * doesn't commit, we'll discard the table (or the new relfilenode file). * If it does commit, we'll have done the heap_sync at the bottom of this * routine first. * * As mentioned in comments in utils/rel.h, the in-same-transaction test * is not always set correctly, since in rare cases rd_newRelfilenodeSubid * can be cleared before the end of the transaction. The exact case is * when a relation sets a new relfilenode twice in same transaction, yet * the second one fails in an aborted subtransaction, e.g. * * BEGIN; * TRUNCATE t; * SAVEPOINT save; * TRUNCATE t; * ROLLBACK TO save; * COPY ... * * Also, if the target file is new-in-transaction, we assume that checking * FSM for free space is a waste of time, even if we must use WAL because * of archiving. This could possibly be wrong, but it's unlikely. * * The comments for heap_insert and RelationGetBufferForTuple specify that * skipping WAL logging is only safe if we ensure that our tuples do not * go into pages containing tuples from any other transactions --- but this * must be the case if we have a new table or new relfilenode, so we need * no additional work to enforce that. 
*---------- */ /* createSubid is creation check, newRelfilenodeSubid is truncation check */ if (cstate->rel->rd_createSubid != InvalidSubTransactionId || cstate->rel->rd_newRelfilenodeSubid != InvalidSubTransactionId) { hi_options |= HEAP_INSERT_SKIP_FSM; if (!XLogIsNeeded()) hi_options |= HEAP_INSERT_SKIP_WAL; } /* * Optimize if new relfilenode was created in this subxact or one of its * committed children and we won't see those rows later as part of an * earlier scan or command. This ensures that if this subtransaction * aborts then the frozen rows won't be visible after xact cleanup. Note * that the stronger test of exactly which subtransaction created it is * crucial for correctness of this optimisation. */ if (cstate->freeze) { if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals()) ereport(ERROR, (ERRCODE_INVALID_TRANSACTION_STATE, errmsg("cannot perform FREEZE because of prior transaction activity"))); if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() && cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId()) ereport(ERROR, (ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE, errmsg("cannot perform FREEZE because the table was not created or truncated in the current subtransaction"))); hi_options |= HEAP_INSERT_FROZEN; } /* * We need a ResultRelInfo so we can use the regular executor's * index-entry-making machinery. (There used to be a huge amount of code * here that basically duplicated execUtils.c ...) */ resultRelInfo = makeNode(ResultRelInfo); InitResultRelInfo(resultRelInfo, cstate->rel, 1, /* dummy rangetable index */ 0); ExecOpenIndices(resultRelInfo); estate->es_result_relations = resultRelInfo; estate->es_num_result_relations = 1; estate->es_result_relation_info = resultRelInfo; estate->es_range_table = cstate->range_table; /* Set up a tuple slot too */ myslot = ExecInitExtraTupleSlot(estate); ExecSetSlotDescriptor(myslot, tupDesc); /* Triggers might need a slot as well */ estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); /* * It's more efficient to prepare a bunch of tuples for insertion, and * insert them in one heap_multi_insert() call, than call heap_insert() * separately for every tuple. However, we can't do that if there are * BEFORE/INSTEAD OF triggers, or we need to evaluate volatile default * expressions. Such triggers or expressions might query the table we're * inserting to, and act differently if the tuples that have already been * processed and prepared for insertion are not there. */ if ((resultRelInfo->ri_TrigDesc != NULL && (resultRelInfo->ri_TrigDesc->trig_insert_before_row || resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) || cstate->volatile_defexprs) { useHeapMultiInsert = false; } else { useHeapMultiInsert = true; bufferedTuples = palloc(MAX_BUFFERED_TUPLES * sizeof(HeapTuple)); } /* Prepare to catch AFTER triggers. */ AfterTriggerBeginQuery(); /* * Check BEFORE STATEMENT insertion triggers. It's debatable whether we * should do this for COPY, since it's not really an "INSERT" statement as * such. However, executing these triggers maintains consistency with the * EACH ROW triggers that we already fire on COPY. 
*/ ExecBSInsertTriggers(estate, resultRelInfo); values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); bistate = GetBulkInsertState(); econtext = GetPerTupleExprContext(estate); /* Set up callback to identify error line number */ errcallback.callback = CopyFromErrorCallback; errcallback.arg = (void *) cstate; errcallback.previous = error_context_stack; error_context_stack = &errcallback; for (;;) { TupleTableSlot *slot; bool skip_tuple; Oid loaded_oid = InvalidOid; CHECK_FOR_INTERRUPTS(); if (nBufferedTuples == 0) { /* * Reset the per-tuple exprcontext. We can only do this if the * tuple buffer is empty. (Calling the context the per-tuple * memory context is a bit of a misnomer now.) */ ResetPerTupleExprContext(estate); } /* Switch into its memory context */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); if (!NextCopyFrom(cstate, econtext, values, nulls, &loaded_oid)) break; /* And now we can form the input tuple. */ tuple = heap_form_tuple(tupDesc, values, nulls); if (loaded_oid != InvalidOid) HeapTupleSetOid(tuple, loaded_oid); /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. */ tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); /* Triggers and stuff need to be invoked in query context. */ MemoryContextSwitchTo(oldcontext); /* Place tuple in tuple slot --- but slot shouldn't free it */ slot = myslot; ExecStoreTuple(tuple, slot, InvalidBuffer, false); skip_tuple = false; /* BEFORE ROW INSERT Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_insert_before_row) { slot = ExecBRInsertTriggers(estate, resultRelInfo, slot); if (slot == NULL) /* "do nothing" */ skip_tuple = true; else /* trigger might have changed tuple */ tuple = ExecMaterializeSlot(slot); } if (!skip_tuple) { /* Check the constraints of the tuple */ if (cstate->rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, estate); if (useHeapMultiInsert) { /* Add this tuple to the tuple buffer */ if (nBufferedTuples == 0) firstBufferedLineNo = cstate->cur_lineno; bufferedTuples[nBufferedTuples++] = tuple; bufferedTuplesSize += tuple->t_len; /* * If the buffer filled up, flush it. Also flush if the total * size of all the tuples in the buffer becomes large, to * avoid using large amounts of memory for the buffers when * the tuples are exceptionally wide. */ if (nBufferedTuples == MAX_BUFFERED_TUPLES || bufferedTuplesSize > 65535) { CopyFromInsertBatch(cstate, estate, mycid, hi_options, resultRelInfo, myslot, bistate, nBufferedTuples, bufferedTuples, firstBufferedLineNo); nBufferedTuples = 0; bufferedTuplesSize = 0; } } else { List *recheckIndexes = NIL; /* OK, store the tuple and create index entries for it */ heap_insert(cstate->rel, tuple, mycid, hi_options, bistate); if (resultRelInfo->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate); /* AFTER ROW INSERT Triggers */ ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes); list_free(recheckIndexes); } /* * We count only tuples not suppressed by a BEFORE INSERT trigger; * this is the same definition used by execMain.c for counting * tuples inserted by an INSERT command. 
*/ processed++; } } /* Flush any remaining buffered tuples */ if (nBufferedTuples > 0) CopyFromInsertBatch(cstate, estate, mycid, hi_options, resultRelInfo, myslot, bistate, nBufferedTuples, bufferedTuples, firstBufferedLineNo); /* Done, clean up */ error_context_stack = errcallback.previous; FreeBulkInsertState(bistate); MemoryContextSwitchTo(oldcontext); /* Execute AFTER STATEMENT insertion triggers */ ExecASInsertTriggers(estate, resultRelInfo); /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); pfree(values); pfree(nulls); ExecResetTupleTable(estate->es_tupleTable, false); ExecCloseIndices(resultRelInfo); FreeExecutorState(estate); /* * If we skipped writing WAL, then we need to sync the heap (but not * indexes since those use WAL anyway) */ if (hi_options & HEAP_INSERT_SKIP_WAL) heap_sync(cstate->rel); return processed; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: SocketBackend(StringInfo inBuf) { int qtype; /* * Get message type code from the frontend. */ qtype = pq_getbyte(); if (qtype == EOF) /* frontend disconnected */ { if (IsTransactionState()) ereport(COMMERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("unexpected EOF on client connection with an open transaction"))); else { /* * Can't send DEBUG log messages to client at this point. Since * we're disconnecting right away, we don't need to restore * whereToSendOutput. */ whereToSendOutput = DestNone; ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST), errmsg("unexpected EOF on client connection"))); } return qtype; } /* * Validate message type code before trying to read body; if we have lost * sync, better to say "command unknown" than to run out of memory because * we used garbage as a length word. * * This also gives us a place to set the doing_extended_query_message flag * as soon as possible. */ switch (qtype) { case 'Q': /* simple query */ doing_extended_query_message = false; if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) { /* old style without length word; convert */ if (pq_getstring(inBuf)) { if (IsTransactionState()) ereport(COMMERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("unexpected EOF on client connection with an open transaction"))); else { /* * Can't send DEBUG log messages to client at this * point.Since we're disconnecting right away, we * don't need to restore whereToSendOutput. */ whereToSendOutput = DestNone; ereport(DEBUG1, (errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST), errmsg("unexpected EOF on client connection"))); } return EOF; } } break; case 'F': /* fastpath function call */ /* we let fastpath.c cope with old-style input of this */ doing_extended_query_message = false; break; case 'X': /* terminate */ doing_extended_query_message = false; ignore_till_sync = false; break; case 'B': /* bind */ case 'C': /* close */ case 'D': /* describe */ case 'E': /* execute */ case 'H': /* flush */ case 'P': /* parse */ doing_extended_query_message = true; /* these are only legal in protocol 3 */ if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid frontend message type %d", qtype))); break; case 'S': /* sync */ /* stop any active skip-till-Sync */ ignore_till_sync = false; /* mark not-extended, so that a new error doesn't begin skip */ doing_extended_query_message = false; /* only legal in protocol 3 */ if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid frontend message type %d", qtype))); break; case 'd': /* copy data */ case 'c': /* copy done */ case 'f': /* copy fail */ doing_extended_query_message = false; /* these are only legal in protocol 3 */ if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid frontend message type %d", qtype))); break; default: /* * Otherwise we got garbage from the frontend. We treat this as * fatal because we have probably lost message boundary sync, and * there's no good way to recover. 
*/ ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid frontend message type %d", qtype))); break; } /* * In protocol version 3, all frontend messages have a length word next * after the type code; we can read the message contents independently of * the type. */ if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3) { if (pq_getmessage(inBuf, 0)) return EOF; /* suitable message already logged */ } return qtype; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pq_getstring(StringInfo s) { int i; resetStringInfo(s); /* Read until we get the terminating '\0' */ for (;;) { while (PqRecvPointer >= PqRecvLength) { if (pq_recvbuf()) /* If nothing in buffer, then recv some */ return EOF; /* Failed to recv data */ } for (i = PqRecvPointer; i < PqRecvLength; i++) { if (PqRecvBuffer[i] == '\0') { /* include the '\0' in the copy */ appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer, i - PqRecvPointer + 1); PqRecvPointer = i + 1; /* advance past \0 */ return 0; } } /* If we're here we haven't got the \0 in the buffer yet. */ appendBinaryStringInfo(s, PqRecvBuffer + PqRecvPointer, PqRecvLength - PqRecvPointer); PqRecvPointer = PqRecvLength; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pq_getbyte(void) { while (PqRecvPointer >= PqRecvLength) { if (pq_recvbuf()) /* If nothing in buffer, then recv some */ return EOF; /* Failed to recv data */ } return (unsigned char) PqRecvBuffer[PqRecvPointer++]; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ReceiveCopyBegin(CopyState cstate) { if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3) { /* new way */ StringInfoData buf; int natts = list_length(cstate->attnumlist); int16 format = (cstate->binary ? 1 : 0); int i; pq_beginmessage(&buf, 'G'); pq_sendbyte(&buf, format); /* overall format */ pq_sendint(&buf, natts, 2); for (i = 0; i < natts; i++) pq_sendint(&buf, format, 2); /* per-column formats */ pq_endmessage(&buf); cstate->copy_dest = COPY_NEW_FE; cstate->fe_msgbuf = makeStringInfo(); } else if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 2) { /* old way */ if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY BINARY is not supported to stdout or from stdin"))); pq_putemptymessage('G'); cstate->copy_dest = COPY_OLD_FE; } else { /* very old way */ if (cstate->binary) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("COPY BINARY is not supported to stdout or from stdin"))); pq_putemptymessage('D'); cstate->copy_dest = COPY_OLD_FE; } /* We *must* flush here to ensure FE knows it can send. */ pq_flush(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. 
The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: LockErrorCleanup(void) { LWLock *partitionLock; DisableTimeoutParams timeouts[2]; AbortStrongLockAcquire(); /* Nothing to do if we weren't waiting for a lock */ if (lockAwaited == NULL) return; /* * Turn off the deadlock and lock timeout timers, if they are still * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT * indicator flag, since this function is executed before * ProcessInterrupts when responding to SIGINT; else we'd lose the * knowledge that the SIGINT came from a lock timeout and not an external * source. */ timeouts[0].id = DEADLOCK_TIMEOUT; timeouts[0].keep_indicator = false; timeouts[1].id = LOCK_TIMEOUT; timeouts[1].keep_indicator = true; disable_timeouts(timeouts, 2); /* Unlink myself from the wait queue, if on it (might not be anymore!) */ partitionLock = LockHashPartitionLock(lockAwaited->hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); if (MyProc->links.next != NULL) { /* We could not have been granted the lock yet */ RemoveFromWaitQueue(MyProc, lockAwaited->hashcode); } else { /* * Somebody kicked us off the lock queue already. Perhaps they * granted us the lock, or perhaps they detected a deadlock. If they * did grant us the lock, we'd better remember it in our local lock * table. */ if (MyProc->waitStatus == STATUS_OK) GrantAwaitedLock(); } lockAwaited = NULL; LWLockRelease(partitionLock); /* * We used to do PGSemaphoreReset() here to ensure that our proc's wait * semaphore is reset to zero. This prevented a leftover wakeup signal * from remaining in the semaphore if someone else had granted us the lock * we wanted before we were able to remove ourselves from the wait-list. * However, now that ProcSleep loops until waitStatus changes, a leftover * wakeup signal isn't harmful, and it seems not worth expending cycles to * get rid of a signal that most likely isn't there. */ } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. 
PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: recv_password_packet(Port *port) { StringInfoData buf; if (PG_PROTOCOL_MAJOR(port->proto) >= 3) { /* Expect 'p' message type */ int mtype; mtype = pq_getbyte(); if (mtype != 'p') { /* * If the client just disconnects without offering a password, * don't make a log entry. This is legal per protocol spec and in * fact commonly done by psql, so complaining just clutters the * log. */ if (mtype != EOF) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("expected password response, got message type %d", mtype))); return NULL; /* EOF or bad message type */ } } else { /* For pre-3.0 clients, avoid log entry if they just disconnect */ if (pq_peekbyte() == EOF) return NULL; /* EOF */ } initStringInfo(&buf); if (pq_getmessage(&buf, 1000)) /* receive password */ { /* EOF - pq_getmessage already logged a suitable message */ pfree(buf.data); return NULL; } /* * Apply sanity check: password packet length should agree with length of * contained string. Note it is safe to use strlen here because * StringInfo is guaranteed to have an appended '\0'. */ if (strlen(buf.data) + 1 != buf.len) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid password packet size"))); /* Do not echo password to logs, for security. */ ereport(DEBUG5, (errmsg("received password packet"))); /* * Return the received string. Note we do not attempt to do any * character-set conversion on it; since we don't yet know the client's * encoding, there wouldn't be much point. */ return buf.data; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. 
pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pq_getbyte_if_available(unsigned char *c) { int r; if (PqRecvPointer < PqRecvLength) { *c = PqRecvBuffer[PqRecvPointer++]; return 1; } /* Put the socket into non-blocking mode */ socket_set_nonblocking(true); r = secure_read(MyProcPort, c, 1); if (r < 0) { /* * Ok if no data available without blocking or interrupted (though * EINTR really shouldn't happen with a non-blocking socket). Report * other errors. */ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) r = 0; else { /* * Careful: an ereport() that tries to write to the client would * cause recursion to here, leading to stack overflow and core * dump! This message must go *only* to the postmaster log. */ ereport(COMMERROR, (errcode_for_socket_access(), errmsg("could not receive data from client: %m"))); r = EOF; } } else if (r == 0) { /* EOF detected */ r = EOF; } return r; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. 
Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: die(SIGNAL_ARGS) { int save_errno = errno; /* Don't joggle the elbow of proc_exit */ if (!proc_exit_inprogress) { InterruptPending = true; ProcDiePending = true; /* * If it's safe to interrupt, and we're waiting for input or a lock, * service the interrupt immediately */ if (ImmediateInterruptOK && InterruptHoldoffCount == 0 && CritSectionCount == 0) { /* bump holdoff count to make ProcessInterrupts() a no-op */ /* until we are done getting ready for it */ InterruptHoldoffCount++; LockErrorCleanup(); /* prevent CheckDeadLock from running */ DisableNotifyInterrupt(); DisableCatchupInterrupt(); InterruptHoldoffCount--; ProcessInterrupts(); } } /* If we're still here, waken anything waiting on the process latch */ SetLatch(MyLatch); errno = save_errno; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ProcessStandbyMessage(void) { char msgtype; resetStringInfo(&reply_message); /* * Read the message contents. */ if (pq_getmessage(&reply_message, 0)) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected EOF on standby connection"))); proc_exit(0); } /* * Check message type from the first byte. */ msgtype = pq_getmsgbyte(&reply_message); switch (msgtype) { case 'r': ProcessStandbyReplyMessage(); break; case 'h': ProcessStandbyHSFeedbackMessage(); break; default: ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected message type \"%c\"", msgtype))); proc_exit(0); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pq_getmessage(StringInfo s, int maxlen) { int32 len; resetStringInfo(s); /* Read message length word */ if (pq_getbytes((char *) &len, 4) == EOF) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected EOF within message length word"))); return EOF; } len = ntohl(len); if (len < 4 || (maxlen > 0 && len > maxlen)) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid message length"))); return EOF; } len -= 4; /* discount length itself */ if (len > 0) { /* * Allocate space for message. If we run out of room (ridiculously * large message), we will elog(ERROR), but we want to discard the * message body so as not to lose communication sync. */ PG_TRY(); { enlargeStringInfo(s, len); } PG_CATCH(); { if (pq_discardbytes(len) == EOF) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete message from client"))); PG_RE_THROW(); } PG_END_TRY(); /* And grab the message */ if (pq_getbytes(s->data, len) == EOF) { ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete message from client"))); return EOF; } s->len = len; /* Place a trailing null per StringInfo convention */ s->data[len] = '\0'; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. 
The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
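For context on the commit message above (which also labels the pq_getmessage() record): the core of the fix it describes is a flag, called PqCommReadingMsg in the message, marking the span during which a protocol message is being read, so that any error raised inside that span terminates the connection instead of resuming with the byte stream out of sync. Below is a minimal, self-contained model of that guard; start_msgread(), end_msgread() and report_error() are illustrative stand-ins, not the actual pqcomm.c API.

/*
 * Illustrative model, not PostgreSQL code: a "reading a message" flag that
 * forces errors raised mid-message to be treated as fatal for the connection.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool reading_msg = false;

static void start_msgread(void) { reading_msg = true; }
static void end_msgread(void)   { reading_msg = false; }

/* An error raised mid-message cannot be recovered from: sync may be lost. */
static void report_error(const char *msg)
{
    if (reading_msg) {
        fprintf(stderr, "FATAL: %s (terminating connection, sync lost)\n", msg);
        exit(1);
    }
    fprintf(stderr, "ERROR: %s (connection kept, still in sync)\n", msg);
}

int main(void)
{
    report_error("cancel outside a message");        /* recoverable */

    start_msgread();
    /* ... partial message bytes consumed here ... */
    report_error("cancel while reading a message");  /* must be fatal */
    end_msgread();                                   /* not reached */
    return 0;
}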
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pq_discardbytes(size_t len) { size_t amount; while (len > 0) { while (PqRecvPointer >= PqRecvLength) { if (pq_recvbuf()) /* If nothing in buffer, then recv some */ return EOF; /* Failed to recv data */ } amount = PqRecvLength - PqRecvPointer; if (amount > len) amount = len; PqRecvPointer += amount; len -= amount; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-89'], 'message': 'Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20', 'CWE-703'], 'message': 'Bail out on partial reads, from Alexander Cherepanov'</s>
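For context on the dophn_core() record above, labeled "Bail out on partial reads": the program-header read is checked only against -1, so a short pread() can leave the header structure partially filled before it is parsed. The sketch below is a generic illustration of the pattern the message names — require that a positional read return the full requested size before parsing; read_full() is an illustrative helper, not an API from file(1).

/*
 * Illustrative helper only: treat a short pread() as an error instead of
 * parsing a partially-filled buffer.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_full(int fd, void *buf, size_t len, off_t off)
{
    ssize_t got = pread(fd, buf, len, off);

    if (got == -1)
        return -1;              /* I/O error */
    if ((size_t) got != len)
        return -1;              /* partial read: refuse to parse */
    return 0;
}

int main(int argc, char **argv)
{
    char hdr[16];
    int fd;

    (void) argc;
    fd = open(argv[0], O_RDONLY);   /* read this program's own binary */
    if (fd == -1 || read_full(fd, hdr, sizeof(hdr), 0) == -1) {
        fprintf(stderr, "short or failed read\n");
        return 1;
    }
    printf("read %zu header bytes\n", sizeof(hdr));
    close(fd);
    return 0;
}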
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: file_printable(char *buf, size_t bufsiz, const char *str) { char *ptr, *eptr; const unsigned char *s = (const unsigned char *)str; for (ptr = buf, eptr = ptr + bufsiz - 1; ptr < eptr && *s; s++) { if (isprint(*s)) { *ptr++ = *s; continue; } if (ptr >= eptr + 4) break; *ptr++ = '\\'; *ptr++ = ((*s >> 6) & 7) + '0'; *ptr++ = ((*s >> 3) & 7) + '0'; *ptr++ = ((*s >> 0) & 7) + '0'; } *ptr = '\0'; return buf; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Fix incorrect bounds check, from Alexander Cherepanov'</s>
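The bounds check in the `file_printable` entry above compares against `eptr + 4`, a location past the end of the buffer that `ptr` can never reach before the four-byte octal escape has already overrun it. The corrected logic has to confirm that four more bytes still fit *before* writing the escape. The function below is an illustrative reimplementation of that idea, not the exact upstream patch.

```c
/* Sketch of escaping non-printable bytes with a correct bounds check. */
#include <ctype.h>
#include <stdio.h>

static char *printable_copy(char *buf, size_t bufsiz, const char *str)
{
    char *ptr = buf;
    char *eptr = buf + bufsiz - 1;                 /* reserve room for NUL */
    const unsigned char *s = (const unsigned char *)str;

    for (; *s; s++) {
        if (isprint(*s)) {
            if (ptr >= eptr)                       /* no room for one byte */
                break;
            *ptr++ = (char)*s;
            continue;
        }
        if (ptr + 4 > eptr)                        /* no room for "\ooo" */
            break;
        *ptr++ = '\\';
        *ptr++ = ((*s >> 6) & 7) + '0';
        *ptr++ = ((*s >> 3) & 7) + '0';
        *ptr++ = ((*s >> 0) & 7) + '0';
    }
    *ptr = '\0';
    return buf;
}

int main(void)
{
    char out[16];

    /* Input longer than the buffer: the loop must stop, not overflow. */
    puts(printable_copy(out, sizeof out, "abc\x01\x02\x03xyzzy\x7f"));
    return 0;
}
```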
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int nft_flush_table(struct nft_ctx *ctx) { int err; struct nft_chain *chain, *nc; struct nft_set *set, *ns; list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { ctx->chain = chain; err = nft_delrule_by_chain(ctx); if (err < 0) goto out; err = nft_delchain(ctx); if (err < 0) goto out; } list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { if (set->flags & NFT_SET_ANONYMOUS && !list_empty(&set->bindings)) continue; err = nft_delset(ctx, set); if (err < 0) goto out; } err = nft_deltable(ctx); out: return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-19'], 'message': 'netfilter: nf_tables: fix flush ruleset chain dependencies Jumping between chains doesn't mix well with flush ruleset. Rules from a different chain and set elements may still refer to us. [ 353.373791] ------------[ cut here ]------------ [ 353.373845] kernel BUG at net/netfilter/nf_tables_api.c:1159! [ 353.373896] invalid opcode: 0000 [#1] SMP [ 353.373942] Modules linked in: intel_powerclamp uas iwldvm iwlwifi [ 353.374017] CPU: 0 PID: 6445 Comm: 31c3.nft Not tainted 3.18.0 #98 [ 353.374069] Hardware name: LENOVO 5129CTO/5129CTO, BIOS 6QET47WW (1.17 ) 07/14/2010 [...] [ 353.375018] Call Trace: [ 353.375046] [<ffffffff81964c31>] ? nf_tables_commit+0x381/0x540 [ 353.375101] [<ffffffff81949118>] nfnetlink_rcv+0x3d8/0x4b0 [ 353.375150] [<ffffffff81943fc5>] netlink_unicast+0x105/0x1a0 [ 353.375200] [<ffffffff8194438e>] netlink_sendmsg+0x32e/0x790 [ 353.375253] [<ffffffff818f398e>] sock_sendmsg+0x8e/0xc0 [ 353.375300] [<ffffffff818f36b9>] ? move_addr_to_kernel.part.20+0x19/0x70 [ 353.375357] [<ffffffff818f44f9>] ? move_addr_to_kernel+0x19/0x30 [ 353.375410] [<ffffffff819016d2>] ? verify_iovec+0x42/0xd0 [ 353.375459] [<ffffffff818f3e10>] ___sys_sendmsg+0x3f0/0x400 [ 353.375510] [<ffffffff810615fa>] ? native_sched_clock+0x2a/0x90 [ 353.375563] [<ffffffff81176697>] ? acct_account_cputime+0x17/0x20 [ 353.375616] [<ffffffff8110dc78>] ? account_user_time+0x88/0xa0 [ 353.375667] [<ffffffff818f4bbd>] __sys_sendmsg+0x3d/0x80 [ 353.375719] [<ffffffff81b184f4>] ? int_check_syscall_exit_work+0x34/0x3d [ 353.375776] [<ffffffff818f4c0d>] SyS_sendmsg+0xd/0x20 [ 353.375823] [<ffffffff81b1826d>] system_call_fastpath+0x16/0x1b Release objects in this order: rules -> sets -> chains -> tables, to make sure no references to chains are held anymore. Reported-by: Asbjoern Sloth Toennesen <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static unsigned int stack_maxrandom_size(void) { unsigned int max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; } return max; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284', 'CWE-264'], 'message': 'x86, mm/ASLR: Fix stack randomization on 64-bit systems The issue is that the stack for processes is not properly randomized on 64 bit architectures due to an integer overflow. The affected function is randomize_stack_top() in file "fs/binfmt_elf.c": static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned int random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { random_variable = get_random_int() & STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } return PAGE_ALIGN(stack_top) + random_variable; return PAGE_ALIGN(stack_top) - random_variable; } Note that, it declares the "random_variable" variable as "unsigned int". Since the result of the shifting operation between STACK_RND_MASK (which is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64): random_variable <<= PAGE_SHIFT; then the two leftmost bits are dropped when storing the result in the "random_variable". This variable shall be at least 34 bits long to hold the (22+12) result. These two dropped bits have an impact on the entropy of process stack. Concretely, the total stack entropy is reduced by four: from 2^28 to 2^30 (One fourth of expected entropy). This patch restores back the entropy by correcting the types involved in the operations in the functions randomize_stack_top() and stack_maxrandom_size(). The successful fix can be tested with: $ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done 7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack] 7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack] 7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack] 7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack] ... Once corrected, the leading bytes should be between 7ffc and 7fff, rather than always being 7fff. Signed-off-by: Hector Marco-Gisbert <[email protected]> Signed-off-by: Ismael Ripoll <[email protected]> [ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ] Signed-off-by: Kees Cook <[email protected]> Cc: <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Al Viro <[email protected]> Fixes: CVE-2015-1593 Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Borislav Petkov <[email protected]>'</s>
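The root cause described in the entry above is purely a matter of integer width: 22 bits of randomness shifted left by 12 needs 34 bits, so an `unsigned int` silently drops the top two bits of stack entropy. The standalone demo below reproduces the arithmetic with the x86_64 constants quoted in the message (STACK_RND_MASK = 0x3fffff, PAGE_SHIFT = 12); it is an illustration of the type bug, not kernel code.

```c
/* Demonstration of the entropy loss caused by a too-narrow type. */
#include <stdio.h>

#define STACK_RND_MASK 0x3fffffUL   /* 22 random bits on x86_64 */
#define PAGE_SHIFT     12

int main(void)
{
    /* Buggy: a 32-bit variable cannot hold a 22+12 = 34 bit result. */
    unsigned int  narrow = STACK_RND_MASK;
    unsigned long wide   = STACK_RND_MASK;

    narrow <<= PAGE_SHIFT;          /* top two bits are silently dropped */
    wide   <<= PAGE_SHIFT;          /* full 34-bit range is preserved */

    printf("unsigned int : %#011lx (max stack offset, buggy)\n",
           (unsigned long)narrow);
    printf("unsigned long: %#011lx (max stack offset, fixed)\n", wide);
    return 0;
}
```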
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: parse_keyblock_image (iobuf_t iobuf, int pk_no, int uid_no, const u32 *sigstatus, kbnode_t *r_keyblock) { gpg_error_t err; PACKET *pkt; kbnode_t keyblock = NULL; kbnode_t node, *tail; int in_cert, save_mode; u32 n_sigs; int pk_count, uid_count; *r_keyblock = NULL; pkt = xtrymalloc (sizeof *pkt); if (!pkt) return gpg_error_from_syserror (); init_packet (pkt); save_mode = set_packet_list_mode (0); in_cert = 0; n_sigs = 0; tail = NULL; pk_count = uid_count = 0; while ((err = parse_packet (iobuf, pkt)) != -1) { if (gpg_err_code (err) == GPG_ERR_UNKNOWN_PACKET) { free_packet (pkt); init_packet (pkt); continue; } if (err) { log_error ("parse_keyblock_image: read error: %s\n", gpg_strerror (err)); err = gpg_error (GPG_ERR_INV_KEYRING); break; } if (pkt->pkttype == PKT_COMPRESSED) { log_error ("skipped compressed packet in keybox blob\n"); free_packet(pkt); init_packet(pkt); continue; } if (pkt->pkttype == PKT_RING_TRUST) { log_info ("skipped ring trust packet in keybox blob\n"); free_packet(pkt); init_packet(pkt); continue; } if (!in_cert && pkt->pkttype != PKT_PUBLIC_KEY) { log_error ("parse_keyblock_image: first packet in a keybox blob " "is not a public key packet\n"); err = gpg_error (GPG_ERR_INV_KEYRING); break; } if (in_cert && (pkt->pkttype == PKT_PUBLIC_KEY || pkt->pkttype == PKT_SECRET_KEY)) { log_error ("parse_keyblock_image: " "multiple keyblocks in a keybox blob\n"); err = gpg_error (GPG_ERR_INV_KEYRING); break; } in_cert = 1; if (pkt->pkttype == PKT_SIGNATURE && sigstatus) { PKT_signature *sig = pkt->pkt.signature; n_sigs++; if (n_sigs > sigstatus[0]) { log_error ("parse_keyblock_image: " "more signatures than found in the meta data\n"); err = gpg_error (GPG_ERR_INV_KEYRING); break; } if (sigstatus[n_sigs]) { sig->flags.checked = 1; if (sigstatus[n_sigs] == 1 ) ; /* missing key */ else if (sigstatus[n_sigs] == 2 ) ; /* bad signature */ else if (sigstatus[n_sigs] < 0x10000000) ; /* bad flag */ else { sig->flags.valid = 1; /* Fixme: Shall we set the expired flag here? */ } } } node = new_kbnode (pkt); switch (pkt->pkttype) { case PKT_PUBLIC_KEY: case PKT_PUBLIC_SUBKEY: case PKT_SECRET_KEY: case PKT_SECRET_SUBKEY: if (++pk_count == pk_no) node->flag |= 1; break; case PKT_USER_ID: if (++uid_count == uid_no) node->flag |= 2; break; default: break; } if (!keyblock) keyblock = node; else *tail = node; tail = &node->next; pkt = xtrymalloc (sizeof *pkt); if (!pkt) { err = gpg_error_from_syserror (); break; } init_packet (pkt); } set_packet_list_mode (save_mode); if (err == -1 && keyblock) err = 0; /* Got the entire keyblock. */ if (!err && sigstatus && n_sigs != sigstatus[0]) { log_error ("parse_keyblock_image: signature count does not match\n"); err = gpg_error (GPG_ERR_INV_KEYRING); } if (err) release_kbnode (keyblock); else *r_keyblock = keyblock; free_packet (pkt); xfree (pkt); return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'gpg: Prevent an invalid memory read using a garbled keyring. * g10/keyring.c (keyring_get_keyblock): Whitelist allowed packet types. * g10/keydb.c (parse_keyblock_image): Ditto. -- The keyring DB code did not reject packets which don't belong into a keyring. 
If for example the keyblock contains a literal data packet it is expected that the processing code stops at the data packet and reads from the input stream which is referenced from the data packets. Obviously the keyring processing code does not and cannot do that. However, when exporting this messes up the IOBUF and leads to an invalid read of sizeof (int). We now skip all packets which are not allowed in a keyring. Reported-by: Hanno Böck <[email protected]> Test data: gpg2 --no-default-keyring --keyring FILE --export >/dev/null With this unpacked data for FILE: -----BEGIN PGP ARMORED FILE----- mI0EVNP2zQEEALvETPVDCJDBXkegF4esiV1fqlne40yJnCmJeDEJYocwFPXfFA86 sSGjInzgDbpbC9gQPwq91Qe9x3Vy81CkyVonPOejhINlzfpzqAAa3A6viJccZTwt DJ8E/I9jg53sbYW8q+VgfLn1hlggH/XQRT0HkXMP5y9ClURYnTsNwJhXABEBAAGs CXRlc3QgdGVzdIi5BBMBCgAjBQJU0/bNAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwEC HgECF4AACgkQlsmuCapsqYLvtQP/byY0tM0Lc3moftbHQZ2eHj9ykLjsCjeMDfPx kZUUtUS3HQaqgZLZOeqPjM7XgGh5hJsd9pfhmRWJ0x+iGB47XQNpRTtdLBV/WMCS l5z3uW7e9Md7QVUVuSlJnBgQHTS6EgP8JQadPkAiF+jgpJZXP+gFs2j3gobS0qUF eyTtxs+wAgAD =uIt9 -----END PGP ARMORED FILE----- Signed-off-by: Werner Koch <[email protected]>'</s>
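The first line of the fix above — "Whitelist allowed packet types" — means the keyring reader should skip or reject any packet type that cannot legitimately occur in a keyring (such as the literal data packet in the reported test case) before later code tries to follow it into the input stream. The sketch below shows the general shape of such a whitelist using its own reduced packet-type enum; the accepted set and the helper name are assumptions for illustration, not the exact list from the GnuPG patch.

```c
/* Sketch of whitelisting packet types while walking a keyring. */
#include <stdio.h>

enum pkt_type {                 /* reduced, illustrative subset */
    PKT_PUBLIC_KEY, PKT_PUBLIC_SUBKEY,
    PKT_SECRET_KEY, PKT_SECRET_SUBKEY,
    PKT_USER_ID, PKT_ATTRIBUTE, PKT_SIGNATURE,
    PKT_RING_TRUST, PKT_COMMENT,
    PKT_LITERAL_DATA             /* must never appear in a keyring */
};

/* allowed_in_keyring() is a hypothetical helper for this sketch. */
static int allowed_in_keyring(enum pkt_type t)
{
    switch (t) {
    case PKT_PUBLIC_KEY: case PKT_PUBLIC_SUBKEY:
    case PKT_SECRET_KEY: case PKT_SECRET_SUBKEY:
    case PKT_USER_ID:    case PKT_ATTRIBUTE:
    case PKT_SIGNATURE:  case PKT_RING_TRUST:
    case PKT_COMMENT:
        return 1;
    default:
        return 0;                /* anything else is skipped or rejected */
    }
}

int main(void)
{
    enum pkt_type stream[] = { PKT_PUBLIC_KEY, PKT_USER_ID,
                               PKT_SIGNATURE, PKT_LITERAL_DATA };

    for (size_t i = 0; i < sizeof stream / sizeof stream[0]; i++)
        printf("packet %zu: %s\n", i,
               allowed_in_keyring(stream[i]) ? "keep"
                                             : "skip (not a keyring packet)");
    return 0;
}
```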
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: keyid_from_fingerprint( const byte *fprint, size_t fprint_len, u32 *keyid ) { u32 dummy_keyid[2]; if( !keyid ) keyid = dummy_keyid; if (fprint_len != 20) { /* This is special as we have to lookup the key first. */ PKT_public_key pk; int rc; memset (&pk, 0, sizeof pk); rc = get_pubkey_byfprint (&pk, fprint, fprint_len); if( rc ) { log_error("Oops: keyid_from_fingerprint: no pubkey\n"); keyid[0] = 0; keyid[1] = 0; } else keyid_from_pk (&pk, keyid); } else { const byte *dp = fprint; keyid[0] = dp[12] << 24 | dp[13] << 16 | dp[14] << 8 | dp[15] ; keyid[1] = dp[16] << 24 | dp[17] << 16 | dp[18] << 8 | dp[19] ; } return keyid[1]; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
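The inline helpers named in the message above (`buf16_to_ushort`, `buf32_to_ulong`, `buf32_to_u32`, and so on) exist to make sure every byte is widened to an unsigned type before it is shifted, so an expression like `dp[12] << 24 | ...` can no longer sign-extend when its result lands in a wider variable. The code below is a plausible sketch of such helpers written from that description, not a copy of common/host2net.h; the demo values are made up.

```c
/* Sketch of sign-extension-safe big-endian buffer-to-scalar helpers. */
#include <stdio.h>

static inline unsigned short buf16_to_ushort(const void *buffer)
{
    const unsigned char *p = buffer;
    return (unsigned short)(((unsigned short)p[0] << 8) | p[1]);
}

static inline unsigned long buf32_to_ulong(const void *buffer)
{
    const unsigned char *p = buffer;
    return ((unsigned long)p[0] << 24) | ((unsigned long)p[1] << 16)
         | ((unsigned long)p[2] << 8)  |  (unsigned long)p[3];
}

int main(void)
{
    /* A fingerprint tail whose bytes have the high bit set. */
    const unsigned char dp[] = { 0xde, 0xad, 0xbe, 0xef };

    /* Legacy pattern: p[0] << 24 is evaluated in (signed) int, which is
     * formally undefined for high-bit bytes and in practice sign-extends
     * when assigned to an unsigned long on LP64 platforms.  Shown only to
     * illustrate the hazard being fixed. */
    unsigned long risky = dp[0] << 24 | dp[1] << 16 | dp[2] << 8 | dp[3];

    /* New pattern: every operand is widened to unsigned long first. */
    unsigned long safe = buf32_to_ulong(dp);

    printf("risky: %#lx\n", risky);
    printf("safe : %#lx\n", safe);
    printf("low 16 bits via helper: %#x\n", buf16_to_ushort(dp + 2));
    return 0;
}
```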
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xxxx_do_check( PKT_secret_key *sk, const char *tryagain_text, int mode, int *canceled ) { gpg_error_t err; byte *buffer; u16 csum=0; int i, res; size_t nbytes; if( sk->is_protected ) { /* remove the protection */ DEK *dek = NULL; u32 keyid[4]; /* 4! because we need two of them */ gcry_cipher_hd_t cipher_hd=NULL; PKT_secret_key *save_sk; if( sk->protect.s2k.mode == 1001 ) { log_info(_("secret key parts are not available\n")); return GPG_ERR_UNUSABLE_SECKEY; } if( sk->protect.algo == CIPHER_ALGO_NONE ) BUG(); if( openpgp_cipher_test_algo( sk->protect.algo ) ) { log_info(_("protection algorithm %d%s is not supported\n"), sk->protect.algo,sk->protect.algo==1?" (IDEA)":"" ); return GPG_ERR_CIPHER_ALGO; } if(gcry_md_test_algo (sk->protect.s2k.hash_algo)) { log_info(_("protection digest %d is not supported\n"), sk->protect.s2k.hash_algo); return GPG_ERR_DIGEST_ALGO; } keyid_from_sk( sk, keyid ); keyid[2] = keyid[3] = 0; if (!sk->flags.primary) { keyid[2] = sk->main_keyid[0]; keyid[3] = sk->main_keyid[1]; } dek = passphrase_to_dek( keyid, sk->pubkey_algo, sk->protect.algo, &sk->protect.s2k, mode, tryagain_text, canceled ); if (!dek && canceled && *canceled) return GPG_ERR_CANCELED; err = openpgp_cipher_open (&cipher_hd, sk->protect.algo, GCRY_CIPHER_MODE_CFB, (GCRY_CIPHER_SECURE | (sk->protect.algo >= 100 ? 0 : GCRY_CIPHER_ENABLE_SYNC))); if (err) log_fatal ("cipher open failed: %s\n", gpg_strerror (err) ); err = gcry_cipher_setkey (cipher_hd, dek->key, dek->keylen); if (err) log_fatal ("set key failed: %s\n", gpg_strerror (err) ); xfree(dek); save_sk = copy_secret_key( NULL, sk ); gcry_cipher_setiv ( cipher_hd, sk->protect.iv, sk->protect.ivlen ); csum = 0; if( sk->version >= 4 ) { int ndata; unsigned int ndatabits; byte *p, *data; u16 csumc = 0; i = pubkey_get_npkey(sk->pubkey_algo); assert ( gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE )); p = gcry_mpi_get_opaque ( sk->skey[i], &ndatabits ); ndata = (ndatabits+7)/8; if ( ndata > 1 ) csumc = p[ndata-2] << 8 | p[ndata-1]; data = xmalloc_secure ( ndata ); gcry_cipher_decrypt ( cipher_hd, data, ndata, p, ndata ); gcry_mpi_release (sk->skey[i]); sk->skey[i] = NULL ; p = data; if (sk->protect.sha1chk) { /* This is the new SHA1 checksum method to detect tampering with the key as used by the Klima/Rosa attack */ sk->csum = 0; csum = 1; if( ndata < 20 ) log_error("not enough bytes for SHA-1 checksum\n"); else { gcry_md_hd_t h; if ( gcry_md_open (&h, DIGEST_ALGO_SHA1, 1)) BUG(); /* Algo not available. */ gcry_md_write (h, data, ndata - 20); gcry_md_final (h); if (!memcmp (gcry_md_read (h, DIGEST_ALGO_SHA1), data + ndata - 20, 20) ) { /* Digest does match. We have to keep the old style checksum in sk->csum, so that the test used for unprotected keys does work. This test gets used when we are adding new keys. 
*/ sk->csum = csum = checksum (data, ndata-20); } gcry_md_close (h); } } else { if( ndata < 2 ) { log_error("not enough bytes for checksum\n"); sk->csum = 0; csum = 1; } else { csum = checksum( data, ndata-2); sk->csum = data[ndata-2] << 8 | data[ndata-1]; if ( sk->csum != csum ) { /* This is a PGP 7.0.0 workaround */ sk->csum = csumc; /* take the encrypted one */ } } } /* Must check it here otherwise the mpi_read_xx would fail because the length may have an arbitrary value */ if( sk->csum == csum ) { for( ; i < pubkey_get_nskey(sk->pubkey_algo); i++ ) { if ( gcry_mpi_scan( &sk->skey[i], GCRYMPI_FMT_PGP, p, ndata, &nbytes)) { /* Checksum was okay, but not correctly decrypted. */ sk->csum = 0; csum = 1; break; } ndata -= nbytes; p += nbytes; } /* Note: at this point ndata should be 2 for a simple checksum or 20 for the sha1 digest */ } xfree(data); } else { for(i=pubkey_get_npkey(sk->pubkey_algo); i < pubkey_get_nskey(sk->pubkey_algo); i++ ) { byte *p; size_t ndata; unsigned int ndatabits; assert (gcry_mpi_get_flag (sk->skey[i], GCRYMPI_FLAG_OPAQUE)); p = gcry_mpi_get_opaque (sk->skey[i], &ndatabits); ndata = (ndatabits+7)/8; assert (ndata >= 2); assert (ndata == ((p[0] << 8 | p[1]) + 7)/8 + 2); buffer = xmalloc_secure (ndata); gcry_cipher_sync (cipher_hd); buffer[0] = p[0]; buffer[1] = p[1]; gcry_cipher_decrypt (cipher_hd, buffer+2, ndata-2, p+2, ndata-2); csum += checksum (buffer, ndata); gcry_mpi_release (sk->skey[i]); err = gcry_mpi_scan( &sk->skey[i], GCRYMPI_FMT_PGP, buffer, ndata, &ndata ); xfree (buffer); if (err) { /* Checksum was okay, but not correctly decrypted. */ sk->csum = 0; csum = 1; break; } /* csum += checksum_mpi (sk->skey[i]); */ } } gcry_cipher_close ( cipher_hd ); /* Now let's see whether we have used the correct passphrase. */ if( csum != sk->csum ) { copy_secret_key( sk, save_sk ); passphrase_clear_cache ( keyid, NULL, sk->pubkey_algo ); free_secret_key( save_sk ); return gpg_error (GPG_ERR_BAD_PASSPHRASE); } /* The checksum may fail, so we also check the key itself. */ res = pk_check_secret_key ( sk->pubkey_algo, sk->skey ); if( res ) { copy_secret_key( sk, save_sk ); passphrase_clear_cache ( keyid, NULL, sk->pubkey_algo ); free_secret_key( save_sk ); return gpg_error (GPG_ERR_BAD_PASSPHRASE); } free_secret_key( save_sk ); sk->is_protected = 0; } else { /* not protected, assume it is okay if the checksum is okay */ csum = 0; for(i=pubkey_get_npkey(sk->pubkey_algo); i < pubkey_get_nskey(sk->pubkey_algo); i++ ) { csum += checksum_mpi( sk->skey[i] ); } if( csum != sk->csum ) return GPG_ERR_CHECKSUM; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: close_pcsc_reader_wrapped (int slot) { long err; reader_table_t slotp; size_t len; int i; unsigned char msgbuf[9]; slotp = reader_table + slot; if (slotp->pcsc.req_fd == -1 || slotp->pcsc.rsp_fd == -1 || slotp->pcsc.pid == (pid_t)(-1) ) { log_error ("close_pcsc_reader: pcsc-wrapper not running\n"); return 0; } msgbuf[0] = 0x02; /* CLOSE command. */ len = 0; msgbuf[1] = (len >> 24); msgbuf[2] = (len >> 16); msgbuf[3] = (len >> 8); msgbuf[4] = (len ); if ( writen (slotp->pcsc.req_fd, msgbuf, 5) ) { log_error ("error sending PC/SC CLOSE request: %s\n", strerror (errno)); goto command_failed; } /* Read the response. */ if ((i=readn (slotp->pcsc.rsp_fd, msgbuf, 9, &len)) || len != 9) { log_error ("error receiving PC/SC CLOSE response: %s\n", i? strerror (errno) : "premature EOF"); goto command_failed; } len = (msgbuf[1] << 24) | (msgbuf[2] << 16) | (msgbuf[3] << 8 ) | msgbuf[4]; if (msgbuf[0] != 0x81 || len < 4) { log_error ("invalid response header from PC/SC received\n"); goto command_failed; } len -= 4; /* Already read the error code. */ err = PCSC_ERR_MASK ((msgbuf[5] << 24) | (msgbuf[6] << 16) | (msgbuf[7] << 8 ) | msgbuf[8]); if (err) log_error ("pcsc_close failed: %s (0x%lx)\n", pcsc_error_string (err), err); /* We will close the wrapper in any case - errors are merely informational. */ command_failed: close (slotp->pcsc.req_fd); close (slotp->pcsc.rsp_fd); slotp->pcsc.req_fd = -1; slotp->pcsc.rsp_fd = -1; if (slotp->pcsc.pid != -1) kill (slotp->pcsc.pid, SIGTERM); slotp->pcsc.pid = (pid_t)(-1); slotp->used = 0; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: merge_selfsigs_main (KBNODE keyblock, int *r_revoked, struct revoke_info *rinfo) { PKT_public_key *pk = NULL; KBNODE k; u32 kid[2]; u32 sigdate, uiddate, uiddate2; KBNODE signode, uidnode, uidnode2; u32 curtime = make_timestamp (); unsigned int key_usage = 0; u32 keytimestamp = 0; u32 key_expire = 0; int key_expire_seen = 0; byte sigversion = 0; *r_revoked = 0; memset (rinfo, 0, sizeof (*rinfo)); if (keyblock->pkt->pkttype != PKT_PUBLIC_KEY) BUG (); pk = keyblock->pkt->pkt.public_key; keytimestamp = pk->timestamp; keyid_from_pk (pk, kid); pk->main_keyid[0] = kid[0]; pk->main_keyid[1] = kid[1]; if (pk->version < 4) { /* Before v4 the key packet itself contains the expiration date * and there was no way to change it, so we start with the one * from the key packet. */ key_expire = pk->max_expiredate; key_expire_seen = 1; } /* First pass: Find the latest direct key self-signature. We assume * that the newest one overrides all others. */ /* In case this key was already merged. */ xfree (pk->revkey); pk->revkey = NULL; pk->numrevkeys = 0; signode = NULL; sigdate = 0; /* Helper variable to find the latest signature. */ for (k = keyblock; k && k->pkt->pkttype != PKT_USER_ID; k = k->next) { if (k->pkt->pkttype == PKT_SIGNATURE) { PKT_signature *sig = k->pkt->pkt.signature; if (sig->keyid[0] == kid[0] && sig->keyid[1] == kid[1]) { if (check_key_signature (keyblock, k, NULL)) ; /* Signature did not verify. */ else if (IS_KEY_REV (sig)) { /* Key has been revoked - there is no way to * override such a revocation, so we theoretically * can stop now. We should not cope with expiration * times for revocations here because we have to * assume that an attacker can generate all kinds of * signatures. However due to the fact that the key * has been revoked it does not harm either and by * continuing we gather some more info on that * key. */ *r_revoked = 1; sig_to_revoke_info (sig, rinfo); } else if (IS_KEY_SIG (sig)) { /* Add any revocation keys onto the pk. This is particularly interesting since we normally only get data from the most recent 1F signature, but you need multiple 1F sigs to properly handle revocation keys (PGP does it this way, and a revocation key could be sensitive and hence in a different signature). */ if (sig->revkey) { int i; pk->revkey = xrealloc (pk->revkey, sizeof (struct revocation_key) * (pk->numrevkeys + sig->numrevkeys)); for (i = 0; i < sig->numrevkeys; i++) memcpy (&pk->revkey[pk->numrevkeys++], sig->revkey[i], sizeof (struct revocation_key)); } if (sig->timestamp >= sigdate) { if (sig->flags.expired) ; /* Signature has expired - ignore it. */ else { sigdate = sig->timestamp; signode = k; if (sig->version > sigversion) sigversion = sig->version; } } } } } } /* Remove dupes from the revocation keys. 
*/ if (pk->revkey) { int i, j, x, changed = 0; for (i = 0; i < pk->numrevkeys; i++) { for (j = i + 1; j < pk->numrevkeys; j++) { if (memcmp (&pk->revkey[i], &pk->revkey[j], sizeof (struct revocation_key)) == 0) { /* remove j */ for (x = j; x < pk->numrevkeys - 1; x++) pk->revkey[x] = pk->revkey[x + 1]; pk->numrevkeys--; j--; changed = 1; } } } if (changed) pk->revkey = xrealloc (pk->revkey, pk->numrevkeys * sizeof (struct revocation_key)); } if (signode) { /* Some information from a direct key signature take precedence * over the same information given in UID sigs. */ PKT_signature *sig = signode->pkt->pkt.signature; const byte *p; key_usage = parse_key_usage (sig); p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_KEY_EXPIRE, NULL); if (p && buffer_to_u32 (p)) { key_expire = keytimestamp + buffer_to_u32 (p); key_expire_seen = 1; } /* Mark that key as valid: One direct key signature should * render a key as valid. */ pk->flags.valid = 1; } /* Pass 1.5: Look for key revocation signatures that were not made by the key (i.e. did a revocation key issue a revocation for us?). Only bother to do this if there is a revocation key in the first place and we're not revoked already. */ if (!*r_revoked && pk->revkey) for (k = keyblock; k && k->pkt->pkttype != PKT_USER_ID; k = k->next) { if (k->pkt->pkttype == PKT_SIGNATURE) { PKT_signature *sig = k->pkt->pkt.signature; if (IS_KEY_REV (sig) && (sig->keyid[0] != kid[0] || sig->keyid[1] != kid[1])) { int rc = check_revocation_keys (pk, sig); if (rc == 0) { *r_revoked = 2; sig_to_revoke_info (sig, rinfo); /* Don't continue checking since we can't be any more revoked than this. */ break; } else if (gpg_err_code (rc) == GPG_ERR_NO_PUBKEY) pk->flags.maybe_revoked = 1; /* A failure here means the sig did not verify, was not issued by a revocation key, or a revocation key loop was broken. If a revocation key isn't findable, however, the key might be revoked and we don't know it. */ /* TODO: In the future handle subkey and cert revocations? PGP doesn't, but it's in 2440. */ } } } /* Second pass: Look at the self-signature of all user IDs. */ signode = uidnode = NULL; sigdate = 0; /* Helper variable to find the latest signature in one UID. */ for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID) { if (uidnode && signode) { fixup_uidnode (uidnode, signode, keytimestamp); pk->flags.valid = 1; } uidnode = k; signode = NULL; sigdate = 0; } else if (k->pkt->pkttype == PKT_SIGNATURE && uidnode) { PKT_signature *sig = k->pkt->pkt.signature; if (sig->keyid[0] == kid[0] && sig->keyid[1] == kid[1]) { if (check_key_signature (keyblock, k, NULL)) ; /* signature did not verify */ else if ((IS_UID_SIG (sig) || IS_UID_REV (sig)) && sig->timestamp >= sigdate) { /* Note: we allow to invalidate cert revocations * by a newer signature. An attacker can't use this * because a key should be revoced with a key revocation. * The reason why we have to allow for that is that at * one time an email address may become invalid but later * the same email address may become valid again (hired, * fired, hired again). */ sigdate = sig->timestamp; signode = k; signode->pkt->pkt.signature->flags.chosen_selfsig = 0; if (sig->version > sigversion) sigversion = sig->version; } } } } if (uidnode && signode) { fixup_uidnode (uidnode, signode, keytimestamp); pk->flags.valid = 1; } /* If the key isn't valid yet, and we have --allow-non-selfsigned-uid set, then force it valid. 
*/ if (!pk->flags.valid && opt.allow_non_selfsigned_uid) { if (opt.verbose) log_info (_("Invalid key %s made valid by" " --allow-non-selfsigned-uid\n"), keystr_from_pk (pk)); pk->flags.valid = 1; } /* The key STILL isn't valid, so try and find an ultimately trusted signature. */ if (!pk->flags.valid) { uidnode = NULL; for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID) uidnode = k; else if (k->pkt->pkttype == PKT_SIGNATURE && uidnode) { PKT_signature *sig = k->pkt->pkt.signature; if (sig->keyid[0] != kid[0] || sig->keyid[1] != kid[1]) { PKT_public_key *ultimate_pk; ultimate_pk = xmalloc_clear (sizeof (*ultimate_pk)); /* We don't want to use the full get_pubkey to avoid infinite recursion in certain cases. There is no reason to check that an ultimately trusted key is still valid - if it has been revoked or the user should also renmove the ultimate trust flag. */ if (get_pubkey_fast (ultimate_pk, sig->keyid) == 0 && check_key_signature2 (keyblock, k, ultimate_pk, NULL, NULL, NULL, NULL) == 0 && get_ownertrust (ultimate_pk) == TRUST_ULTIMATE) { free_public_key (ultimate_pk); pk->flags.valid = 1; break; } free_public_key (ultimate_pk); } } } } /* Record the highest selfsig version so we know if this is a v3 key through and through, or a v3 key with a v4 selfsig somewhere. This is useful in a few places to know if the key must be treated as PGP2-style or OpenPGP-style. Note that a selfsig revocation with a higher version number will also raise this value. This is okay since such a revocation must be issued by the user (i.e. it cannot be issued by someone else to modify the key behavior.) */ pk->selfsigversion = sigversion; /* Now that we had a look at all user IDs we can now get some information * from those user IDs. */ if (!key_usage) { /* Find the latest user ID with key flags set. */ uiddate = 0; /* Helper to find the latest user ID. */ for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID) { PKT_user_id *uid = k->pkt->pkt.user_id; if (uid->help_key_usage && uid->created > uiddate) { key_usage = uid->help_key_usage; uiddate = uid->created; } } } } if (!key_usage) { /* No key flags at all: get it from the algo. */ key_usage = openpgp_pk_algo_usage (pk->pubkey_algo); } else { /* Check that the usage matches the usage as given by the algo. */ int x = openpgp_pk_algo_usage (pk->pubkey_algo); if (x) /* Mask it down to the actual allowed usage. */ key_usage &= x; } /* Whatever happens, it's a primary key, so it can certify. */ pk->pubkey_usage = key_usage | PUBKEY_USAGE_CERT; if (!key_expire_seen) { /* Find the latest valid user ID with a key expiration set * Note, that this may be a different one from the above because * some user IDs may have no expiration date set. */ uiddate = 0; for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID) { PKT_user_id *uid = k->pkt->pkt.user_id; if (uid->help_key_expire && uid->created > uiddate) { key_expire = uid->help_key_expire; uiddate = uid->created; } } } } /* Currently only v3 keys have a maximum expiration date, but I'll bet v5 keys get this feature again. */ if (key_expire == 0 || (pk->max_expiredate && key_expire > pk->max_expiredate)) key_expire = pk->max_expiredate; pk->has_expired = key_expire >= curtime ? 0 : key_expire; pk->expiredate = key_expire; /* Fixme: we should see how to get rid of the expiretime fields but * this needs changes at other places too. 
*/ /* And now find the real primary user ID and delete all others. */ uiddate = uiddate2 = 0; uidnode = uidnode2 = NULL; for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID && !k->pkt->pkt.user_id->attrib_data) { PKT_user_id *uid = k->pkt->pkt.user_id; if (uid->is_primary) { if (uid->created > uiddate) { uiddate = uid->created; uidnode = k; } else if (uid->created == uiddate && uidnode) { /* The dates are equal, so we need to do a different (and arbitrary) comparison. This should rarely, if ever, happen. It's good to try and guarantee that two different GnuPG users with two different keyrings at least pick the same primary. */ if (cmp_user_ids (uid, uidnode->pkt->pkt.user_id) > 0) uidnode = k; } } else { if (uid->created > uiddate2) { uiddate2 = uid->created; uidnode2 = k; } else if (uid->created == uiddate2 && uidnode2) { if (cmp_user_ids (uid, uidnode2->pkt->pkt.user_id) > 0) uidnode2 = k; } } } } if (uidnode) { for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID && !k->pkt->pkt.user_id->attrib_data) { PKT_user_id *uid = k->pkt->pkt.user_id; if (k != uidnode) uid->is_primary = 0; } } } else if (uidnode2) { /* None is flagged primary - use the latest user ID we have, and disambiguate with the arbitrary packet comparison. */ uidnode2->pkt->pkt.user_id->is_primary = 1; } else { /* None of our uids were self-signed, so pick the one that sorts first to be the primary. This is the best we can do here since there are no self sigs to date the uids. */ uidnode = NULL; for (k = keyblock; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_USER_ID && !k->pkt->pkt.user_id->attrib_data) { if (!uidnode) { uidnode = k; uidnode->pkt->pkt.user_id->is_primary = 1; continue; } else { if (cmp_user_ids (k->pkt->pkt.user_id, uidnode->pkt->pkt.user_id) > 0) { uidnode->pkt->pkt.user_id->is_primary = 0; uidnode = k; uidnode->pkt->pkt.user_id->is_primary = 1; } else k->pkt->pkt.user_id->is_primary = 0; /* just to be safe */ } } } } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: enum_sig_subpkt (const subpktarea_t * pktbuf, sigsubpkttype_t reqtype, size_t * ret_n, int *start, int *critical) { const byte *buffer; int buflen; int type; int critical_dummy; int offset; size_t n; int seq = 0; int reqseq = start ? *start : 0; if (!critical) critical = &critical_dummy; if (!pktbuf || reqseq == -1) { static char dummy[] = "x"; /* Return a value different from NULL to indicate that * there is no critical bit we do not understand. */ return reqtype == SIGSUBPKT_TEST_CRITICAL ? dummy : NULL; } buffer = pktbuf->data; buflen = pktbuf->len; while (buflen) { n = *buffer++; buflen--; if (n == 255) /* 4 byte length header. */ { if (buflen < 4) goto too_short; n = (buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]; buffer += 4; buflen -= 4; } else if (n >= 192) /* 4 byte special encoded length header. */ { if (buflen < 2) goto too_short; n = ((n - 192) << 8) + *buffer + 192; buffer++; buflen--; } if (buflen < n) goto too_short; type = *buffer; if (type & 0x80) { type &= 0x7f; *critical = 1; } else *critical = 0; if (!(++seq > reqseq)) ; else if (reqtype == SIGSUBPKT_TEST_CRITICAL) { if (*critical) { if (n - 1 > buflen + 1) goto too_short; if (!can_handle_critical (buffer + 1, n - 1, type)) { if (opt.verbose) log_info (_("subpacket of type %d has " "critical bit set\n"), type); if (start) *start = seq; return NULL; /* This is an error. */ } } } else if (reqtype < 0) /* List packets. */ dump_sig_subpkt (reqtype == SIGSUBPKT_LIST_HASHED, type, *critical, buffer, buflen, n); else if (type == reqtype) /* Found. */ { buffer++; n--; if (n > buflen) goto too_short; if (ret_n) *ret_n = n; offset = parse_one_sig_subpkt (buffer, n, type); switch (offset) { case -2: log_error ("subpacket of type %d too short\n", type); return NULL; case -1: return NULL; default: break; } if (start) *start = seq; return buffer + offset; } buffer += n; buflen -= n; } if (reqtype == SIGSUBPKT_TEST_CRITICAL) return buffer; /* Used as True to indicate that there is no. */ /* Critical bit we don't understand. */ if (start) *start = -1; return NULL; /* End of packets; not found. */ too_short: if (opt.verbose) log_info ("buffer shorter than subpacket\n"); if (start) *start = -1; return NULL; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: keyid_from_pk (PKT_public_key *pk, u32 *keyid) { u32 lowbits; u32 dummy_keyid[2]; if (!keyid) keyid = dummy_keyid; if( pk->keyid[0] || pk->keyid[1] ) { keyid[0] = pk->keyid[0]; keyid[1] = pk->keyid[1]; lowbits = keyid[1]; } else { const byte *dp; gcry_md_hd_t md; md = do_fingerprint_md(pk); if(md) { dp = gcry_md_read ( md, 0 ); keyid[0] = dp[12] << 24 | dp[13] << 16 | dp[14] << 8 | dp[15] ; keyid[1] = dp[16] << 24 | dp[17] << 16 | dp[18] << 8 | dp[19] ; lowbits = keyid[1]; gcry_md_close (md); pk->keyid[0] = keyid[0]; pk->keyid[1] = keyid[1]; } else pk->keyid[0]=pk->keyid[1]=keyid[0]=keyid[1]=lowbits=0xFFFFFFFF; } return lowbits; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dump_sig_subpkt (int hashed, int type, int critical, const byte * buffer, size_t buflen, size_t length) { const char *p = NULL; int i; /* The CERT has warning out with explains how to use GNUPG to detect * the ARRs - we print our old message here when it is a faked ARR * and add an additional notice. */ if (type == SIGSUBPKT_ARR && !hashed) { es_fprintf (listfp, "\tsubpkt %d len %u (additional recipient request)\n" "WARNING: PGP versions > 5.0 and < 6.5.8 will automagically " "encrypt to this key and thereby reveal the plaintext to " "the owner of this ARR key. Detailed info follows:\n", type, (unsigned) length); } buffer++; length--; es_fprintf (listfp, "\t%s%ssubpkt %d len %u (", /*) */ critical ? "critical " : "", hashed ? "hashed " : "", type, (unsigned) length); if (length > buflen) { es_fprintf (listfp, "too short: buffer is only %u)\n", (unsigned) buflen); return; } switch (type) { case SIGSUBPKT_SIG_CREATED: if (length >= 4) es_fprintf (listfp, "sig created %s", strtimestamp (buffer_to_u32 (buffer))); break; case SIGSUBPKT_SIG_EXPIRE: if (length >= 4) { if (buffer_to_u32 (buffer)) es_fprintf (listfp, "sig expires after %s", strtimevalue (buffer_to_u32 (buffer))); else es_fprintf (listfp, "sig does not expire"); } break; case SIGSUBPKT_EXPORTABLE: if (length) es_fprintf (listfp, "%sexportable", *buffer ? "" : "not "); break; case SIGSUBPKT_TRUST: if (length != 2) p = "[invalid trust subpacket]"; else es_fprintf (listfp, "trust signature of depth %d, value %d", buffer[0], buffer[1]); break; case SIGSUBPKT_REGEXP: if (!length) p = "[invalid regexp subpacket]"; else { es_fprintf (listfp, "regular expression: \""); es_write_sanitized (listfp, buffer, length, "\"", NULL); p = "\""; } break; case SIGSUBPKT_REVOCABLE: if (length) es_fprintf (listfp, "%srevocable", *buffer ? 
"" : "not "); break; case SIGSUBPKT_KEY_EXPIRE: if (length >= 4) { if (buffer_to_u32 (buffer)) es_fprintf (listfp, "key expires after %s", strtimevalue (buffer_to_u32 (buffer))); else es_fprintf (listfp, "key does not expire"); } break; case SIGSUBPKT_PREF_SYM: es_fputs ("pref-sym-algos:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %d", buffer[i]); break; case SIGSUBPKT_REV_KEY: es_fputs ("revocation key: ", listfp); if (length < 22) p = "[too short]"; else { es_fprintf (listfp, "c=%02x a=%d f=", buffer[0], buffer[1]); for (i = 2; i < length; i++) es_fprintf (listfp, "%02X", buffer[i]); } break; case SIGSUBPKT_ISSUER: if (length >= 8) es_fprintf (listfp, "issuer key ID %08lX%08lX", (ulong) buffer_to_u32 (buffer), (ulong) buffer_to_u32 (buffer + 4)); break; case SIGSUBPKT_NOTATION: { es_fputs ("notation: ", listfp); if (length < 8) p = "[too short]"; else { const byte *s = buffer; size_t n1, n2; n1 = (s[4] << 8) | s[5]; n2 = (s[6] << 8) | s[7]; s += 8; if (8 + n1 + n2 != length) p = "[error]"; else { es_write_sanitized (listfp, s, n1, ")", NULL); es_putc ('=', listfp); if (*buffer & 0x80) es_write_sanitized (listfp, s + n1, n2, ")", NULL); else p = "[not human readable]"; } } } break; case SIGSUBPKT_PREF_HASH: es_fputs ("pref-hash-algos:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %d", buffer[i]); break; case SIGSUBPKT_PREF_COMPR: es_fputs ("pref-zip-algos:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %d", buffer[i]); break; case SIGSUBPKT_KS_FLAGS: es_fputs ("key server preferences:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %02X", buffer[i]); break; case SIGSUBPKT_PREF_KS: es_fputs ("preferred key server: ", listfp); es_write_sanitized (listfp, buffer, length, ")", NULL); break; case SIGSUBPKT_PRIMARY_UID: p = "primary user ID"; break; case SIGSUBPKT_POLICY: es_fputs ("policy: ", listfp); es_write_sanitized (listfp, buffer, length, ")", NULL); break; case SIGSUBPKT_KEY_FLAGS: es_fputs ("key flags:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %02X", buffer[i]); break; case SIGSUBPKT_SIGNERS_UID: p = "signer's user ID"; break; case SIGSUBPKT_REVOC_REASON: if (length) { es_fprintf (listfp, "revocation reason 0x%02x (", *buffer); es_write_sanitized (listfp, buffer + 1, length - 1, ")", NULL); p = ")"; } break; case SIGSUBPKT_ARR: es_fputs ("Big Brother's key (ignored): ", listfp); if (length < 22) p = "[too short]"; else { es_fprintf (listfp, "c=%02x a=%d f=", buffer[0], buffer[1]); if (length > 2) es_write_hexstring (listfp, buffer+2, length-2, 0, NULL); } break; case SIGSUBPKT_FEATURES: es_fputs ("features:", listfp); for (i = 0; i < length; i++) es_fprintf (listfp, " %02x", buffer[i]); break; case SIGSUBPKT_SIGNATURE: es_fputs ("signature: ", listfp); if (length < 17) p = "[too short]"; else es_fprintf (listfp, "v%d, class 0x%02X, algo %d, digest algo %d", buffer[0], buffer[0] == 3 ? buffer[2] : buffer[1], buffer[0] == 3 ? buffer[15] : buffer[2], buffer[0] == 3 ? buffer[16] : buffer[3]); break; default: if (type >= 100 && type <= 110) p = "experimental / private subpacket"; else p = "?"; break; } es_fprintf (listfp, "%s)\n", p ? p : ""); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. 
-- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pcsc_vendor_specific_init (int slot) { unsigned char buf[256]; pcsc_dword_t len; int sw; int vendor = 0; int product = 0; pcsc_dword_t get_tlv_ioctl = (pcsc_dword_t)-1; unsigned char *p; len = sizeof (buf); sw = control_pcsc (slot, CM_IOCTL_GET_FEATURE_REQUEST, NULL, 0, buf, &len); if (sw) { log_error ("pcsc_vendor_specific_init: GET_FEATURE_REQUEST failed: %d\n", sw); return SW_NOT_SUPPORTED; } else { p = buf; while (p < buf + len) { unsigned char code = *p++; int l = *p++; unsigned int v = 0; if (l == 1) v = p[0]; else if (l == 2) v = ((p[0] << 8) | p[1]); else if (l == 4) v = ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]); if (code == FEATURE_VERIFY_PIN_DIRECT) reader_table[slot].pcsc.verify_ioctl = v; else if (code == FEATURE_MODIFY_PIN_DIRECT) reader_table[slot].pcsc.modify_ioctl = v; else if (code == FEATURE_GET_TLV_PROPERTIES) get_tlv_ioctl = v; if (DBG_CARD_IO) log_debug ("feature: code=%02X, len=%d, v=%02X\n", code, l, v); p += l; } } if (get_tlv_ioctl == (pcsc_dword_t)-1) { /* * For system which doesn't support GET_TLV_PROPERTIES, * we put some heuristics here. */ if (reader_table[slot].rdrname) { if (strstr (reader_table[slot].rdrname, "SPRx32")) { reader_table[slot].is_spr532 = 1; reader_table[slot].pinpad_varlen_supported = 1; } else if (strstr (reader_table[slot].rdrname, "ST-2xxx") || strstr (reader_table[slot].rdrname, "cyberJack") || strstr (reader_table[slot].rdrname, "DIGIPASS") || strstr (reader_table[slot].rdrname, "Gnuk") || strstr (reader_table[slot].rdrname, "KAAN")) reader_table[slot].pinpad_varlen_supported = 1; } return 0; } len = sizeof (buf); sw = control_pcsc (slot, get_tlv_ioctl, NULL, 0, buf, &len); if (sw) { log_error ("pcsc_vendor_specific_init: GET_TLV_IOCTL failed: %d\n", sw); return SW_NOT_SUPPORTED; } p = buf; while (p < buf + len) { unsigned char tag = *p++; int l = *p++; unsigned int v = 0; /* Umm... here is little endian, while the encoding above is big. */ if (l == 1) v = p[0]; else if (l == 2) v = ((p[1] << 8) | p[0]); else if (l == 4) v = ((p[3] << 24) | (p[2] << 16) | (p[1] << 8) | p[0]); if (tag == PCSCv2_PART10_PROPERTY_bMinPINSize) reader_table[slot].pcsc.pinmin = v; else if (tag == PCSCv2_PART10_PROPERTY_bMaxPINSize) reader_table[slot].pcsc.pinmax = v; else if (tag == PCSCv2_PART10_PROPERTY_wIdVendor) vendor = v; else if (tag == PCSCv2_PART10_PROPERTY_wIdProduct) product = v; if (DBG_CARD_IO) log_debug ("TLV properties: tag=%02X, len=%d, v=%08X\n", tag, l, v); p += l; } if (vendor == VENDOR_VEGA && product == VEGA_ALPHA) { /* * Please read the comment of ccid_vendor_specific_init in * ccid-driver.c. */ const unsigned char cmd[] = { '\xb5', '\x01', '\x00', '\x03', '\x00' }; sw = control_pcsc (slot, CM_IOCTL_VENDOR_IFD_EXCHANGE, cmd, sizeof (cmd), NULL, 0); if (sw) return SW_NOT_SUPPORTED; } else if (vendor == VENDOR_SCM && product == SCM_SPR532) /* SCM SPR532 */ { reader_table[slot].is_spr532 = 1; reader_table[slot].pinpad_varlen_supported = 1; } else if ((vendor == 0x046a && product == 0x003e) /* Cherry ST-2xxx */ || vendor == 0x0c4b /* Tested with Reiner cyberJack GO */ || vendor == 0x1a44 /* Tested with Vasco DIGIPASS 920 */ || vendor == 0x234b /* Tested with FSIJ Gnuk Token */ || vendor == 0x0d46 /* Tested with KAAN Advanced??? 
*/) reader_table[slot].pinpad_varlen_supported = 1; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: make_backsig (PKT_signature *sig, PKT_public_key *pk, PKT_public_key *sub_pk, PKT_public_key *sub_psk, u32 timestamp, const char *cache_nonce) { gpg_error_t err; PKT_signature *backsig; cache_public_key (sub_pk); err = make_keysig_packet (&backsig, pk, NULL, sub_pk, sub_psk, 0x19, 0, timestamp, 0, NULL, NULL, cache_nonce); if (err) log_error ("make_keysig_packet failed for backsig: %s\n", gpg_strerror (err)); else { /* Get it into a binary packed form. */ IOBUF backsig_out = iobuf_temp(); PACKET backsig_pkt; init_packet (&backsig_pkt); backsig_pkt.pkttype = PKT_SIGNATURE; backsig_pkt.pkt.signature = backsig; err = build_packet (backsig_out, &backsig_pkt); free_packet (&backsig_pkt); if (err) log_error ("build_packet failed for backsig: %s\n", gpg_strerror (err)); else { size_t pktlen = 0; byte *buf = iobuf_get_temp_buffer (backsig_out); /* Remove the packet header. */ if(buf[0]&0x40) { if (buf[1] < 192) { pktlen = buf[1]; buf += 2; } else if(buf[1] < 224) { pktlen = (buf[1]-192)*256; pktlen += buf[2]+192; buf += 3; } else if (buf[1] == 255) { pktlen = buf[2] << 24; pktlen |= buf[3] << 16; pktlen |= buf[4] << 8; pktlen |= buf[5]; buf += 6; } else BUG (); } else { int mark = 1; switch (buf[0]&3) { case 3: BUG (); break; case 2: pktlen = buf[mark++] << 24; pktlen |= buf[mark++] << 16; case 1: pktlen |= buf[mark++] << 8; case 0: pktlen |= buf[mark++]; } buf += mark; } /* Now make the binary blob into a subpacket. */ build_sig_subpkt (sig, SIGSUBPKT_SIGNATURE, buf, pktlen); iobuf_close (backsig_out); } } return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: do_find_tlv (const unsigned char *buffer, size_t length, int tag, size_t *nbytes, int nestlevel) { const unsigned char *s = buffer; size_t n = length; size_t len; int this_tag; int composite; for (;;) { buffer = s; if (n < 2) return NULL; /* Buffer definitely too short for tag and length. */ if (!*s || *s == 0xff) { /* Skip optional filler between TLV objects. */ s++; n--; continue; } composite = !!(*s & 0x20); if ((*s & 0x1f) == 0x1f) { /* more tag bytes to follow */ s++; n--; if (n < 2) return NULL; /* buffer definitely too short for tag and length. */ if ((*s & 0x1f) == 0x1f) return NULL; /* We support only up to 2 bytes. */ this_tag = (s[-1] << 8) | (s[0] & 0x7f); } else this_tag = s[0]; len = s[1]; s += 2; n -= 2; if (len < 0x80) ; else if (len == 0x81) { /* One byte length follows. */ if (!n) return NULL; /* we expected 1 more bytes with the length. */ len = s[0]; s++; n--; } else if (len == 0x82) { /* Two byte length follows. */ if (n < 2) return NULL; /* We expected 2 more bytes with the length. */ len = (s[0] << 8) | s[1]; s += 2; n -= 2; } else return NULL; /* APDU limit is 65535, thus it does not make sense to assume longer length fields. */ if (composite && nestlevel < 100) { /* Dive into this composite DO after checking for a too deep nesting. */ const unsigned char *tmp_s; size_t tmp_len; tmp_s = do_find_tlv (s, len, tag, &tmp_len, nestlevel+1); if (tmp_s) { *nbytes = tmp_len; return tmp_s; } } if (this_tag == tag) { *nbytes = len; return s; } if (len > n) return NULL; /* Buffer too short to skip to the next tag. */ s += len; n -= len; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: parse_signature (IOBUF inp, int pkttype, unsigned long pktlen, PKT_signature * sig) { int md5_len = 0; unsigned n; int is_v4 = 0; int rc = 0; int i, ndata; if (pktlen < 16) { log_error ("packet(%d) too short\n", pkttype); if (list_mode) es_fputs (":signature packet: [too short]\n", listfp); goto leave; } sig->version = iobuf_get_noeof (inp); pktlen--; if (sig->version == 4) is_v4 = 1; else if (sig->version != 2 && sig->version != 3) { log_error ("packet(%d) with unknown version %d\n", pkttype, sig->version); if (list_mode) es_fputs (":signature packet: [unknown version]\n", listfp); rc = gpg_error (GPG_ERR_INV_PACKET); goto leave; } if (!is_v4) { md5_len = iobuf_get_noeof (inp); pktlen--; } sig->sig_class = iobuf_get_noeof (inp); pktlen--; if (!is_v4) { sig->timestamp = read_32 (inp); pktlen -= 4; sig->keyid[0] = read_32 (inp); pktlen -= 4; sig->keyid[1] = read_32 (inp); pktlen -= 4; } sig->pubkey_algo = iobuf_get_noeof (inp); pktlen--; sig->digest_algo = iobuf_get_noeof (inp); pktlen--; sig->flags.exportable = 1; sig->flags.revocable = 1; if (is_v4) /* Read subpackets. */ { n = read_16 (inp); pktlen -= 2; /* Length of hashed data. */ if (n > 10000) { log_error ("signature packet: hashed data too long\n"); if (list_mode) es_fputs (":signature packet: [hashed data too long]\n", listfp); rc = GPG_ERR_INV_PACKET; goto leave; } if (n) { sig->hashed = xmalloc (sizeof (*sig->hashed) + n - 1); sig->hashed->size = n; sig->hashed->len = n; if (iobuf_read (inp, sig->hashed->data, n) != n) { log_error ("premature eof while reading " "hashed signature data\n"); if (list_mode) es_fputs (":signature packet: [premature eof]\n", listfp); rc = -1; goto leave; } pktlen -= n; } n = read_16 (inp); pktlen -= 2; /* Length of unhashed data. */ if (n > 10000) { log_error ("signature packet: unhashed data too long\n"); if (list_mode) es_fputs (":signature packet: [unhashed data too long]\n", listfp); rc = GPG_ERR_INV_PACKET; goto leave; } if (n) { sig->unhashed = xmalloc (sizeof (*sig->unhashed) + n - 1); sig->unhashed->size = n; sig->unhashed->len = n; if (iobuf_read (inp, sig->unhashed->data, n) != n) { log_error ("premature eof while reading " "unhashed signature data\n"); if (list_mode) es_fputs (":signature packet: [premature eof]\n", listfp); rc = -1; goto leave; } pktlen -= n; } } if (pktlen < 5) /* Sanity check. */ { log_error ("packet(%d) too short\n", pkttype); if (list_mode) es_fputs (":signature packet: [too short]\n", listfp); rc = GPG_ERR_INV_PACKET; goto leave; } sig->digest_start[0] = iobuf_get_noeof (inp); pktlen--; sig->digest_start[1] = iobuf_get_noeof (inp); pktlen--; if (is_v4 && sig->pubkey_algo) /* Extract required information. */ { const byte *p; size_t len; /* Set sig->flags.unknown_critical if there is a critical bit * set for packets which we do not understand. 
*/ if (!parse_sig_subpkt (sig->hashed, SIGSUBPKT_TEST_CRITICAL, NULL) || !parse_sig_subpkt (sig->unhashed, SIGSUBPKT_TEST_CRITICAL, NULL)) sig->flags.unknown_critical = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_SIG_CREATED, NULL); if (p) sig->timestamp = buffer_to_u32 (p); else if (!(sig->pubkey_algo >= 100 && sig->pubkey_algo <= 110) && opt.verbose) log_info ("signature packet without timestamp\n"); p = parse_sig_subpkt2 (sig, SIGSUBPKT_ISSUER, NULL); if (p) { sig->keyid[0] = buffer_to_u32 (p); sig->keyid[1] = buffer_to_u32 (p + 4); } else if (!(sig->pubkey_algo >= 100 && sig->pubkey_algo <= 110) && opt.verbose) log_info ("signature packet without keyid\n"); p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_SIG_EXPIRE, NULL); if (p && buffer_to_u32 (p)) sig->expiredate = sig->timestamp + buffer_to_u32 (p); if (sig->expiredate && sig->expiredate <= make_timestamp ()) sig->flags.expired = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_POLICY, NULL); if (p) sig->flags.policy_url = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_PREF_KS, NULL); if (p) sig->flags.pref_ks = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_NOTATION, NULL); if (p) sig->flags.notation = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_REVOCABLE, NULL); if (p && *p == 0) sig->flags.revocable = 0; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_TRUST, &len); if (p && len == 2) { sig->trust_depth = p[0]; sig->trust_value = p[1]; /* Only look for a regexp if there is also a trust subpacket. */ sig->trust_regexp = parse_sig_subpkt (sig->hashed, SIGSUBPKT_REGEXP, &len); /* If the regular expression is of 0 length, there is no regular expression. */ if (len == 0) sig->trust_regexp = NULL; } /* We accept the exportable subpacket from either the hashed or unhashed areas as older versions of gpg put it in the unhashed area. In theory, anyway, we should never see this packet off of a local keyring. */ p = parse_sig_subpkt2 (sig, SIGSUBPKT_EXPORTABLE, NULL); if (p && *p == 0) sig->flags.exportable = 0; /* Find all revocation keys. */ if (sig->sig_class == 0x1F) parse_revkeys (sig); } if (list_mode) { es_fprintf (listfp, ":signature packet: algo %d, keyid %08lX%08lX\n" "\tversion %d, created %lu, md5len %d, sigclass 0x%02x\n" "\tdigest algo %d, begin of digest %02x %02x\n", sig->pubkey_algo, (ulong) sig->keyid[0], (ulong) sig->keyid[1], sig->version, (ulong) sig->timestamp, md5_len, sig->sig_class, sig->digest_algo, sig->digest_start[0], sig->digest_start[1]); if (is_v4) { parse_sig_subpkt (sig->hashed, SIGSUBPKT_LIST_HASHED, NULL); parse_sig_subpkt (sig->unhashed, SIGSUBPKT_LIST_UNHASHED, NULL); } } ndata = pubkey_get_nsig (sig->pubkey_algo); if (!ndata) { if (list_mode) es_fprintf (listfp, "\tunknown algorithm %d\n", sig->pubkey_algo); unknown_pubkey_warning (sig->pubkey_algo); /* We store the plain material in data[0], so that we are able * to write it back with build_packet(). */ if (pktlen > (5 * MAX_EXTERN_MPI_BITS / 8)) { /* We include a limit to avoid too trivial DoS attacks by having gpg allocate too much memory. 
*/ log_error ("signature packet: too much data\n"); rc = GPG_ERR_INV_PACKET; } else { sig->data[0] = gcry_mpi_set_opaque (NULL, read_rest (inp, pktlen), pktlen * 8); pktlen = 0; } } else { for (i = 0; i < ndata; i++) { n = pktlen; sig->data[i] = mpi_read (inp, &n, 0); pktlen -= n; if (list_mode) { es_fprintf (listfp, "\tdata: "); mpi_print (listfp, sig->data[i], mpi_print_mode); es_putc ('\n', listfp); } if (!sig->data[i]) rc = GPG_ERR_INV_PACKET; } } leave: iobuf_skip_rest (inp, pktlen, 0); return rc; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: parse_header (const char *filename, const unsigned char *packet, size_t packetlen, size_t *r_headerlen) { unsigned int len; if (packetlen != 32) return gpg_error (GPG_ERR_BUG); len = ((packet[2] << 24) | (packet[3] << 16) | (packet[4] << 8) | packet[5]); if (packet[0] != (0xc0|61) || len < 26 || memcmp (packet+6, "GnuPG/G13", 10)) { log_error ("file '%s' is not valid container\n", filename); return gpg_error (GPG_ERR_INV_OBJ); } if (packet[16] != 1) { log_error ("unknown version %u of container '%s'\n", (unsigned int)packet[16], filename); return gpg_error (GPG_ERR_INV_OBJ); } if (packet[17] || packet[18] || packet[26] || packet[27] || packet[28] || packet[29] || packet[30] || packet[31]) log_info ("WARNING: unknown meta information in '%s'\n", filename); if (packet[19]) log_info ("WARNING: OS flag is not supported in '%s'\n", filename); if (packet[24] != 1 || packet[25] != 0) { log_error ("meta data copies in '%s' are not supported\n", filename); return gpg_error (GPG_ERR_NOT_IMPLEMENTED); } len = ((packet[20] << 24) | (packet[21] << 16) | (packet[22] << 8) | packet[23]); /* Do a basic sanity check on the length. */ if (len < 32 || len > 1024*1024) { log_error ("bad length given in container '%s'\n", filename); return gpg_error (GPG_ERR_INV_OBJ); } *r_headerlen = len; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: get_dns_cert (const char *name, estream_t *r_key, unsigned char **r_fpr, size_t *r_fprlen, char **r_url) { #ifdef USE_DNS_CERT #ifdef USE_ADNS gpg_error_t err; adns_state state; adns_answer *answer = NULL; unsigned int ctype; int count; *r_key = NULL; *r_fpr = NULL; *r_fprlen = 0; *r_url = NULL; if (adns_init (&state, adns_if_noerrprint, NULL)) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); log_error ("error initializing adns: %s\n", strerror (errno)); return err; } if (adns_synchronous (state, name, (adns_r_unknown | my_adns_r_cert), adns_qf_quoteok_query, &answer)) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); /* log_error ("DNS query failed: %s\n", strerror (errno)); */ adns_finish (state); return err; } if (answer->status != adns_s_ok) { /* log_error ("DNS query returned an error: %s (%s)\n", */ /* adns_strerror (answer->status), */ /* adns_errabbrev (answer->status)); */ err = gpg_err_make (default_errsource, GPG_ERR_NOT_FOUND); goto leave; } err = gpg_err_make (default_errsource, GPG_ERR_NOT_FOUND); for (count = 0; count < answer->nrrs; count++) { int datalen = answer->rrs.byteblock[count].len; const unsigned char *data = answer->rrs.byteblock[count].data; if (datalen < 5) continue; /* Truncated CERT record - skip. */ ctype = ((data[0] << 8) | data[1]); /* (key tag and algorithm fields are not required.) */ data += 5; datalen -= 5; if (ctype == CERTTYPE_PGP && datalen >= 11) { /* CERT type is PGP. Gpg checks for a minimum length of 11, thus we do the same. */ *r_key = es_fopenmem_init (0, "rwb", data, datalen); if (!*r_key) err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); else err = 0; goto leave; } else if (ctype == CERTTYPE_IPGP && datalen && datalen < 1023 && datalen >= data[0] + 1 && r_fpr && r_fprlen && r_url) { /* CERT type is IPGP. We made sure that the data is plausible and that the caller requested this information. */ *r_fprlen = data[0]; if (*r_fprlen) { *r_fpr = xtrymalloc (*r_fprlen); if (!*r_fpr) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); goto leave; } memcpy (*r_fpr, data + 1, *r_fprlen); } else *r_fpr = NULL; if (datalen > *r_fprlen + 1) { *r_url = xtrymalloc (datalen - (*r_fprlen + 1) + 1); if (!*r_url) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); xfree (*r_fpr); *r_fpr = NULL; goto leave; } memcpy (*r_url, data + (*r_fprlen + 1), datalen - (*r_fprlen + 1)); (*r_url)[datalen - (*r_fprlen + 1)] = '\0'; } else *r_url = NULL; err = 0; goto leave; } } leave: adns_free (answer); adns_finish (state); return err; #else /*!USE_ADNS*/ gpg_error_t err; unsigned char *answer; int r; u16 count; *r_key = NULL; *r_fpr = NULL; *r_fprlen = 0; *r_url = NULL; /* Allocate a 64k buffer which is the limit for an DNS response. */ answer = xtrymalloc (65536); if (!answer) return gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); err = gpg_err_make (default_errsource, GPG_ERR_NOT_FOUND); r = res_query (name, C_IN, T_CERT, answer, 65536); /* Not too big, not too small, no errors and at least 1 answer. 
*/ if (r >= sizeof (HEADER) && r <= 65536 && (((HEADER *) answer)->rcode) == NOERROR && (count = ntohs (((HEADER *) answer)->ancount))) { int rc; unsigned char *pt, *emsg; emsg = &answer[r]; pt = &answer[sizeof (HEADER)]; /* Skip over the query */ rc = dn_skipname (pt, emsg); if (rc == -1) { err = gpg_err_make (default_errsource, GPG_ERR_INV_OBJ); goto leave; } pt += rc + QFIXEDSZ; /* There are several possible response types for a CERT request. We're interested in the PGP (a key) and IPGP (a URI) types. Skip all others. TODO: A key is better than a URI since we've gone through all this bother to fetch it, so favor that if we have both PGP and IPGP? */ while (count-- > 0 && pt < emsg) { u16 type, class, dlen, ctype; rc = dn_skipname (pt, emsg); /* the name we just queried for */ if (rc == -1) { err = gpg_err_make (default_errsource, GPG_ERR_INV_OBJ); goto leave; } pt += rc; /* Truncated message? 15 bytes takes us to the point where we start looking at the ctype. */ if ((emsg - pt) < 15) break; type = *pt++ << 8; type |= *pt++; class = *pt++ << 8; class |= *pt++; /* We asked for IN and got something else !? */ if (class != C_IN) break; /* ttl */ pt += 4; /* data length */ dlen = *pt++ << 8; dlen |= *pt++; /* We asked for CERT and got something else - might be a CNAME, so loop around again. */ if (type != T_CERT) { pt += dlen; continue; } /* The CERT type */ ctype = *pt++ << 8; ctype |= *pt++; /* Skip the CERT key tag and algo which we don't need. */ pt += 3; dlen -= 5; /* 15 bytes takes us to here */ if (ctype == CERTTYPE_PGP && dlen) { /* PGP type */ *r_key = es_fopenmem_init (0, "rwb", pt, dlen); if (!*r_key) err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); else err = 0; goto leave; } else if (ctype == CERTTYPE_IPGP && dlen && dlen < 1023 && dlen >= pt[0] + 1) { /* IPGP type */ *r_fprlen = pt[0]; if (*r_fprlen) { *r_fpr = xtrymalloc (*r_fprlen); if (!*r_fpr) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); goto leave; } memcpy (*r_fpr, &pt[1], *r_fprlen); } else *r_fpr = NULL; if (dlen > *r_fprlen + 1) { *r_url = xtrymalloc (dlen - (*r_fprlen + 1) + 1); if (!*r_fpr) { err = gpg_err_make (default_errsource, gpg_err_code_from_syserror ()); xfree (*r_fpr); *r_fpr = NULL; goto leave; } memcpy (*r_url, &pt[*r_fprlen + 1], dlen - (*r_fprlen + 1)); (*r_url)[dlen - (*r_fprlen + 1)] = '\0'; } else *r_url = NULL; err = 0; goto leave; } /* Neither type matches, so go around to the next answer. */ pt += dlen; } } leave: xfree (answer); return err; #endif /*!USE_ADNS */ #else /* !USE_DNS_CERT */ (void)name; *r_key = NULL; *r_fpr = NULL; *r_fprlen = 0; *r_url = NULL; return gpg_err_make (default_errsource, GPG_ERR_NOT_SUPPORTED); #endif } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: delete_sig_subpkt (subpktarea_t *area, sigsubpkttype_t reqtype ) { int buflen; sigsubpkttype_t type; byte *buffer, *bufstart; size_t n; size_t unused = 0; int okay = 0; if( !area ) return 0; buflen = area->len; buffer = area->data; for(;;) { if( !buflen ) { okay = 1; break; } bufstart = buffer; n = *buffer++; buflen--; if( n == 255 ) { if( buflen < 4 ) break; n = (((size_t)buffer[0] << 24) | (buffer[1] << 16) | (buffer[2] << 8) | buffer[3]); buffer += 4; buflen -= 4; } else if( n >= 192 ) { if( buflen < 2 ) break; n = (( n - 192 ) << 8) + *buffer + 192; buffer++; buflen--; } if( buflen < n ) break; type = *buffer & 0x7f; if( type == reqtype ) { buffer++; buflen--; n--; if( n > buflen ) break; buffer += n; /* point to next subpkt */ buflen -= n; memmove (bufstart, buffer, buflen); /* shift */ unused += buffer - bufstart; buffer = bufstart; } else { buffer += n; buflen -=n; } } if (!okay) log_error ("delete_subpkt: buffer shorter than subpacket\n"); assert (unused <= area->len); area->len -= unused; return !!unused; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: merge_selfsigs_subkey (KBNODE keyblock, KBNODE subnode) { PKT_public_key *mainpk = NULL, *subpk = NULL; PKT_signature *sig; KBNODE k; u32 mainkid[2]; u32 sigdate = 0; KBNODE signode; u32 curtime = make_timestamp (); unsigned int key_usage = 0; u32 keytimestamp = 0; u32 key_expire = 0; const byte *p; if (subnode->pkt->pkttype != PKT_PUBLIC_SUBKEY) BUG (); mainpk = keyblock->pkt->pkt.public_key; if (mainpk->version < 4) return;/* (actually this should never happen) */ keyid_from_pk (mainpk, mainkid); subpk = subnode->pkt->pkt.public_key; keytimestamp = subpk->timestamp; subpk->flags.valid = 0; subpk->main_keyid[0] = mainpk->main_keyid[0]; subpk->main_keyid[1] = mainpk->main_keyid[1]; /* Find the latest key binding self-signature. */ signode = NULL; sigdate = 0; /* Helper to find the latest signature. */ for (k = subnode->next; k && k->pkt->pkttype != PKT_PUBLIC_SUBKEY; k = k->next) { if (k->pkt->pkttype == PKT_SIGNATURE) { sig = k->pkt->pkt.signature; if (sig->keyid[0] == mainkid[0] && sig->keyid[1] == mainkid[1]) { if (check_key_signature (keyblock, k, NULL)) ; /* Signature did not verify. */ else if (IS_SUBKEY_REV (sig)) { /* Note that this means that the date on a revocation sig does not matter - even if the binding sig is dated after the revocation sig, the subkey is still marked as revoked. This seems ok, as it is just as easy to make new subkeys rather than re-sign old ones as the problem is in the distribution. Plus, PGP (7) does this the same way. */ subpk->flags.revoked = 1; sig_to_revoke_info (sig, &subpk->revoked); /* Although we could stop now, we continue to * figure out other information like the old expiration * time. */ } else if (IS_SUBKEY_SIG (sig) && sig->timestamp >= sigdate) { if (sig->flags.expired) ; /* Signature has expired - ignore it. */ else { sigdate = sig->timestamp; signode = k; signode->pkt->pkt.signature->flags.chosen_selfsig = 0; } } } } } /* No valid key binding. */ if (!signode) return; sig = signode->pkt->pkt.signature; sig->flags.chosen_selfsig = 1; /* So we know which selfsig we chose later. */ key_usage = parse_key_usage (sig); if (!key_usage) { /* No key flags at all: get it from the algo. */ key_usage = openpgp_pk_algo_usage (subpk->pubkey_algo); } else { /* Check that the usage matches the usage as given by the algo. */ int x = openpgp_pk_algo_usage (subpk->pubkey_algo); if (x) /* Mask it down to the actual allowed usage. */ key_usage &= x; } subpk->pubkey_usage = key_usage; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_KEY_EXPIRE, NULL); if (p && buffer_to_u32 (p)) key_expire = keytimestamp + buffer_to_u32 (p); else key_expire = 0; subpk->has_expired = key_expire >= curtime ? 0 : key_expire; subpk->expiredate = key_expire; /* Algo doesn't exist. */ if (openpgp_pk_test_algo (subpk->pubkey_algo)) return; subpk->flags.valid = 1; /* Find the most recent 0x19 embedded signature on our self-sig. */ if (!subpk->flags.backsig) { int seq = 0; size_t n; PKT_signature *backsig = NULL; sigdate = 0; /* We do this while() since there may be other embedded signatures in the future. We only want 0x19 here. 
*/ while ((p = enum_sig_subpkt (sig->hashed, SIGSUBPKT_SIGNATURE, &n, &seq, NULL))) if (n > 3 && ((p[0] == 3 && p[2] == 0x19) || (p[0] == 4 && p[1] == 0x19))) { PKT_signature *tempsig = buf_to_sig (p, n); if (tempsig) { if (tempsig->timestamp > sigdate) { if (backsig) free_seckey_enc (backsig); backsig = tempsig; sigdate = backsig->timestamp; } else free_seckey_enc (tempsig); } } seq = 0; /* It is safe to have this in the unhashed area since the 0x19 is located on the selfsig for convenience, not security. */ while ((p = enum_sig_subpkt (sig->unhashed, SIGSUBPKT_SIGNATURE, &n, &seq, NULL))) if (n > 3 && ((p[0] == 3 && p[2] == 0x19) || (p[0] == 4 && p[1] == 0x19))) { PKT_signature *tempsig = buf_to_sig (p, n); if (tempsig) { if (tempsig->timestamp > sigdate) { if (backsig) free_seckey_enc (backsig); backsig = tempsig; sigdate = backsig->timestamp; } else free_seckey_enc (tempsig); } } if (backsig) { /* At ths point, backsig contains the most recent 0x19 sig. Let's see if it is good. */ /* 2==valid, 1==invalid, 0==didn't check */ if (check_backsig (mainpk, subpk, backsig) == 0) subpk->flags.backsig = 2; else subpk->flags.backsig = 1; free_seckey_enc (backsig); } } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: getsrv (const char *name,struct srventry **list) { int srvcount=0; u16 count; int i, rc; *list = NULL; #ifdef USE_ADNS { adns_state state; adns_answer *answer = NULL; rc = adns_init (&state, adns_if_noerrprint, NULL); if (rc) { log_error ("error initializing adns: %s\n", strerror (errno)); return -1; } rc = adns_synchronous (state, name, adns_r_srv, adns_qf_quoteok_query, &answer); if (rc) { log_error ("DNS query failed: %s\n", strerror (errno)); adns_finish (state); return -1; } if (answer->status != adns_s_ok || answer->type != adns_r_srv || !answer->nrrs) { log_error ("DNS query returned an error or no records: %s (%s)\n", adns_strerror (answer->status), adns_errabbrev (answer->status)); adns_free (answer); adns_finish (state); return 0; } for (count = 0; count < answer->nrrs; count++) { struct srventry *srv = NULL; struct srventry *newlist; if (strlen (answer->rrs.srvha[count].ha.host) >= MAXDNAME) { log_info ("hostname in SRV record too long - skipped\n"); continue; } newlist = xtryrealloc (*list, (srvcount+1)*sizeof(struct srventry)); if (!newlist) goto fail; *list = newlist; memset (&(*list)[srvcount], 0, sizeof(struct srventry)); srv = &(*list)[srvcount]; srvcount++; srv->priority = answer->rrs.srvha[count].priority; srv->weight = answer->rrs.srvha[count].weight; srv->port = answer->rrs.srvha[count].port; strcpy (srv->target, answer->rrs.srvha[count].ha.host); } adns_free (answer); adns_finish (state); } #else /*!USE_ADNS*/ { unsigned char answer[2048]; HEADER *header = (HEADER *)answer; unsigned char *pt, *emsg; int r; u16 dlen; r = res_query (name, C_IN, T_SRV, answer, sizeof answer); if (r < sizeof (HEADER) || r > sizeof answer) return -1; if (header->rcode != NOERROR || !(count=ntohs (header->ancount))) return 0; /* Error or no record found. */ emsg = &answer[r]; pt = &answer[sizeof(HEADER)]; /* Skip over the query */ rc = dn_skipname (pt, emsg); if (rc == -1) goto fail; pt += rc + QFIXEDSZ; while (count-- > 0 && pt < emsg) { struct srventry *srv=NULL; u16 type,class; struct srventry *newlist; newlist = xtryrealloc (*list, (srvcount+1)*sizeof(struct srventry)); if (!newlist) goto fail; *list = newlist; memset(&(*list)[srvcount],0,sizeof(struct srventry)); srv=&(*list)[srvcount]; srvcount++; rc = dn_skipname(pt,emsg); /* the name we just queried for */ if (rc == -1) goto fail; pt+=rc; /* Truncated message? */ if((emsg-pt)<16) goto fail; type=*pt++ << 8; type|=*pt++; /* We asked for SRV and got something else !? */ if(type!=T_SRV) goto fail; class=*pt++ << 8; class|=*pt++; /* We asked for IN and got something else !? */ if(class!=C_IN) goto fail; pt+=4; /* ttl */ dlen=*pt++ << 8; dlen|=*pt++; srv->priority=*pt++ << 8; srv->priority|=*pt++; srv->weight=*pt++ << 8; srv->weight|=*pt++; srv->port=*pt++ << 8; srv->port|=*pt++; /* Get the name. 2782 doesn't allow name compression, but dn_expand still works to pull the name out of the packet. */ rc = dn_expand(answer,emsg,pt,srv->target,MAXDNAME); if (rc == 1 && srv->target[0] == 0) /* "." */ { xfree(*list); *list = NULL; return 0; } if (rc == -1) goto fail; pt += rc; /* Corrupt packet? */ if (dlen != rc+6) goto fail; } } #endif /*!USE_ADNS*/ /* Now we have an array of all the srv records. 
*/ /* Order by priority */ qsort(*list,srvcount,sizeof(struct srventry),priosort); /* For each priority, move the zero-weighted items first. */ for (i=0; i < srvcount; i++) { int j; for (j=i;j < srvcount && (*list)[i].priority == (*list)[j].priority; j++) { if((*list)[j].weight==0) { /* Swap j with i */ if(j!=i) { struct srventry temp; memcpy (&temp,&(*list)[j],sizeof(struct srventry)); memcpy (&(*list)[j],&(*list)[i],sizeof(struct srventry)); memcpy (&(*list)[i],&temp,sizeof(struct srventry)); } break; } } } /* Run the RFC-2782 weighting algorithm. We don't need very high quality randomness for this, so regular libc srand/rand is sufficient. Fixme: It is a bit questionaly to reinitalize srand - better use a gnupg fucntion for this. */ srand(time(NULL)*getpid()); for (i=0; i < srvcount; i++) { int j; float prio_count=0,chose; for (j=i; j < srvcount && (*list)[i].priority == (*list)[j].priority; j++) { prio_count+=(*list)[j].weight; (*list)[j].run_count=prio_count; } chose=prio_count*rand()/RAND_MAX; for (j=i;j<srvcount && (*list)[i].priority==(*list)[j].priority;j++) { if (chose<=(*list)[j].run_count) { /* Swap j with i */ if(j!=i) { struct srventry temp; memcpy(&temp,&(*list)[j],sizeof(struct srventry)); memcpy(&(*list)[j],&(*list)[i],sizeof(struct srventry)); memcpy(&(*list)[i],&temp,sizeof(struct srventry)); } break; } } } return srvcount; fail: xfree(*list); *list=NULL; return -1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: fixup_uidnode (KBNODE uidnode, KBNODE signode, u32 keycreated) { PKT_user_id *uid = uidnode->pkt->pkt.user_id; PKT_signature *sig = signode->pkt->pkt.signature; const byte *p, *sym, *hash, *zip; size_t n, nsym, nhash, nzip; sig->flags.chosen_selfsig = 1;/* We chose this one. */ uid->created = 0; /* Not created == invalid. */ if (IS_UID_REV (sig)) { uid->is_revoked = 1; return; /* Has been revoked. */ } else uid->is_revoked = 0; uid->expiredate = sig->expiredate; if (sig->flags.expired) { uid->is_expired = 1; return; /* Has expired. */ } else uid->is_expired = 0; uid->created = sig->timestamp; /* This one is okay. */ uid->selfsigversion = sig->version; /* If we got this far, it's not expired :) */ uid->is_expired = 0; /* Store the key flags in the helper variable for later processing. */ uid->help_key_usage = parse_key_usage (sig); /* Ditto for the key expiration. */ p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_KEY_EXPIRE, NULL); if (p && buffer_to_u32 (p)) uid->help_key_expire = keycreated + buffer_to_u32 (p); else uid->help_key_expire = 0; /* Set the primary user ID flag - we will later wipe out some * of them to only have one in our keyblock. */ uid->is_primary = 0; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_PRIMARY_UID, NULL); if (p && *p) uid->is_primary = 2; /* We could also query this from the unhashed area if it is not in * the hased area and then later try to decide which is the better * there should be no security problem with this. * For now we only look at the hashed one. */ /* Now build the preferences list. These must come from the hashed section so nobody can modify the ciphers a key is willing to accept. */ p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_PREF_SYM, &n); sym = p; nsym = p ? n : 0; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_PREF_HASH, &n); hash = p; nhash = p ? n : 0; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_PREF_COMPR, &n); zip = p; nzip = p ? n : 0; if (uid->prefs) xfree (uid->prefs); n = nsym + nhash + nzip; if (!n) uid->prefs = NULL; else { uid->prefs = xmalloc (sizeof (*uid->prefs) * (n + 1)); n = 0; for (; nsym; nsym--, n++) { uid->prefs[n].type = PREFTYPE_SYM; uid->prefs[n].value = *sym++; } for (; nhash; nhash--, n++) { uid->prefs[n].type = PREFTYPE_HASH; uid->prefs[n].value = *hash++; } for (; nzip; nzip--, n++) { uid->prefs[n].type = PREFTYPE_ZIP; uid->prefs[n].value = *zip++; } uid->prefs[n].type = PREFTYPE_NONE; /* End of list marker */ uid->prefs[n].value = 0; } /* See whether we have the MDC feature. */ uid->flags.mdc = 0; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_FEATURES, &n); if (p && n && (p[0] & 0x01)) uid->flags.mdc = 1; /* And the keyserver modify flag. */ uid->flags.ks_modify = 1; p = parse_sig_subpkt (sig->hashed, SIGSUBPKT_KS_FLAGS, &n); if (p && n && (p[0] & 0x80)) uid->flags.ks_modify = 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. 
Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: iobuf_temp_with_content (const char *buffer, size_t length) { iobuf_t a; a = iobuf_alloc (3, length); memcpy (a->d.buf, buffer, length); a->d.len = length; return a; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: read_32 (IOBUF inp) { unsigned long a; a = iobuf_get_noeof (inp) << 24; a |= iobuf_get_noeof (inp) << 16; a |= iobuf_get_noeof (inp) << 8; a |= iobuf_get_noeof (inp); return a; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
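The read_32 entry above is the most compact illustration of the defect class. Assuming iobuf_get_noeof returns a byte value in 0..255 as an int, the first statement a = iobuf_get_noeof (inp) << 24; shifts into the sign bit of int whenever the byte is 0x80 or larger, which is undefined behavior in C; on common LP64 compilers the resulting negative int then sign-extends when converted to unsigned long. A tiny standalone demonstration (not GnuPG code, assuming a typical two's-complement LP64 platform):

#include <stdio.h>

int main (void)
{
  unsigned char b = 0xAB;          /* any byte >= 0x80 triggers the issue */
  int promoted = b;                /* usual arithmetic promotion to int   */
  unsigned long bad  = promoted << 24;          /* signed overflow: UB; often
                                                   0xffffffffab000000 on LP64 */
  unsigned long good = (unsigned long)b << 24;  /* 0x00000000ab000000         */
  printf ("bad=%#lx good=%#lx\n", bad, good);
  return 0;
}

This is why the commit replaces the "<< 24" / "<< 8" conversions wholesale with the unsigned host2net.h helpers sketched earlier instead of patching individual call sites.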
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: do_unprotect (const char *passphrase, int pkt_version, int pubkey_algo, int is_protected, const char *curve, gcry_mpi_t *skey, size_t skeysize, int protect_algo, void *protect_iv, size_t protect_ivlen, int s2k_mode, int s2k_algo, byte *s2k_salt, u32 s2k_count, u16 desired_csum, gcry_sexp_t *r_key) { gpg_error_t err; unsigned int npkey, nskey, skeylen; gcry_cipher_hd_t cipher_hd = NULL; u16 actual_csum; size_t nbytes; int i; gcry_mpi_t tmpmpi; *r_key = NULL; err = prepare_unprotect (pubkey_algo, skey, skeysize, s2k_mode, &npkey, &nskey, &skeylen); if (err) return err; /* Check whether SKEY is at all protected. If it is not protected merely verify the checksum. */ if (!is_protected) { actual_csum = 0; for (i=npkey; i < nskey; i++) { if (!skey[i] || gcry_mpi_get_flag (skey[i], GCRYMPI_FLAG_USER1)) return gpg_error (GPG_ERR_BAD_SECKEY); if (gcry_mpi_get_flag (skey[i], GCRYMPI_FLAG_OPAQUE)) { unsigned int nbits; const unsigned char *buffer; buffer = gcry_mpi_get_opaque (skey[i], &nbits); nbytes = (nbits+7)/8; actual_csum += checksum (buffer, nbytes); } else { unsigned char *buffer; err = gcry_mpi_aprint (GCRYMPI_FMT_PGP, &buffer, &nbytes, skey[i]); if (!err) actual_csum += checksum (buffer, nbytes); xfree (buffer); } if (err) return err; } if (actual_csum != desired_csum) return gpg_error (GPG_ERR_CHECKSUM); goto do_convert; } if (gcry_cipher_test_algo (protect_algo)) { /* The algorithm numbers are Libgcrypt numbers but fortunately the OpenPGP algorithm numbers map one-to-one to the Libgcrypt numbers. */ log_info (_("protection algorithm %d (%s) is not supported\n"), protect_algo, gnupg_cipher_algo_name (protect_algo)); return gpg_error (GPG_ERR_CIPHER_ALGO); } if (gcry_md_test_algo (s2k_algo)) { log_info (_("protection hash algorithm %d (%s) is not supported\n"), s2k_algo, gcry_md_algo_name (s2k_algo)); return gpg_error (GPG_ERR_DIGEST_ALGO); } err = gcry_cipher_open (&cipher_hd, protect_algo, GCRY_CIPHER_MODE_CFB, (GCRY_CIPHER_SECURE | (protect_algo >= 100 ? 0 : GCRY_CIPHER_ENABLE_SYNC))); if (err) { log_error ("failed to open cipher_algo %d: %s\n", protect_algo, gpg_strerror (err)); return err; } err = hash_passphrase_and_set_key (passphrase, cipher_hd, protect_algo, s2k_mode, s2k_algo, s2k_salt, s2k_count); if (err) { gcry_cipher_close (cipher_hd); return err; } gcry_cipher_setiv (cipher_hd, protect_iv, protect_ivlen); actual_csum = 0; if (pkt_version >= 4) { int ndata; unsigned int ndatabits; const unsigned char *p; unsigned char *data; u16 csum_pgp7 = 0; if (!gcry_mpi_get_flag (skey[npkey], GCRYMPI_FLAG_OPAQUE )) { gcry_cipher_close (cipher_hd); return gpg_error (GPG_ERR_BAD_SECKEY); } p = gcry_mpi_get_opaque (skey[npkey], &ndatabits); ndata = (ndatabits+7)/8; if (ndata > 1) csum_pgp7 = p[ndata-2] << 8 | p[ndata-1]; data = xtrymalloc_secure (ndata); if (!data) { err = gpg_error_from_syserror (); gcry_cipher_close (cipher_hd); return err; } gcry_cipher_decrypt (cipher_hd, data, ndata, p, ndata); p = data; if (is_protected == 2) { /* This is the new SHA1 checksum method to detect tampering with the key as used by the Klima/Rosa attack. */ desired_csum = 0; actual_csum = 1; /* Default to bad checksum. */ if (ndata < 20) log_error ("not enough bytes for SHA-1 checksum\n"); else { gcry_md_hd_t h; if (gcry_md_open (&h, GCRY_MD_SHA1, 1)) BUG(); /* Algo not available. 
*/ gcry_md_write (h, data, ndata - 20); gcry_md_final (h); if (!memcmp (gcry_md_read (h, GCRY_MD_SHA1), data+ndata-20, 20)) actual_csum = 0; /* Digest does match. */ gcry_md_close (h); } } else { /* Old 16 bit checksum method. */ if (ndata < 2) { log_error ("not enough bytes for checksum\n"); desired_csum = 0; actual_csum = 1; /* Mark checksum bad. */ } else { desired_csum = (data[ndata-2] << 8 | data[ndata-1]); actual_csum = checksum (data, ndata-2); if (desired_csum != actual_csum) { /* This is a PGP 7.0.0 workaround */ desired_csum = csum_pgp7; /* Take the encrypted one. */ } } } /* Better check it here. Otherwise the gcry_mpi_scan would fail because the length may have an arbitrary value. */ if (desired_csum == actual_csum) { for (i=npkey; i < nskey; i++ ) { if (gcry_mpi_scan (&tmpmpi, GCRYMPI_FMT_PGP, p, ndata, &nbytes)) { /* Checksum was okay, but not correctly decrypted. */ desired_csum = 0; actual_csum = 1; /* Mark checksum bad. */ break; } gcry_mpi_release (skey[i]); skey[i] = tmpmpi; ndata -= nbytes; p += nbytes; } skey[i] = NULL; skeylen = i; assert (skeylen <= skeysize); /* Note: at this point NDATA should be 2 for a simple checksum or 20 for the sha1 digest. */ } xfree(data); } else /* Packet version <= 3. */ { unsigned char *buffer; for (i = npkey; i < nskey; i++) { const unsigned char *p; size_t ndata; unsigned int ndatabits; if (!skey[i] || !gcry_mpi_get_flag (skey[i], GCRYMPI_FLAG_OPAQUE)) { gcry_cipher_close (cipher_hd); return gpg_error (GPG_ERR_BAD_SECKEY); } p = gcry_mpi_get_opaque (skey[i], &ndatabits); ndata = (ndatabits+7)/8; if (!(ndata >= 2) || !(ndata == ((p[0] << 8 | p[1]) + 7)/8 + 2)) { gcry_cipher_close (cipher_hd); return gpg_error (GPG_ERR_BAD_SECKEY); } buffer = xtrymalloc_secure (ndata); if (!buffer) { err = gpg_error_from_syserror (); gcry_cipher_close (cipher_hd); return err; } gcry_cipher_sync (cipher_hd); buffer[0] = p[0]; buffer[1] = p[1]; gcry_cipher_decrypt (cipher_hd, buffer+2, ndata-2, p+2, ndata-2); actual_csum += checksum (buffer, ndata); err = gcry_mpi_scan (&tmpmpi, GCRYMPI_FMT_PGP, buffer, ndata, &ndata); xfree (buffer); if (err) { /* Checksum was okay, but not correctly decrypted. */ desired_csum = 0; actual_csum = 1; /* Mark checksum bad. */ break; } gcry_mpi_release (skey[i]); skey[i] = tmpmpi; } } gcry_cipher_close (cipher_hd); /* Now let's see whether we have used the correct passphrase. */ if (actual_csum != desired_csum) return gpg_error (GPG_ERR_BAD_PASSPHRASE); do_convert: if (nskey != skeylen) err = gpg_error (GPG_ERR_BAD_SECKEY); else err = convert_secret_key (r_key, pubkey_algo, skey, curve); if (err) return err; /* The checksum may fail, thus we also check the key itself. */ err = gcry_pk_testkey (*r_key); if (err) { gcry_sexp_release (*r_key); *r_key = NULL; return gpg_error (GPG_ERR_BAD_PASSPHRASE); } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: next_packet (unsigned char const **bufptr, size_t *buflen, unsigned char const **r_data, size_t *r_datalen, int *r_pkttype, size_t *r_ntotal) { const unsigned char *buf = *bufptr; size_t len = *buflen; int c, ctb, pkttype; unsigned long pktlen; if (!len) return gpg_error (GPG_ERR_NO_DATA); ctb = *buf++; len--; if ( !(ctb & 0x80) ) return gpg_error (GPG_ERR_INV_PACKET); /* Invalid CTB. */ pktlen = 0; if ((ctb & 0x40)) /* New style (OpenPGP) CTB. */ { pkttype = (ctb & 0x3f); if (!len) return gpg_error (GPG_ERR_INV_PACKET); /* No 1st length byte. */ c = *buf++; len--; if (pkttype == PKT_COMPRESSED) return gpg_error (GPG_ERR_UNEXPECTED); /* ... packet in a keyblock. */ if ( c < 192 ) pktlen = c; else if ( c < 224 ) { pktlen = (c - 192) * 256; if (!len) return gpg_error (GPG_ERR_INV_PACKET); /* No 2nd length byte. */ c = *buf++; len--; pktlen += c + 192; } else if (c == 255) { if (len <4 ) return gpg_error (GPG_ERR_INV_PACKET); /* No length bytes. */ pktlen = (*buf++) << 24; pktlen |= (*buf++) << 16; pktlen |= (*buf++) << 8; pktlen |= (*buf++); len -= 4; } else /* Partial length encoding is not allowed for key packets. */ return gpg_error (GPG_ERR_UNEXPECTED); } else /* Old style CTB. */ { int lenbytes; pktlen = 0; pkttype = (ctb>>2)&0xf; lenbytes = ((ctb&3)==3)? 0 : (1<<(ctb & 3)); if (!lenbytes) /* Not allowed in key packets. */ return gpg_error (GPG_ERR_UNEXPECTED); if (len < lenbytes) return gpg_error (GPG_ERR_INV_PACKET); /* Not enough length bytes. */ for (; lenbytes; lenbytes--) { pktlen <<= 8; pktlen |= *buf++; len--; } } /* Do some basic sanity check. */ switch (pkttype) { case PKT_SIGNATURE: case PKT_SECRET_KEY: case PKT_PUBLIC_KEY: case PKT_SECRET_SUBKEY: case PKT_MARKER: case PKT_RING_TRUST: case PKT_USER_ID: case PKT_PUBLIC_SUBKEY: case PKT_OLD_COMMENT: case PKT_ATTRIBUTE: case PKT_COMMENT: case PKT_GPG_CONTROL: break; /* Okay these are allowed packets. */ default: return gpg_error (GPG_ERR_UNEXPECTED); } if (pktlen == (unsigned long)(-1)) return gpg_error (GPG_ERR_INV_PACKET); if (pktlen > len) return gpg_error (GPG_ERR_INV_PACKET); /* Packet length header too long. */ *r_data = buf; *r_datalen = pktlen; *r_pkttype = pkttype; *r_ntotal = (buf - *bufptr) + pktlen; *bufptr = buf + pktlen; *buflen = len - pktlen; if (!*buflen) *bufptr = NULL; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: fetch_next_cert_ldap (cert_fetch_context_t context, unsigned char **value, size_t *valuelen) { gpg_error_t err; unsigned char hdr[5]; char *p, *pend; int n; int okay = 0; /* int is_cms = 0; */ *value = NULL; *valuelen = 0; err = 0; while (!err) { err = read_buffer (context->reader, hdr, 5); if (err) break; n = (hdr[1] << 24)|(hdr[2]<<16)|(hdr[3]<<8)|hdr[4]; if (*hdr == 'V' && okay) { #if 0 /* That code is not yet ready. */ if (is_cms) { /* The certificate needs to be parsed from CMS data. */ ksba_cms_t cms; ksba_stop_reason_t stopreason; int i; err = ksba_cms_new (&cms); if (err) goto leave; err = ksba_cms_set_reader_writer (cms, context->reader, NULL); if (err) { log_error ("ksba_cms_set_reader_writer failed: %s\n", gpg_strerror (err)); goto leave; } do { err = ksba_cms_parse (cms, &stopreason); if (err) { log_error ("ksba_cms_parse failed: %s\n", gpg_strerror (err)); goto leave; } if (stopreason == KSBA_SR_BEGIN_DATA) log_error ("userSMIMECertificate is not " "a certs-only message\n"); } while (stopreason != KSBA_SR_READY); for (i=0; (cert=ksba_cms_get_cert (cms, i)); i++) { check_and_store (ctrl, stats, cert, 0); ksba_cert_release (cert); cert = NULL; } if (!i) log_error ("no certificate found\n"); else any = 1; } else #endif { *value = xtrymalloc (n); if (!*value) return gpg_error_from_errno (errno); *valuelen = n; err = read_buffer (context->reader, *value, n); break; /* Ready or error. */ } } else if (!n && *hdr == 'A') okay = 0; else if (n) { if (n > context->tmpbufsize) { xfree (context->tmpbuf); context->tmpbufsize = 0; context->tmpbuf = xtrymalloc (n+1); if (!context->tmpbuf) return gpg_error_from_errno (errno); context->tmpbufsize = n; } err = read_buffer (context->reader, context->tmpbuf, n); if (err) break; if (*hdr == 'A') { p = context->tmpbuf; p[n] = 0; /*(we allocated one extra byte for this.)*/ /* fixme: is_cms = 0; */ if ( (pend = strchr (p, ';')) ) *pend = 0; /* Strip off the extension. */ if (!ascii_strcasecmp (p, USERCERTIFICATE)) { if (DBG_LOOKUP) log_debug ("fetch_next_cert_ldap: got attribute '%s'\n", USERCERTIFICATE); okay = 1; } else if (!ascii_strcasecmp (p, CACERTIFICATE)) { if (DBG_LOOKUP) log_debug ("fetch_next_cert_ldap: got attribute '%s'\n", CACERTIFICATE); okay = 1; } else if (!ascii_strcasecmp (p, X509CACERT)) { if (DBG_LOOKUP) log_debug ("fetch_next_cert_ldap: got attribute '%s'\n", CACERTIFICATE); okay = 1; } /* else if (!ascii_strcasecmp (p, USERSMIMECERTIFICATE)) */ /* { */ /* if (DBG_LOOKUP) */ /* log_debug ("fetch_next_cert_ldap: got attribute '%s'\n", */ /* USERSMIMECERTIFICATE); */ /* okay = 1; */ /* is_cms = 1; */ /* } */ else { if (DBG_LOOKUP) log_debug ("fetch_next_cert_ldap: got attribute '%s'" " - ignored\n", p); okay = 0; } } else if (*hdr == 'E') { p = context->tmpbuf; p[n] = 0; /*(we allocated one extra byte for this.)*/ if (!strcmp (p, "truncated")) { context->truncated = 1; log_info (_("ldap_search hit the size limit of" " the server\n")); } } } } if (err) { xfree (*value); *value = NULL; *valuelen = 0; if (gpg_err_code (err) == GPG_ERR_EOF && context->truncated) { context->truncated = 0; /* So that the next call would return EOF. 
*/ err = gpg_error (GPG_ERR_TRUNCATED); } } return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: keystr_from_desc(KEYDB_SEARCH_DESC *desc) { switch(desc->mode) { case KEYDB_SEARCH_MODE_LONG_KID: case KEYDB_SEARCH_MODE_SHORT_KID: return keystr(desc->u.kid); case KEYDB_SEARCH_MODE_FPR20: { u32 keyid[2]; keyid[0] = ((unsigned char)desc->u.fpr[12] << 24 | (unsigned char)desc->u.fpr[13] << 16 | (unsigned char)desc->u.fpr[14] << 8 | (unsigned char)desc->u.fpr[15]); keyid[1] = ((unsigned char)desc->u.fpr[16] << 24 | (unsigned char)desc->u.fpr[17] << 16 | (unsigned char)desc->u.fpr[18] << 8 | (unsigned char)desc->u.fpr[19]); return keystr(keyid); } case KEYDB_SEARCH_MODE_FPR16: return "?v3 fpr?"; default: BUG(); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: fingerprint_from_pk (PKT_public_key *pk, byte *array, size_t *ret_len) { const byte *dp; size_t len; gcry_md_hd_t md; md = do_fingerprint_md(pk); dp = gcry_md_read( md, 0 ); len = gcry_md_get_algo_dlen (gcry_md_get_algo (md)); assert( len <= MAX_FINGERPRINT_LEN ); if (!array) array = xmalloc ( len ); memcpy (array, dp, len ); pk->keyid[0] = dp[12] << 24 | dp[13] << 16 | dp[14] << 8 | dp[15] ; pk->keyid[1] = dp[16] << 24 | dp[17] << 16 | dp[18] << 8 | dp[19] ; gcry_md_close( md); if (ret_len) *ret_len = len; return array; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: get_pka_info (const char *address, unsigned char *fpr) { #ifdef USE_ADNS int rc; adns_state state; const char *domain; char *name; adns_answer *answer = NULL; char *buffer = NULL; domain = strrchr (address, '@'); if (!domain || domain == address || !domain[1]) return NULL; /* Invalid mail address given. */ name = xtrymalloc (strlen (address) + 5 + 1); if (!name) return NULL; memcpy (name, address, domain - address); strcpy (stpcpy (name + (domain-address), "._pka."), domain+1); rc = adns_init (&state, adns_if_noerrprint, NULL); if (rc) { log_error ("error initializing adns: %s\n", strerror (errno)); xfree (name); return NULL; } rc = adns_synchronous (state, name, adns_r_txt, adns_qf_quoteok_query, &answer); xfree (name); if (rc) { log_error ("DNS query failed: %s\n", strerror (errno)); adns_finish (state); return NULL; } if (answer->status != adns_s_ok || answer->type != adns_r_txt || !answer->nrrs) { log_error ("DNS query returned an error: %s (%s)\n", adns_strerror (answer->status), adns_errabbrev (answer->status)); adns_free (answer); adns_finish (state); return NULL; } /* We use a PKA records iff there is exactly one record. */ if (answer->nrrs == 1 && answer->rrs.manyistr[0]->i != -1) { buffer = xtrystrdup (answer->rrs.manyistr[0]->str); if (parse_txt_record (buffer, fpr)) { xfree (buffer); buffer = NULL; /* Not a valid gpg trustdns RR. */ } } adns_free (answer); adns_finish (state); return buffer; #else /*!USE_ADNS*/ unsigned char answer[PACKETSZ]; int anslen; int qdcount, ancount; int rc; unsigned char *p, *pend; const char *domain; char *name; HEADER header; domain = strrchr (address, '@'); if (!domain || domain == address || !domain[1]) return NULL; /* invalid mail address given. */ name = xtrymalloc (strlen (address) + 5 + 1); if (!name) return NULL; memcpy (name, address, domain - address); strcpy (stpcpy (name + (domain-address), "._pka."), domain+1); anslen = res_query (name, C_IN, T_TXT, answer, PACKETSZ); xfree (name); if (anslen < sizeof(HEADER)) return NULL; /* DNS resolver returned a too short answer. */ /* Don't despair: A good compiler should optimize this away, as header is just 32 byte and constant at compile time. It's one way to comply with strict aliasing rules. */ memcpy (&header, answer, sizeof (header)); if ( (rc=header.rcode) != NOERROR ) return NULL; /* DNS resolver returned an error. */ /* We assume that PACKETSZ is large enough and don't do dynmically expansion of the buffer. */ if (anslen > PACKETSZ) return NULL; /* DNS resolver returned a too long answer */ qdcount = ntohs (header.qdcount); ancount = ntohs (header.ancount); if (!ancount) return NULL; /* Got no answer. */ p = answer + sizeof (HEADER); pend = answer + anslen; /* Actually points directly behind the buffer. */ while (qdcount-- && p < pend) { rc = dn_skipname (p, pend); if (rc == -1) return NULL; p += rc + QFIXEDSZ; } if (ancount > 1) return NULL; /* more than one possible gpg trustdns record - none used. */ while (ancount-- && p <= pend) { unsigned int type, class, txtlen, n; char *buffer, *bufp; rc = dn_skipname (p, pend); if (rc == -1) return NULL; p += rc; if (p >= pend - 10) return NULL; /* RR too short. 
*/ type = *p++ << 8; type |= *p++; class = *p++ << 8; class |= *p++; p += 4; txtlen = *p++ << 8; txtlen |= *p++; if (type != T_TXT || class != C_IN) return NULL; /* Answer does not match the query. */ buffer = bufp = xmalloc (txtlen + 1); while (txtlen && p < pend) { for (n = *p++, txtlen--; txtlen && n && p < pend; txtlen--, n--) *bufp++ = *p++; } *bufp = 0; if (parse_txt_record (buffer, fpr)) { xfree (buffer); return NULL; /* Not a valid gpg trustdns RR. */ } return buffer; } return NULL; #endif /*!USE_ADNS*/ } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Use inline functions to convert buffer data to scalars. * common/host2net.h (buf16_to_ulong, buf16_to_uint): New. (buf16_to_ushort, buf16_to_u16): New. (buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New. -- Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to avoid all sign extension on shift problems. Hanno Böck found a case with an invalid read due to this problem. To fix that once and for all almost all uses of "<< 24" and "<< 8" are changed by this patch to use an inline function from host2net.h. Signed-off-by: Werner Koch <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int main(argc, argv) int argc; char *argv[]; { krb5_data pname_data, tkt_data; int sock = 0; socklen_t l; int retval; struct sockaddr_in l_inaddr, f_inaddr; /* local, foreign address */ krb5_creds creds, *new_creds; krb5_ccache cc; krb5_data msgtext, msg; krb5_context context; krb5_auth_context auth_context = NULL; #ifndef DEBUG freopen("/tmp/uu-server.log", "w", stderr); #endif retval = krb5_init_context(&context); if (retval) { com_err(argv[0], retval, "while initializing krb5"); exit(1); } #ifdef DEBUG { int one = 1; int acc; struct servent *sp; socklen_t namelen = sizeof(f_inaddr); if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { com_err("uu-server", errno, "creating socket"); exit(3); } l_inaddr.sin_family = AF_INET; l_inaddr.sin_addr.s_addr = 0; if (argc == 2) { l_inaddr.sin_port = htons(atoi(argv[1])); } else { if (!(sp = getservbyname("uu-sample", "tcp"))) { com_err("uu-server", 0, "can't find uu-sample/tcp service"); exit(3); } l_inaddr.sin_port = sp->s_port; } (void) setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof (one)); if (bind(sock, (struct sockaddr *)&l_inaddr, sizeof(l_inaddr))) { com_err("uu-server", errno, "binding socket"); exit(3); } if (listen(sock, 1) == -1) { com_err("uu-server", errno, "listening"); exit(3); } printf("Server started\n"); fflush(stdout); if ((acc = accept(sock, (struct sockaddr *)&f_inaddr, &namelen)) == -1) { com_err("uu-server", errno, "accepting"); exit(3); } dup2(acc, 0); close(sock); sock = 0; } #endif retval = krb5_read_message(context, (krb5_pointer) &sock, &pname_data); if (retval) { com_err ("uu-server", retval, "reading pname"); return 2; } retval = krb5_read_message(context, (krb5_pointer) &sock, &tkt_data); if (retval) { com_err ("uu-server", retval, "reading ticket data"); return 2; } retval = krb5_cc_default(context, &cc); if (retval) { com_err("uu-server", retval, "getting credentials cache"); return 4; } memset (&creds, 0, sizeof(creds)); retval = krb5_cc_get_principal(context, cc, &creds.client); if (retval) { com_err("uu-client", retval, "getting principal name"); return 6; } /* client sends it already null-terminated. */ printf ("uu-server: client principal is \"%s\".\n", pname_data.data); retval = krb5_parse_name(context, pname_data.data, &creds.server); if (retval) { com_err("uu-server", retval, "parsing client name"); return 3; } creds.second_ticket = tkt_data; printf ("uu-server: client ticket is %d bytes.\n", creds.second_ticket.length); retval = krb5_get_credentials(context, KRB5_GC_USER_USER, cc, &creds, &new_creds); if (retval) { com_err("uu-server", retval, "getting user-user ticket"); return 5; } #ifndef DEBUG l = sizeof(f_inaddr); if (getpeername(0, (struct sockaddr *)&f_inaddr, &l) == -1) { com_err("uu-server", errno, "getting client address"); return 6; } #endif l = sizeof(l_inaddr); if (getsockname(0, (struct sockaddr *)&l_inaddr, &l) == -1) { com_err("uu-server", errno, "getting local address"); return 6; } /* send a ticket/authenticator to the other side, so it can get the key we're using for the krb_safe below. 
*/ retval = krb5_auth_con_init(context, &auth_context); if (retval) { com_err("uu-server", retval, "making auth_context"); return 8; } retval = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (retval) { com_err("uu-server", retval, "initializing the auth_context flags"); return 8; } retval = krb5_auth_con_genaddrs(context, auth_context, sock, KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR | KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR); if (retval) { com_err("uu-server", retval, "generating addrs for auth_context"); return 9; } #if 1 retval = krb5_mk_req_extended(context, &auth_context, AP_OPTS_USE_SESSION_KEY, NULL, new_creds, &msg); if (retval) { com_err("uu-server", retval, "making AP_REQ"); return 8; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); #else retval = krb5_sendauth(context, &auth_context, (krb5_pointer)&sock, "???", 0, 0, AP_OPTS_MUTUAL_REQUIRED | AP_OPTS_USE_SESSION_KEY, NULL, &creds, cc, NULL, NULL, NULL); #endif if (retval) goto cl_short_wrt; free(msg.data); msgtext.length = 32; msgtext.data = "Hello, other end of connection."; retval = krb5_mk_safe(context, auth_context, &msgtext, &msg, NULL); if (retval) { com_err("uu-server", retval, "encoding message to client"); return 6; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); if (retval) { cl_short_wrt: com_err("uu-server", retval, "writing message to client"); return 7; } krb5_free_data_contents(context, &msg); krb5_free_data_contents(context, &pname_data); /* tkt_data freed with creds */ krb5_free_cred_contents(context, &creds); krb5_free_creds(context, new_creds); krb5_cc_close(context, cc); krb5_auth_con_free(context, auth_context); krb5_free_context(context); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix krb5_read_message handling [CVE-2014-5355] In recvauth_common, do not use strcmp against the data fields of krb5_data objects populated by krb5_read_message(), as there is no guarantee that they are C strings. Instead, create an expected krb5_data value and use data_eq(). In the sample user-to-user server application, check that the received client principal name is null-terminated before using it with printf and krb5_parse_name. CVE-2014-5355: In MIT krb5, when a server process uses the krb5_recvauth function, an unauthenticated remote attacker can cause a NULL dereference by sending a zero-byte version string, or a read beyond the end of allocated storage by sending a non-null-terminated version string. The example user-to-user server application (uuserver) is similarly vulnerable to a zero-length or non-null-terminated principal name string. The krb5_recvauth function reads two version strings from the client using krb5_read_message(), which produces a krb5_data structure containing a length and a pointer to an octet sequence. krb5_recvauth assumes that the data pointer is a valid C string and passes it to strcmp() to verify the versions. If the client sends an empty octet sequence, the data pointer will be NULL and strcmp() will dereference a NULL pointer, causing the process to crash. If the client sends a non-null-terminated octet sequence, strcmp() will read beyond the end of the allocated storage, possibly causing the process to crash. uuserver similarly uses krb5_read_message() to read a client principal name, and then passes it to printf() and krb5_parse_name() without verifying that it is a valid C string. 
The krb5_recvauth function is used by kpropd and the Kerberized versions of the BSD rlogin and rsh daemons. These daemons are usually run out of inetd or in a mode which forks before processing incoming connections, so a process crash will generally not result in a complete denial of service. Thanks to Tim Uglow for discovering this issue. CVSSv2: AV:N/AC:L/Au:N/C:N/I:N/A:P/E:POC/RL:OF/RC:C [[email protected]: CVSS score] ticket: 8050 (new) target_version: 1.13.1 tags: pullup'</s>
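The fix above makes two checks explicit: krb5_data values returned by krb5_read_message() carry a length and a pointer and are not guaranteed to be C strings, so they must be compared with a length-aware helper rather than strcmp(), and a received principal name must be confirmed null-terminated before it reaches printf() or krb5_parse_name(). A small sketch of both checks follows; the helper names are illustrative and not necessarily those used in MIT krb5.

    #include <string.h>
    #include <krb5.h>

    /* Compare a krb5_data (length + pointer, possibly not null-terminated)
     * against an expected literal without assuming C-string semantics. */
    static int
    data_eq_string (krb5_data d, const char *s)
    {
        return d.length == strlen (s) &&
               (d.length == 0 || memcmp (d.data, s, d.length) == 0);
    }

    /* Only treat a received buffer as a printable C string if the last
     * byte really is a terminating NUL. */
    static int
    is_cstring (const krb5_data *d)
    {
        return d->data != NULL && d->length > 0 &&
               d->data[d->length - 1] == '\0';
    }

In the server sample above, a guard such as "if (!is_cstring (&pname_data)) return 2;" before the printf()/krb5_parse_name() calls would close the non-terminated case.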
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: errcode_t ext2fs_flush2(ext2_filsys fs, int flags) { dgrp_t i; errcode_t retval; unsigned long fs_state; __u32 feature_incompat; struct ext2_super_block *super_shadow = 0; struct ext2_group_desc *group_shadow = 0; #ifdef WORDS_BIGENDIAN struct ext2_group_desc *gdp; dgrp_t j; #endif char *group_ptr; int old_desc_blocks; struct ext2fs_numeric_progress_struct progress; EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS); fs_state = fs->super->s_state; feature_incompat = fs->super->s_feature_incompat; fs->super->s_wtime = fs->now ? fs->now : time(NULL); fs->super->s_block_group_nr = 0; #ifdef WORDS_BIGENDIAN retval = EXT2_ET_NO_MEMORY; retval = ext2fs_get_mem(SUPERBLOCK_SIZE, &super_shadow); if (retval) goto errout; retval = ext2fs_get_array(fs->desc_blocks, fs->blocksize, &group_shadow); if (retval) goto errout; memcpy(group_shadow, fs->group_desc, (size_t) fs->blocksize * fs->desc_blocks); /* swap the group descriptors */ for (j = 0; j < fs->group_desc_count; j++) { gdp = ext2fs_group_desc(fs, group_shadow, j); ext2fs_swap_group_desc2(fs, gdp); } #else super_shadow = fs->super; group_shadow = ext2fs_group_desc(fs, fs->group_desc, 0); #endif /* * Set the state of the FS to be non-valid. (The state has * already been backed up earlier, and will be restored after * we write out the backup superblocks.) */ fs->super->s_state &= ~EXT2_VALID_FS; fs->super->s_feature_incompat &= ~EXT3_FEATURE_INCOMPAT_RECOVER; #ifdef WORDS_BIGENDIAN *super_shadow = *fs->super; ext2fs_swap_super(super_shadow); #endif /* * If this is an external journal device, don't write out the * block group descriptors or any of the backup superblocks */ if (fs->super->s_feature_incompat & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV) goto write_primary_superblock_only; /* * Write out the master group descriptors, and the backup * superblocks and group descriptors. */ group_ptr = (char *) group_shadow; if (fs->super->s_feature_incompat & EXT2_FEATURE_INCOMPAT_META_BG) { old_desc_blocks = fs->super->s_first_meta_bg; if (old_desc_blocks > fs->super->s_first_meta_bg) old_desc_blocks = fs->desc_blocks; } else old_desc_blocks = fs->desc_blocks; ext2fs_numeric_progress_init(fs, &progress, NULL, fs->group_desc_count); for (i = 0; i < fs->group_desc_count; i++) { blk64_t super_blk, old_desc_blk, new_desc_blk; ext2fs_numeric_progress_update(fs, &progress, i); ext2fs_super_and_bgd_loc2(fs, i, &super_blk, &old_desc_blk, &new_desc_blk, 0); if (!(fs->flags & EXT2_FLAG_MASTER_SB_ONLY) &&i && super_blk) { retval = write_backup_super(fs, i, super_blk, super_shadow); if (retval) goto errout; } if (fs->flags & EXT2_FLAG_SUPER_ONLY) continue; if ((old_desc_blk) && (!(fs->flags & EXT2_FLAG_MASTER_SB_ONLY) || (i == 0))) { retval = io_channel_write_blk64(fs->io, old_desc_blk, old_desc_blocks, group_ptr); if (retval) goto errout; } if (new_desc_blk) { int meta_bg = i / EXT2_DESC_PER_BLOCK(fs->super); retval = io_channel_write_blk64(fs->io, new_desc_blk, 1, group_ptr + (meta_bg*fs->blocksize)); if (retval) goto errout; } } ext2fs_numeric_progress_close(fs, &progress, NULL); /* * If the write_bitmaps() function is present, call it to * flush the bitmaps. This is done this way so that a simple * program that doesn't mess with the bitmaps doesn't need to * drag in the bitmaps.c code. 
*/ if (fs->write_bitmaps) { retval = fs->write_bitmaps(fs); if (retval) goto errout; } write_primary_superblock_only: /* * Write out master superblock. This has to be done * separately, since it is located at a fixed location * (SUPERBLOCK_OFFSET). We flush all other pending changes * out to disk first, just to avoid a race condition with an * insy-tinsy window.... */ fs->super->s_block_group_nr = 0; fs->super->s_state = fs_state; fs->super->s_feature_incompat = feature_incompat; #ifdef WORDS_BIGENDIAN *super_shadow = *fs->super; ext2fs_swap_super(super_shadow); #endif if (!(flags & EXT2_FLAG_FLUSH_NO_SYNC)) retval = io_channel_flush(fs->io); retval = write_primary_superblock(fs, super_shadow); if (retval) goto errout; fs->flags &= ~EXT2_FLAG_DIRTY; if (!(flags & EXT2_FLAG_FLUSH_NO_SYNC)) retval = io_channel_flush(fs->io); errout: fs->super->s_state = fs_state; #ifdef WORDS_BIGENDIAN if (super_shadow) ext2fs_free_mem(&super_shadow); if (group_shadow) ext2fs_free_mem(&group_shadow); #endif return retval; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'libext2fs: fix potential buffer overflow in closefs() The bug fix in f66e6ce4446: "libext2fs: avoid buffer overflow if s_first_meta_bg is too big" had a typo in the fix for ext2fs_closefs(). In practice most of the security exposure was from the openfs path, since this meant if there was a carefully crafted file system, buffer overrun would be triggered when the file system was opened. However, if corrupted file system didn't trip over some corruption check, and then the file system was modified via tune2fs or debugfs, such that the superblock was marked dirty and then written out via the closefs() path, it's possible that the buffer overrun could be triggered when the file system is closed. Also clear up a signed vs unsigned warning while we're at it. Thanks to Nick Kralevich <[email protected]> for asking me to look at compiler warning in the code in question, which led me to notice the bug in f66e6ce4446. Addresses: CVE-2015-1572 Signed-off-by: Theodore Ts'o <[email protected]>'</s>
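The message above points at a one-character logic slip in the flush path: old_desc_blocks is assigned from the untrusted on-disk s_first_meta_bg and then compared against that same field, so the clamp never fires and a crafted superblock can make the descriptor write run past the allocated buffer. The toy program below only illustrates the intended clamp against the in-memory desc_blocks; the names are made up for the example and it is not e2fsprogs code.

    #include <stdio.h>

    /* Bound the untrusted on-disk value by the real descriptor count. */
    static unsigned long
    clamp_desc_blocks (unsigned long first_meta_bg, unsigned long desc_blocks)
    {
            unsigned long old_desc_blocks = first_meta_bg;

            if (old_desc_blocks > desc_blocks)  /* buggy code compared against first_meta_bg */
                    old_desc_blocks = desc_blocks;
            return old_desc_blocks;
    }

    int main (void)
    {
            /* A crafted s_first_meta_bg far larger than the real count. */
            printf ("%lu\n", clamp_desc_blocks (0xffffffffUL, 16));  /* prints 16 */
            return 0;
    }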
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void uv__process_child_init(const uv_process_options_t* options, int stdio_count, int (*pipes)[2], int error_fd) { int close_fd; int use_fd; int fd; if (options->flags & UV_PROCESS_DETACHED) setsid(); for (fd = 0; fd < stdio_count; fd++) { close_fd = pipes[fd][0]; use_fd = pipes[fd][1]; if (use_fd < 0) { if (fd >= 3) continue; else { /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is * set */ use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR); close_fd = use_fd; if (use_fd == -1) { uv__write_int(error_fd, -errno); perror("failed to open stdio"); _exit(127); } } } if (fd == use_fd) uv__cloexec(use_fd, 0); else dup2(use_fd, fd); if (fd <= 2) uv__nonblock(fd, 0); if (close_fd != -1) uv__close(close_fd); } for (fd = 0; fd < stdio_count; fd++) { use_fd = pipes[fd][1]; if (use_fd >= 0 && fd != use_fd) close(use_fd); } if (options->cwd != NULL && chdir(options->cwd)) { uv__write_int(error_fd, -errno); perror("chdir()"); _exit(127); } if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) { uv__write_int(error_fd, -errno); perror("setgid()"); _exit(127); } if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) { uv__write_int(error_fd, -errno); perror("setuid()"); _exit(127); } if (options->env != NULL) { environ = options->env; } execvp(options->file, options->args); uv__write_int(error_fd, -errno); perror("execvp()"); _exit(127); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-273', 'CWE-284', 'CWE-264'], 'message': 'unix: call setgoups before calling setuid/setgid Partial fix for #1093'</s>
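The one-line fix title above is about ordering when dropping privileges in the child process: setgid()/setuid() alone leave the (root) parent's supplementary group list in place, so setgroups() has to be called first, while the process still has the privilege to do so. A standalone sketch of that ordering, with error checks, is below; it is not libuv's actual child-init code and the uid/gid values in main() are placeholders.

    #include <sys/types.h>
    #include <grp.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Drop to the given uid/gid.  Order matters: setgroups() and setgid()
     * need root, so they must run before setuid() gives it up. */
    static void
    drop_privileges (uid_t uid, gid_t gid)
    {
      if (setgroups (0, NULL) != 0) { perror ("setgroups"); exit (1); }
      if (setgid (gid) != 0)        { perror ("setgid");    exit (1); }
      if (setuid (uid) != 0)        { perror ("setuid");    exit (1); }
    }

    int main (void)
    {
      drop_privileges (65534, 65534);  /* e.g. nobody/nogroup on many systems */
      printf ("now uid=%d gid=%d\n", (int) getuid (), (int) getgid ());
      return 0;
    }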
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr_rmtval_set( struct xfs_da_args *args) { struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; struct xfs_bmbt_irec map; xfs_dablk_t lblkno; xfs_fileoff_t lfileoff = 0; __uint8_t *src = args->value; int blkcnt; int valuelen; int nmap; int error; int offset = 0; trace_xfs_attr_rmtval_set(args); /* * Find a "hole" in the attribute address space large enough for * us to drop the new attribute's value into. Because CRC enable * attributes have headers, we can't just do a straight byte to FSB * conversion and have to take the header space into account. */ blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, XFS_ATTR_FORK); if (error) return error; args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; args->rmtblkcnt = blkcnt; /* * Roll through the "value", allocating blocks on disk as required. */ while (blkcnt > 0) { int committed; /* * Allocate a single extent, up to the size of the value. */ xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, blkcnt, XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, args->firstblock, args->total, &map, &nmap, args->flist); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); return(error); } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); ASSERT(nmap == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; /* * Start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) return (error); } /* * Roll through the "value", copying the attribute value to the * already-allocated blocks. Blocks are written synchronously * so that we can know they are all on disk before we turn off * the INCOMPLETE flag. 
*/ lblkno = args->rmtblkno; blkcnt = args->rmtblkcnt; valuelen = args->valuelen; while (valuelen > 0) { struct xfs_buf *bp; xfs_daddr_t dblkno; int dblkcnt; ASSERT(blkcnt > 0); xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno, blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); if (error) return(error); ASSERT(nmap == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); if (!bp) return ENOMEM; bp->b_ops = &xfs_attr3_rmt_buf_ops; xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, &valuelen, &src); error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */ xfs_buf_relse(bp); if (error) return error; /* roll attribute extent map forwards */ lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; } ASSERT(valuelen == 0); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
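This sample and the next several xfs samples (through the xfs_attr_calc_size one) carry the same commit message: lookup of an existing remote attribute overwrote args->valuelen, so the replacement attribute was sized from the old value instead of the new one, overrunning the transaction reservation. The toy program below only models the shape of the fix, keeping the incoming value length and the looked-up remote value length in separate fields; the struct and field names are illustrative, not the real xfs_da_args layout.

    #include <stdint.h>
    #include <stdio.h>

    struct attr_args {
        uint32_t valuelen;     /* length of the value being set (caller-owned)  */
        uint32_t rmtvaluelen;  /* length of a remote value discovered by lookup */
    };

    /* Lookup records what it found without clobbering the caller's length. */
    static void
    lookup_existing (struct attr_args *args, uint32_t old_remote_len)
    {
        args->rmtvaluelen = old_remote_len;  /* the buggy code wrote valuelen here */
    }

    int main (void)
    {
        struct attr_args args = { .valuelen = 64, .rmtvaluelen = 0 };

        lookup_existing (&args, 65536);      /* the old attribute was much larger */
        printf ("new attribute sized from valuelen = %u, old remote len = %u\n",
                args.valuelen, args.rmtvaluelen);
        return 0;
    }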
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr3_leaf_lookup_int( struct xfs_buf *bp, struct xfs_da_args *args) { struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entry; struct xfs_attr_leaf_entry *entries; struct xfs_attr_leaf_name_local *name_loc; struct xfs_attr_leaf_name_remote *name_rmt; xfs_dahash_t hashval; int probe; int span; trace_xfs_attr_leaf_lookup(args); leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); entries = xfs_attr3_leaf_entryp(leaf); ASSERT(ichdr.count < XFS_LBSIZE(args->dp->i_mount) / 8); /* * Binary search. (note: small blocks will skip this loop) */ hashval = args->hashval; probe = span = ichdr.count / 2; for (entry = &entries[probe]; span > 4; entry = &entries[probe]) { span /= 2; if (be32_to_cpu(entry->hashval) < hashval) probe += span; else if (be32_to_cpu(entry->hashval) > hashval) probe -= span; else break; } ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count)); ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval); /* * Since we may have duplicate hashval's, find the first matching * hashval in the leaf. */ while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) { entry--; probe--; } while (probe < ichdr.count && be32_to_cpu(entry->hashval) < hashval) { entry++; probe++; } if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) { args->index = probe; return XFS_ERROR(ENOATTR); } /* * Duplicate keys may be present, so search all of them for a match. */ for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval); entry++, probe++) { /* * GROT: Add code to remove incomplete entries. */ /* * If we are looking for INCOMPLETE entries, show only those. * If we are looking for complete entries, show only those. */ if ((args->flags & XFS_ATTR_INCOMPLETE) != (entry->flags & XFS_ATTR_INCOMPLETE)) { continue; } if (entry->flags & XFS_ATTR_LOCAL) { name_loc = xfs_attr3_leaf_name_local(leaf, probe); if (name_loc->namelen != args->namelen) continue; if (memcmp(args->name, name_loc->nameval, args->namelen) != 0) continue; if (!xfs_attr_namesp_match(args->flags, entry->flags)) continue; args->index = probe; return XFS_ERROR(EEXIST); } else { name_rmt = xfs_attr3_leaf_name_remote(leaf, probe); if (name_rmt->namelen != args->namelen) continue; if (memcmp(args->name, name_rmt->name, args->namelen) != 0) continue; if (!xfs_attr_namesp_match(args->flags, entry->flags)) continue; args->index = probe; args->valuelen = be32_to_cpu(name_rmt->valuelen); args->rmtblkno = be32_to_cpu(name_rmt->valueblk); args->rmtblkcnt = xfs_attr3_rmt_blocks( args->dp->i_mount, args->valuelen); return XFS_ERROR(EEXIST); } } args->index = probe; return XFS_ERROR(ENOATTR); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. 
That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr3_leaf_add_work( struct xfs_buf *bp, struct xfs_attr3_icleaf_hdr *ichdr, struct xfs_da_args *args, int mapindex) { struct xfs_attr_leafblock *leaf; struct xfs_attr_leaf_entry *entry; struct xfs_attr_leaf_name_local *name_loc; struct xfs_attr_leaf_name_remote *name_rmt; struct xfs_mount *mp; int tmp; int i; trace_xfs_attr_leaf_add_work(args); leaf = bp->b_addr; ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE); ASSERT(args->index >= 0 && args->index <= ichdr->count); /* * Force open some space in the entry array and fill it in. */ entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; if (args->index < ichdr->count) { tmp = ichdr->count - args->index; tmp *= sizeof(xfs_attr_leaf_entry_t); memmove(entry + 1, entry, tmp); xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); } ichdr->count++; /* * Allocate space for the new string (at the end of the run). */ mp = args->trans->t_mountp; ASSERT(ichdr->freemap[mapindex].base < XFS_LBSIZE(mp)); ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0); ASSERT(ichdr->freemap[mapindex].size >= xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, NULL)); ASSERT(ichdr->freemap[mapindex].size < XFS_LBSIZE(mp)); ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0); ichdr->freemap[mapindex].size -= xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, &tmp); entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base + ichdr->freemap[mapindex].size); entry->hashval = cpu_to_be32(args->hashval); entry->flags = tmp ? XFS_ATTR_LOCAL : 0; entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags); if (args->op_flags & XFS_DA_OP_RENAME) { entry->flags |= XFS_ATTR_INCOMPLETE; if ((args->blkno2 == args->blkno) && (args->index2 <= args->index)) { args->index2++; } } xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); ASSERT((args->index == 0) || (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval))); ASSERT((args->index == ichdr->count - 1) || (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval))); /* * For "remote" attribute values, simply note that we need to * allocate space for the "remote" value. We can't actually * allocate the extents in this transaction, and we can't decide * which blocks they should be as we might allocate more blocks * as part of this transaction (a split operation for example). 
*/ if (entry->flags & XFS_ATTR_LOCAL) { name_loc = xfs_attr3_leaf_name_local(leaf, args->index); name_loc->namelen = args->namelen; name_loc->valuelen = cpu_to_be16(args->valuelen); memcpy((char *)name_loc->nameval, args->name, args->namelen); memcpy((char *)&name_loc->nameval[args->namelen], args->value, be16_to_cpu(name_loc->valuelen)); } else { name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); name_rmt->namelen = args->namelen; memcpy((char *)name_rmt->name, args->name, args->namelen); entry->flags |= XFS_ATTR_INCOMPLETE; /* just in case */ name_rmt->valuelen = 0; name_rmt->valueblk = 0; args->rmtblkno = 1; args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen); } xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index), xfs_attr_leaf_entsize(leaf, args->index))); /* * Update the control info for this leaf node */ if (be16_to_cpu(entry->nameidx) < ichdr->firstused) ichdr->firstused = be16_to_cpu(entry->nameidx); ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t) + xfs_attr3_leaf_hdr_size(leaf)); tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t) + xfs_attr3_leaf_hdr_size(leaf); for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { if (ichdr->freemap[i].base == tmp) { ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); } } ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr3_leaf_clearflag( struct xfs_da_args *args) { struct xfs_attr_leafblock *leaf; struct xfs_attr_leaf_entry *entry; struct xfs_attr_leaf_name_remote *name_rmt; struct xfs_buf *bp; int error; #ifdef DEBUG struct xfs_attr3_icleaf_hdr ichdr; xfs_attr_leaf_name_local_t *name_loc; int namelen; char *name; #endif /* DEBUG */ trace_xfs_attr_leaf_clearflag(args); /* * Set up the operation. */ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); if (error) return(error); leaf = bp->b_addr; entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); #ifdef DEBUG xfs_attr3_leaf_hdr_from_disk(&ichdr, leaf); ASSERT(args->index < ichdr.count); ASSERT(args->index >= 0); if (entry->flags & XFS_ATTR_LOCAL) { name_loc = xfs_attr3_leaf_name_local(leaf, args->index); namelen = name_loc->namelen; name = (char *)name_loc->nameval; } else { name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); namelen = name_rmt->namelen; name = (char *)name_rmt->name; } ASSERT(be32_to_cpu(entry->hashval) == args->hashval); ASSERT(namelen == args->namelen); ASSERT(memcmp(name, args->name, namelen) == 0); #endif /* DEBUG */ entry->flags &= ~XFS_ATTR_INCOMPLETE; xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); if (args->rmtblkno) { ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index); name_rmt->valueblk = cpu_to_be32(args->rmtblkno); name_rmt->valuelen = cpu_to_be32(args->valuelen); xfs_trans_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } /* * Commit the flag value change and start the next trans in series. */ return xfs_trans_roll(&args->trans, args->dp); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. 
Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr_node_addname(xfs_da_args_t *args) { xfs_da_state_t *state; xfs_da_state_blk_t *blk; xfs_inode_t *dp; xfs_mount_t *mp; int committed, retval, error; trace_xfs_attr_node_addname(args); /* * Fill in bucket of arguments/results/context to carry around. */ dp = args->dp; mp = dp->i_mount; restart: state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; /* * Search to see if name already exists, and get back a pointer * to where it should go. */ error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { goto out; } else if (retval == EEXIST) { if (args->flags & ATTR_CREATE) goto out; trace_xfs_attr_node_replace(args); args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ args->blkno2 = args->blkno; /* set 2nd entry info*/ args->index2 = args->index; args->rmtblkno2 = args->rmtblkno; args->rmtblkcnt2 = args->rmtblkcnt; args->rmtblkno = 0; args->rmtblkcnt = 0; } retval = xfs_attr3_leaf_add(blk->bp, state->args); if (retval == ENOSPC) { if (state->path.active == 1) { /* * Its really a single leaf node, but it had * out-of-line values so it looked like it *might* * have been a b-tree. */ xfs_da_state_free(state); state = NULL; xfs_bmap_init(args->flist, args->firstblock); error = xfs_attr3_leaf_to_node(args); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); /* * Commit the node conversion and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; goto restart; } /* * Split as many Btree elements as required. * This code tracks the new and old attr's location * in the index/blkno/rmtblkno/rmtblkcnt fields and * in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields. */ xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_split(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans and started * a new one. We need the inode to be in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } else { /* * Addition succeeded, update Btree hashvals. */ xfs_da3_fixhashpath(state, &state->path); } /* * Kill the state structure, we're done with it and need to * allow the buffers to come back later. */ xfs_da_state_free(state); state = NULL; /* * Commit the leaf addition or btree split and start the next * trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; /* * If there was an out-of-line value, allocate the blocks we * identified for its storage and copy the value. This is done * after we create the attribute so that we don't overflow the * maximum size of a transaction and/or hit a deadlock. 
*/ if (args->rmtblkno > 0) { error = xfs_attr_rmtval_set(args); if (error) return(error); } /* * If this is an atomic rename operation, we must "flip" the * incomplete flags on the "new" and "old" attribute/value pairs * so that one disappears and one appears atomically. Then we * must remove the "old" attribute/value pair. */ if (args->op_flags & XFS_DA_OP_RENAME) { /* * In a separate transaction, set the incomplete flag on the * "old" attr and clear the incomplete flag on the "new" attr. */ error = xfs_attr3_leaf_flipflags(args); if (error) goto out; /* * Dismantle the "old" attribute/value pair by removing * a "remote" value (if it exists). */ args->index = args->index2; args->blkno = args->blkno2; args->rmtblkno = args->rmtblkno2; args->rmtblkcnt = args->rmtblkcnt2; if (args->rmtblkno) { error = xfs_attr_rmtval_remove(args); if (error) return(error); } /* * Re-find the "old" attribute entry after any split ops. * The INCOMPLETE flag means that we will find the "old" * attr, not the "new" one. */ args->flags |= XFS_ATTR_INCOMPLETE; state = xfs_da_state_alloc(); state->args = args; state->mp = mp; state->blocksize = state->mp->m_sb.sb_blocksize; state->node_ents = state->mp->m_attr_node_ents; state->inleaf = 0; error = xfs_da3_node_lookup_int(state, &retval); if (error) goto out; /* * Remove the name and update the hashvals in the tree. */ blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); error = xfs_attr3_leaf_remove(blk->bp, args); xfs_da3_fixhashpath(state, &state->path); /* * Check to see if the tree needs to be collapsed. */ if (retval && (state->path.active > 1)) { xfs_bmap_init(args->flist, args->firstblock); error = xfs_da3_join(state); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); } if (error) { ASSERT(committed); args->trans = NULL; xfs_bmap_cancel(args->flist); goto out; } /* * bmap_finish() may have committed the last trans * and started a new one. We need the inode to be * in all transactions. */ if (committed) xfs_trans_ijoin(args->trans, dp, 0); } /* * Commit and start the next trans in the chain. */ error = xfs_trans_roll(&args->trans, dp); if (error) goto out; } else if (args->rmtblkno > 0) { /* * Added a "remote" value, just clear the incomplete flag. */ error = xfs_attr3_leaf_clearflag(args); if (error) goto out; } retval = error = 0; out: if (state) xfs_da_state_free(state); if (error) return(error); return(retval); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. 
When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr_calc_size( struct xfs_inode *ip, int namelen, int valuelen, int *local) { struct xfs_mount *mp = ip->i_mount; int size; int nblks; /* * Determine space new attribute will use, and if it would be * "local" or "remote" (note: local != inline). */ size = xfs_attr_leaf_newentsize(namelen, valuelen, mp->m_sb.sb_blocksize, local); nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); if (*local) { if (size > (mp->m_sb.sb_blocksize >> 1)) { /* Double split possible */ nblks *= 2; } } else { /* * Out of line attribute, cannot double split, but * make room for the attribute value itself. */ uint dblocks = XFS_B_TO_FSB(mp, valuelen); nblks += dblocks; nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK); } return nblks; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'xfs: remote attribute overwrite causes transaction overrun Commit e461fcb ("xfs: remote attribute lookups require the value length") passes the remote attribute length in the xfs_da_args structure on lookup so that CRC calculations and validity checking can be performed correctly by related code. This, unfortunately has the side effect of changing the args->valuelen parameter in cases where it shouldn't. That is, when we replace a remote attribute, the incoming replacement stores the value and length in args->value and args->valuelen, but then the lookup which finds the existing remote attribute overwrites args->valuelen with the length of the remote attribute being replaced. Hence when we go to create the new attribute, we create it of the size of the existing remote attribute, not the size it is supposed to be. When the new attribute is much smaller than the old attribute, this results in a transaction overrun and an ASSERT() failure on a debug kernel: XFS: Assertion failed: tp->t_blk_res_used <= tp->t_blk_res, file: fs/xfs/xfs_trans.c, line: 331 Fix this by keeping the remote attribute value length separate to the attribute value length in the xfs_da_args structure. The enables us to pass the length of the remote attribute to be removed without overwriting the new attribute's length. Also, ensure that when we save remote block contexts for a later rename we zero the original state variables so that we don't confuse the state of the attribute to be removes with the state of the new attribute that we just added. [Spotted by Brain Foster.] Signed-off-by: Dave Chinner <[email protected]> Reviewed-by: Brian Foster <[email protected]> Signed-off-by: Dave Chinner <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: gnutls_x509_crt_import(gnutls_x509_crt_t cert, const gnutls_datum_t * data, gnutls_x509_crt_fmt_t format) { int result = 0; int version; if (cert == NULL) { gnutls_assert(); return GNUTLS_E_INVALID_REQUEST; } if (cert->der.data) { gnutls_free(cert->der.data); cert->der.data = NULL; } /* If the Certificate is in PEM format then decode it */ if (format == GNUTLS_X509_FMT_PEM) { /* Try the first header */ result = _gnutls_fbase64_decode(PEM_X509_CERT2, data->data, data->size, &cert->der); if (result <= 0) { /* try for the second header */ result = _gnutls_fbase64_decode(PEM_X509_CERT, data->data, data->size, &cert->der); if (result < 0) { gnutls_assert(); return result; } } } else { result = _gnutls_set_datum(&cert->der, data->data, data->size); if (result < 0) { gnutls_assert(); return result; } } if (cert->expanded) { /* Any earlier asn1_der_decoding will modify the ASN.1 structure, so we need to replace it with a fresh structure. */ result = crt_reinit(cert); if (result < 0) { gnutls_assert(); goto cleanup; } } cert->expanded = 1; result = asn1_der_decoding(&cert->cert, cert->der.data, cert->der.size, NULL); if (result != ASN1_SUCCESS) { result = _gnutls_asn2err(result); gnutls_assert(); goto cleanup; } result = _gnutls_x509_get_raw_field2(cert->cert, &cert->der, "tbsCertificate.issuer.rdnSequence", &cert->raw_issuer_dn); if (result < 0) { gnutls_assert(); goto cleanup; } result = _gnutls_x509_get_raw_field2(cert->cert, &cert->der, "tbsCertificate.subject.rdnSequence", &cert->raw_dn); if (result < 0) { gnutls_assert(); goto cleanup; } result = _gnutls_x509_get_raw_field2(cert->cert, &cert->der, "tbsCertificate.subjectPublicKeyInfo", &cert->raw_spki); if (result < 0) { gnutls_assert(); goto cleanup; } /* enforce the rule that only version 3 certificates carry extensions */ version = gnutls_x509_crt_get_version(cert); if (version < 3) { gnutls_datum_t exts; result = _gnutls_x509_get_raw_field2(cert->cert, &cert->der, "tbsCertificate.extensions", &exts); if (result >= 0 && exts.size > 0) { gnutls_assert(); _gnutls_debug_log("error: extensions present in certificate with version %d\n", version); result = GNUTLS_E_X509_CERTIFICATE_ERROR; goto cleanup; } } /* Since we do not want to disable any extension */ cert->use_extensions = 1; return 0; cleanup: _gnutls_free_datum(&cert->der); return result; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-295'], 'message': 'on certificate import check whether the two signature algorithms match'</s>
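An X.509 certificate states its signature algorithm twice, inside tbsCertificate.signature and again in the outer signatureAlgorithm field, and the fix note above says the import path should check that the two agree. The helper below only sketches the comparison step on the two raw AlgorithmIdentifier encodings; how those datums are extracted from the DER is library-internal and not shown, and the function name is made up.

    #include <string.h>
    #include <gnutls/gnutls.h>

    /* Reject certificates whose inner and outer signature algorithm
     * identifiers differ (a mismatch is a sign of tampering). */
    static int
    sig_algorithms_match (const gnutls_datum_t *tbs_sig_alg,
                          const gnutls_datum_t *outer_sig_alg)
    {
      return tbs_sig_alg->size == outer_sig_alg->size &&
             memcmp (tbs_sig_alg->data, outer_sig_alg->data,
                     tbs_sig_alg->size) == 0;
    }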
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: idna_to_unicode_8zlz (const char *input, char **output, int flags) { char *utf8; int rc; rc = idna_to_unicode_8z8z (input, &utf8, flags); *output = stringprep_utf8_to_locale (utf8); free (utf8); if (!*output) return IDNA_ICONV_ERROR; return rc; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'libidn: Fix crash in idna_to_unicode_8z8z and idna_to_unicode_8zlz.'</s>
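The crash described above comes from handing the result of a failed idna_to_unicode_8z8z() call straight to stringprep_utf8_to_locale(), which may then dereference a NULL or unset pointer. A guarded version of the wrapper is sketched below, under the assumption that returning the error code unchanged (and a NULL *output) is the desired behaviour; the actual upstream fix may differ in detail.

    #include <stdlib.h>
    #include <idna.h>
    #include <stringprep.h>

    int
    idna_to_unicode_8zlz_checked (const char *input, char **output, int flags)
    {
      char *utf8 = NULL;
      int rc;

      rc = idna_to_unicode_8z8z (input, &utf8, flags);
      if (rc != IDNA_SUCCESS || utf8 == NULL)
        {
          *output = NULL;          /* nothing usable to convert */
          return rc;
        }

      *output = stringprep_utf8_to_locale (utf8);
      free (utf8);
      if (!*output)
        return IDNA_ICONV_ERROR;
      return rc;
    }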
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _gnutls_verify_certificate2 (gnutls_x509_crt_t cert, const gnutls_x509_crt_t * trusted_cas, int tcas_size, unsigned int flags, unsigned int *output) { gnutls_datum_t cert_signed_data = { NULL, 0 }; gnutls_datum_t cert_signature = { NULL, 0 }; gnutls_x509_crt_t issuer; int ret, issuer_version, result; if (output) *output = 0; if (tcas_size >= 1) issuer = find_issuer (cert, trusted_cas, tcas_size); else { gnutls_assert (); if (output) *output |= GNUTLS_CERT_SIGNER_NOT_FOUND | GNUTLS_CERT_INVALID; return 0; } /* issuer is not in trusted certificate * authorities. */ if (issuer == NULL) { if (output) *output |= GNUTLS_CERT_SIGNER_NOT_FOUND | GNUTLS_CERT_INVALID; gnutls_assert (); return 0; } issuer_version = gnutls_x509_crt_get_version (issuer); if (issuer_version < 0) { gnutls_assert (); return issuer_version; } if (!(flags & GNUTLS_VERIFY_DISABLE_CA_SIGN) && !((flags & GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT) && issuer_version == 1)) { if (check_if_ca (cert, issuer, flags) == 0) { gnutls_assert (); if (output) *output |= GNUTLS_CERT_SIGNER_NOT_CA | GNUTLS_CERT_INVALID; return 0; } } result = _gnutls_x509_get_signed_data (cert->cert, "tbsCertificate", &cert_signed_data); if (result < 0) { gnutls_assert (); goto cleanup; } result = _gnutls_x509_get_signature (cert->cert, "signature", &cert_signature); if (result < 0) { gnutls_assert (); goto cleanup; } ret = _gnutls_x509_verify_signature (&cert_signed_data, NULL, &cert_signature, issuer); if (ret < 0) { gnutls_assert (); } else if (ret == 0) { gnutls_assert (); /* error. ignore it */ if (output) *output |= GNUTLS_CERT_INVALID; ret = 0; } /* If the certificate is not self signed check if the algorithms * used are secure. If the certificate is self signed it doesn't * really matter. */ if (is_issuer (cert, cert) == 0) { int sigalg; sigalg = gnutls_x509_crt_get_signature_algorithm (cert); if (((sigalg == GNUTLS_SIGN_RSA_MD2) && !(flags & GNUTLS_VERIFY_ALLOW_SIGN_RSA_MD2)) || ((sigalg == GNUTLS_SIGN_RSA_MD5) && !(flags & GNUTLS_VERIFY_ALLOW_SIGN_RSA_MD5))) { if (output) *output |= GNUTLS_CERT_INSECURE_ALGORITHM | GNUTLS_CERT_INVALID; ret = 0; } } result = ret; cleanup: _gnutls_free_datum (&cert_signed_data); _gnutls_free_datum (&cert_signature); return result; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'Extended time verification to trusted certificate list as well. Introduced the flag GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS that will prevent the trusted certificate list verification.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _gnutls_x509_verify_certificate (const gnutls_x509_crt_t * certificate_list, int clist_size, const gnutls_x509_crt_t * trusted_cas, int tcas_size, const gnutls_x509_crl_t * CRLs, int crls_size, unsigned int flags) { int i = 0, ret; unsigned int status = 0, output; if (clist_size > 1) { /* Check if the last certificate in the path is self signed. * In that case ignore it (a certificate is trusted only if it * leads to a trusted party by us, not the server's). * * This prevents from verifying self signed certificates against * themselves. This (although not bad) caused verification * failures on some root self signed certificates that use the * MD2 algorithm. */ if (gnutls_x509_crt_check_issuer (certificate_list[clist_size - 1], certificate_list[clist_size - 1]) > 0) { clist_size--; } } /* We want to shorten the chain by removing the cert that matches * one of the certs we trust and all the certs after that i.e. if * cert chain is A signed-by B signed-by C signed-by D (signed-by * self-signed E but already removed above), and we trust B, remove * B, C and D. */ if (!(flags & GNUTLS_VERIFY_DO_NOT_ALLOW_SAME)) { for (i = 0; i < clist_size; i++) { int j; for (j = 0; j < tcas_size; j++) { if (check_if_same_cert (certificate_list[i], trusted_cas[j]) == 0) { clist_size = i; break; } } /* clist_size may have been changed which gets out of loop */ } } if (clist_size == 0) /* The certificate is already present in the trusted certificate list. * Nothing to verify. */ return status; /* Verify the last certificate in the certificate path * against the trusted CA certificate list. * * If no CAs are present returns CERT_INVALID. Thus works * in self signed etc certificates. */ ret = _gnutls_verify_certificate2 (certificate_list[clist_size - 1], trusted_cas, tcas_size, flags, &output); if (ret == 0) { /* if the last certificate in the certificate * list is invalid, then the certificate is not * trusted. */ gnutls_assert (); status |= output; status |= GNUTLS_CERT_INVALID; return status; } /* Check for revoked certificates in the chain */ #ifdef ENABLE_PKI for (i = 0; i < clist_size; i++) { ret = gnutls_x509_crt_check_revocation (certificate_list[i], CRLs, crls_size); if (ret == 1) { /* revoked */ status |= GNUTLS_CERT_REVOKED; status |= GNUTLS_CERT_INVALID; return status; } } #endif /* Check activation/expiration times */ if (!(flags & GNUTLS_VERIFY_DISABLE_TIME_CHECKS)) { time_t t, now = time (0); for (i = 0; i < clist_size; i++) { t = gnutls_x509_crt_get_activation_time (certificate_list[i]); if (t == (time_t) - 1 || now < t) { status |= GNUTLS_CERT_NOT_ACTIVATED; status |= GNUTLS_CERT_INVALID; return status; } t = gnutls_x509_crt_get_expiration_time (certificate_list[i]); if (t == (time_t) - 1 || now > t) { status |= GNUTLS_CERT_EXPIRED; status |= GNUTLS_CERT_INVALID; return status; } } } /* Verify the certificate path (chain) */ for (i = clist_size - 1; i > 0; i--) { if (i - 1 < 0) break; /* note that here we disable this V1 CA flag. So that no version 1 * certificates can exist in a supplied chain. 
*/ if (!(flags & GNUTLS_VERIFY_ALLOW_ANY_X509_V1_CA_CRT)) flags &= ~(GNUTLS_VERIFY_ALLOW_X509_V1_CA_CRT); if ((ret = _gnutls_verify_certificate2 (certificate_list[i - 1], &certificate_list[i], 1, flags, NULL)) == 0) { status |= GNUTLS_CERT_INVALID; return status; } } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'Extended time verification to trusted certificate list as well. Introduced the flag GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS that will prevent the trusted certificate list verification.'</s>
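Both of the preceding GnuTLS samples share the fix note above: activation and expiration times were checked for the supplied chain but not for the trusted certificate selected from the CA list, and a new flag, GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS, lets callers opt out of the added check. The helper below is a hedged restatement of the time check already visible in the sample, factored so it could be applied to a trusted CA as well; it is not the literal upstream patch.

    #include <time.h>
    #include <gnutls/x509.h>

    /* Return the GNUTLS_CERT_* status bits describing why the certificate
     * is outside its validity period, or 0 if it is currently valid. */
    static unsigned int
    check_time_status (gnutls_x509_crt_t cert, time_t now)
    {
      time_t t;

      t = gnutls_x509_crt_get_activation_time (cert);
      if (t == (time_t) -1 || now < t)
        return GNUTLS_CERT_NOT_ACTIVATED | GNUTLS_CERT_INVALID;

      t = gnutls_x509_crt_get_expiration_time (cert);
      if (t == (time_t) -1 || now > t)
        return GNUTLS_CERT_EXPIRED | GNUTLS_CERT_INVALID;

      return 0;
    }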
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: decrypt(gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b, ELG_secret_key *skey ) { gcry_mpi_t t1 = mpi_alloc_secure( mpi_get_nlimbs( skey->p ) ); /* output = b/(a^x) mod p */ gcry_mpi_powm( t1, a, skey->x, skey->p ); mpi_invm( t1, t1, skey->p ); mpi_mulm( output, b, t1, skey->p ); #if 0 if( DBG_CIPHER ) { log_mpidump("elg decrypted x= ", skey->x); log_mpidump("elg decrypted p= ", skey->p); log_mpidump("elg decrypted a= ", a); log_mpidump("elg decrypted b= ", b); log_mpidump("elg decrypted M= ", output); } #endif mpi_free(t1); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'cipher: Use ciphertext blinding for Elgamal decryption. * cipher/elgamal.c (USE_BLINDING): New. (decrypt): Rewrite to use ciphertext blinding. -- CVE-id: CVE-2014-3591 As a countermeasure to a new side-channel attacks on sliding windows exponentiation we blind the ciphertext for Elgamal decryption. This is similar to what we are doing with RSA. This patch is a backport of the GnuPG 1.4 commit ff53cf06e966dce0daba5f2c84e03ab9db2c3c8b. Unfortunately, the performance impact of Elgamal blinding is quite noticeable (i5-2410M CPU @ 2.30GHz TP 220): Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 100ms 90ms ELG 2048 bit - 330ms 350ms ELG 3072 bit - 660ms 790ms Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 150ms 90ms ELG 2048 bit - 520ms 360ms ELG 3072 bit - 1100ms 800ms Signed-off-by: Werner Koch <[email protected]> (cherry picked from commit 410d70bad9a650e3837055e36f157894ae49a57d) Resolved conflicts: cipher/elgamal.c.'</s>
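The countermeasure named above is ciphertext blinding: instead of raising the attacker-controlled ciphertext component a to the secret exponent directly, the exponentiation is done on a value randomized with a fresh r, removing the data dependence the side channel exploits. The sketch below expresses the idea with libgcrypt's public MPI API; the in-tree fix uses internal mpi_* calls and differs in detail, and the function name here is made up.

    #include <gcrypt.h>

    /* output = b * a^(-x) mod p, computed without ever exponentiating the
     * raw ciphertext component a:
     *     t1 = r^x,  t2 = ((a*r)^x)^(-1)  =>  t1*t2 = a^(-x)  (mod p)   */
    static void
    elg_decrypt_blinded (gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b,
                         gcry_mpi_t x, gcry_mpi_t p)
    {
      unsigned int nbits = gcry_mpi_get_nbits (p);
      gcry_mpi_t r  = gcry_mpi_new (nbits);
      gcry_mpi_t t1 = gcry_mpi_snew (nbits);
      gcry_mpi_t t2 = gcry_mpi_snew (nbits);

      /* The blinding value only needs to be unpredictable and nonzero
       * modulo the prime p; weak randomness is enough for that. */
      gcry_mpi_randomize (r, nbits, GCRY_WEAK_RANDOM);

      gcry_mpi_powm (t1, r, x, p);       /* t1 = r^x mod p         */
      gcry_mpi_mulm (t2, a, r, p);       /* t2 = a*r mod p         */
      gcry_mpi_powm (t2, t2, x, p);      /* t2 = (a*r)^x mod p     */
      gcry_mpi_invm (t2, t2, p);         /* t2 = (a*r)^(-x) mod p  */
      gcry_mpi_mulm (t1, t1, t2, p);     /* t1 = a^(-x) mod p      */

      gcry_mpi_mulm (output, b, t1, p);  /* recover the message    */

      gcry_mpi_release (r);
      gcry_mpi_release (t1);
      gcry_mpi_release (t2);
    }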
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: struct file *get_empty_filp(void) { const struct cred *cred = current_cred(); static long old_max; struct file *f; int error; /* * Privileged users can go above max_files */ if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) { /* * percpu_counters are inaccurate. Do an expensive check before * we go and fail. */ if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files) goto over; } f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); if (unlikely(!f)) return ERR_PTR(-ENOMEM); percpu_counter_inc(&nr_files); f->f_cred = get_cred(cred); error = security_file_alloc(f); if (unlikely(error)) { file_free(f); return ERR_PTR(error); } INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ return f; over: /* Ran out of filps - report that */ if (get_nr_files() > old_max) { pr_info("VFS: file-max limit %lu reached\n", get_max_files()); old_max = get_nr_files(); } return ERR_PTR(-ENFILE); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void destroy_super(struct super_block *s) { int i; list_lru_destroy(&s->s_dentry_lru); list_lru_destroy(&s->s_inode_lru); #ifdef CONFIG_SMP free_percpu(s->s_files); #endif for (i = 0; i < SB_FREEZE_LEVELS; i++) percpu_counter_destroy(&s->s_writers.counter[i]); security_sb_free(s); WARN_ON(!list_empty(&s->s_mounts)); kfree(s->s_subtype); kfree(s->s_options); kfree_rcu(s, rcu); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct super_block *alloc_super(struct file_system_type *type, int flags) { struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); static const struct super_operations default_op; int i; if (!s) return NULL; if (security_sb_alloc(s)) goto fail; #ifdef CONFIG_SMP s->s_files = alloc_percpu(struct list_head); if (!s->s_files) goto fail; for_each_possible_cpu(i) INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i)); #else INIT_LIST_HEAD(&s->s_files); #endif for (i = 0; i < SB_FREEZE_LEVELS; i++) { if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0) goto fail; lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i], &type->s_writers_key[i], 0); } init_waitqueue_head(&s->s_writers.wait); init_waitqueue_head(&s->s_writers.wait_unfrozen); s->s_flags = flags; s->s_bdi = &default_backing_dev_info; INIT_HLIST_NODE(&s->s_instances); INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); if (list_lru_init(&s->s_dentry_lru)) goto fail; if (list_lru_init(&s->s_inode_lru)) goto fail; INIT_LIST_HEAD(&s->s_mounts); init_rwsem(&s->s_umount); lockdep_set_class(&s->s_umount, &type->s_umount_key); /* * sget() can have s_umount recursion. * * When it cannot find a suitable sb, it allocates a new * one (this one), and tries again to find a suitable old * one. * * In case that succeeds, it will acquire the s_umount * lock of the old one. Since these are clearly distrinct * locks, and this object isn't exposed yet, there's no * risk of deadlocks. * * Annotate this by putting this lock in a different * subclass. */ down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); s->s_count = 1; atomic_set(&s->s_active, 1); mutex_init(&s->s_vfs_rename_mutex); lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); mutex_init(&s->s_dquot.dqio_mutex); mutex_init(&s->s_dquot.dqonoff_mutex); init_rwsem(&s->s_dquot.dqptr_sem); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; s->s_time_gran = 1000000000; s->cleancache_poolid = -1; s->s_shrink.seeks = DEFAULT_SEEKS; s->s_shrink.scan_objects = super_cache_scan; s->s_shrink.count_objects = super_cache_count; s->s_shrink.batch = 1024; s->s_shrink.flags = SHRINKER_NUMA_AWARE; return s; fail: destroy_super(s); return NULL; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void __fput_sync(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { struct task_struct *task = current; file_sb_list_del(file); BUG_ON(!(task->flags & PF_KTHREAD)); __fput(file); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void mark_files_ro(struct super_block *sb) { struct file *f; lg_global_lock(&files_lglock); do_file_list_for_each_entry(sb, f) { if (!file_count(f)) continue; if (!(f->f_mode & FMODE_WRITE)) continue; spin_lock(&f->f_lock); f->f_mode &= ~FMODE_WRITE; spin_unlock(&f->f_lock); if (file_check_writeable(f) != 0) continue; __mnt_drop_write(f->f_path.mnt); file_release_write(f); } while_file_list_for_each_entry; lg_global_unlock(&files_lglock); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void put_filp(struct file *file) { if (atomic_long_dec_and_test(&file->f_count)) { security_file_free(file); file_sb_list_del(file); file_free(file); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void __init files_init(unsigned long mempages) { unsigned long n; filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); /* * One file with associated inode and dcache is very roughly 1K. * Per default don't use more than 10% of our memory for files. */ n = (mempages * (PAGE_SIZE / 1024)) / 10; files_stat.max_files = max_t(unsigned long, n, NR_FILE); files_defer_init(); lg_lock_init(&files_lglock, "files_lglock"); percpu_counter_init(&nr_files, 0); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'get rid of s_files and files_lock The only thing we need it for is alt-sysrq-r (emergency remount r/o) and these days we can do just as well without going through the list of files. Signed-off-by: Al Viro <[email protected]>'</s>
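Note on the six entries above (destroy_super, alloc_super, __fput_sync, mark_files_ro, put_filp, files_init): they all quote the same kernel commit, "get rid of s_files and files_lock". The CWE-17 label ("Code") says little on its own; per the quoted commit message, the per-superblock list of open files (s_files) and its global lock existed only so the emergency remount-read-only path (mark_files_ro) could walk every open file, and that walk is no longer needed, so the bookkeeping is deleted outright. The sketch below is a simplified user-space illustration of the pattern being removed, not kernel code; every name in it (open_file, track_file, mark_all_read_only, files_lock) is invented for the example.

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical stand-in for "struct file": a mode flag on a global list. */
    struct open_file {
        int writable;
        struct open_file *next;
    };

    static struct open_file *all_files;                             /* analogue of s_files      */
    static pthread_mutex_t files_lock = PTHREAD_MUTEX_INITIALIZER;  /* analogue of files_lglock */

    /* Every "open" pays for list maintenance under the global lock. */
    static void track_file(struct open_file *f)
    {
        pthread_mutex_lock(&files_lock);
        f->next = all_files;
        all_files = f;
        pthread_mutex_unlock(&files_lock);
    }

    /* Analogue of mark_files_ro(): the rarely used remount-r/o path walks
     * every open file under the same global lock. */
    static void mark_all_read_only(void)
    {
        pthread_mutex_lock(&files_lock);
        for (struct open_file *f = all_files; f; f = f->next)
            f->writable = 0;
        pthread_mutex_unlock(&files_lock);
    }

    int main(void)
    {
        struct open_file a = { .writable = 1 }, b = { .writable = 0 };
        track_file(&a);
        track_file(&b);
        mark_all_read_only();
        printf("a writable=%d, b writable=%d\n", a.writable, b.writable);
        return 0;
    }

Keeping a list like this makes every open and close pay for lock traffic just to support one seldom-used path, which is why the commit removes the structure instead of trying to optimize it.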
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: Status readUTF8String( StringData* out ) { int sz; if ( !readNumber<int>( &sz ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); if ( out ) { *out = StringData( _buffer + _position, sz ); } if ( !skip( sz - 1 ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); char c; if ( !readNumber<char>( &c ) ) return Status( ErrorCodes::InvalidBSON, "invalid bson" ); if ( c != 0 ) return Status( ErrorCodes::InvalidBSON, "not null terminate string" ); return Status::OK(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'SERVER-17264: improve bson validation for utf-8 strings (cherry picked from commit 394a8569ff14a215c0691aa34440227b2e62a4de) Conflicts: src/mongo/bson/bson_validate_test.cpp'</s>
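The MongoDB entry above is labelled CWE-20 (improper input validation): in readUTF8String() the 4-byte length sz is used to build the StringData view and to drive skip(sz - 1) before anything checks that it is at least 1 and no larger than the bytes remaining in the buffer, so a hostile length can reference memory outside the document. The quoted commit message only says the fix improves BSON validation for UTF-8 strings; the C sketch below shows the general shape of such a check and is not the actual MongoDB patch (read_len_prefixed_string and its signature are invented, and it assumes little-endian input).

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read a BSON-style length-prefixed, NUL-terminated string at *pos.
     * The length field counts the string bytes including the trailing NUL.
     * Returns 0 on success, -1 on malformed input. */
    static int read_len_prefixed_string(const char *buf, size_t bufsize,
                                        size_t *pos, const char **out, size_t *outlen)
    {
        uint32_t sz;

        if (bufsize - *pos < sizeof(sz))             /* room for the length field? */
            return -1;
        memcpy(&sz, buf + *pos, sizeof(sz));         /* little-endian input assumed */

        /* Validate before trusting: at least the NUL, and inside the buffer. */
        if (sz < 1 || sz > bufsize - *pos - sizeof(sz))
            return -1;
        if (buf[*pos + sizeof(sz) + sz - 1] != '\0') /* must be NUL terminated */
            return -1;

        *out = buf + *pos + sizeof(sz);
        *outlen = sz - 1;
        *pos += sizeof(sz) + sz;
        return 0;
    }

    int main(void)
    {
        const char doc[] = { 6, 0, 0, 0, 'h', 'e', 'l', 'l', 'o', 0 };
        size_t pos = 0, len;
        const char *s;
        if (read_len_prefixed_string(doc, sizeof(doc), &pos, &s, &len) == 0)
            printf("ok: \"%s\" (%zu bytes)\n", s, len);
        return 0;
    }

The important ordering is that both bounds and the terminating NUL are verified before any pointer or length derived from sz is handed back to the caller.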
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len) { while (!iov->iov_len) iov++; while (len > 0) { unsigned long this_len; this_len = min_t(unsigned long, len, iov->iov_len); fault_in_pages_readable(iov->iov_base, this_len); len -= this_len; iov++; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pipe_write(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t ppos) { struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; ssize_t ret; int do_wakeup; struct iovec *iov = (struct iovec *)_iov; size_t total_len; ssize_t chars; total_len = iov_length(iov, nr_segs); /* Null write succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; __pipe_lock(pipe); if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } /* We try to merge small writes */ chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ if (pipe->nrbufs && chars != 0) { int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + lastbuf; const struct pipe_buf_operations *ops = buf->ops; int offset = buf->offset + buf->len; if (ops->can_merge && offset + chars <= PAGE_SIZE) { int error, atomic = 1; void *addr; error = ops->confirm(pipe, buf); if (error) goto out; iov_fault_in_pages_read(iov, chars); redo1: if (atomic) addr = kmap_atomic(buf->page); else addr = kmap(buf->page); error = pipe_iov_copy_from_user(offset + addr, iov, chars, atomic); if (atomic) kunmap_atomic(addr); else kunmap(buf->page); ret = error; do_wakeup = 1; if (error) { if (atomic) { atomic = 0; goto redo1; } goto out; } buf->len += chars; total_len -= chars; ret = chars; if (!total_len) goto out; } } for (;;) { int bufs; if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } bufs = pipe->nrbufs; if (bufs < pipe->buffers) { int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1); struct pipe_buffer *buf = pipe->bufs + newbuf; struct page *page = pipe->tmp_page; char *src; int error, atomic = 1; if (!page) { page = alloc_page(GFP_HIGHUSER); if (unlikely(!page)) { ret = ret ? : -ENOMEM; break; } pipe->tmp_page = page; } /* Always wake up, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. * FIXME! Is this really true? */ do_wakeup = 1; chars = PAGE_SIZE; if (chars > total_len) chars = total_len; iov_fault_in_pages_read(iov, chars); redo2: if (atomic) src = kmap_atomic(page); else src = kmap(page); error = pipe_iov_copy_from_user(src, iov, chars, atomic); if (atomic) kunmap_atomic(src); else kunmap(page); if (unlikely(error)) { if (atomic) { atomic = 0; goto redo2; } if (!ret) ret = error; break; } ret += chars; /* Insert it into the buffer array */ buf->page = page; buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = chars; buf->flags = 0; if (is_packetized(filp)) { buf->ops = &packet_pipe_buf_ops; buf->flags = PIPE_BUF_FLAG_PACKET; } pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; total_len -= chars; if (!total_len) break; } if (bufs < pipe->buffers) continue; if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } out: __pipe_unlock(pipe); if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { int err = file_update_time(filp); if (err) ret = err; sb_end_write(file_inode(filp)->i_sb); } return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'new helper: copy_page_from_iter() parallel to copy_page_to_iter(). pipe_write() switched to it (and became ->write_iter()). Signed-off-by: Al Viro <[email protected]>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: pipe_read(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t pos) { struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; int do_wakeup; ssize_t ret; struct iovec *iov = (struct iovec *)_iov; size_t total_len; total_len = iov_length(iov, nr_segs); /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; __pipe_lock(pipe); for (;;) { int bufs = pipe->nrbufs; if (bufs) { int curbuf = pipe->curbuf; struct pipe_buffer *buf = pipe->bufs + curbuf; const struct pipe_buf_operations *ops = buf->ops; void *addr; size_t chars = buf->len; int error, atomic; if (chars > total_len) chars = total_len; error = ops->confirm(pipe, buf); if (error) { if (!ret) ret = error; break; } atomic = !iov_fault_in_pages_write(iov, chars); redo: if (atomic) addr = kmap_atomic(buf->page); else addr = kmap(buf->page); error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic); if (atomic) kunmap_atomic(addr); else kunmap(buf->page); if (unlikely(error)) { /* * Just retry with the slow path if we failed. */ if (atomic) { atomic = 0; goto redo; } if (!ret) ret = error; break; } ret += chars; buf->offset += chars; buf->len -= chars; /* Was it a packet buffer? Clean up and exit */ if (buf->flags & PIPE_BUF_FLAG_PACKET) { total_len = chars; buf->len = 0; } if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); curbuf = (curbuf + 1) & (pipe->buffers - 1); pipe->curbuf = curbuf; pipe->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? */ continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); } __pipe_unlock(pipe); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-17'], 'message': 'switch pipe_read() to copy_page_to_iter() Signed-off-by: Al Viro <[email protected]>'</s>
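The three pipe entries above (iov_fault_in_pages_read, pipe_write, pipe_read) share a root cause that the bare CWE-17 label does not spell out: pipe_iov_copy_from_user()/pipe_iov_copy_to_user() are first attempted atomically and, when that attempt fails partway through, the code jumps back and retries the full copy even though the iovec has already been partially advanced, so the second pass can run off the end of the caller's segments. Centralizing that position bookkeeping is what the copy_page_from_iter()/copy_page_to_iter() helpers named in the commit messages provide. (Relating this to the bug class later tracked as CVE-2015-1805 is an editorial inference, not something stated in the entries.) The user-space C sketch below shows the safe pattern: carry an explicit consumed-byte count across calls so a retry resumes where the previous attempt stopped; copy_to_page and everything around it are invented names.

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    #define PAGE_SIZE 4096

    /* Copy up to `want` bytes from an iovec array into `page`, resuming
     * after the *consumed bytes already taken on an earlier attempt.
     * Returns the number of bytes copied by this call. */
    static size_t copy_to_page(char *page, size_t want,
                               const struct iovec *iov, int iovcnt,
                               size_t *consumed)
    {
        size_t skip = *consumed;   /* how far into the iovec we already are */
        size_t done = 0;

        for (int i = 0; i < iovcnt && done < want; i++) {
            size_t len = iov[i].iov_len;
            if (skip >= len) {     /* this segment was finished last time */
                skip -= len;
                continue;
            }
            size_t n = len - skip;
            if (n > want - done)
                n = want - done;
            memcpy(page + done, (const char *)iov[i].iov_base + skip, n);
            skip = 0;
            done += n;
        }
        *consumed += done;
        return done;
    }

    int main(void)
    {
        char page[PAGE_SIZE];
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
            { .iov_base = a, .iov_len = 6 },
            { .iov_base = b, .iov_len = 5 },
        };
        size_t consumed = 0;

        /* Simulate a partial first attempt (4 bytes) followed by a retry:
         * because `consumed` is carried across calls, the retry resumes at
         * byte 4 instead of re-walking the iovec from the start. */
        size_t n1 = copy_to_page(page, 4, iov, 2, &consumed);
        size_t n2 = copy_to_page(page + n1, 7, iov, 2, &consumed);
        printf("copied %zu+%zu bytes: %.11s\n", n1, n2, page);
        return 0;
    }

The kernel's iov_iter keeps exactly this kind of cursor (segment position plus intra-segment offset), so callers never have to re-derive their position by hand after a failed attempt.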
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _zip_cdir_new(int nentry, struct zip_error *error) { struct zip_cdir *cd; if ((cd=(struct zip_cdir *)malloc(sizeof(*cd))) == NULL) { _zip_error_set(error, ZIP_ER_MEMORY, 0); return NULL; } if ((cd->entry=(struct zip_dirent *)malloc(sizeof(*(cd->entry))*nentry)) == NULL) { _zip_error_set(error, ZIP_ER_MEMORY, 0); free(cd); return NULL; } /* entries must be initialized by caller */ cd->nentry = nentry; cd->size = cd->offset = 0; cd->comment = NULL; cd->comment_len = 0; return cd; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix bug #69253 - ZIP Integer Overflow leads to writing past heap boundary'</s>
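The final entry is the most concrete of the set: in _zip_cdir_new() the entry count nentry comes from the file being parsed and is multiplied by sizeof(*(cd->entry)) with no overflow check, so a large count can wrap the allocation size, malloc() returns a buffer far smaller than nentry entries, and the subsequent entry initialization writes past the heap allocation, which is what the quoted bug title ("ZIP Integer Overflow leads to writing past heap boundary") and the CWE-189 label describe. A minimal C sketch of an overflow-checked allocation helper follows; alloc_array is an invented name and the real libzip/PHP fix may be structured differently.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate `count` elements of `size` bytes, refusing counts whose total
     * would wrap around SIZE_MAX (the unchecked sizeof(*entry) * nentry
     * pattern is what lets a hostile central-directory count shrink the
     * allocation). */
    static void *alloc_array(size_t count, size_t size)
    {
        if (size != 0 && count > SIZE_MAX / size)
            return NULL;                 /* would overflow: reject, do not wrap */
        return malloc(count * size);
    }

    struct dirent_like { char name[64]; uint32_t offset; };

    int main(void)
    {
        /* A sane count succeeds... */
        struct dirent_like *ok = alloc_array(16, sizeof(*ok));
        /* ...a wrapping count is rejected instead of yielding a tiny buffer. */
        struct dirent_like *bad = alloc_array(SIZE_MAX / 2, sizeof(*bad));
        printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
        free(ok);
        return 0;
    }

calloc(count, size) performs the same overflow check internally on modern C libraries, which is why it is often the simpler replacement for a bare malloc(count * size).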