<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static ssize_t write_mem(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
phys_addr_t p = *ppos;
ssize_t written, sz;
unsigned long copied;
void *ptr;
if (p != *ppos)
return -EFBIG;
if (!valid_phys_addr_range(p, count))
return -EFAULT;
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
count -= sz;
written += sz;
}
#endif
while (count > 0) {
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr) {
if (written)
break;
return -EFAULT;
}
copied = copy_from_user(ptr, buf, sz);
unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
if (written)
break;
return -EFAULT;
}
buf += sz;
p += sz;
count -= sz;
written += sz;
}
*ppos += written;
return written;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200', 'CWE-732'], 'message': 'mm: Tighten x86 /dev/mem with zeroing reads
Under CONFIG_STRICT_DEVMEM, reading System RAM through /dev/mem is
disallowed. However, on x86, the first 1MB was always allowed for BIOS
and similar things, regardless of it actually being System RAM. It was
possible for heap to end up getting allocated in low 1MB RAM, and then
read by things like x86info or dd, which would trip hardened usercopy:
usercopy: kernel memory exposure attempt detected from ffff880000090000 (dma-kmalloc-256) (4096 bytes)
This changes the x86 exception for the low 1MB by reading back zeros for
System RAM areas instead of blindly allowing them. More work is needed to
extend this to mmap, but currently mmap doesn't go through usercopy, so
hardened usercopy won't Oops the kernel.
Reported-by: Tommi Rantala <[email protected]>
Tested-by: Tommi Rantala <[email protected]>
Signed-off-by: Kees Cook <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ssize_t v9fs_list_xattr(FsContext *ctx, const char *path,
void *value, size_t vsize)
{
ssize_t size = 0;
void *ovalue = value;
XattrOperations *xops;
char *orig_value, *orig_value_start;
ssize_t xattr_len, parsed_len = 0, attr_len;
char *dirpath, *name;
int dirfd;
/* Get the actual len */
dirpath = g_path_get_dirname(path);
dirfd = local_opendir_nofollow(ctx, dirpath);
g_free(dirpath);
if (dirfd == -1) {
return -1;
}
name = g_path_get_basename(path);
xattr_len = flistxattrat_nofollow(dirfd, name, value, 0);
if (xattr_len <= 0) {
g_free(name);
close_preserve_errno(dirfd);
return xattr_len;
}
/* Now fetch the xattr and find the actual size */
orig_value = g_malloc(xattr_len);
xattr_len = flistxattrat_nofollow(dirfd, name, orig_value, xattr_len);
g_free(name);
close_preserve_errno(dirfd);
if (xattr_len < 0) {
return -1;
}
/* store the orig pointer */
orig_value_start = orig_value;
while (xattr_len > parsed_len) {
xops = get_xattr_operations(ctx->xops, orig_value);
if (!xops) {
goto next_entry;
}
if (!value) {
size += xops->listxattr(ctx, path, orig_value, value, vsize);
} else {
size = xops->listxattr(ctx, path, orig_value, value, vsize);
if (size < 0) {
goto err_out;
}
value += size;
vsize -= size;
}
next_entry:
/* Got the next entry */
attr_len = strlen(orig_value) + 1;
parsed_len += attr_len;
orig_value += attr_len;
}
if (value) {
size = value - ovalue;
}
err_out:
g_free(orig_value_start);
return size;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-772'], 'message': '9pfs: xattr: fix memory leak in v9fs_list_xattr
Free 'orig_value' in error path.
Signed-off-by: Li Qiang <[email protected]>
Signed-off-by: Greg Kurz <[email protected]>'</s> |
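The fix named in the message is a single call in the error path; a minimal sketch of the corrected tail of the function, everything else unchanged:

        xattr_len = flistxattrat_nofollow(dirfd, name, orig_value, xattr_len);
        g_free(name);
        close_preserve_errno(dirfd);
        if (xattr_len < 0) {
                g_free(orig_value);     /* previously leaked on this return */
                return -1;
        }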
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
struct scatterlist *sg;
unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
struct aead_request *req;
struct macsec_secy *secy;
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
bool sci_present;
u32 pn;
secy = &macsec->secy;
tx_sc = &secy->tx_sc;
/* 10.5.1 TX SA assignment */
tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
if (!tx_sa) {
secy->operational = false;
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
struct sk_buff *nskb = skb_copy_expand(skb,
MACSEC_NEEDED_HEADROOM,
MACSEC_NEEDED_TAILROOM,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
macsec_txsa_put(tx_sa);
return ERR_PTR(-ENOMEM);
}
}
unprotected_len = skb->len;
eth = eth_hdr(skb);
sci_present = send_sci(secy);
hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
if (pn == 0) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.OutPktsTooLong++;
u64_stats_update_end(&secy_stats->syncp);
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-EINVAL);
}
req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
}
macsec_skb_cb(skb)->req = req;
macsec_skb_cb(skb)->tx_sa = tx_sa;
aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
dev_hold(skb->dev);
ret = crypto_aead_encrypt(req);
if (ret == -EINPROGRESS) {
return ERR_PTR(ret);
} else if (ret != 0) {
dev_put(skb->dev);
kfree_skb(skb);
aead_request_free(req);
macsec_txsa_put(tx_sa);
return ERR_PTR(-EINVAL);
}
dev_put(skb->dev);
aead_request_free(req);
macsec_txsa_put(tx_sa);
return skb;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
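Per the message, the fixed encrypt path first calls skb_cow_data(), which both makes the skb writable and returns its fragment count, then sizes the scatterlist from that count instead of assuming MAX_SKB_FRAGS + 1. A sketch of the call site; the extra macsec_alloc_req() parameter follows the described approach (see the next record):

        struct sk_buff *trailer;
        int ret;

        /* make the skb writable and learn how many frags it really has */
        ret = skb_cow_data(skb, 0, &trailer);
        if (ret < 0) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(ret);
        }

        req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
        }

        sg_init_table(sg, ret);         /* exactly as many entries as needed */
        skb_to_sgvec(skb, sg, 0, skb->len);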
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
unsigned char **iv,
struct scatterlist **sg)
{
size_t size, iv_offset, sg_offset;
struct aead_request *req;
void *tmp;
size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
iv_offset = size;
size += GCM_AES_IV_LEN;
size = ALIGN(size, __alignof__(struct scatterlist));
sg_offset = size;
size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
tmp = kmalloc(size, GFP_ATOMIC);
if (!tmp)
return NULL;
*iv = (unsigned char *)(tmp + iv_offset);
*sg = (struct scatterlist *)(tmp + sg_offset);
req = tmp;
aead_request_set_tfm(req, tfm);
return req;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'macsec: dynamically allocate space for sglist
We call skb_cow_data, which is good anyway to ensure we can actually
modify the skb as such (another error from prior). Now that we have the
number of fragments required, we can safely allocate exactly that amount
of memory.
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
Signed-off-by: Jason A. Donenfeld <[email protected]>
Acked-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
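The companion change to macsec_alloc_req() replaces the fixed MAX_SKB_FRAGS + 1 worst case with a caller-supplied fragment count; a sketch under the same assumption about the new parameter:

static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
                                             unsigned char **iv,
                                             struct scatterlist **sg,
                                             int num_frags)
{
        size_t size, iv_offset, sg_offset;
        struct aead_request *req;
        void *tmp;

        size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
        iv_offset = size;
        size += GCM_AES_IV_LEN;
        size = ALIGN(size, __alignof__(struct scatterlist));
        sg_offset = size;
        /* allocate exactly the number of sg entries the skb needs */
        size += sizeof(struct scatterlist) * num_frags;

        tmp = kmalloc(size, GFP_ATOMIC);
        if (!tmp)
                return NULL;

        *iv = (unsigned char *)(tmp + iv_offset);
        *sg = (struct scatterlist *)(tmp + sg_offset);
        req = tmp;
        aead_request_set_tfm(req, tfm);
        return req;
}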
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: cr_input_read_char (CRInput * a_this, guint32 * a_char)
{
enum CRStatus status = CR_OK;
gulong consumed = 0,
nb_bytes_left = 0;
g_return_val_if_fail (a_this && PRIVATE (a_this) && a_char,
CR_BAD_PARAM_ERROR);
if (PRIVATE (a_this)->end_of_input == TRUE)
return CR_END_OF_INPUT_ERROR;
nb_bytes_left = cr_input_get_nb_bytes_left (a_this);
if (nb_bytes_left < 1) {
return CR_END_OF_INPUT_ERROR;
}
status = cr_utils_read_char_from_utf8_buf
(PRIVATE (a_this)->in_buf
+
PRIVATE (a_this)->next_byte_index,
nb_bytes_left, a_char, &consumed);
if (status == CR_OK) {
/*update next byte index */
PRIVATE (a_this)->next_byte_index += consumed;
/*update line and column number */
if (PRIVATE (a_this)->end_of_line == TRUE) {
PRIVATE (a_this)->col = 1;
PRIVATE (a_this)->line++;
PRIVATE (a_this)->end_of_line = FALSE;
} else if (*a_char != '\n') {
PRIVATE (a_this)->col++;
}
if (*a_char == '\n') {
PRIVATE (a_this)->end_of_line = TRUE;
}
}
return status;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-125'], 'message': 'input: check end of input before reading a byte
When reading bytes we weren't checking that the index wasn't
out of bounds, and this could produce an invalid read which
could lead to a security bug.'</s> |
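cr_input_read_char() above does guard with cr_input_get_nb_bytes_left(); the commit extends the same end-of-input check to the single-byte read path. A minimal sketch of the guard, with names mirrored from the function above rather than taken from the actual patch:

        /* guard a byte read the way read_char guards a UTF-8 read */
        if (cr_input_get_nb_bytes_left (a_this) < 1)
                return CR_END_OF_INPUT_ERROR;
        *a_byte = PRIVATE (a_this)->in_buf[PRIVATE (a_this)->next_byte_index++];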
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct device *dev = &intf->dev;
struct usb_device *usbdev = interface_to_usbdev(intf);
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
int i, pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
dev_err(dev, "Can't set altsetting 1.\n");
return -EIO;
}
netdev = alloc_etherdev(sizeof(struct catc));
if (!netdev)
return -ENOMEM;
catc = netdev_priv(netdev);
netdev->netdev_ops = &catc_netdev_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
netdev->ethtool_ops = &ops;
catc->usbdev = usbdev;
catc->netdev = netdev;
spin_lock_init(&catc->tx_lock);
spin_lock_init(&catc->ctrl_lock);
init_timer(&catc->timer);
catc->timer.data = (long) catc;
catc->timer.function = catc_stats_timer;
catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
ret = -ENOMEM;
goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
dev_dbg(dev, "Testing for f5u011\n");
catc->is_f5u011 = 1;
atomic_set(&catc->recq_sz, 0);
pktsz = RX_PKT_SZ;
} else {
pktsz = RX_MAX_BURST * (PKT_SZ + 2);
}
usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
NULL, NULL, 0, catc_ctrl_done, catc);
usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1),
NULL, 0, catc_tx_done, catc);
usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1),
catc->rx_buf, pktsz, catc_rx_done, catc);
usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2),
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
dev_dbg(dev, "Checking memory size\n");
i = 0x12345678;
catc_write_mem(catc, 0x7a80, &i, 4);
i = 0x87654321;
catc_write_mem(catc, 0xfa80, &i, 4);
catc_read_mem(catc, 0x7a80, &i, 4);
switch (i) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
dev_dbg(dev, "64k Memory\n");
break;
default:
dev_warn(&intf->dev,
"Couldn't detect memory size, assuming 32k\n");
case 0x87654321:
catc_set_reg(catc, TxBufCount, 4);
catc_set_reg(catc, RxBufCount, 16);
dev_dbg(dev, "32k Memory\n");
break;
}
dev_dbg(dev, "Getting MAC from SEEROM.\n");
catc_get_mac(catc, netdev->dev_addr);
dev_dbg(dev, "Setting MAC into registers.\n");
for (i = 0; i < 6; i++)
catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);
dev_dbg(dev, "Filling the multicast list.\n");
eth_broadcast_addr(broadcast);
catc_multicast(broadcast, catc->multicast);
catc_multicast(netdev->dev_addr, catc->multicast);
catc_write_mem(catc, 0xfa80, catc->multicast, 64);
dev_dbg(dev, "Clearing error counters.\n");
for (i = 0; i < 8; i++)
catc_set_reg(catc, EthStats + i, 0);
catc->last_stats = jiffies;
dev_dbg(dev, "Enabling.\n");
catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
catc_set_reg(catc, LEDCtrl, LEDLink);
catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
} else {
dev_dbg(dev, "Performing reset\n");
catc_reset(catc);
catc_get_mac(catc, netdev->dev_addr);
dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
catc->rxmode[1] = 0;
f5u011_rxmode(catc, catc->rxmode);
}
dev_dbg(dev, "Init done.\n");
printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
ret = register_netdev(netdev);
if (ret)
goto fail_clear_intfdata;
return 0;
fail_clear_intfdata:
usb_set_intfdata(intf, NULL);
fail_free:
usb_free_urb(catc->ctrl_urb);
usb_free_urb(catc->tx_urb);
usb_free_urb(catc->rx_urb);
usb_free_urb(catc->irq_urb);
free_netdev(netdev);
return ret;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20', 'CWE-119'], 'message': 'catc: Use heap buffer for memory size test
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
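The fix swaps the on-stack int used for the memory-size probe for a heap buffer, since USB transfer buffers may be DMA-mapped and must not live on a (possibly vmalloc'ed) stack. A hedged sketch of the probe sequence only; allocation failure is folded into the function's existing fail_free path:

        u32 *buf;

        buf = kmalloc(4, GFP_KERNEL);   /* DMA-capable, unlike the stack */
        if (!buf) {
                ret = -ENOMEM;
                goto fail_free;
        }

        dev_dbg(dev, "Checking memory size\n");
        *buf = 0x12345678;
        catc_write_mem(catc, 0x7a80, buf, 4);
        *buf = 0x87654321;
        catc_write_mem(catc, 0xfa80, buf, 4);
        catc_read_mem(catc, 0x7a80, buf, 4);
        /* switch (*buf) { ... } as above, then kfree(buf) */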
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct cp2112_device *dev;
u8 buf[3];
struct cp2112_smbus_config_report config;
int ret;
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
GFP_KERNEL);
if (!dev->in_out_buffer)
return -ENOMEM;
spin_lock_init(&dev->lock);
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
return ret;
}
ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
if (ret) {
hid_err(hdev, "hw start failed\n");
return ret;
}
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev, "hw open failed\n");
goto err_hid_stop;
}
ret = hid_hw_power(hdev, PM_HINT_FULLON);
if (ret < 0) {
hid_err(hdev, "power management error: %d\n", ret);
goto err_hid_close;
}
ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf),
HID_FEATURE_REPORT);
if (ret != sizeof(buf)) {
hid_err(hdev, "error requesting version\n");
if (ret >= 0)
ret = -EIO;
goto err_power_normal;
}
hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n",
buf[1], buf[2]);
ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config,
sizeof(config), HID_FEATURE_REPORT);
if (ret != sizeof(config)) {
hid_err(hdev, "error requesting SMBus config\n");
if (ret >= 0)
ret = -EIO;
goto err_power_normal;
}
config.retry_time = cpu_to_be16(1);
ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config),
HID_FEATURE_REPORT);
if (ret != sizeof(config)) {
hid_err(hdev, "error setting SMBus config\n");
if (ret >= 0)
ret = -EIO;
goto err_power_normal;
}
hid_set_drvdata(hdev, (void *)dev);
dev->hdev = hdev;
dev->adap.owner = THIS_MODULE;
dev->adap.class = I2C_CLASS_HWMON;
dev->adap.algo = &smbus_algorithm;
dev->adap.algo_data = dev;
dev->adap.dev.parent = &hdev->dev;
snprintf(dev->adap.name, sizeof(dev->adap.name),
"CP2112 SMBus Bridge on hiddev%d", hdev->minor);
dev->hwversion = buf[2];
init_waitqueue_head(&dev->wait);
hid_device_io_start(hdev);
ret = i2c_add_adapter(&dev->adap);
hid_device_io_stop(hdev);
if (ret) {
hid_err(hdev, "error registering i2c adapter\n");
goto err_power_normal;
}
hid_dbg(hdev, "adapter registered\n");
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
dev->gc.set = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = 8;
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
hid_err(hdev, "error registering gpio chip\n");
goto err_free_i2c;
}
ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group);
if (ret < 0) {
hid_err(hdev, "error creating sysfs attrs\n");
goto err_gpiochip_remove;
}
chmod_sysfs_attrs(hdev);
hid_hw_power(hdev, PM_HINT_NORMAL);
ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev->gc.parent, "failed to add IRQ chip\n");
goto err_sysfs_remove;
}
return ret;
err_sysfs_remove:
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
err_gpiochip_remove:
gpiochip_remove(&dev->gc);
err_free_i2c:
i2c_del_adapter(&dev->adap);
err_power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
err_hid_close:
hid_hw_close(hdev);
err_hid_stop:
hid_hw_stop(hdev);
return ret;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-404', 'CWE-703'], 'message': 'HID: cp2112: fix sleep-while-atomic
A recent commit fixing DMA-buffers on stack added a shared transfer
buffer protected by a spinlock. This is broken as the USB HID request
callbacks can sleep. Fix this up by replacing the spinlock with a mutex.
Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
Cc: stable <[email protected]> # 4.9
Signed-off-by: Johan Hovold <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cp2112_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
goto fail;
}
spin_unlock_irqrestore(&dev->lock, flags);
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
cp2112_gpio_set(chip, offset, value);
return 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return ret < 0 ? ret : -EIO;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-404', 'CWE-703'], 'message': 'HID: cp2112: fix sleep-while-atomic
A recent commit fixing DMA-buffers on stack added a shared transfer
buffer protected by a spinlock. This is broken as the USB HID request
callbacks can sleep. Fix this up by replacing the spinlock with a mutex.
Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
Cc: stable <[email protected]> # 4.9
Signed-off-by: Johan Hovold <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dev->lock, flags);
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
spin_unlock_irqrestore(&dev->lock, flags);
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-404', 'CWE-703'], 'message': 'HID: cp2112: fix sleep-while-atomic
A recent commit fixing DMA-buffers on stack added a shared transfer
buffer protected by a spinlock. This is broken as the USB HID request
callbacks can sleep. Fix this up by replacing the spinlock with a mutex.
Fixes: 1ffb3c40ffb5 ("HID: cp2112: make transfer buffers DMA capable")
Cc: stable <[email protected]> # 4.9
Signed-off-by: Johan Hovold <[email protected]>
Reviewed-by: Benjamin Tissoires <[email protected]>
Signed-off-by: Jiri Kosina <[email protected]>'</s> |
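All three cp2112 records above carry the same fix: hid_hw_raw_request() can sleep, so the spinlock guarding the shared in_out_buffer must become a mutex. A sketch of cp2112_gpio_set() after that substitution, assuming dev->lock is redeclared as a struct mutex and initialised with mutex_init() in probe:

static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
        int ret;

        mutex_lock(&dev->lock);         /* sleeping is fine under a mutex */

        buf[0] = CP2112_GPIO_SET;
        buf[1] = value ? 0xff : 0;
        buf[2] = 1 << offset;

        ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
                                 CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
                                 HID_REQ_SET_REPORT);
        if (ret < 0)
                hid_err(hdev, "error setting GPIO values: %d\n", ret);

        mutex_unlock(&dev->lock);
}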
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
struct dw2102_state *state = (struct dw2102_state *)d->priv;
u8 obuf[] = {0xde, 0};
info("%s: %d, initialized %d", __func__, i, state->initialized);
if (i && !state->initialized) {
state->initialized = 1;
/* reset board */
return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
}
return 0;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476', 'CWE-119'], 'message': '[media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit at
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[[email protected]: fixed a warning at su3000_i2c_transfer() that
state var was dereferenced before checking 'd']
Signed-off-by: Jonathan McDowell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
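The described fix moves the command bytes out of the on-stack obuf into the long-lived device state, serialised by a mutex. A hedged sketch for su3000_power_ctrl(); the state->data[] field and d->data_mutex names follow the "buffer within the device state structure" wording and other DVB-USB drivers, not a verified tree:

static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
        struct dw2102_state *state = (struct dw2102_state *)d->priv;
        int ret = 0;

        info("%s: %d, initialized %d", __func__, i, state->initialized);

        if (i && !state->initialized) {
                mutex_lock(&d->data_mutex);

                state->data[0] = 0xde;  /* device-state buffer, not stack */
                state->data[1] = 0;

                state->initialized = 1;
                /* reset board */
                ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);

                mutex_unlock(&d->data_mutex);
        }

        return ret;
}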
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
{
u8 obuf[] = { 0x51 };
u8 ibuf[] = { 0 };
if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
&d->dev->i2c_adap);
if (d->fe_adap[0].fe == NULL)
return -EIO;
if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
&dw2104_ts2020_config,
&d->dev->i2c_adap)) {
info("Attached RS2000/TS2020!");
return 0;
}
info("Failed to attach RS2000/TS2020!");
return -EIO;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476', 'CWE-119'], 'message': '[media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit at
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[[email protected]: fixed a warning at su3000_i2c_transfer() that
state var was dereferenced before checking 'd']
Signed-off-by: Jonathan McDowell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
struct dw2102_state *state = d->priv;
u8 obuf[3] = { 0xe, 0x80, 0 };
u8 ibuf[] = { 0 };
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
struct i2c_board_info board_info;
struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
obuf[1] = 0x02;
obuf[2] = 1;
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 0;
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0xe;
obuf[1] = 0x83;
obuf[2] = 1;
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
err("command 0x0e transfer failed.");
obuf[0] = 0x51;
if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
/* attach demod */
m88ds3103_pdata.clk = 27000000;
m88ds3103_pdata.i2c_wr_max = 33;
m88ds3103_pdata.ts_mode = M88DS3103_TS_CI;
m88ds3103_pdata.ts_clk = 16000;
m88ds3103_pdata.ts_clk_pol = 0;
m88ds3103_pdata.spec_inv = 0;
m88ds3103_pdata.agc = 0x99;
m88ds3103_pdata.agc_inv = 0;
m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_ENABLED;
m88ds3103_pdata.envelope_mode = 0;
m88ds3103_pdata.lnb_hv_pol = 1;
m88ds3103_pdata.lnb_en_pol = 0;
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
board_info.addr = 0x68;
board_info.platform_data = &m88ds3103_pdata;
request_module("m88ds3103");
client = i2c_new_device(&d->i2c_adap, &board_info);
if (client == NULL || client->dev.driver == NULL)
return -ENODEV;
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
return -ENODEV;
}
adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client);
i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client);
state->i2c_client_demod = client;
/* attach tuner */
ts2020_config.fe = adap->fe_adap[0].fe;
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "ts2022", I2C_NAME_SIZE);
board_info.addr = 0x60;
board_info.platform_data = &ts2020_config;
request_module("ts2020");
client = i2c_new_device(i2c_adapter, &board_info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(adap->fe_adap[0].fe);
return -ENODEV;
}
if (!try_module_get(client->dev.driver->owner)) {
i2c_unregister_device(client);
dvb_frontend_detach(adap->fe_adap[0].fe);
return -ENODEV;
}
/* delegate signal strength measurement to tuner */
adap->fe_adap[0].fe->ops.read_signal_strength =
adap->fe_adap[0].fe->ops.tuner_ops.get_rf_strength;
state->i2c_client_tuner = client;
/* hook fe: need to resync the slave fifo when signal locks */
state->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
adap->fe_adap[0].fe->ops.read_status = tt_s2_4600_read_status;
state->last_lock = 0;
return 0;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476', 'CWE-119'], 'message': '[media] dw2102: don't do DMA on stack
On Kernel 4.9, WARNINGs about doing DMA on stack are hit at
the dw2102 driver: one in su3000_power_ctrl() and the other in tt_s2_4600_frontend_attach().
Both were due to the use of buffers on the stack as parameters to
dvb_usb_generic_rw() and the resulting attempt to do DMA with them.
The device was non-functional as a result.
So, switch this driver over to use a buffer within the device state
structure, as has been done with other DVB-USB drivers.
Tested with TechnoTrend TT-connect S2-4600.
[[email protected]: fixed a warning at su3000_i2c_transfer() that
state var was dereferenced before checking 'd']
Signed-off-by: Jonathan McDowell <[email protected]>
Cc: <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct cxusb_state *st = d->priv;
int ret, wo;
if (1 + wlen > MAX_XFER_SIZE) {
warn("i2c wr: len=%d is too big!\n", wlen);
return -EOPNOTSUPP;
}
wo = (rbuf == NULL || rlen == 0); /* write-only */
mutex_lock(&d->data_mutex);
st->data[0] = cmd;
memcpy(&st->data[1], wbuf, wlen);
if (wo)
ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
else
ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
rbuf, rlen, 0);
mutex_unlock(&d->data_mutex);
return ret;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': '[media] cxusb: Use a dma capable buffer also for reading
Commit 17ce039b4e54 ("[media] cxusb: don't do DMA on stack")
added a kmalloc'ed bounce buffer for writes, but missed to do the same
for reads. As the read only happens after the write is finished, we can
reuse the same buffer.
As dvb_usb_generic_rw handles a read length of 0 by itself, avoid calling
it using the dvb_usb_generic_read wrapper function.
Signed-off-by: Stefan Brüns <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 odata[16];
u8 idata[16];
int ilen, err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
sg_init_table(sg, 3);
sg_set_buf(&sg[0], odata, 16);
/* format associated data and compute into mac */
if (assoclen) {
ilen = format_adata(idata, assoclen);
sg_set_buf(&sg[1], idata, ilen);
sg_chain(sg, 3, req->src);
} else {
ilen = 0;
sg_chain(sg, 2, req->src);
}
ahash_request_set_tfm(ahreq, ctx->mac);
ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL);
ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16);
err = crypto_ahash_init(ahreq);
if (err)
goto out;
err = crypto_ahash_update(ahreq);
if (err)
goto out;
/* we need to pad the MAC input to a round multiple of the block size */
ilen = 16 - (assoclen + ilen) % 16;
if (ilen < 16) {
memset(idata, 0, ilen);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], idata, ilen);
if (plain)
sg_chain(sg, 2, plain);
plain = sg;
cryptlen += ilen;
}
ahash_request_set_crypt(ahreq, plain, pctx->odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'crypto: ccm - move cbcmac input off the stack
Commit f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
refactored the CCM driver to allow separate implementations of the
underlying MAC to be provided by a platform. However, in doing so, it
moved some data from the linear region to the stack, which violates the
SG constraints when the stack is virtually mapped.
So move idata/odata back to the request ctx struct, of which we can
reasonably expect that it has been allocated using kmalloc() et al.
Reported-by: Johannes Berg <[email protected]>
Fixes: f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
Signed-off-by: Ard Biesheuvel <[email protected]>
Tested-by: Johannes Berg <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>'</s> |
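Per the message, idata and odata move from the stack into the per-request private context, which is kmalloc'ed and therefore safe to reference from a scatterlist even with CONFIG_VMAP_STACK. A sketch of the shape of that change; the other fields of the real struct are elided and assumed:

struct crypto_ccm_req_priv_ctx {
        u8 odata[16];                   /* moved off the stack: SG-safe */
        u8 idata[16];                   /* likewise */
        u32 flags;
        /* remaining request state unchanged */
};

/* in crypto_ccm_auth(), the former stack arrays become: */
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;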
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int put_chars(u32 vtermno, const char *buf, int count)
{
struct port *port;
struct scatterlist sg[1];
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
port = find_port_by_vtermno(vtermno);
if (!port)
return -EPIPE;
sg_init_one(sg, buf, count);
return __send_to_port(port, sg, 1, count, (void *)buf, false);
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'virtio-console: avoid DMA from stack
put_chars() stuffs the buffer it gets into an sg, but that buffer may be
on the stack. This breaks with CONFIG_VMAP_STACK=y (for me, it
manifested as printks getting turned into NUL bytes).
Signed-off-by: Omar Sandoval <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Amit Shah <[email protected]>'</s> |
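The fix copies the caller's buffer into heap memory before building the sg entry. A hedged sketch using kmemdup(); that this function (rather than the port layer) frees the copy after the blocking send is an assumption here:

static int put_chars(u32 vtermno, const char *buf, int count)
{
        struct port *port;
        struct scatterlist sg[1];
        void *data;
        int ret;

        if (unlikely(early_put_chars))
                return early_put_chars(vtermno, buf, count);

        port = find_port_by_vtermno(vtermno);
        if (!port)
                return -EPIPE;

        data = kmemdup(buf, count, GFP_ATOMIC); /* sg must not point at stack */
        if (!data)
                return -ENOMEM;

        sg_init_one(sg, data, count);
        ret = __send_to_port(port, sg, 1, count, data, false);
        kfree(data);
        return ret;
}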
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
int ret;
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
indx, &data, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
return ret;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-284'], 'message': 'pegasus: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
References: https://bugs.debian.org/852556
Reported-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Tested-by: Lisandro Damián Nicanor Pérez Meyer <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
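The fix routes the register byte through a heap buffer, since usb_control_msg() data may be DMA-mapped. A hedged sketch for set_register(); GFP_NOIO reflects the usual choice on network-device control paths and is an assumption here. The rtl8150 record below takes the same treatment.

static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
        u8 *buf;
        int ret;

        buf = kmemdup(&data, 1, GFP_NOIO);      /* heap, not stack */
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
                              indx, buf, 1, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
        kfree(buf);
        return ret;
}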
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
indx, 0, data, size, 500);
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'rtl8150: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: PLIST_API void plist_from_bin(const char *plist_bin, uint32_t length, plist_t * plist)
{
bplist_trailer_t *trailer = NULL;
uint8_t offset_size = 0;
uint8_t ref_size = 0;
uint64_t num_objects = 0;
uint64_t root_object = 0;
const char *offset_table = NULL;
const char *start_data = NULL;
const char *end_data = NULL;
//first check we have enough data
if (!(length >= BPLIST_MAGIC_SIZE + BPLIST_VERSION_SIZE + sizeof(bplist_trailer_t))) {
PLIST_BIN_ERR("plist data is to small to hold a binary plist\n");
return;
}
//check that plist_bin in actually a plist
if (memcmp(plist_bin, BPLIST_MAGIC, BPLIST_MAGIC_SIZE) != 0) {
PLIST_BIN_ERR("bplist magic mismatch\n");
return;
}
//check for known version
if (memcmp(plist_bin + BPLIST_MAGIC_SIZE, BPLIST_VERSION, BPLIST_VERSION_SIZE) != 0) {
PLIST_BIN_ERR("unsupported binary plist version '%.2s\n", plist_bin+BPLIST_MAGIC_SIZE);
return;
}
start_data = plist_bin + BPLIST_MAGIC_SIZE + BPLIST_VERSION_SIZE;
end_data = plist_bin + length - sizeof(bplist_trailer_t);
//now parse trailer
trailer = (bplist_trailer_t*)end_data;
offset_size = trailer->offset_size;
ref_size = trailer->ref_size;
num_objects = be64toh(trailer->num_objects);
root_object = be64toh(trailer->root_object_index);
offset_table = (char *)(plist_bin + be64toh(trailer->offset_table_offset));
if (num_objects == 0) {
PLIST_BIN_ERR("number of objects must be larger than 0\n");
return;
}
if (offset_size == 0) {
PLIST_BIN_ERR("offset size in trailer must be larger than 0\n");
return;
}
if (ref_size == 0) {
PLIST_BIN_ERR("object reference size in trailer must be larger than 0\n");
return;
}
if (root_object >= num_objects) {
PLIST_BIN_ERR("root object index (%" PRIu64 ") must be smaller than number of objects (%" PRIu64 ")\n", root_object, num_objects);
return;
}
if (offset_table < start_data || offset_table >= end_data) {
PLIST_BIN_ERR("offset table offset points outside of valid range\n");
return;
}
if (num_objects * offset_size < num_objects) {
PLIST_BIN_ERR("integer overflow when calculating offset table size (too many objects)\n");
return;
}
if ((uint64_t)offset_table + num_objects * offset_size > (uint64_t)end_data) {
PLIST_BIN_ERR("offset table points outside of valid range\n");
return;
}
struct bplist_data bplist;
bplist.data = plist_bin;
bplist.size = length;
bplist.num_objects = num_objects;
bplist.ref_size = ref_size;
bplist.offset_size = offset_size;
bplist.offset_table = offset_table;
bplist.level = 0;
bplist.used_indexes = plist_new_array();
if (!bplist.used_indexes) {
PLIST_BIN_ERR("failed to create array to hold used node indexes. Out of memory?\n");
return;
}
*plist = parse_bin_node_at_index(&bplist, root_object);
plist_free(bplist.used_indexes);
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'bplist: Fix integer overflow check (offset table size)'</s> |
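The check at fault is num_objects * offset_size < num_objects, which only catches products that wrap to below one factor; a wrapped product can still pass it. The standard overflow-safe form divides instead of multiplying. This shows the general technique, not necessarily the project's exact patch (offset_size is already known non-zero at this point):

        if (num_objects > UINT64_MAX / offset_size) {
                PLIST_BIN_ERR("integer overflow when calculating offset table size (too many objects)\n");
                return;
        }
        /* the product now cannot wrap, so the range check is meaningful */
        if (num_objects * offset_size > (uint64_t)(end_data - offset_table)) {
                PLIST_BIN_ERR("offset table points outside of valid range\n");
                return;
        }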
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd_writeargs *args)
{
unsigned int len, hdr, dlen;
struct kvec *head = rqstp->rq_arg.head;
int v;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p++; /* beginoffset */
args->offset = ntohl(*p++); /* offset */
p++; /* totalcount */
len = args->len = ntohl(*p++);
/*
* The protocol specifies a maximum of 8192 bytes.
*/
if (len > NFSSVC_MAXBLKSIZE_V2)
return 0;
/*
* Check to make sure that we got the right number of
* bytes.
*/
hdr = (void*)p - head->iov_base;
dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
* against the length which was actually received.
* Note that when RPCSEC/GSS (for example) is used, the
* data buffer can be padded so dlen might be larger
* than required. It must never be smaller.
*/
if (dlen < XDR_QUADLEN(len)*4)
return 0;
rqstp->rq_vec[0].iov_base = (void*)p;
rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
v++;
rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
rqstp->rq_vec[v].iov_len = PAGE_SIZE;
}
rqstp->rq_vec[v].iov_len = len;
args->vlen = v + 1;
return 1;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-703'], 'message': 'nfsd: stricter decoding of write-like NFSv2/v3 ops
The NFSv2/v3 code does not systematically check whether we decode past
the end of the buffer. This generally appears to be harmless, but there
are a few places where we do arithmetic on the pointers involved and
don't account for the possibility that a length could be negative. Add
checks to catch these.
Reported-by: Tuomas Haanpää <[email protected]>
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: NeilBrown <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_writeargs *args)
{
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
struct kvec *head = rqstp->rq_arg.head;
struct kvec *tail = rqstp->rq_arg.tail;
p = decode_fh(p, &args->fh);
if (!p)
return 0;
p = xdr_decode_hyper(p, &args->offset);
args->count = ntohl(*p++);
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
/*
* The count must equal the amount of data passed.
*/
if (args->count != args->len)
return 0;
/*
* Check to make sure that we got the right number of
* bytes.
*/
hdr = (void*)p - head->iov_base;
dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
* against the length which was actually received.
* Note that when RPCSEC/GSS (for example) is used, the
* data buffer can be padded so dlen might be larger
* than required. It must never be smaller.
*/
if (dlen < XDR_QUADLEN(len)*4)
return 0;
if (args->count > max_blocksize) {
args->count = max_blocksize;
len = args->len = max_blocksize;
}
rqstp->rq_vec[0].iov_base = (void*)p;
rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
v++;
rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
rqstp->rq_vec[v].iov_len = PAGE_SIZE;
}
rqstp->rq_vec[v].iov_len = len;
args->vlen = v + 1;
return 1;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-703'], 'message': 'nfsd: stricter decoding of write-like NFSv2/v3 ops
The NFSv2/v3 code does not systematically check whether we decode past
the end of the buffer. This generally appears to be harmless, but there
are a few places where we do arithmetic on the pointers involved and
don't account for the possibility that a length could be negative. Add
checks to catch these.
Reported-by: Tuomas Haanpää <[email protected]>
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: NeilBrown <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>'</s> |
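Both decoders compute hdr and dlen with raw pointer arithmetic; if decoding has already walked p past the head iovec, head->iov_len - hdr underflows before any later comparison. A minimal sketch of the kind of guard the commit message describes, hedged as the general shape rather than the exact patch:

        hdr = (void *)p - head->iov_base;
        if (hdr > head->iov_len)        /* decoded past the header buffer */
                return 0;
        dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;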
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
const __be32 **_xdr,
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
unsigned int toklen = *_toklen, n_parts, loop, tmp;
/* there must be at least one name, and at least #names+1 length
* words */
if (toklen <= 12)
return -EINVAL;
_enter(",{%x,%x,%x},%u",
ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), toklen);
n_parts = ntohl(*xdr++);
toklen -= 4;
if (n_parts <= 0 || n_parts > AFSTOKEN_K5_COMPONENTS_MAX)
return -EINVAL;
princ->n_name_parts = n_parts;
if (toklen <= (n_parts + 1) * 4)
return -EINVAL;
princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
if (!princ->name_parts)
return -ENOMEM;
for (loop = 0; loop < n_parts; loop++) {
if (toklen < 4)
return -EINVAL;
tmp = ntohl(*xdr++);
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
if (tmp > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
tmp = (tmp + 3) & ~3;
toklen -= tmp;
xdr += tmp >> 2;
}
if (toklen < 4)
return -EINVAL;
tmp = ntohl(*xdr++);
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
if (tmp > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
tmp = (tmp + 3) & ~3;
toklen -= tmp;
xdr += tmp >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
*_xdr = xdr;
*_toklen = toklen;
_leave(" = 0 [toklen=%u]", toklen);
return 0;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'rxrpc: Fix several cases where a padded len isn't checked in ticket decode
This fixes CVE-2017-7482.
When a kerberos 5 ticket is being decoded so that it can be loaded into an
rxrpc-type key, there are several places in which the length of a
variable-length field is checked to make sure that it's not going to
overrun the available data - but the data is padded to the nearest
four-byte boundary and the code doesn't check for this extra. This could
lead to the size-remaining variable wrapping and the data pointer going
over the end of the buffer.
Fix this by making the various variable-length data checks use the padded
length.
Reported-by: 石磊 <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Marc Dionne <[email protected]>
Reviewed-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
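The message pinpoints the pattern: tmp is checked against toklen, but the padded length (tmp + 3) & ~3 is what actually gets consumed, so up to three extra bytes slip past the check and toklen can wrap. A sketch of one corrected loop body using the padded length, as the commit describes:

        unsigned int paddedlen;

        tmp = ntohl(*xdr++);
        toklen -= 4;
        if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
                return -EINVAL;
        paddedlen = (tmp + 3) & ~3;
        if (paddedlen > toklen)         /* compare the *padded* length */
                return -EINVAL;
        princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
        if (!princ->name_parts[loop])
                return -ENOMEM;
        memcpy(princ->name_parts[loop], xdr, tmp);
        princ->name_parts[loop][tmp] = 0;
        toklen -= paddedlen;
        xdr += paddedlen >> 2;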
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
{
const __be32 *xdr = prep->data, *token;
const char *cp;
unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
int ret;
_enter(",{%x,%x,%x,%x},%zu",
ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
prep->datalen);
if (datalen > AFSTOKEN_LENGTH_MAX)
goto not_xdr;
/* XDR is an array of __be32's */
if (datalen & 3)
goto not_xdr;
/* the flags should be 0 (the setpag bit must be handled by
* userspace) */
if (ntohl(*xdr++) != 0)
goto not_xdr;
datalen -= 4;
/* check the cell name */
len = ntohl(*xdr++);
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
tmp = (len + 3) & ~3;
if (tmp > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
if (len < tmp)
for (; loop < tmp; loop++)
if (cp[loop])
goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
len, tmp, len, len, (const char *) xdr);
datalen -= tmp;
xdr += tmp >> 2;
/* get the token count */
if (datalen < 12)
goto not_xdr;
ntoken = ntohl(*xdr++);
datalen -= 4;
_debug("ntoken: %x", ntoken);
if (ntoken < 1 || ntoken > AFSTOKEN_MAX)
goto not_xdr;
/* check each token wrapper */
token = xdr;
loop = ntoken;
do {
if (datalen < 8)
goto not_xdr;
toklen = ntohl(*xdr++);
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
if (toklen < 20 || toklen > datalen)
goto not_xdr;
datalen -= (toklen + 3) & ~3;
xdr += (toklen + 3) >> 2;
} while (--loop > 0);
_debug("remainder: %zu", datalen);
if (datalen != 0)
goto not_xdr;
/* okay: we're going to assume it's valid XDR format
* - we ignore the cellname, relying on the key to be correctly named
*/
do {
xdr = token;
toklen = ntohl(*xdr++);
token = xdr + ((toklen + 3) >> 2);
sec_ix = ntohl(*xdr++);
toklen -= 4;
_debug("TOKEN type=%u [%p-%p]", sec_ix, xdr, token);
switch (sec_ix) {
case RXRPC_SECURITY_RXKAD:
ret = rxrpc_preparse_xdr_rxkad(prep, datalen, xdr, toklen);
if (ret != 0)
goto error;
break;
case RXRPC_SECURITY_RXK5:
ret = rxrpc_preparse_xdr_rxk5(prep, datalen, xdr, toklen);
if (ret != 0)
goto error;
break;
default:
ret = -EPROTONOSUPPORT;
goto error;
}
} while (--ntoken > 0);
_leave(" = 0");
return 0;
not_xdr:
_leave(" = -EPROTO");
return -EPROTO;
error:
_leave(" = %d", ret);
return ret;
} ; Expected output conforming to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'rxrpc: Fix several cases where a padded len isn't checked in ticket decode
This fixes CVE-2017-7482.
When a kerberos 5 ticket is being decoded so that it can be loaded into an
rxrpc-type key, there are several places in which the length of a
variable-length field is checked to make sure that it's not going to
overrun the available data - but the data is padded to the nearest
four-byte boundary and the code doesn't check for this extra. This could
lead to the size-remaining variable wrapping and the data pointer going
over the end of the buffer.
Fix this by making the various variable-length data checks use the padded
length.
Reported-by: 石磊 <[email protected]>
Signed-off-by: David Howells <[email protected]>
Reviewed-by: Marc Dionne <[email protected]>
Reviewed-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: SYSCALL_DEFINE5(perf_event_open,
struct perf_event_attr __user *, attr_uptr,
pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
struct pmu *pmu;
int event_fd;
int move_group = 0;
int err;
int f_flags = O_RDWR;
/* for future expandability... */
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
} else {
if (attr.sample_period & (1ULL << 63))
return -EINVAL;
}
/*
* In cgroup mode, the pid argument is used to pass the fd
* opened to the cgroup directory in cgroupfs. The cpu argument
* designates the cpu on which to monitor threads from that
* cgroup.
*/
if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
return -EINVAL;
if (flags & PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
event_fd = get_unused_fd_flags(f_flags);
if (event_fd < 0)
return event_fd;
if (group_fd != -1) {
err = perf_fget_light(group_fd, &group);
if (err)
goto err_fd;
group_leader = group.file->private_data;
if (flags & PERF_FLAG_FD_OUTPUT)
output_event = group_leader;
if (flags & PERF_FLAG_FD_NO_GROUP)
group_leader = NULL;
}
if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
err = PTR_ERR(task);
goto err_group_fd;
}
}
if (task && group_leader &&
group_leader->attr.inherit != attr.inherit) {
err = -EINVAL;
goto err_task;
}
get_online_cpus();
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_cpus;
}
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
if (err) {
__free_event(event);
goto err_cpus;
}
}
if (is_sampling_event(event)) {
if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
err = -ENOTSUPP;
goto err_alloc;
}
}
account_event(event);
/*
* Special case software events and allow them to be part of
* any hardware group.
*/
pmu = event->pmu;
if (group_leader &&
(is_software_event(event) != is_software_event(group_leader))) {
if (is_software_event(event)) {
/*
* If event and group_leader are not both a software
* event, and event is, then group leader is not.
*
* Allow the addition of software events to !software
* groups, this is safe because software events never
* fail to schedule.
*/
pmu = group_leader->pmu;
} else if (is_software_event(group_leader) &&
(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
/*
* In case the group is a pure software group, and we
* try to add a hardware event, move the whole group to
* the hardware context.
*/
move_group = 1;
}
}
/*
* Get the target context (task or percpu):
*/
ctx = find_get_context(pmu, task, event->cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
}
if (task) {
put_task_struct(task);
task = NULL;
}
/*
* Look up the group leader (we will attach this event to it):
*/
if (group_leader) {
err = -EINVAL;
/*
* Do not allow a recursive hierarchy (this new sibling
* becoming part of another group-sibling):
*/
if (group_leader->group_leader != group_leader)
goto err_context;
/*
* Do not allow to attach to a group in a different
* task or CPU context:
*/
if (move_group) {
if (group_leader->ctx->type != ctx->type)
goto err_context;
} else {
if (group_leader->ctx != ctx)
goto err_context;
}
/*
* Only a group leader can be exclusive or pinned
*/
if (attr.exclusive || attr.pinned)
goto err_context;
}
if (output_event) {
err = perf_event_set_output(event, output_event);
if (err)
goto err_context;
}
event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
f_flags);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
goto err_context;
}
if (move_group) {
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
perf_remove_from_context(group_leader, false);
/*
* Removing from the context ends up with disabled
* event. What we want here is event in the initial
* startup state, ready to be add into new context.
*/
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling, false);
perf_event__state_init(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
if (move_group) {
synchronize_rcu();
perf_install_in_context(ctx, group_leader, group_leader->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
}
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
put_online_cpus();
event->owner = current;
mutex_lock(&current->perf_event_mutex);
list_add_tail(&event->owner_entry, &current->perf_event_list);
mutex_unlock(¤t->perf_event_mutex);
/*
* Precalculate sample_data sizes
*/
perf_event__header_size(event);
perf_event__id_header_size(event);
/*
* Drop the reference on the group_event after placing the
* new event on the sibling_list. This ensures destruction
* of the group leader will find the pointer to itself in
* perf_group_detach().
*/
fdput(group);
fd_install(event_fd, event_file);
return event_fd;
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
free_event(event);
err_cpus:
put_online_cpus();
err_task:
if (task)
put_task_struct(task);
err_group_fd:
fdput(group);
err_fd:
put_unused_fd(event_fd);
return err;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-284', 'CWE-264'], 'message': 'perf: Tighten (and fix) the grouping condition
The fix from 9fc81d87420d ("perf: Fix events installation during
moving group") was incomplete in that it failed to recognise that
creating a group with events for different CPUs is semantically
broken -- they cannot be co-scheduled.
Furthermore, it leads to real breakage where, when we create an event
for CPU Y and then migrate it to form a group on CPU X, the code gets
confused where the counter is programmed -- triggered in practice
as well by me via the perf fuzzer.
Fix this by tightening the rules for creating groups. Only allow
grouping of counters that can be co-scheduled in the same context.
This means for the same task and/or the same cpu.
Fixes: 9fc81d87420d ("perf: Fix events installation during moving group")
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>'</s>
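
The rule the commit above converges on is easy to state: two perf events may only be grouped when they can be co-scheduled, that is, when they target the same task and the same CPU. A minimal standalone sketch of that predicate, using stand-in structs rather than the real kernel types (all names below are illustrative):

#include <stdbool.h>

struct ctx { void *task; };                /* stand-in for perf_event_context */
struct evt { int cpu; struct ctx *ctx; };  /* stand-in for perf_event */

/* Co-scheduling requires the same task (or both per-cpu, task == NULL)
 * and the same CPU; anything else cannot be programmed together. */
static bool can_group(const struct evt *leader, const struct evt *event)
{
    return leader->ctx->task == event->ctx->task &&
           leader->cpu == event->cpu;
}
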
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int mailimf_group_parse(const char * message, size_t length,
size_t * indx,
struct mailimf_group ** result)
{
size_t cur_token;
char * display_name;
struct mailimf_mailbox_list * mailbox_list;
struct mailimf_group * group;
int r;
int res;
cur_token = * indx;
mailbox_list = NULL;
r = mailimf_display_name_parse(message, length, &cur_token, &display_name);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto err;
}
r = mailimf_colon_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_display_name;
}
r = mailimf_mailbox_list_parse(message, length, &cur_token, &mailbox_list);
switch (r) {
case MAILIMF_NO_ERROR:
break;
case MAILIMF_ERROR_PARSE:
r = mailimf_cfws_parse(message, length, &cur_token);
if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE)) {
res = r;
goto free_display_name;
}
break;
default:
res = r;
goto free_display_name;
}
r = mailimf_semi_colon_parse(message, length, &cur_token);
if (r != MAILIMF_NO_ERROR) {
res = r;
goto free_mailbox_list;
}
group = mailimf_group_new(display_name, mailbox_list);
if (group == NULL) {
res = MAILIMF_ERROR_MEMORY;
goto free_mailbox_list;
}
* indx = cur_token;
* result = group;
return MAILIMF_NO_ERROR;
free_mailbox_list:
if (mailbox_list != NULL) {
mailimf_mailbox_list_free(mailbox_list);
}
free_display_name:
mailimf_display_name_free(display_name);
err:
return res;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'Fixed crash #274'</s>
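
The CWE-476 label points at the MAILIMF_ERROR_PARSE branch above: mailbox_list stays NULL there, yet the group built from it is later traversed and freed as if the list existed. A hedged sketch of the usual repair is to substitute an empty list for a missing one; clist_new() and mailimf_mailbox_list_new() are libetpan's own constructors, while the wrapper itself is illustrative:

typedef struct clist_s clist;                      /* opaque, as in libetpan */
typedef struct mailimf_mailbox_list mailimf_mailbox_list;

clist * clist_new(void);                           /* libetpan containers API */
mailimf_mailbox_list * mailimf_mailbox_list_new(clist * mb_list);

/* Hand back an empty-but-valid list when no mailboxes were parsed, so
 * mailimf_group_new() never receives NULL. Returns NULL only on OOM,
 * which the caller maps to MAILIMF_ERROR_MEMORY. */
static mailimf_mailbox_list * empty_mailbox_list(void)
{
    clist * list = clist_new();
    if (list == NULL)
        return NULL;
    return mailimf_mailbox_list_new(list);
}
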
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ActionReply Smb4KMountHelper::mount(const QVariantMap &args)
{
ActionReply reply;
// The mountpoint is unique and can be used to
// find the share.
reply.addData("mh_mountpoint", args["mh_mountpoint"]);
KProcess proc(this);
proc.setOutputChannelMode(KProcess::SeparateChannels);
proc.setProcessEnvironment(QProcessEnvironment::systemEnvironment());
#if defined(Q_OS_LINUX)
proc.setEnv("PASSWD", args["mh_url"].toUrl().password(), true);
#endif
// We need this to avoid a translated password prompt.
proc.setEnv("LANG", "C");
// If the location of a Kerberos ticket is passed, it needs to
// be passed to the process environment here.
if (args.contains("mh_krb5ticket"))
{
proc.setEnv("KRB5CCNAME", args["mh_krb5ticket"].toString());
}
else
{
// Do nothing
}
// Set the mount command here.
QStringList command;
#if defined(Q_OS_LINUX)
command << args["mh_command"].toString();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
command << args["mh_options"].toStringList();
#elif defined(Q_OS_FREEBSD) || defined(Q_OS_NETBSD)
command << args["mh_command"].toString();
command << args["mh_options"].toStringList();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
#else
// Do nothing.
#endif
proc.setProgram(command);
// Run the mount process.
proc.start();
if (proc.waitForStarted(-1))
{
bool user_kill = false;
while (!proc.waitForFinished(10))
{
// Check if there is a password prompt. If there is one, pass
// the password to it.
QByteArray out = proc.readAllStandardError();
if (out.startsWith("Password:"))
{
proc.write(args["mh_url"].toUrl().password().toUtf8());
proc.write("\r");
}
else
{
// Do nothing
}
// We want to be able to terminate the process from outside.
// Thus, we implement a loop that periodically checks whether we
// need to kill the process.
if (HelperSupport::isStopped())
{
proc.kill();
user_kill = true;
break;
}
else
{
// Do nothing
}
}
if (proc.exitStatus() == KProcess::CrashExit)
{
if (!user_kill)
{
reply.setErrorCode(ActionReply::HelperError);
reply.setErrorDescription(i18n("The mount process crashed."));
return reply;
}
else
{
// Do nothing
}
}
else
{
// Check if there is output on stderr.
QString stdErr = QString::fromUtf8(proc.readAllStandardError());
reply.addData("mh_error_message", stdErr.trimmed());
}
}
else
{
reply.setErrorCode(ActionReply::HelperError);
reply.setErrorDescription(i18n("The mount process could not be started."));
}
return reply;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849'</s>
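
What the fix amounts to: the privileged helper must never execute whatever path arrives in args["mh_command"], since that value is chosen by the unprivileged caller. Instead it resolves the binary itself from a fixed set of system directories. A minimal sketch of that lookup in plain C, independent of the KAuth helper API (find_trusted_exe is an illustrative name):

#include <stdio.h>
#include <unistd.h>

/* Resolve a privileged command from trusted directories only; never
 * trust a binary path supplied across the privilege boundary. */
static const char *find_trusted_exe(const char *name, char *buf, size_t len)
{
    static const char *dirs[] = {
        "/bin", "/sbin", "/usr/bin", "/usr/sbin",
        "/usr/local/bin", "/usr/local/sbin", NULL
    };
    for (int i = 0; dirs[i] != NULL; i++) {
        snprintf(buf, len, "%s/%s", dirs[i], name);
        if (access(buf, X_OK) == 0)
            return buf;
    }
    return NULL;  /* not found: refuse to run anything */
}
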
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: bool Smb4KMountJob::fillArgs(Smb4KShare *share, QMap<QString, QVariant>& map)
{
// Find the mount program.
QString mount;
QStringList paths;
paths << "/bin";
paths << "/sbin";
paths << "/usr/bin";
paths << "/usr/sbin";
paths << "/usr/local/bin";
paths << "/usr/local/sbin";
for (int i = 0; i < paths.size(); ++i)
{
mount = KGlobal::dirs()->findExe("mount_smbfs", paths.at(i));
if (!mount.isEmpty())
{
map.insert("mh_command", mount);
break;
}
else
{
continue;
}
}
if (mount.isEmpty())
{
Smb4KNotification::commandNotFound("mount_smbfs");
return false;
}
else
{
// Do nothing
}
// Mount arguments.
QMap<QString, QString> global_options = globalSambaOptions();
Smb4KCustomOptions *options = Smb4KCustomOptionsManager::self()->findOptions(share);
// Set some settings for the share.
share->setFileSystem(Smb4KShare::SMBFS);
// Compile the list of arguments.
QStringList args_list;
// Workgroup
if (!share->workgroupName().isEmpty())
{
args_list << "-W";
args_list << KShell::quoteArg(share->workgroupName());
}
else
{
// Do nothing
}
// Host IP
if (!share->hostIP().isEmpty())
{
args_list << "-I";
args_list << share->hostIP();
}
else
{
// Do nothing
}
// UID
if (options)
{
args_list << "-u";
args_list << QString("%1").arg(options->uid());
}
else
{
args_list << "-u";
args_list << QString("%1").arg((K_UID)Smb4KMountSettings::userID().toInt());
}
// GID
if (options)
{
args_list << "-g";
args_list << QString("%1").arg(options->gid());
}
else
{
args_list << "-g";
args_list << QString("%1").arg((K_GID)Smb4KMountSettings::groupID().toInt());
}
// Character sets for the client and server
QString client_charset, server_charset;
switch (Smb4KMountSettings::clientCharset())
{
case Smb4KMountSettings::EnumClientCharset::default_charset:
{
client_charset = global_options["unix charset"].toLower(); // maybe empty
break;
}
default:
{
client_charset = Smb4KMountSettings::self()->clientCharsetItem()->choices().value(Smb4KMountSettings::clientCharset()).label;
break;
}
}
switch (Smb4KMountSettings::serverCodepage())
{
case Smb4KMountSettings::EnumServerCodepage::default_codepage:
{
server_charset = global_options["dos charset"].toLower(); // maybe empty
break;
}
default:
{
server_charset = Smb4KMountSettings::self()->serverCodepageItem()->choices().value(Smb4KMountSettings::serverCodepage()).label;
break;
}
}
if (!client_charset.isEmpty() && !server_charset.isEmpty())
{
args_list << "-E";
args_list << QString("%1:%2").arg(client_charset, server_charset);
}
else
{
// Do nothing
}
// File mask
if (!Smb4KMountSettings::fileMask().isEmpty())
{
args_list << "-f";
args_list << QString("%1").arg(Smb4KMountSettings::fileMask());
}
else
{
// Do nothing
}
// Directory mask
if (!Smb4KMountSettings::directoryMask().isEmpty())
{
args_list << "-d";
args_list << QString("%1").arg(Smb4KMountSettings::directoryMask());
}
else
{
// Do nothing
}
// User name
if (!share->login().isEmpty())
{
args_list << "-U";
args_list << QString("%1").arg(share->login());
}
else
{
args_list << "-N";
}
// Mount options
map.insert("mh_options", args_list);
// Mount point
map.insert("mh_mountpoint", share->canonicalPath());
// UNC
map.insert("mh_unc", !share->isHomesShare() ? share->unc() : share->homeUNC());;
return true;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: bool Smb4KUnmountJob::createUnmountAction(Smb4KShare *share, Action *action)
{
Q_ASSERT(share);
Q_ASSERT(action);
if (!share || !action)
{
return false;
}
else
{
// Do nothing
}
// Find the umount program.
QString umount;
QStringList paths;
paths << "/bin";
paths << "/sbin";
paths << "/usr/bin";
paths << "/usr/sbin";
paths << "/usr/local/bin";
paths << "/usr/local/sbin";
for ( int i = 0; i < paths.size(); ++i )
{
umount = KGlobal::dirs()->findExe("umount", paths.at(i));
if (!umount.isEmpty())
{
break;
}
else
{
continue;
}
}
if (umount.isEmpty() && !m_silent)
{
Smb4KNotification::commandNotFound("umount");
return false;
}
else
{
// Do nothing
}
QStringList options;
#if defined(Q_OS_LINUX)
if (m_force)
{
options << "-l"; // lazy unmount
}
else
{
// Do nothing
}
#endif
action->setName("net.sourceforge.smb4k.mounthelper.unmount");
action->setHelperID("net.sourceforge.smb4k.mounthelper");
action->addArgument("mh_command", umount);
action->addArgument("mh_url", share->url().url());
action->addArgument("mh_mountpoint", share->canonicalPath());
action->addArgument("mh_options", options);
return true;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'Find the mount/umount commands in the helper
Instead of trusting what we get passed in
CVE-2017-8849'</s>
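
The unmount side has the same trust boundary: createUnmountAction() ships an mh_command argument to the helper, which used to run it verbatim. With the fix, the helper discards that argument and resolves umount on its own, along the lines of the find_trusted_exe() sketch above (error_reply() is a hypothetical stand-in for the helper's failure path):

#include <limits.h>

/* Fragment from inside the privileged unmount action: */
char pathbuf[PATH_MAX];
const char *umount_cmd = find_trusted_exe("umount", pathbuf, sizeof(pathbuf));
if (umount_cmd == NULL)
    return error_reply("umount not found");   /* hypothetical helper */
/* run umount_cmd; any caller-supplied "mh_command" is ignored */
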
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
struct extent_status es;
int retval;
int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
struct ext4_map_blocks orig_map;
memcpy(&orig_map, map, sizeof(*map));
#endif
map->m_flags = 0;
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
*/
if (unlikely(map->m_len > INT_MAX))
map->m_len = INT_MAX;
/* We can handle the block number less than EXT_MAX_BLOCKS */
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED;
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
map->m_flags |= ext4_es_is_written(&es) ?
EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
map->m_pblk = 0;
retval = es.es_len - (map->m_lblk - es.es_lblk);
if (retval > map->m_len)
retval = map->m_len;
map->m_len = retval;
retval = 0;
} else {
BUG_ON(1);
}
#ifdef ES_AGGRESSIVE_TEST
ext4_map_blocks_es_recheck(handle, inode, map,
&orig_map, flags);
#endif
goto found;
}
/*
* Try to see if we can get the block without requesting a new
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk,
map->m_len, map->m_pblk, status);
if (ret < 0)
retval = ret;
}
up_read((&EXT4_I(inode)->i_data_sem));
found:
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
/* If it is only a block(s) look up */
if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
* Return here if the blocks have already been allocated.
*
* Note that if blocks have been preallocated,
* ext4_ext_get_block() returns with create = 0 and the
* buffer head unmapped.
*/
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
/*
* If we need to convert extent to unwritten
* we continue and do the actual work in
* ext4_ext_map_blocks()
*/
if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
return retval;
/*
* Here we clear m_flags because after allocating a new extent,
* it will be set again.
*/
map->m_flags &= ~EXT4_MAP_FLAGS;
/*
* New block allocation and/or writing to an unwritten extent
* will possibly result in updating i_data, so we take
* the write lock of i_data_sem, and call get_block()
* with create == 1 flag.
*/
down_write(&EXT4_I(inode)->i_data_sem);
/*
* We need to check for EXT4 here because migrate
* could have changed the inode type in between
*/
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags);
} else {
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
/*
* We allocated new blocks which will result in
* i_data's format changing. Force the migrate
* to fail by clearing migrate flags
*/
ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
}
/*
* Update reserved blocks/metadata blocks after successful
* block allocation which had been deferred till now. We don't
* support fallocate for non extent files. So we can update
* reserve space here.
*/
if ((retval > 0) &&
(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
ext4_da_update_reserve_space(inode, retval, 1);
}
if (retval > 0) {
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
/*
* We have to zeroout blocks before inserting them into extent
* status tree. Otherwise someone could look them up there and
* use them before they are really zeroed.
*/
if (flags & EXT4_GET_BLOCKS_ZERO &&
map->m_flags & EXT4_MAP_MAPPED &&
map->m_flags & EXT4_MAP_NEW) {
ret = ext4_issue_zeroout(inode, map->m_lblk,
map->m_pblk, map->m_len);
if (ret) {
retval = ret;
goto out_sem;
}
}
/*
* If the extent has been zeroed out, we don't need to update
* extent status tree.
*/
if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es))
goto out_sem;
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
!(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret < 0) {
retval = ret;
goto out_sem;
}
}
out_sem:
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
return retval;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'ext4: fix data exposure after a crash
Huang has reported that in his powerfail testing he is seeing stale
block contents in some of recently allocated blocks although he mounts
ext4 in data=ordered mode. After some investigation I have found out
that indeed when delayed allocation is used, we don't add inode to
transaction's list of inodes needing flushing before commit. Originally
we were doing that but commit f3b59291a69d removed the logic with a
flawed argument that it is not needed.
The problem is that although for delayed allocated blocks we write their
contents immediately after allocating them, there is no guarantee that
the IO scheduler or device doesn't reorder things and thus transaction
allocating blocks and attaching them to inode can reach stable storage
before actual block contents. Actually whenever we attach freshly
allocated blocks to inode using a written extent, we should add inode to
transaction's ordered inode list to make sure we properly wait for block
contents to be written before committing the transaction. So that is
what we do in this patch. This also handles other cases where stale data
exposure was possible - like filling hole via mmap in
data=ordered,nodelalloc mode.
The only exception to the above rule are extending direct IO writes where
blkdev_direct_IO() waits for IO to complete before increasing i_size and
thus stale data exposure is not possible. For now we don't complicate
the code with optimizing this special case since the overhead is pretty
low. In case this is observed to be a performance problem we can always
handle it using a special flag to ext4_map_blocks().
CC: [email protected]
Fixes: f3b59291a69d0b734be1fc8be489fef2dd846d3d
Reported-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Tested-by: "HUANG Weller (CM/ESW12-CN)" <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>'</s>
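
The shape of the repair described above is a small addition to ext4_map_blocks(): whenever freshly allocated blocks are exposed as written extents in data=ordered mode, attach the inode to the running transaction so the commit waits for the data to reach disk. A hedged fragment modeled on the upstream fix; the exact guard set varies across kernel versions:

/* Fragment, after a successful allocation inside ext4_map_blocks(): */
if (retval > 0 && map->m_flags & EXT4_MAP_NEW &&
    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
    !(flags & EXT4_GET_BLOCKS_ZERO) &&
    ext4_should_order_data(inode)) {
	/* make the journal commit wait for the block contents */
	ret = ext4_jbd2_inode_add_wait(handle, inode);
	if (ret) {
		retval = ret;
		goto out_sem;
	}
}
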
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int fsmVerify(const char *path, rpmfi fi)
{
int rc;
int saveerrno = errno;
struct stat dsb;
mode_t mode = rpmfiFMode(fi);
rc = fsmStat(path, 1, &dsb);
if (rc)
return rc;
if (S_ISREG(mode)) {
/* HP-UX (and other os'es) don't permit unlink on busy files. */
char *rmpath = rstrscat(NULL, path, "-RPMDELETE", NULL);
rc = fsmRename(path, rmpath);
/* XXX shouldn't we take unlink return code here? */
if (!rc)
(void) fsmUnlink(rmpath);
else
rc = RPMERR_UNLINK_FAILED;
free(rmpath);
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
} else if (S_ISDIR(mode)) {
if (S_ISDIR(dsb.st_mode)) return 0;
if (S_ISLNK(dsb.st_mode)) {
rc = fsmStat(path, 0, &dsb);
if (rc == RPMERR_ENOENT) rc = 0;
if (rc) return rc;
errno = saveerrno;
if (S_ISDIR(dsb.st_mode)) return 0;
}
} else if (S_ISLNK(mode)) {
if (S_ISLNK(dsb.st_mode)) {
char buf[8 * BUFSIZ];
size_t len;
rc = fsmReadLink(path, buf, 8 * BUFSIZ, &len);
errno = saveerrno;
if (rc) return rc;
if (rstreq(rpmfiFLink(fi), buf)) return 0;
}
} else if (S_ISFIFO(mode)) {
if (S_ISFIFO(dsb.st_mode)) return 0;
} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
if ((S_ISCHR(dsb.st_mode) || S_ISBLK(dsb.st_mode)) &&
(dsb.st_rdev == rpmfiFRdev(fi))) return 0;
} else if (S_ISSOCK(mode)) {
if (S_ISSOCK(dsb.st_mode)) return 0;
}
/* XXX shouldn't do this with commit/undo. */
rc = fsmUnlink(path);
if (rc == 0) rc = RPMERR_ENOENT;
return (rc ? rc : RPMERR_ENOENT); /* XXX HACK */
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-59', 'CWE-61'], 'message': 'Restrict following symlinks to directories by ownership (CVE-2017-7500)
Only follow directory symlinks owned by target directory owner or root.
This prevents privilege escalation from user-writable directories via
directory symlinks to privileged directories on package upgrade, while
still allowing admin to arrange disk usage with symlinks.
The rationale is that if you can create symlinks owned by user X you *are*
user X (or root), and if you also own directory Y you can do whatever with
it already, including change permissions. So when you create a symlink to
that directory, the link ownership acts as a simple stamp of authority that
you indeed want rpm to treat this symlink as it were the directory that
you own. Such a permission can only be given by you or root, which
is just the way we want it. Plus it's almost ridiculously simple as far
as rules go, compared to trying to calculate something from the
source vs destination directory permissions etc.
In the normal case, the user arranging diskspace with symlinks is indeed
root so nothing changes, the only real change here is to links created by
non-privileged users which should be few and far between in practise.
Unfortunately our test-suite runs as a regular user via fakechroot and
thus the testcase for this fails under the new rules. Adjust the testcase
to get the ownership straight and add a second case for the illegal
behavior, basically the same as the old one but with different expectations.'</s>
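
The ownership rule in that message reduces to a single predicate, evaluated before the install path agrees to treat a symlink as the directory it points to. A standalone sketch over struct stat only, not rpm's literal code:

#include <stdbool.h>
#include <sys/stat.h>

/* lsb: lstat() result for the symlink itself;
 * dsb: stat() result for the directory it resolves to.
 * Follow only when the link was created by root or by the owner of the
 * target directory (the "stamp of authority" the message describes). */
static bool may_follow_dir_symlink(const struct stat *lsb,
                                   const struct stat *dsb)
{
    return S_ISLNK(lsb->st_mode) && S_ISDIR(dsb->st_mode) &&
           (lsb->st_uid == 0 || lsb->st_uid == dsb->st_uid);
}
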
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: rpmVerifyAttrs rpmfilesVerify(rpmfiles fi, int ix, rpmVerifyAttrs omitMask)
{
rpm_mode_t fmode = rpmfilesFMode(fi, ix);
rpmfileAttrs fileAttrs = rpmfilesFFlags(fi, ix);
rpmVerifyAttrs flags = rpmfilesVFlags(fi, ix);
const char * fn = rpmfilesFN(fi, ix);
struct stat sb;
rpmVerifyAttrs vfy = RPMVERIFY_NONE;
/*
* Check to see if the file was installed - if not pretend all is OK.
*/
switch (rpmfilesFState(fi, ix)) {
case RPMFILE_STATE_NETSHARED:
case RPMFILE_STATE_NOTINSTALLED:
goto exit;
break;
case RPMFILE_STATE_REPLACED:
/* For replaced files we can only verify if it exists at all */
flags = RPMVERIFY_LSTATFAIL;
break;
case RPMFILE_STATE_WRONGCOLOR:
/*
* Files with wrong color are supposed to share some attributes
* with the actually installed file - verify what we can.
*/
flags &= ~(RPMVERIFY_FILEDIGEST | RPMVERIFY_FILESIZE |
RPMVERIFY_MTIME | RPMVERIFY_RDEV);
break;
case RPMFILE_STATE_NORMAL:
/* File from a non-installed package, try to verify nevertheless */
case RPMFILE_STATE_MISSING:
break;
}
if (fn == NULL || lstat(fn, &sb) != 0) {
vfy |= RPMVERIFY_LSTATFAIL;
goto exit;
}
/* If we expected a directory but got a symlink to one, follow the link */
if (S_ISDIR(fmode) && S_ISLNK(sb.st_mode) && stat(fn, &sb) != 0) {
vfy |= RPMVERIFY_LSTATFAIL;
goto exit;
}
/* Links have no mode, other types have no linkto */
if (S_ISLNK(sb.st_mode))
flags &= ~(RPMVERIFY_MODE);
else
flags &= ~(RPMVERIFY_LINKTO);
/* Not all attributes of non-regular files can be verified */
if (!S_ISREG(sb.st_mode))
flags &= ~(RPMVERIFY_FILEDIGEST | RPMVERIFY_FILESIZE |
RPMVERIFY_MTIME | RPMVERIFY_CAPS);
/* Content checks of %ghost files are meaningless. */
if (fileAttrs & RPMFILE_GHOST)
flags &= ~(RPMVERIFY_FILEDIGEST | RPMVERIFY_FILESIZE |
RPMVERIFY_MTIME | RPMVERIFY_LINKTO);
/* Don't verify any features in omitMask. */
flags &= ~(omitMask | RPMVERIFY_FAILURES);
if (flags & RPMVERIFY_FILEDIGEST) {
const unsigned char *digest;
int algo;
size_t diglen;
/* XXX If --nomd5, then prelinked library sizes are not corrected. */
if ((digest = rpmfilesFDigest(fi, ix, &algo, &diglen))) {
unsigned char fdigest[diglen];
rpm_loff_t fsize;
if (rpmDoDigest(algo, fn, 0, fdigest, &fsize)) {
vfy |= (RPMVERIFY_READFAIL|RPMVERIFY_FILEDIGEST);
} else {
sb.st_size = fsize;
if (memcmp(fdigest, digest, diglen))
vfy |= RPMVERIFY_FILEDIGEST;
}
} else {
vfy |= RPMVERIFY_FILEDIGEST;
}
}
if (flags & RPMVERIFY_LINKTO) {
char linkto[1024+1];
int size = 0;
if ((size = readlink(fn, linkto, sizeof(linkto)-1)) == -1)
vfy |= (RPMVERIFY_READLINKFAIL|RPMVERIFY_LINKTO);
else {
const char * flink = rpmfilesFLink(fi, ix);
linkto[size] = '\0';
if (flink == NULL || !rstreq(linkto, flink))
vfy |= RPMVERIFY_LINKTO;
}
}
if (flags & RPMVERIFY_FILESIZE) {
if (sb.st_size != rpmfilesFSize(fi, ix))
vfy |= RPMVERIFY_FILESIZE;
}
if (flags & RPMVERIFY_MODE) {
rpm_mode_t metamode = fmode;
rpm_mode_t filemode;
/*
* Platforms (like AIX) where sizeof(rpm_mode_t) != sizeof(mode_t)
* need the (rpm_mode_t) cast here.
*/
filemode = (rpm_mode_t)sb.st_mode;
/*
* Comparing the type of %ghost files is meaningless, but perms are OK.
*/
if (fileAttrs & RPMFILE_GHOST) {
metamode &= ~0xf000;
filemode &= ~0xf000;
}
if (metamode != filemode)
vfy |= RPMVERIFY_MODE;
#if WITH_ACL
/*
* For now, any non-default ACLs on a file count as a difference, as
* rpm cannot have set them.
*/
acl_t facl = acl_get_file(fn, ACL_TYPE_ACCESS);
if (facl) {
if (acl_equiv_mode(facl, NULL) == 1) {
vfy |= RPMVERIFY_MODE;
}
acl_free(facl);
}
#endif
}
if (flags & RPMVERIFY_RDEV) {
if (S_ISCHR(fmode) != S_ISCHR(sb.st_mode)
|| S_ISBLK(fmode) != S_ISBLK(sb.st_mode))
{
vfy |= RPMVERIFY_RDEV;
} else if (S_ISDEV(fmode) && S_ISDEV(sb.st_mode)) {
rpm_rdev_t st_rdev = (sb.st_rdev & 0xffff);
rpm_rdev_t frdev = (rpmfilesFRdev(fi, ix) & 0xffff);
if (st_rdev != frdev)
vfy |= RPMVERIFY_RDEV;
}
}
#if WITH_CAP
if (flags & RPMVERIFY_CAPS) {
/*
* Empty capability set ("=") is not exactly the same as no
* capabilities at all but suffices for now...
*/
cap_t cap, fcap;
cap = cap_from_text(rpmfilesFCaps(fi, ix));
if (!cap) {
cap = cap_from_text("=");
}
fcap = cap_get_file(fn);
if (!fcap) {
fcap = cap_from_text("=");
}
if (cap_compare(cap, fcap) != 0)
vfy |= RPMVERIFY_CAPS;
cap_free(fcap);
cap_free(cap);
}
#endif
if ((flags & RPMVERIFY_MTIME) && (sb.st_mtime != rpmfilesFMtime(fi, ix))) {
vfy |= RPMVERIFY_MTIME;
}
if (flags & RPMVERIFY_USER) {
const char * name = rpmugUname(sb.st_uid);
const char * fuser = rpmfilesFUser(fi, ix);
uid_t uid;
int namematch = 0;
int idmatch = 0;
if (name && fuser)
namematch = rstreq(name, fuser);
if (fuser && rpmugUid(fuser, &uid) == 0)
idmatch = (uid == sb.st_uid);
if (namematch != idmatch) {
rpmlog(RPMLOG_WARNING,
_("Duplicate username or UID for user %s\n"), fuser);
}
if (!(namematch || idmatch))
vfy |= RPMVERIFY_USER;
}
if (flags & RPMVERIFY_GROUP) {
const char * name = rpmugGname(sb.st_gid);
const char * fgroup = rpmfilesFGroup(fi, ix);
gid_t gid;
int namematch = 0;
int idmatch = 0;
if (name && fgroup)
namematch = rstreq(name, fgroup);
if (fgroup && rpmugGid(fgroup, &gid) == 0)
idmatch = (gid == sb.st_gid);
if (namematch != idmatch) {
rpmlog(RPMLOG_WARNING,
_("Duplicate groupname or GID for group %s\n"), fgroup);
}
if (!(namematch || idmatch))
vfy |= RPMVERIFY_GROUP;
}
exit:
return vfy;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-59', 'CWE-61'], 'message': 'Make verification match the new restricted directory symlink behavior
Only follow directory symlinks owned by target directory owner or root
during verification to match the behavior of fsmVerify() in the new
CVE-2017-7500 world order.
The code is klunkier than it should and the logic should use common code
with fsmVerify() instead of duplicating it here, but that needs more
changes than is comfortable to backport so starting with this.
Also worth noting that the previous "follow the link" logic from
commit 3ccd774255b8215733e0bdfdf5a683da9dd10923 was not quite right,
it'd fail with RPMVERIFY_LSTATFAIL on a broken symlink when it should've
ran verification on the symlink itself. This behavior is fixed here too.
Finally, once again fakechroot gets in the way and forces the related
verify testcase to be changed to be able to create a valid link. Reuse
the replacement testcase for the purpose and add another case for
verifying an invalid link.'</s>
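
On the verify side the same predicate slots in between the lstat() and the stat() shown in rpmfilesVerify(): when a directory is expected but a symlink is found, check the link owner before following it, and otherwise refuse or fall back to verifying the link itself. A sketch of the flow with stand-in result codes:

/* Fragment: a directory is expected at fn, lstat() found a symlink. */
struct stat lsb, sb;
if (lstat(fn, &lsb) != 0)
    return VFY_LSTATFAIL;                      /* stand-in result code */
if (S_ISLNK(lsb.st_mode)) {
    if (stat(fn, &sb) != 0)
        return VFY_LSTATFAIL;                  /* broken link */
    if (!(lsb.st_uid == 0 || lsb.st_uid == sb.st_uid))
        return VFY_LSTATFAIL;                  /* untrusted link: do not follow */
    /* sb now describes the directory and may be verified as such */
}
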
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
struct sk_buff *skb)
{
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk = inet_csk(newsk);
struct tcp_sock *newtp = tcp_sk(newsk);
/* Now setup tcp_sock */
newtp->pred_flags = 0;
newtp->rcv_wup = newtp->copied_seq =
newtp->rcv_nxt = treq->rcv_isn + 1;
newtp->segs_in = 1;
newtp->snd_sml = newtp->snd_una =
newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
tcp_prequeue_init(newtp);
INIT_LIST_HEAD(&newtp->tsq_node);
tcp_init_wl(newtp, treq->rcv_isn);
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
newtp->sacked_out = 0;
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
newtp->tlp_high_seq = 0;
newtp->lsndtime = treq->snt_synack.stamp_jiffies;
newsk->sk_txhash = treq->txhash;
newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
* algorithms that we must have the following bandaid to talk
* efficiently to them. -DaveM
*/
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
/* There's a bubble in the pipe until at least the first ACK. */
newtp->app_limited = ~0U;
tcp_init_xmit_timers(newsk);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
newtp->rx_opt.dsack = 0;
newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0;
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
if (sysctl_tcp_fack)
tcp_enable_fack(newtp);
}
newtp->window_clamp = req->rsk_window_clamp;
newtp->rcv_ssthresh = req->rsk_rcv_wnd;
newtp->rcv_wnd = req->rsk_rcv_wnd;
newtp->rx_opt.wscale_ok = ireq->wscale_ok;
if (newtp->rx_opt.wscale_ok) {
newtp->rx_opt.snd_wscale = ireq->snd_wscale;
newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
} else {
newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
newtp->window_clamp = min(newtp->window_clamp, 65535U);
}
newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
newtp->rx_opt.snd_wscale);
newtp->max_window = newtp->snd_wnd;
if (newtp->rx_opt.tstamp_ok) {
newtp->rx_opt.ts_recent = req->ts_recent;
newtp->rx_opt.ts_recent_stamp = get_seconds();
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
if (newtp->af_specific->md5_lookup(sk, newsk))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
newtp->rack.mstamp.v64 = 0;
newtp->rack.advanced = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
return newsk;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-415'], 'message': 'tcp: do not inherit fastopen_req from parent
Under fuzzer stress, it is possible that a child gets a non NULL
fastopen_req pointer from its parent at accept() time, when/if parent
morphs from listener to active session.
We need to make sure this can not happen, by clearing the field after
socket cloning.
BUG: Double free or freeing an invalid pointer
Unexpected shadow byte: 0xFB
CPU: 3 PID: 20933 Comm: syz-executor3 Not tainted 4.11.0+ #306
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs
01/01/2011
Call Trace:
<IRQ>
__dump_stack lib/dump_stack.c:16 [inline]
dump_stack+0x292/0x395 lib/dump_stack.c:52
kasan_object_err+0x1c/0x70 mm/kasan/report.c:164
kasan_report_double_free+0x5c/0x70 mm/kasan/report.c:185
kasan_slab_free+0x9d/0xc0 mm/kasan/kasan.c:580
slab_free_hook mm/slub.c:1357 [inline]
slab_free_freelist_hook mm/slub.c:1379 [inline]
slab_free mm/slub.c:2961 [inline]
kfree+0xe8/0x2b0 mm/slub.c:3882
tcp_free_fastopen_req net/ipv4/tcp.c:1077 [inline]
tcp_disconnect+0xc15/0x13e0 net/ipv4/tcp.c:2328
inet_child_forget+0xb8/0x600 net/ipv4/inet_connection_sock.c:898
inet_csk_reqsk_queue_add+0x1e7/0x250
net/ipv4/inet_connection_sock.c:928
tcp_get_cookie_sock+0x21a/0x510 net/ipv4/syncookies.c:217
cookie_v4_check+0x1a19/0x28b0 net/ipv4/syncookies.c:384
tcp_v4_cookie_check net/ipv4/tcp_ipv4.c:1384 [inline]
tcp_v4_do_rcv+0x731/0x940 net/ipv4/tcp_ipv4.c:1421
tcp_v4_rcv+0x2dc0/0x31c0 net/ipv4/tcp_ipv4.c:1715
ip_local_deliver_finish+0x4cc/0xc20 net/ipv4/ip_input.c:216
NF_HOOK include/linux/netfilter.h:257 [inline]
ip_local_deliver+0x1ce/0x700 net/ipv4/ip_input.c:257
dst_input include/net/dst.h:492 [inline]
ip_rcv_finish+0xb1d/0x20b0 net/ipv4/ip_input.c:396
NF_HOOK include/linux/netfilter.h:257 [inline]
ip_rcv+0xd8c/0x19c0 net/ipv4/ip_input.c:487
__netif_receive_skb_core+0x1ad1/0x3400 net/core/dev.c:4210
__netif_receive_skb+0x2a/0x1a0 net/core/dev.c:4248
process_backlog+0xe5/0x6c0 net/core/dev.c:4868
napi_poll net/core/dev.c:5270 [inline]
net_rx_action+0xe70/0x18e0 net/core/dev.c:5335
__do_softirq+0x2fb/0xb99 kernel/softirq.c:284
do_softirq_own_stack+0x1c/0x30 arch/x86/entry/entry_64.S:899
</IRQ>
do_softirq.part.17+0x1e8/0x230 kernel/softirq.c:328
do_softirq kernel/softirq.c:176 [inline]
__local_bh_enable_ip+0x1cf/0x1e0 kernel/softirq.c:181
local_bh_enable include/linux/bottom_half.h:31 [inline]
rcu_read_unlock_bh include/linux/rcupdate.h:931 [inline]
ip_finish_output2+0x9ab/0x15e0 net/ipv4/ip_output.c:230
ip_finish_output+0xa35/0xdf0 net/ipv4/ip_output.c:316
NF_HOOK_COND include/linux/netfilter.h:246 [inline]
ip_output+0x1f6/0x7b0 net/ipv4/ip_output.c:404
dst_output include/net/dst.h:486 [inline]
ip_local_out+0x95/0x160 net/ipv4/ip_output.c:124
ip_queue_xmit+0x9a8/0x1a10 net/ipv4/ip_output.c:503
tcp_transmit_skb+0x1ade/0x3470 net/ipv4/tcp_output.c:1057
tcp_write_xmit+0x79e/0x55b0 net/ipv4/tcp_output.c:2265
__tcp_push_pending_frames+0xfa/0x3a0 net/ipv4/tcp_output.c:2450
tcp_push+0x4ee/0x780 net/ipv4/tcp.c:683
tcp_sendmsg+0x128d/0x39b0 net/ipv4/tcp.c:1342
inet_sendmsg+0x164/0x5b0 net/ipv4/af_inet.c:762
sock_sendmsg_nosec net/socket.c:633 [inline]
sock_sendmsg+0xca/0x110 net/socket.c:643
SYSC_sendto+0x660/0x810 net/socket.c:1696
SyS_sendto+0x40/0x50 net/socket.c:1664
entry_SYSCALL_64_fastpath+0x1f/0xbe
RIP: 0033:0x446059
RSP: 002b:00007faa6761fb58 EFLAGS: 00000282 ORIG_RAX: 000000000000002c
RAX: ffffffffffffffda RBX: 0000000000000017 RCX: 0000000000446059
RDX: 0000000000000001 RSI: 0000000020ba3fcd RDI: 0000000000000017
RBP: 00000000006e40a0 R08: 0000000020ba4ff0 R09: 0000000000000010
R10: 0000000020000000 R11: 0000000000000282 R12: 0000000000708150
R13: 0000000000000000 R14: 00007faa676209c0 R15: 00007faa67620700
Object at ffff88003b5bbcb8, in cache kmalloc-64 size: 64
Allocated:
PID = 20909
save_stack_trace+0x16/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:513
set_track mm/kasan/kasan.c:525 [inline]
kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:616
kmem_cache_alloc_trace+0x82/0x270 mm/slub.c:2745
kmalloc include/linux/slab.h:490 [inline]
kzalloc include/linux/slab.h:663 [inline]
tcp_sendmsg_fastopen net/ipv4/tcp.c:1094 [inline]
tcp_sendmsg+0x221a/0x39b0 net/ipv4/tcp.c:1139
inet_sendmsg+0x164/0x5b0 net/ipv4/af_inet.c:762
sock_sendmsg_nosec net/socket.c:633 [inline]
sock_sendmsg+0xca/0x110 net/socket.c:643
SYSC_sendto+0x660/0x810 net/socket.c:1696
SyS_sendto+0x40/0x50 net/socket.c:1664
entry_SYSCALL_64_fastpath+0x1f/0xbe
Freed:
PID = 20909
save_stack_trace+0x16/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:513
set_track mm/kasan/kasan.c:525 [inline]
kasan_slab_free+0x73/0xc0 mm/kasan/kasan.c:589
slab_free_hook mm/slub.c:1357 [inline]
slab_free_freelist_hook mm/slub.c:1379 [inline]
slab_free mm/slub.c:2961 [inline]
kfree+0xe8/0x2b0 mm/slub.c:3882
tcp_free_fastopen_req net/ipv4/tcp.c:1077 [inline]
tcp_disconnect+0xc15/0x13e0 net/ipv4/tcp.c:2328
__inet_stream_connect+0x20c/0xf90 net/ipv4/af_inet.c:593
tcp_sendmsg_fastopen net/ipv4/tcp.c:1111 [inline]
tcp_sendmsg+0x23a8/0x39b0 net/ipv4/tcp.c:1139
inet_sendmsg+0x164/0x5b0 net/ipv4/af_inet.c:762
sock_sendmsg_nosec net/socket.c:633 [inline]
sock_sendmsg+0xca/0x110 net/socket.c:643
SYSC_sendto+0x660/0x810 net/socket.c:1696
SyS_sendto+0x40/0x50 net/socket.c:1664
entry_SYSCALL_64_fastpath+0x1f/0xbe
Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
Fixes: 7db92362d2fe ("tcp: fix potential double free issue for fastopen_req")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Acked-by: Wei Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s>
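
The repair is essentially one line in the clone path quoted above: the child socket must start with its own empty fast-open state instead of aliasing the listener's pointer, otherwise tcp_disconnect() on both sockets frees the same request. Sketched placement, next to the existing fastopen_rsk initialisation (field name as in struct tcp_sock):

/* In tcp_create_openreq_child(), beside newtp->fastopen_rsk = NULL: */
newtp->fastopen_req = NULL;   /* do not inherit the parent's request */
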
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code:
int yr_execute_code(
YR_RULES* rules,
YR_SCAN_CONTEXT* context,
int timeout,
time_t start_time)
{
int64_t mem[MEM_SIZE];
int32_t sp = 0;
uint8_t* ip = rules->code_start;
YR_VALUE args[MAX_FUNCTION_ARGS];
YR_VALUE *stack;
YR_VALUE r1;
YR_VALUE r2;
YR_VALUE r3;
#ifdef PROFILING_ENABLED
YR_RULE* current_rule = NULL;
#endif
YR_RULE* rule;
YR_MATCH* match;
YR_OBJECT_FUNCTION* function;
char* identifier;
char* args_fmt;
int i;
int found;
int count;
int result = ERROR_SUCCESS;
int stop = FALSE;
int cycle = 0;
int tidx = context->tidx;
int stack_size;
#ifdef PROFILING_ENABLED
clock_t start = clock();
#endif
yr_get_configuration(YR_CONFIG_STACK_SIZE, (void*) &stack_size);
stack = (YR_VALUE*) yr_malloc(stack_size * sizeof(YR_VALUE));
if (stack == NULL)
return ERROR_INSUFFICIENT_MEMORY;
while(!stop)
{
switch(*ip)
{
case OP_NOP:
break;
case OP_HALT:
assert(sp == 0); // When HALT is reached the stack should be empty.
stop = TRUE;
break;
case OP_PUSH:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
push(r1);
break;
case OP_POP:
pop(r1);
break;
case OP_CLEAR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i] = 0;
break;
case OP_ADD_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (!is_undef(r2))
mem[r1.i] += r2.i;
break;
case OP_INCR_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
mem[r1.i]++;
break;
case OP_PUSH_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r1.i = mem[r1.i];
push(r1);
break;
case OP_POP_M:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
mem[r1.i] = r2.i;
break;
case OP_SWAPUNDEF:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
pop(r2);
if (is_undef(r2))
{
r1.i = mem[r1.i];
push(r1);
}
else
{
push(r2);
}
break;
case OP_JNUNDEF:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1), ip);
break;
case OP_JLE:
pop(r2);
pop(r1);
push(r1);
push(r2);
ip = jmp_if(r1.i <= r2.i, ip);
break;
case OP_JTRUE:
pop(r1);
push(r1);
ip = jmp_if(!is_undef(r1) && r1.i, ip);
break;
case OP_JFALSE:
pop(r1);
push(r1);
ip = jmp_if(is_undef(r1) || !r1.i, ip);
break;
case OP_AND:
pop(r2);
pop(r1);
if (is_undef(r1) || is_undef(r2))
r1.i = 0;
else
r1.i = r1.i && r2.i;
push(r1);
break;
case OP_OR:
pop(r2);
pop(r1);
if (is_undef(r1))
{
push(r2);
}
else if (is_undef(r2))
{
push(r1);
}
else
{
r1.i = r1.i || r2.i;
push(r1);
}
break;
case OP_NOT:
pop(r1);
if (is_undef(r1))
r1.i = UNDEFINED;
else
r1.i= !r1.i;
push(r1);
break;
case OP_MOD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i % r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_SHR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >> r2.i;
push(r1);
break;
case OP_SHL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i << r2.i;
push(r1);
break;
case OP_BITWISE_NOT:
pop(r1);
ensure_defined(r1);
r1.i = ~r1.i;
push(r1);
break;
case OP_BITWISE_AND:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i & r2.i;
push(r1);
break;
case OP_BITWISE_OR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i | r2.i;
push(r1);
break;
case OP_BITWISE_XOR:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i ^ r2.i;
push(r1);
break;
case OP_PUSH_RULE:
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
r1.i = rule->t_flags[tidx] & RULE_TFLAGS_MATCH ? 1 : 0;
push(r1);
break;
case OP_INIT_RULE:
#ifdef PROFILING_ENABLED
current_rule = *(YR_RULE**)(ip + 1);
#endif
ip += sizeof(uint64_t);
break;
case OP_MATCH_RULE:
pop(r1);
rule = *(YR_RULE**)(ip + 1);
ip += sizeof(uint64_t);
if (!is_undef(r1) && r1.i)
rule->t_flags[tidx] |= RULE_TFLAGS_MATCH;
else if (RULE_IS_GLOBAL(rule))
rule->ns->t_flags[tidx] |= NAMESPACE_TFLAGS_UNSATISFIED_GLOBAL;
#ifdef PROFILING_ENABLED
rule->clock_ticks += clock() - start;
start = clock();
#endif
break;
case OP_OBJ_LOAD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
r1.o = (YR_OBJECT*) yr_hash_table_lookup(
context->objects_table,
identifier,
NULL);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_FIELD:
identifier = *(char**)(ip + 1);
ip += sizeof(uint64_t);
pop(r1);
ensure_defined(r1);
r1.o = yr_object_lookup_field(r1.o, identifier);
assert(r1.o != NULL);
push(r1);
break;
case OP_OBJ_VALUE:
pop(r1);
ensure_defined(r1);
switch(r1.o->type)
{
case OBJECT_TYPE_INTEGER:
r1.i = ((YR_OBJECT_INTEGER*) r1.o)->value;
break;
case OBJECT_TYPE_FLOAT:
if (isnan(((YR_OBJECT_DOUBLE*) r1.o)->value))
r1.i = UNDEFINED;
else
r1.d = ((YR_OBJECT_DOUBLE*) r1.o)->value;
break;
case OBJECT_TYPE_STRING:
if (((YR_OBJECT_STRING*) r1.o)->value == NULL)
r1.i = UNDEFINED;
else
r1.p = ((YR_OBJECT_STRING*) r1.o)->value;
break;
default:
assert(FALSE);
}
push(r1);
break;
case OP_INDEX_ARRAY:
pop(r1); // index
pop(r2); // array
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_ARRAY);
r1.o = yr_object_array_get_item(r2.o, 0, (int) r1.i);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_LOOKUP_DICT:
pop(r1); // key
pop(r2); // dictionary
ensure_defined(r1);
ensure_defined(r2);
assert(r2.o->type == OBJECT_TYPE_DICTIONARY);
r1.o = yr_object_dict_get_item(
r2.o, 0, r1.ss->c_string);
if (r1.o == NULL)
r1.i = UNDEFINED;
push(r1);
break;
case OP_CALL:
args_fmt = *(char**)(ip + 1);
ip += sizeof(uint64_t);
i = (int) strlen(args_fmt);
count = 0;
// pop arguments from stack and copy them to args array
while (i > 0)
{
pop(r1);
if (is_undef(r1)) // count the number of undefined args
count++;
args[i - 1] = r1;
i--;
}
pop(r2);
ensure_defined(r2);
if (count > 0)
{
// if there are undefined args, result for function call
// is undefined as well.
r1.i = UNDEFINED;
push(r1);
break;
}
function = (YR_OBJECT_FUNCTION*) r2.o;
result = ERROR_INTERNAL_FATAL_ERROR;
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
{
if (function->prototypes[i].arguments_fmt == NULL)
break;
if (strcmp(function->prototypes[i].arguments_fmt, args_fmt) == 0)
{
result = function->prototypes[i].code(args, context, function);
break;
}
}
assert(i < MAX_OVERLOADED_FUNCTIONS);
if (result == ERROR_SUCCESS)
{
r1.o = function->return_obj;
push(r1);
}
else
{
stop = TRUE;
}
break;
case OP_FOUND:
pop(r1);
r1.i = r1.s->matches[tidx].tail != NULL ? 1 : 0;
push(r1);
break;
case OP_FOUND_AT:
pop(r2);
pop(r1);
if (is_undef(r1))
{
r1.i = 0;
push(r1);
break;
}
match = r2.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL)
{
if (r1.i == match->base + match->offset)
{
r3.i = TRUE;
break;
}
if (r1.i < match->base + match->offset)
break;
match = match->next;
}
push(r3);
break;
case OP_FOUND_IN:
pop(r3);
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
match = r3.s->matches[tidx].head;
r3.i = FALSE;
while (match != NULL && !r3.i)
{
if (match->base + match->offset >= r1.i &&
match->base + match->offset <= r2.i)
{
r3.i = TRUE;
}
if (match->base + match->offset > r2.i)
break;
match = match->next;
}
push(r3);
break;
case OP_COUNT:
pop(r1);
r1.i = r1.s->matches[tidx].count;
push(r1);
break;
case OP_OFFSET:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->base + match->offset;
i++;
match = match->next;
}
push(r3);
break;
case OP_LENGTH:
pop(r2);
pop(r1);
ensure_defined(r1);
match = r2.s->matches[tidx].head;
i = 1;
r3.i = UNDEFINED;
while (match != NULL && r3.i == UNDEFINED)
{
if (r1.i == i)
r3.i = match->match_length;
i++;
match = match->next;
}
push(r3);
break;
case OP_OF:
found = 0;
count = 0;
pop(r1);
while (!is_undef(r1))
{
if (r1.s->matches[tidx].tail != NULL)
found++;
count++;
pop(r1);
}
pop(r2);
if (is_undef(r2))
r1.i = found >= count ? 1 : 0;
else
r1.i = found >= r2.i ? 1 : 0;
push(r1);
break;
case OP_FILESIZE:
r1.i = context->file_size;
push(r1);
break;
case OP_ENTRYPOINT:
r1.i = context->entry_point;
push(r1);
break;
case OP_INT8:
pop(r1);
r1.i = read_int8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16:
pop(r1);
r1.i = read_int16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32:
pop(r1);
r1.i = read_int32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8:
pop(r1);
r1.i = read_uint8_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16:
pop(r1);
r1.i = read_uint16_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32:
pop(r1);
r1.i = read_uint32_t_little_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT8BE:
pop(r1);
r1.i = read_int8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT16BE:
pop(r1);
r1.i = read_int16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_INT32BE:
pop(r1);
r1.i = read_int32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT8BE:
pop(r1);
r1.i = read_uint8_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT16BE:
pop(r1);
r1.i = read_uint16_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_UINT32BE:
pop(r1);
r1.i = read_uint32_t_big_endian(context->iterator, (size_t) r1.i);
push(r1);
break;
case OP_CONTAINS:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
r1.i = memmem(r1.ss->c_string, r1.ss->length,
r2.ss->c_string, r2.ss->length) != NULL;
push(r1);
break;
case OP_IMPORT:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
result = yr_modules_load((char*) r1.p, context);
if (result != ERROR_SUCCESS)
stop = TRUE;
break;
case OP_MATCHES:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r1.ss->length == 0)
{
r1.i = FALSE;
push(r1);
break;
}
result = yr_re_exec(
(uint8_t*) r2.re->code,
(uint8_t*) r1.ss->c_string,
r1.ss->length,
0,
r2.re->flags | RE_FLAGS_SCAN,
NULL,
NULL,
&found);
if (result != ERROR_SUCCESS)
stop = TRUE;
r1.i = found >= 0;
push(r1);
break;
case OP_INT_TO_DBL:
r1.i = *(uint64_t*)(ip + 1);
ip += sizeof(uint64_t);
r2 = stack[sp - r1.i];
if (is_undef(r2))
stack[sp - r1.i].i = UNDEFINED;
else
stack[sp - r1.i].d = (double) r2.i;
break;
case OP_STR_TO_BOOL:
pop(r1);
ensure_defined(r1);
r1.i = r1.ss->length > 0;
push(r1);
break;
case OP_INT_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i == r2.i;
push(r1);
break;
case OP_INT_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i != r2.i;
push(r1);
break;
case OP_INT_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i < r2.i;
push(r1);
break;
case OP_INT_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i > r2.i;
push(r1);
break;
case OP_INT_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i <= r2.i;
push(r1);
break;
case OP_INT_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i >= r2.i;
push(r1);
break;
case OP_INT_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i + r2.i;
push(r1);
break;
case OP_INT_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i - r2.i;
push(r1);
break;
case OP_INT_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.i * r2.i;
push(r1);
break;
case OP_INT_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
if (r2.i != 0)
r1.i = r1.i / r2.i;
else
r1.i = UNDEFINED;
push(r1);
break;
case OP_INT_MINUS:
pop(r1);
ensure_defined(r1);
r1.i = -r1.i;
push(r1);
break;
case OP_DBL_LT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d < r2.d;
push(r1);
break;
case OP_DBL_GT:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d > r2.d;
push(r1);
break;
case OP_DBL_LE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d <= r2.d;
push(r1);
break;
case OP_DBL_GE:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d >= r2.d;
push(r1);
break;
case OP_DBL_EQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d == r2.d;
push(r1);
break;
case OP_DBL_NEQ:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.i = r1.d != r2.d;
push(r1);
break;
case OP_DBL_ADD:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d + r2.d;
push(r1);
break;
case OP_DBL_SUB:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d - r2.d;
push(r1);
break;
case OP_DBL_MUL:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d * r2.d;
push(r1);
break;
case OP_DBL_DIV:
pop(r2);
pop(r1);
ensure_defined(r2);
ensure_defined(r1);
r1.d = r1.d / r2.d;
push(r1);
break;
case OP_DBL_MINUS:
pop(r1);
ensure_defined(r1);
r1.d = -r1.d;
push(r1);
break;
case OP_STR_EQ:
case OP_STR_NEQ:
case OP_STR_LT:
case OP_STR_LE:
case OP_STR_GT:
case OP_STR_GE:
pop(r2);
pop(r1);
ensure_defined(r1);
ensure_defined(r2);
switch(*ip)
{
case OP_STR_EQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) == 0);
break;
case OP_STR_NEQ:
r1.i = (sized_string_cmp(r1.ss, r2.ss) != 0);
break;
case OP_STR_LT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) < 0);
break;
case OP_STR_LE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) <= 0);
break;
case OP_STR_GT:
r1.i = (sized_string_cmp(r1.ss, r2.ss) > 0);
break;
case OP_STR_GE:
r1.i = (sized_string_cmp(r1.ss, r2.ss) >= 0);
break;
}
push(r1);
break;
default:
// Unknown instruction, this shouldn't happen.
assert(FALSE);
}
if (timeout > 0) // timeout == 0 means no timeout
{
// Check for timeout every 10 instruction cycles.
if (++cycle == 10)
{
if (difftime(time(NULL), start_time) > timeout)
{
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
current_rule->clock_ticks += clock() - start;
#endif
result = ERROR_SCAN_TIMEOUT;
stop = TRUE;
}
cycle = 0;
}
}
ip++;
}
yr_modules_unload_all(context);
yr_free(stack);
return result;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-416'], 'message': 'Fix issue #658'</s>
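
The push()/pop() macros this dispatch loop leans on are not shown in the excerpt, and every opcode trusts them completely. Independent of the specific use-after-free behind issue #658, an interpreter like this is only as safe as its operand-stack guards; a hedged sketch of bounds-checked macros follows (YARA's real macros differ in detail, and the error names are assumed from the library's conventions):

/* Guarded operand-stack access for the dispatch loop (illustrative): */
#define push(x)                                                    \
    do {                                                           \
      if (sp < stack_size)                                         \
        stack[sp++] = (x);                                         \
      else { result = ERROR_EXEC_STACK_OVERFLOW; stop = TRUE; }    \
    } while (0)

#define pop(x)                                                     \
    do {                                                           \
      if (sp > 0)                                                  \
        (x) = stack[--sp];                                         \
      else { result = ERROR_INTERNAL_FATAL_ERROR; stop = TRUE; }   \
    } while (0)
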
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int yr_object_copy(
YR_OBJECT* object,
YR_OBJECT** object_copy)
{
YR_OBJECT* copy;
YR_OBJECT* o;
YR_STRUCTURE_MEMBER* structure_member;
YR_OBJECT_FUNCTION* func;
YR_OBJECT_FUNCTION* func_copy;
int i;
*object_copy = NULL;
FAIL_ON_ERROR(yr_object_create(
object->type,
object->identifier,
NULL,
&copy));
switch(object->type)
{
case OBJECT_TYPE_INTEGER:
((YR_OBJECT_INTEGER*) copy)->value = UNDEFINED;
break;
case OBJECT_TYPE_STRING:
((YR_OBJECT_STRING*) copy)->value = NULL;
break;
case OBJECT_TYPE_FUNCTION:
func = (YR_OBJECT_FUNCTION*) object;
func_copy = (YR_OBJECT_FUNCTION*) copy;
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_copy(func->return_obj, &func_copy->return_obj),
yr_object_destroy(copy));
for (i = 0; i < MAX_OVERLOADED_FUNCTIONS; i++)
func_copy->prototypes[i] = func->prototypes[i];
break;
case OBJECT_TYPE_STRUCTURE:
structure_member = ((YR_OBJECT_STRUCTURE*) object)->members;
while (structure_member != NULL)
{
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_copy(structure_member->object, &o),
yr_object_destroy(copy));
FAIL_ON_ERROR_WITH_CLEANUP(
yr_object_structure_set_member(copy, o),
yr_free(o);
yr_object_destroy(copy));
structure_member = structure_member->next;
}
break;
case OBJECT_TYPE_ARRAY:
yr_object_copy(
((YR_OBJECT_ARRAY *) object)->prototype_item,
&o);
((YR_OBJECT_ARRAY *)copy)->prototype_item = o;
break;
case OBJECT_TYPE_DICTIONARY:
yr_object_copy(
((YR_OBJECT_DICTIONARY *) object)->prototype_item,
&o);
((YR_OBJECT_DICTIONARY *)copy)->prototype_item = o;
break;
default:
assert(FALSE);
}
*object_copy = copy;
return ERROR_SUCCESS;
} ; Expected output must conform to the sample JSON format: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-416'], 'message': 'Fix issue #658'</s>
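
The fragile spot in yr_object_copy() for the CWE-416 family is the structure branch: once yr_object_structure_set_member(copy, o) has linked o into copy, a cleanup that runs yr_free(o) and then yr_object_destroy(copy) can touch the same object twice. A hedged sketch of the ownership discipline, assuming set_member leaves o unlinked on failure (that assumption must hold for this to be safe):

/* Ownership transfer, exactly one owner at a time (sketch): */
result = yr_object_structure_set_member(copy, o);
if (result != ERROR_SUCCESS)
{
    yr_object_destroy(o);     /* still solely ours: release it once, fully */
    yr_object_destroy(copy);  /* must not also reference 'o' here */
    return result;
}
/* from here on, only yr_object_destroy(copy) may release 'o' */
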
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void edge_bulk_in_callback(struct urb *urb)
{
struct edgeport_port *edge_port = urb->context;
struct device *dev = &edge_port->port->dev;
unsigned char *data = urb->transfer_buffer;
int retval = 0;
int port_number;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n", __func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n", __func__, status);
}
if (status == -EPIPE)
goto exit;
if (status) {
dev_err(&urb->dev->dev, "%s - stopping read!\n", __func__);
return;
}
port_number = edge_port->port->port_number;
if (edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
/* Adjust buffer length/pointer */
--urb->actual_length;
++data;
}
if (urb->actual_length) {
usb_serial_debug_data(dev, __func__, urb->actual_length, data);
if (edge_port->close_pending)
dev_dbg(dev, "%s - close pending, dropping data on the floor\n",
__func__);
else
edge_tty_recv(edge_port->port, data,
urb->actual_length);
edge_port->port->icount.rx += urb->actual_length;
}
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval);
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-191'], 'message': 'USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <[email protected]> # 2.6.30
Signed-off-by: Johan Hovold <[email protected]>'</s> |
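The missing sanity check described above guards the length adjustment in the LSR branch: with actual_length == 0, the `--urb->actual_length` underflows and the receive path then pushes memory far past the transfer buffer to user space. A minimal sketch of one way to place the guard (the exact form of the upstream patch may differ):

if (urb->actual_length > 0 && edge_port->lsr_event) {
        edge_port->lsr_event = 0;
        handle_new_lsr(edge_port, 1, edge_port->lsr_mask, *data);
        --urb->actual_length;   /* safe: length >= 1 in this branch */
        ++data;
}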
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int CLASS parse_jpeg(int offset)
{
int len, save, hlen, mark;
fseek(ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda)
{
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9)
{
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff(save + hlen, len - hlen, 0);
}
if (parse_tiff(save + 6))
apply_tiff();
fseek(ifp, save + len, SEEK_SET);
}
return 1;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476', 'CWE-119'], 'message': 'Secunia SA75000 advisory: several buffer overruns'</s> |
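The advisory text is terse; the overruns stem from trusting length fields read straight from the file. A hedged sketch of the kind of validation the marker loop needs (variable names follow the snippet; the actual LibRaw patch may differ):

/* guard 1: a marker segment must at least cover its own length field */
if (len < 0)
    return 0;

/* guard 2: the embedded header length must fit inside the segment,
 * so that "len - hlen" passed to parse_ciff() cannot go negative */
if (hlen < 0 || hlen > len)
    return 0;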
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ZEND_API int ZEND_FASTCALL concat_function(zval *result, zval *op1, zval *op2) /* {{{ */
{
zval *orig_op1 = op1;
zval op1_copy, op2_copy;
ZVAL_UNDEF(&op1_copy);
ZVAL_UNDEF(&op2_copy);
do {
if (UNEXPECTED(Z_TYPE_P(op1) != IS_STRING)) {
if (Z_ISREF_P(op1)) {
op1 = Z_REFVAL_P(op1);
if (Z_TYPE_P(op1) == IS_STRING) break;
}
ZEND_TRY_BINARY_OBJECT_OPERATION(ZEND_CONCAT, concat_function);
ZVAL_STR(&op1_copy, zval_get_string_func(op1));
if (UNEXPECTED(EG(exception))) {
zval_ptr_dtor_str(&op1_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
if (result == op1) {
if (UNEXPECTED(op1 == op2)) {
op2 = &op1_copy;
}
}
op1 = &op1_copy;
}
} while (0);
do {
if (UNEXPECTED(Z_TYPE_P(op2) != IS_STRING)) {
if (Z_ISREF_P(op2)) {
op2 = Z_REFVAL_P(op2);
if (Z_TYPE_P(op2) == IS_STRING) break;
}
ZEND_TRY_BINARY_OP2_OBJECT_OPERATION(ZEND_CONCAT);
ZVAL_STR(&op2_copy, zval_get_string_func(op2));
if (UNEXPECTED(EG(exception))) {
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
op2 = &op2_copy;
}
} while (0);
if (UNEXPECTED(Z_STRLEN_P(op1) == 0)) {
if (EXPECTED(result != op2)) {
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
ZVAL_COPY(result, op2);
}
} else if (UNEXPECTED(Z_STRLEN_P(op2) == 0)) {
if (EXPECTED(result != op1)) {
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
ZVAL_COPY(result, op1);
}
} else {
size_t op1_len = Z_STRLEN_P(op1);
size_t op2_len = Z_STRLEN_P(op2);
size_t result_len = op1_len + op2_len;
zend_string *result_str;
if (UNEXPECTED(op1_len > ZSTR_MAX_LEN - op2_len)) {
zend_throw_error(NULL, "String size overflow");
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
if (result == op1 && Z_REFCOUNTED_P(result)) {
/* special case, perform operations on result */
result_str = zend_string_extend(Z_STR_P(result), result_len, 0);
} else {
result_str = zend_string_alloc(result_len, 0);
memcpy(ZSTR_VAL(result_str), Z_STRVAL_P(op1), op1_len);
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
}
/* This has to happen first to account for the cases where result == op1 == op2 and
* the realloc is done. In this case this line will also update Z_STRVAL_P(op2) to
* point to the new string. The first op2_len bytes of result will still be the same. */
ZVAL_NEW_STR(result, result_str);
memcpy(ZSTR_VAL(result_str) + op1_len, Z_STRVAL_P(op2), op2_len);
ZSTR_VAL(result_str)[result_len] = '\0';
}
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
return SUCCESS;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-787'], 'message': 'Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away.'</s> |
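The guard added in this function generalizes: test `a > LIMIT - b` before computing `a + b`, because testing `a + b > LIMIT` after the fact can itself wrap. A self-contained sketch of the pattern (plain C, not PHP's macros):

#include <stddef.h>
#include <stdbool.h>

/* Stores a + b in *out only when the sum stays within limit.
 * Precondition: b <= limit, which holds in the concat case because
 * each operand is the length of an existing, already-valid string. */
static bool add_len_checked(size_t a, size_t b, size_t limit, size_t *out)
{
    if (a > limit - b)
        return false;       /* a + b would exceed limit (or wrap) */
    *out = a + b;
    return true;
}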
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ZEND_API int ZEND_FASTCALL concat_function(zval *result, zval *op1, zval *op2) /* {{{ */
{
zval *orig_op1 = op1;
zval op1_copy, op2_copy;
ZVAL_UNDEF(&op1_copy);
ZVAL_UNDEF(&op2_copy);
do {
if (UNEXPECTED(Z_TYPE_P(op1) != IS_STRING)) {
if (Z_ISREF_P(op1)) {
op1 = Z_REFVAL_P(op1);
if (Z_TYPE_P(op1) == IS_STRING) break;
}
ZEND_TRY_BINARY_OBJECT_OPERATION(ZEND_CONCAT, concat_function);
ZVAL_STR(&op1_copy, zval_get_string_func(op1));
if (UNEXPECTED(EG(exception))) {
zval_ptr_dtor_str(&op1_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
if (result == op1) {
if (UNEXPECTED(op1 == op2)) {
op2 = &op1_copy;
}
}
op1 = &op1_copy;
}
} while (0);
do {
if (UNEXPECTED(Z_TYPE_P(op2) != IS_STRING)) {
if (Z_ISREF_P(op2)) {
op2 = Z_REFVAL_P(op2);
if (Z_TYPE_P(op2) == IS_STRING) break;
}
ZEND_TRY_BINARY_OP2_OBJECT_OPERATION(ZEND_CONCAT);
ZVAL_STR(&op2_copy, zval_get_string_func(op2));
if (UNEXPECTED(EG(exception))) {
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
op2 = &op2_copy;
}
} while (0);
if (UNEXPECTED(Z_STRLEN_P(op1) == 0)) {
if (EXPECTED(result != op2)) {
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
ZVAL_COPY(result, op2);
}
} else if (UNEXPECTED(Z_STRLEN_P(op2) == 0)) {
if (EXPECTED(result != op1)) {
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
ZVAL_COPY(result, op1);
}
} else {
size_t op1_len = Z_STRLEN_P(op1);
size_t op2_len = Z_STRLEN_P(op2);
size_t result_len = op1_len + op2_len;
zend_string *result_str;
if (UNEXPECTED(op1_len > SIZE_MAX - op2_len)) {
zend_throw_error(NULL, "String size overflow");
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
if (orig_op1 != result) {
ZVAL_UNDEF(result);
}
return FAILURE;
}
if (result == op1 && Z_REFCOUNTED_P(result)) {
/* special case, perform operations on result */
result_str = zend_string_extend(Z_STR_P(result), result_len, 0);
} else {
result_str = zend_string_alloc(result_len, 0);
memcpy(ZSTR_VAL(result_str), Z_STRVAL_P(op1), op1_len);
if (result == orig_op1) {
i_zval_ptr_dtor(result);
}
}
/* This has to happen first to account for the cases where result == op1 == op2 and
* the realloc is done. In this case this line will also update Z_STRVAL_P(op2) to
* point to the new string. The first op2_len bytes of result will still be the same. */
ZVAL_NEW_STR(result, result_str);
memcpy(ZSTR_VAL(result_str) + op1_len, Z_STRVAL_P(op2), op2_len);
ZSTR_VAL(result_str)[result_len] = '\0';
}
zval_ptr_dtor_str(&op1_copy);
zval_ptr_dtor_str(&op2_copy);
return SUCCESS;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-787'], 'message': 'Fix #74960: Heap buffer overflow via str_repeat
Trying to allocate a `zend_string` with a length only slightly smaller
than `SIZE_MAX` causes an integer overflow, so callers may need to
check that explicitly. To make that easy in a portable way, we
introduce `ZSTR_MAX_LEN`.
Closes GH-7294.'</s> |
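Why `SIZE_MAX` alone is not a safe cap: the allocator adds a struct header and a NUL byte on top of the payload, so a payload length close to `SIZE_MAX` wraps inside the allocation-size computation. A sketch of the idea (the struct layout is illustrative, not PHP's exact `zend_string` definition):

#include <stddef.h>
#include <stdint.h>

struct str { size_t len; char val[1]; };   /* header + flexible payload */

/* alloc size = header + payload + trailing NUL; if len is within a few
 * bytes of SIZE_MAX this expression wraps to a tiny number and the
 * resulting buffer is far too small -- hence a dedicated maximum: */
#define STR_STRUCT_SIZE(len) (offsetof(struct str, val) + (len) + 1)
#define STR_MAX_LEN (SIZE_MAX - offsetof(struct str, val) - 1)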
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int expandRegular(rpmfi fi, const char *dest, rpmpsm psm, int nodigest, int nocontent)
{
FD_t wfd = NULL;
int rc = 0;
/* Create the file with 0200 permissions (write by owner). */
{
mode_t old_umask = umask(0577);
wfd = Fopen(dest, "w.ufdio");
umask(old_umask);
}
if (Ferror(wfd)) {
rc = RPMERR_OPEN_FAILED;
goto exit;
}
if (!nocontent)
rc = rpmfiArchiveReadToFilePsm(fi, wfd, nodigest, psm);
exit:
if (wfd) {
int myerrno = errno;
Fclose(wfd);
errno = myerrno;
}
return rc;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-59', 'CWE-61'], 'message': 'Don't follow symlinks on file creation (CVE-2017-7501)
Open newly created files with O_EXCL to prevent symlink tricks.
When reopening hardlinks for writing the actual content, use append
mode instead. This is compatible with the write-only permissions but
is not destructive in case we got redirected to somebody else's file;
verify the target before actually writing anything.
As these are files with the temporary suffix, errors mean a local
user with sufficient privileges to break the installation of the package
anyway is trying to goof us on purpose, don't bother trying to mend it
(we couldn't fix the hardlink case anyhow) but just bail out.
Based on a patch by Florian Festi.'</s> |
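The hardening described above comes down to one flag: O_EXCL refuses to open anything that already exists at the destination, including an attacker-planted symlink. A minimal userspace sketch (rpm's real code goes through its Fopen wrapper, so this shows the idea, not the patch):

#include <fcntl.h>

/* Fails with EEXIST if dest is already there -- in particular if it
 * is a symlink pointing at somebody else's file -- so the new file
 * can never be created through a redirected path. */
int fd = open(dest, O_WRONLY | O_CREAT | O_EXCL, 0200);
if (fd < 0)
        return RPMERR_OPEN_FAILED;   /* error code reused from the snippet */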
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
return core->mac[r->dh] == core->mac[r->dt];
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-835'], 'message': 'net: e1000e: fix an infinite loop issue
This issue is like the one in the e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]>'</s> |
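With head == tail as the only termination condition, a guest that programs an out-of-range tail keeps the device emulation spinning forever. The referenced e1000 fix bounds the walk by the ring length; a hedged sketch of the same guard at a caller of the helper above (field and macro names follow QEMU's e1000e core, but the exact patch may differ):

/* A ring of dlen bytes holds at most dlen / desc_size descriptors,
 * so stop after that many iterations even if a bogus tail value
 * keeps the ring looking non-empty. */
uint32_t max_descs = core->mac[r->dlen] / E1000_RING_DESC_LEN;

while (!e1000e_ring_empty(core, r) && max_descs-- > 0) {
        /* process one descriptor and advance the head pointer */
}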
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
const struct net_offload *ops;
int proto;
struct frag_hdr *fptr;
unsigned int unfrag_ip6hlen;
unsigned int payload_len;
u8 *prevhdr;
int offset = 0;
bool encap, udpfrag;
int nhoff;
bool gso_partial;
skb_reset_network_header(skb);
nhoff = skb_network_header(skb) - skb_mac_header(skb);
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
encap = SKB_GSO_CB(skb)->encap_level > 0;
if (encap)
features &= skb->dev->hw_enc_features;
SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
ipv6h = ipv6_hdr(skb);
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
if (skb->encapsulation &&
skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
udpfrag = proto == IPPROTO_UDP && encap;
else
udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
ops = rcu_dereference(inet6_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->callbacks.gso_segment(skb, features);
}
if (IS_ERR_OR_NULL(segs))
goto out;
gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
if (gso_partial)
payload_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)(ipv6h + 1);
else
payload_len = skb->len - nhoff - sizeof(*ipv6h);
ipv6h->payload_len = htons(payload_len);
skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
if (skb->next)
fptr->frag_off |= htons(IP6_MF);
offset += (ntohs(ipv6h->payload_len) -
sizeof(struct frag_hdr));
}
if (encap)
skb_reset_inner_headers(skb);
}
out:
return segs;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-125'], 'message': 'ipv6: Prevent overrun when parsing v6 header options
The KASAN warning reported below was discovered with a syzkaller
program. The reproducer is basically:
int s = socket(AF_INET6, SOCK_RAW, NEXTHDR_HOP);
send(s, &one_byte_of_data, 1, MSG_MORE);
send(s, &more_than_mtu_bytes_data, 2000, 0);
The socket() call sets the nexthdr field of the v6 header to
NEXTHDR_HOP, the first send call primes the payload with a non zero
byte of data, and the second send call triggers the fragmentation path.
The fragmentation code tries to parse the header options in order
to figure out where to insert the fragment option. Since nexthdr points
to an invalid option, the calculation of the size of the network header
can be made much larger than the linear section of the skb, and data
is read outside of it.
This fix makes ip6_find_1stfragopt return an error if it detects
that it is running out of bounds.
[ 42.361487] ==================================================================
[ 42.364412] BUG: KASAN: slab-out-of-bounds in ip6_fragment+0x11c8/0x3730
[ 42.365471] Read of size 840 at addr ffff88000969e798 by task ip6_fragment-oo/3789
[ 42.366469]
[ 42.366696] CPU: 1 PID: 3789 Comm: ip6_fragment-oo Not tainted 4.11.0+ #41
[ 42.367628] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.1-1ubuntu1 04/01/2014
[ 42.368824] Call Trace:
[ 42.369183] dump_stack+0xb3/0x10b
[ 42.369664] print_address_description+0x73/0x290
[ 42.370325] kasan_report+0x252/0x370
[ 42.370839] ? ip6_fragment+0x11c8/0x3730
[ 42.371396] check_memory_region+0x13c/0x1a0
[ 42.371978] memcpy+0x23/0x50
[ 42.372395] ip6_fragment+0x11c8/0x3730
[ 42.372920] ? nf_ct_expect_unregister_notifier+0x110/0x110
[ 42.373681] ? ip6_copy_metadata+0x7f0/0x7f0
[ 42.374263] ? ip6_forward+0x2e30/0x2e30
[ 42.374803] ip6_finish_output+0x584/0x990
[ 42.375350] ip6_output+0x1b7/0x690
[ 42.375836] ? ip6_finish_output+0x990/0x990
[ 42.376411] ? ip6_fragment+0x3730/0x3730
[ 42.376968] ip6_local_out+0x95/0x160
[ 42.377471] ip6_send_skb+0xa1/0x330
[ 42.377969] ip6_push_pending_frames+0xb3/0xe0
[ 42.378589] rawv6_sendmsg+0x2051/0x2db0
[ 42.379129] ? rawv6_bind+0x8b0/0x8b0
[ 42.379633] ? _copy_from_user+0x84/0xe0
[ 42.380193] ? debug_check_no_locks_freed+0x290/0x290
[ 42.380878] ? ___sys_sendmsg+0x162/0x930
[ 42.381427] ? rcu_read_lock_sched_held+0xa3/0x120
[ 42.382074] ? sock_has_perm+0x1f6/0x290
[ 42.382614] ? ___sys_sendmsg+0x167/0x930
[ 42.383173] ? lock_downgrade+0x660/0x660
[ 42.383727] inet_sendmsg+0x123/0x500
[ 42.384226] ? inet_sendmsg+0x123/0x500
[ 42.384748] ? inet_recvmsg+0x540/0x540
[ 42.385263] sock_sendmsg+0xca/0x110
[ 42.385758] SYSC_sendto+0x217/0x380
[ 42.386249] ? SYSC_connect+0x310/0x310
[ 42.386783] ? __might_fault+0x110/0x1d0
[ 42.387324] ? lock_downgrade+0x660/0x660
[ 42.387880] ? __fget_light+0xa1/0x1f0
[ 42.388403] ? __fdget+0x18/0x20
[ 42.388851] ? sock_common_setsockopt+0x95/0xd0
[ 42.389472] ? SyS_setsockopt+0x17f/0x260
[ 42.390021] ? entry_SYSCALL_64_fastpath+0x5/0xbe
[ 42.390650] SyS_sendto+0x40/0x50
[ 42.391103] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.391731] RIP: 0033:0x7fbbb711e383
[ 42.392217] RSP: 002b:00007ffff4d34f28 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 42.393235] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbbb711e383
[ 42.394195] RDX: 0000000000001000 RSI: 00007ffff4d34f60 RDI: 0000000000000003
[ 42.395145] RBP: 0000000000000046 R08: 00007ffff4d34f40 R09: 0000000000000018
[ 42.396056] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000400aad
[ 42.396598] R13: 0000000000000066 R14: 00007ffff4d34ee0 R15: 00007fbbb717af00
[ 42.397257]
[ 42.397411] Allocated by task 3789:
[ 42.397702] save_stack_trace+0x16/0x20
[ 42.398005] save_stack+0x46/0xd0
[ 42.398267] kasan_kmalloc+0xad/0xe0
[ 42.398548] kasan_slab_alloc+0x12/0x20
[ 42.398848] __kmalloc_node_track_caller+0xcb/0x380
[ 42.399224] __kmalloc_reserve.isra.32+0x41/0xe0
[ 42.399654] __alloc_skb+0xf8/0x580
[ 42.400003] sock_wmalloc+0xab/0xf0
[ 42.400346] __ip6_append_data.isra.41+0x2472/0x33d0
[ 42.400813] ip6_append_data+0x1a8/0x2f0
[ 42.401122] rawv6_sendmsg+0x11ee/0x2db0
[ 42.401505] inet_sendmsg+0x123/0x500
[ 42.401860] sock_sendmsg+0xca/0x110
[ 42.402209] ___sys_sendmsg+0x7cb/0x930
[ 42.402582] __sys_sendmsg+0xd9/0x190
[ 42.402941] SyS_sendmsg+0x2d/0x50
[ 42.403273] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.403718]
[ 42.403871] Freed by task 1794:
[ 42.404146] save_stack_trace+0x16/0x20
[ 42.404515] save_stack+0x46/0xd0
[ 42.404827] kasan_slab_free+0x72/0xc0
[ 42.405167] kfree+0xe8/0x2b0
[ 42.405462] skb_free_head+0x74/0xb0
[ 42.405806] skb_release_data+0x30e/0x3a0
[ 42.406198] skb_release_all+0x4a/0x60
[ 42.406563] consume_skb+0x113/0x2e0
[ 42.406910] skb_free_datagram+0x1a/0xe0
[ 42.407288] netlink_recvmsg+0x60d/0xe40
[ 42.407667] sock_recvmsg+0xd7/0x110
[ 42.408022] ___sys_recvmsg+0x25c/0x580
[ 42.408395] __sys_recvmsg+0xd6/0x190
[ 42.408753] SyS_recvmsg+0x2d/0x50
[ 42.409086] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.409513]
[ 42.409665] The buggy address belongs to the object at ffff88000969e780
[ 42.409665] which belongs to the cache kmalloc-512 of size 512
[ 42.410846] The buggy address is located 24 bytes inside of
[ 42.410846] 512-byte region [ffff88000969e780, ffff88000969e980)
[ 42.411941] The buggy address belongs to the page:
[ 42.412405] page:ffffea000025a780 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
[ 42.413298] flags: 0x100000000008100(slab|head)
[ 42.413729] raw: 0100000000008100 0000000000000000 0000000000000000 00000001800c000c
[ 42.414387] raw: ffffea00002a9500 0000000900000007 ffff88000c401280 0000000000000000
[ 42.415074] page dumped because: kasan: bad access detected
[ 42.415604]
[ 42.415757] Memory state around the buggy address:
[ 42.416222] ffff88000969e880: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.416904] ffff88000969e900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.417591] >ffff88000969e980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 42.418273] ^
[ 42.418588] ffff88000969ea00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419273] ffff88000969ea80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419882] ==================================================================
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Craig Gallek <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
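The fix lands in ip6_find_1stfragopt itself: every read of an option header must stay inside the linear data, and an overrun turns into a negative errno instead of an out-of-bounds access. A simplified sketch of the bound (not the full helper):

unsigned int offset = sizeof(struct ipv6hdr);   /* walk starts past the fixed header */
unsigned int packet_len = skb_tail_pointer(skb) - skb_network_header(skb);

/* before dereferencing each option header during the walk: */
if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
        return -EINVAL;   /* nexthdr chain points past the buffer */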
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
unsigned int unfrag_ip6hlen, unfrag_len;
struct frag_hdr *fptr;
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
/* Set the IPv6 fragment id if not set yet */
if (!skb_shinfo(skb)->ip6_frag_id)
ipv6_proxy_select_ident(dev_net(skb->dev), skb);
segs = NULL;
goto out;
}
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features, true);
else {
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto out;
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
uh = udp_hdr(skb);
ipv6h = ipv6_hdr(skb);
uh->check = 0;
csum = skb_checksum(skb, 0, skb->len, 0);
uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
&ipv6h->daddr, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
skb->ip_summed = CHECKSUM_NONE;
/* If there is no outer header we can fake a checksum offload
* due to the fact that we have already done the checksum in
* software prior to segmenting the frame.
*/
if (!skb->encap_hdr_csum)
features |= NETIF_F_HW_CSUM;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
goto out;
}
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
unfrag_ip6hlen + tnl_hlen;
packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
skb->mac_header -= frag_hdr_sz;
skb->network_header -= frag_hdr_sz;
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
if (!skb_shinfo(skb)->ip6_frag_id)
ipv6_proxy_select_ident(dev_net(skb->dev), skb);
fptr->identification = skb_shinfo(skb)->ip6_frag_id;
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
*/
segs = skb_segment(skb, features);
}
out:
return segs;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-125'], 'message': 'ipv6: Prevent overrun when parsing v6 header options
The KASAN warning reported below was discovered with a syzkaller
program. The reproducer is basically:
int s = socket(AF_INET6, SOCK_RAW, NEXTHDR_HOP);
send(s, &one_byte_of_data, 1, MSG_MORE);
send(s, &more_than_mtu_bytes_data, 2000, 0);
The socket() call sets the nexthdr field of the v6 header to
NEXTHDR_HOP, the first send call primes the payload with a non zero
byte of data, and the second send call triggers the fragmentation path.
The fragmentation code tries to parse the header options in order
to figure out where to insert the fragment option. Since nexthdr points
to an invalid option, the calculation of the size of the network header
can be made much larger than the linear section of the skb, and data
is read outside of it.
This fix makes ip6_find_1stfragopt return an error if it detects
that it is running out of bounds.
[ 42.361487] ==================================================================
[ 42.364412] BUG: KASAN: slab-out-of-bounds in ip6_fragment+0x11c8/0x3730
[ 42.365471] Read of size 840 at addr ffff88000969e798 by task ip6_fragment-oo/3789
[ 42.366469]
[ 42.366696] CPU: 1 PID: 3789 Comm: ip6_fragment-oo Not tainted 4.11.0+ #41
[ 42.367628] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.1-1ubuntu1 04/01/2014
[ 42.368824] Call Trace:
[ 42.369183] dump_stack+0xb3/0x10b
[ 42.369664] print_address_description+0x73/0x290
[ 42.370325] kasan_report+0x252/0x370
[ 42.370839] ? ip6_fragment+0x11c8/0x3730
[ 42.371396] check_memory_region+0x13c/0x1a0
[ 42.371978] memcpy+0x23/0x50
[ 42.372395] ip6_fragment+0x11c8/0x3730
[ 42.372920] ? nf_ct_expect_unregister_notifier+0x110/0x110
[ 42.373681] ? ip6_copy_metadata+0x7f0/0x7f0
[ 42.374263] ? ip6_forward+0x2e30/0x2e30
[ 42.374803] ip6_finish_output+0x584/0x990
[ 42.375350] ip6_output+0x1b7/0x690
[ 42.375836] ? ip6_finish_output+0x990/0x990
[ 42.376411] ? ip6_fragment+0x3730/0x3730
[ 42.376968] ip6_local_out+0x95/0x160
[ 42.377471] ip6_send_skb+0xa1/0x330
[ 42.377969] ip6_push_pending_frames+0xb3/0xe0
[ 42.378589] rawv6_sendmsg+0x2051/0x2db0
[ 42.379129] ? rawv6_bind+0x8b0/0x8b0
[ 42.379633] ? _copy_from_user+0x84/0xe0
[ 42.380193] ? debug_check_no_locks_freed+0x290/0x290
[ 42.380878] ? ___sys_sendmsg+0x162/0x930
[ 42.381427] ? rcu_read_lock_sched_held+0xa3/0x120
[ 42.382074] ? sock_has_perm+0x1f6/0x290
[ 42.382614] ? ___sys_sendmsg+0x167/0x930
[ 42.383173] ? lock_downgrade+0x660/0x660
[ 42.383727] inet_sendmsg+0x123/0x500
[ 42.384226] ? inet_sendmsg+0x123/0x500
[ 42.384748] ? inet_recvmsg+0x540/0x540
[ 42.385263] sock_sendmsg+0xca/0x110
[ 42.385758] SYSC_sendto+0x217/0x380
[ 42.386249] ? SYSC_connect+0x310/0x310
[ 42.386783] ? __might_fault+0x110/0x1d0
[ 42.387324] ? lock_downgrade+0x660/0x660
[ 42.387880] ? __fget_light+0xa1/0x1f0
[ 42.388403] ? __fdget+0x18/0x20
[ 42.388851] ? sock_common_setsockopt+0x95/0xd0
[ 42.389472] ? SyS_setsockopt+0x17f/0x260
[ 42.390021] ? entry_SYSCALL_64_fastpath+0x5/0xbe
[ 42.390650] SyS_sendto+0x40/0x50
[ 42.391103] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.391731] RIP: 0033:0x7fbbb711e383
[ 42.392217] RSP: 002b:00007ffff4d34f28 EFLAGS: 00000246 ORIG_RAX: 000000000000002c
[ 42.393235] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fbbb711e383
[ 42.394195] RDX: 0000000000001000 RSI: 00007ffff4d34f60 RDI: 0000000000000003
[ 42.395145] RBP: 0000000000000046 R08: 00007ffff4d34f40 R09: 0000000000000018
[ 42.396056] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000400aad
[ 42.396598] R13: 0000000000000066 R14: 00007ffff4d34ee0 R15: 00007fbbb717af00
[ 42.397257]
[ 42.397411] Allocated by task 3789:
[ 42.397702] save_stack_trace+0x16/0x20
[ 42.398005] save_stack+0x46/0xd0
[ 42.398267] kasan_kmalloc+0xad/0xe0
[ 42.398548] kasan_slab_alloc+0x12/0x20
[ 42.398848] __kmalloc_node_track_caller+0xcb/0x380
[ 42.399224] __kmalloc_reserve.isra.32+0x41/0xe0
[ 42.399654] __alloc_skb+0xf8/0x580
[ 42.400003] sock_wmalloc+0xab/0xf0
[ 42.400346] __ip6_append_data.isra.41+0x2472/0x33d0
[ 42.400813] ip6_append_data+0x1a8/0x2f0
[ 42.401122] rawv6_sendmsg+0x11ee/0x2db0
[ 42.401505] inet_sendmsg+0x123/0x500
[ 42.401860] sock_sendmsg+0xca/0x110
[ 42.402209] ___sys_sendmsg+0x7cb/0x930
[ 42.402582] __sys_sendmsg+0xd9/0x190
[ 42.402941] SyS_sendmsg+0x2d/0x50
[ 42.403273] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.403718]
[ 42.403871] Freed by task 1794:
[ 42.404146] save_stack_trace+0x16/0x20
[ 42.404515] save_stack+0x46/0xd0
[ 42.404827] kasan_slab_free+0x72/0xc0
[ 42.405167] kfree+0xe8/0x2b0
[ 42.405462] skb_free_head+0x74/0xb0
[ 42.405806] skb_release_data+0x30e/0x3a0
[ 42.406198] skb_release_all+0x4a/0x60
[ 42.406563] consume_skb+0x113/0x2e0
[ 42.406910] skb_free_datagram+0x1a/0xe0
[ 42.407288] netlink_recvmsg+0x60d/0xe40
[ 42.407667] sock_recvmsg+0xd7/0x110
[ 42.408022] ___sys_recvmsg+0x25c/0x580
[ 42.408395] __sys_recvmsg+0xd6/0x190
[ 42.408753] SyS_recvmsg+0x2d/0x50
[ 42.409086] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 42.409513]
[ 42.409665] The buggy address belongs to the object at ffff88000969e780
[ 42.409665] which belongs to the cache kmalloc-512 of size 512
[ 42.410846] The buggy address is located 24 bytes inside of
[ 42.410846] 512-byte region [ffff88000969e780, ffff88000969e980)
[ 42.411941] The buggy address belongs to the page:
[ 42.412405] page:ffffea000025a780 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
[ 42.413298] flags: 0x100000000008100(slab|head)
[ 42.413729] raw: 0100000000008100 0000000000000000 0000000000000000 00000001800c000c
[ 42.414387] raw: ffffea00002a9500 0000000900000007 ffff88000c401280 0000000000000000
[ 42.415074] page dumped because: kasan: bad access detected
[ 42.415604]
[ 42.415757] Memory state around the buggy address:
[ 42.416222] ffff88000969e880: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.416904] ffff88000969e900: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 42.417591] >ffff88000969e980: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 42.418273] ^
[ 42.418588] ffff88000969ea00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419273] ffff88000969ea80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 42.419882] ==================================================================
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: Craig Gallek <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
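On the caller side, unfrag_ip6hlen above is declared unsigned, so a negative errno from the hardened helper would be silently converted into a huge offset. Assuming the helper now returns a negative value on overrun, each call site needs a signed intermediate:

int err = ip6_find_1stfragopt(skb, &prevhdr);
if (err < 0)
        return ERR_PTR(err);     /* propagate instead of indexing OOB */
unfrag_ip6hlen = err;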
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req)
{
struct inet_request_sock *ireq;
struct ipv6_pinfo *newnp;
const struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt;
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
struct flowi6 fl6;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
req_unhash, own_req);
if (!newsk)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
newtp = tcp_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->saddr = newsk->sk_v6_rcv_saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, tcp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
ireq = inet_rsk(req);
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
if (!dst)
goto out;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, tcp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
newsk->sk_bound_dev_if = ireq->ir_iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = tcp_v6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
Yes, keeping reference count would be much more clever,
but we do one more thing there: reattach optmem
to newsk.
*/
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
if (opt) {
opt = ipv6_dup_options(newsk, opt);
RCU_INIT_POINTER(newnp->opt, opt);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (opt)
inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
opt->opt_flen;
tcp_ca_openreq_child(newsk, dst);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
tcp_initialize_rcv_mss(newsk);
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
if (key) {
/* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
AF_INET6, key->key, key->keylen,
sk_gfp_mask(sk, GFP_ATOMIC));
}
#endif
if (__inet_inherit_port(sk, newsk) < 0) {
inet_csk_prepare_forced_close(newsk);
tcp_done(newsk);
goto out;
}
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
if (*own_req) {
tcp_move_syn(newtp, req);
/* Clone pktoptions received with SYN, if we own the req */
if (ireq->pktopts) {
newnp->pktoptions = skb_clone(ireq->pktopts,
sk_gfp_mask(sk, GFP_ATOMIC));
consume_skb(ireq->pktopts);
ireq->pktopts = NULL;
if (newnp->pktoptions) {
tcp_v6_restore_cb(newnp->pktoptions);
skb_set_owner_r(newnp->pktoptions, newsk);
}
}
}
return newsk;
out_overflow:
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
tcp_listendrop(sk);
return NULL;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-241'], 'message': 'ipv6/dccp: do not inherit ipv6_mc_list from parent
Like commit 657831ffc38e ("dccp/tcp: do not inherit mc_list from parent")
we should clear ipv6_mc_list etc. for IPv6 sockets too.
Cc: Eric Dumazet <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
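memcpy(newnp, np, sizeof(struct ipv6_pinfo)) copies the parent's ipv6_mc_list pointer into the child; the snippet already NULLs ipv6_ac_list and ipv6_fl_list but not the multicast list. Per the message, the fix is one more explicit reset after each of the two memcpy sites:

memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->ipv6_mc_list = NULL;   /* never share list heads with the parent */
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;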
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
struct sctp_association *asoc,
bool kern)
{
struct sock *newsk;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
struct ipv6_txoptions *opt;
newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
if (!newsk)
goto out;
sock_init_data(NULL, newsk);
sctp_copy_sock(newsk, sk, asoc);
sock_reset_flag(sk, SOCK_ZAPPED);
newsctp6sk = (struct sctp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newsctp6sk->inet6;
sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
rcu_read_lock();
opt = rcu_dereference(np->opt);
if (opt)
opt = ipv6_dup_options(newsk, opt);
RCU_INIT_POINTER(newnp->opt, opt);
rcu_read_unlock();
/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
* and getpeername().
*/
sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk_refcnt_debug_inc(newsk);
if (newsk->sk_prot->init(newsk)) {
sk_common_release(newsk);
newsk = NULL;
}
out:
return newsk;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-241'], 'message': 'sctp: do not inherit ipv6_{mc|ac|fl}_list from parent
SCTP needs fixes similar to 83eaddab4378 ("ipv6/dccp: do not inherit
ipv6_mc_list from parent"), otherwise bad things can happen.
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
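Same pattern as the TCP fix above, but this accept path performs the memcpy without clearing any of the three list heads, so all of them have to be reset here, before the options are cloned:

memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
rcu_read_lock();   /* then continue with the opt cloning shown above */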
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void print_bpf_insn(struct bpf_insn *insn)
{
u8 class = BPF_CLASS(insn->code);
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_SRC(insn->code) == BPF_X)
verbose("(%02x) %sr%d %s %sr%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->src_reg);
else
verbose("(%02x) %sr%d %s %s%d\n",
insn->code, class == BPF_ALU ? "(u32) " : "",
insn->dst_reg,
bpf_alu_string[BPF_OP(insn->code) >> 4],
class == BPF_ALU ? "(u32) " : "",
insn->imm);
} else if (class == BPF_STX) {
if (BPF_MODE(insn->code) == BPF_MEM)
verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->src_reg);
else if (BPF_MODE(insn->code) == BPF_XADD)
verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg, insn->off,
insn->src_reg);
else
verbose("BUG_%02x\n", insn->code);
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_st_%02x\n", insn->code);
return;
}
verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose("BUG_ldx_%02x\n", insn->code);
return;
}
verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
insn->code, insn->dst_reg,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->off);
} else if (class == BPF_LD) {
if (BPF_MODE(insn->code) == BPF_ABS) {
verbose("(%02x) r0 = *(%s *)skb[%d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IND) {
verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->src_reg, insn->imm);
} else if (BPF_MODE(insn->code) == BPF_IMM) {
verbose("(%02x) r%d = 0x%x\n",
insn->code, insn->dst_reg, insn->imm);
} else {
verbose("BUG_ld_%02x\n", insn->code);
return;
}
} else if (class == BPF_JMP) {
u8 opcode = BPF_OP(insn->code);
if (opcode == BPF_CALL) {
verbose("(%02x) call %s#%d\n", insn->code,
func_id_name(insn->imm), insn->imm);
} else if (insn->code == (BPF_JMP | BPF_JA)) {
verbose("(%02x) goto pc%+d\n",
insn->code, insn->off);
} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
verbose("(%02x) exit\n", insn->code);
} else if (BPF_SRC(insn->code) == BPF_X) {
verbose("(%02x) if r%d %s r%d goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->src_reg, insn->off);
} else {
verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
insn->code, insn->dst_reg,
bpf_jmp_string[BPF_OP(insn->code) >> 4],
insn->imm, insn->off);
}
} else {
verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
}
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'bpf: don't let ldimm64 leak map addresses on unprivileged
The patch fixes two things at once:
1) It checks the env->allow_ptr_leaks and only prints the map address to
the log if we have the privileges to do so, otherwise it just dumps 0
as we would when kptr_restrict is enabled on %pK. Given the latter is
off by default and not every distro sets it, I don't want to rely on
this, hence the 0 by default for unprivileged.
2) Printing of ldimm64 in the verifier log is currently broken in that
we don't print the full immediate, but only the 32 bit part of the
first insn part for ldimm64. Thus, fix this up as well; it's okay to
access, since we verified all ldimm64 earlier already (including just
constants) through replace_map_fd_with_map_ptr().
Fixes: 1be7f75d1668 ("bpf: enable non-root eBPF programs")
Fixes: cbd357008604 ("bpf: verifier (add ability to receive verification log)")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>'</s> |
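The two fixes in this message translate to one extra branch in the BPF_LD arm above: reconstruct the full 64-bit immediate from both halves of the ldimm64 pair, and mask it to 0 unless pointer leaks are allowed. A hedged sketch (passing the verifier env into the printer is assumed plumbing, not shown):

} else if (BPF_MODE(insn->code) == BPF_IMM &&
           BPF_SIZE(insn->code) == BPF_DW) {
        /* ldimm64 spans two insns: low 32 bits in insn->imm,
         * high 32 bits in (insn + 1)->imm */
        u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

        if (insn->src_reg == BPF_PSEUDO_MAP_FD && !env->allow_ptr_leaks)
                imm = 0;   /* hide map addresses from unprivileged users */
        verbose("(%02x) r%d = 0x%llx\n", insn->code, insn->dst_reg,
                (unsigned long long)imm);
}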
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
tfm->__crt_alg->cra_type == &crypto_givcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
skcipher->setkey = alg->setkey;
skcipher->encrypt = alg->encrypt;
skcipher->decrypt = alg->decrypt;
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (alg->init)
return alg->init(skcipher);
return 0;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476', 'CWE-703'], 'message': 'crypto: skcipher - Add missing API setkey checks
The API setkey checks for key sizes and alignment went AWOL during the
skcipher conversion. This patch restores them.
Cc: <[email protected]>
Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...")
Reported-by: Baozeng <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>'</s> |
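Restoring the checks means init_tfm installs a wrapper instead of assigning alg->setkey directly; the wrapper validates the key length against the algorithm's bounds, and the key pointer against the alignment mask, before dispatching. A simplified sketch of that wrapper (flag and helper names follow the crypto API of that era; the unaligned bounce path is only hinted at):

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);

        if (keylen < alg->min_keysize || keylen > alg->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen); /* copy into an
                                aligned bounce buffer, then call setkey */

        return alg->setkey(tfm, key, keylen);
}

/* and in init_tfm: skcipher->setkey = skcipher_setkey; */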
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
v4l2_kioctl func)
{
char sbuf[128];
void *mbuf = NULL;
void *parg = NULL;
long err = -EINVAL;
int is_ext_ctrl;
size_t ctrls_size = 0;
void __user *user_ptr = NULL;
is_ext_ctrl = (cmd == VIDIOC_S_EXT_CTRLS || cmd == VIDIOC_G_EXT_CTRLS ||
cmd == VIDIOC_TRY_EXT_CTRLS);
/* Copy arguments into temp kernel buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_NONE:
parg = NULL;
break;
case _IOC_READ:
case _IOC_WRITE:
case (_IOC_WRITE | _IOC_READ):
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
parg = sbuf;
} else {
/* too big to allocate from stack */
mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
}
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
goto out;
break;
}
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
/* In case of an error, tell the caller that it wasn't
a specific control that caused it. */
p->error_idx = p->count;
user_ptr = (void __user *)p->controls;
if (p->count) {
ctrls_size = sizeof(struct v4l2_ext_control) * p->count;
/* Note: v4l2_ext_controls fits in sbuf[] so mbuf is still NULL. */
mbuf = kmalloc(ctrls_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_ext_ctrl;
err = -EFAULT;
if (copy_from_user(mbuf, user_ptr, ctrls_size))
goto out_ext_ctrl;
p->controls = mbuf;
}
}
/* call driver */
err = func(file, cmd, parg);
if (err == -ENOIOCTLCMD)
err = -EINVAL;
if (is_ext_ctrl) {
struct v4l2_ext_controls *p = parg;
p->controls = (void *)user_ptr;
if (p->count && err == 0 && copy_to_user(user_ptr, mbuf, ctrls_size))
err = -EFAULT;
goto out_ext_ctrl;
}
if (err < 0)
goto out;
out_ext_ctrl:
/* Copy results into user buffer */
switch (_IOC_DIR(cmd)) {
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
err = -EFAULT;
break;
}
out:
kfree(mbuf);
return err;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-399'], 'message': '[media] v4l: Share code between video_usercopy and video_ioctl2
The two functions are mostly identical. They handle the copy_from_user
and copy_to_user operations related to V4L2 ioctls and call the real
ioctl handler.
Create a __video_usercopy function that implements the core of
video_usercopy and video_ioctl2, and call that function from both.
Signed-off-by: Laurent Pinchart <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
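The refactor described here moves the whole marshalling body above into one static core that both entry points share; a skeleton of the resulting shape (signatures taken from the snippet, the body deliberately elided):

static long __video_usercopy(struct file *file, unsigned int cmd,
                             unsigned long arg, v4l2_kioctl func)
{
        /* copy_from_user into sbuf/mbuf, ext-ctrls handling, func(),
         * copy_to_user back -- the body shown in the snippet above */
        return 0;
}

long video_usercopy(struct file *file, unsigned int cmd,
                    unsigned long arg, v4l2_kioctl func)
{
        return __video_usercopy(file, cmd, arg, func);
}

/* video_ioctl2() becomes the same one-liner, passing the driver's
 * ioctl handler as func. */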
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: do_command (unsigned char c)
{
static int dtr_up = 0;
int newbaud, newflow, newparity, newbits;
const char *xfr_cmd;
char *fname;
int r;
switch (c) {
case KEY_EXIT:
return 1;
case KEY_QUIT:
term_set_hupcl(tty_fd, 0);
term_flush(tty_fd);
term_apply(tty_fd);
term_erase(tty_fd);
return 1;
case KEY_STATUS:
show_status(dtr_up);
break;
case KEY_PULSE:
fd_printf(STO, "\r\n*** pulse DTR ***\r\n");
if ( term_pulse_dtr(tty_fd) < 0 )
fd_printf(STO, "*** FAILED\r\n");
break;
case KEY_TOGGLE:
if ( dtr_up )
r = term_lower_dtr(tty_fd);
else
r = term_raise_dtr(tty_fd);
if ( r >= 0 ) dtr_up = ! dtr_up;
fd_printf(STO, "\r\n*** DTR: %s ***\r\n",
dtr_up ? "up" : "down");
break;
case KEY_BAUD_UP:
case KEY_BAUD_DN:
if (c == KEY_BAUD_UP)
opts.baud = baud_up(opts.baud);
else
opts.baud = baud_down(opts.baud);
term_set_baudrate(tty_fd, opts.baud);
tty_q.len = 0; term_flush(tty_fd);
term_apply(tty_fd);
newbaud = term_get_baudrate(tty_fd, NULL);
if ( opts.baud != newbaud ) {
fd_printf(STO, "\r\n*** baud: %d (%d) ***\r\n",
opts.baud, newbaud);
} else {
fd_printf(STO, "\r\n*** baud: %d ***\r\n", opts.baud);
}
set_tty_write_sz(newbaud);
break;
case KEY_FLOW:
opts.flow = flow_next(opts.flow);
term_set_flowcntrl(tty_fd, opts.flow);
tty_q.len = 0; term_flush(tty_fd);
term_apply(tty_fd);
newflow = term_get_flowcntrl(tty_fd);
if ( opts.flow != newflow ) {
fd_printf(STO, "\r\n*** flow: %s (%s) ***\r\n",
flow_str[opts.flow], flow_str[newflow]);
} else {
fd_printf(STO, "\r\n*** flow: %s ***\r\n",
flow_str[opts.flow]);
}
break;
case KEY_PARITY:
opts.parity = parity_next(opts.parity);
term_set_parity(tty_fd, opts.parity);
tty_q.len = 0; term_flush(tty_fd);
term_apply(tty_fd);
newparity = term_get_parity(tty_fd);
if (opts.parity != newparity ) {
fd_printf(STO, "\r\n*** parity: %s (%s) ***\r\n",
parity_str[opts.parity],
parity_str[newparity]);
} else {
fd_printf(STO, "\r\n*** parity: %s ***\r\n",
parity_str[opts.parity]);
}
break;
case KEY_BITS:
opts.databits = bits_next(opts.databits);
term_set_databits(tty_fd, opts.databits);
tty_q.len = 0; term_flush(tty_fd);
term_apply(tty_fd);
newbits = term_get_databits(tty_fd);
if (opts.databits != newbits ) {
fd_printf(STO, "\r\n*** databits: %d (%d) ***\r\n",
opts.databits, newbits);
} else {
fd_printf(STO, "\r\n*** databits: %d ***\r\n",
opts.databits);
}
break;
case KEY_LECHO:
opts.lecho = ! opts.lecho;
fd_printf(STO, "\r\n*** local echo: %s ***\r\n",
opts.lecho ? "yes" : "no");
break;
case KEY_SEND:
case KEY_RECEIVE:
xfr_cmd = (c == KEY_SEND) ? opts.send_cmd : opts.receive_cmd;
if ( xfr_cmd[0] == '\0' ) {
fd_printf(STO, "\r\n*** command disabled ***\r\n");
break;
}
fname = read_filename();
if (fname == NULL) {
fd_printf(STO, "*** cannot read filename ***\r\n");
break;
}
run_cmd(tty_fd, xfr_cmd, fname, NULL);
free(fname);
break;
case KEY_BREAK:
term_break(tty_fd);
fd_printf(STO, "\r\n*** break sent ***\r\n");
break;
default:
break;
}
return 0;
} ; Sample JSON format conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-77', 'CWE-125'], 'message': 'Do not use "/bin/sh" to run external commands.
Picocom no longer uses /bin/sh to run external commands for
file-transfer operations. Parsing the command line and spliting it into
arguments is now performed internally by picocom, using quoting rules
very similar to those of the Unix shell. Hopefully, this makes it
impossible to inject shell-commands when supplying filenames or
extra arguments to the send- and receive-file commands.'</s> |
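The safer pattern replaces system()/`sh -c`-style invocation with an argv built by the program's own quoting-aware splitter, handed directly to exec; shell metacharacters in a filename then stay inert. A self-contained userspace sketch (the splitter itself is assumed, standing in for picocom's internal one):

#include <unistd.h>
#include <sys/wait.h>

static int run_cmd_noshell(char *const argv[])
{
        pid_t pid = fork();

        if (pid == 0) {
                /* no /bin/sh in the path: a filename like
                 * "foo; rm -rf ~" is one argv entry, not a command */
                execvp(argv[0], argv);
                _exit(127);
        }
        if (pid < 0)
                return -1;

        int status;
        if (waitpid(pid, &status, 0) < 0)
                return -1;
        return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}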
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: jbig2_decode_symbol_dict(Jbig2Ctx *ctx,
Jbig2Segment *segment,
const Jbig2SymbolDictParams *params, const byte *data, size_t size, Jbig2ArithCx *GB_stats, Jbig2ArithCx *GR_stats)
{
Jbig2SymbolDict *SDNEWSYMS = NULL;
Jbig2SymbolDict *SDEXSYMS = NULL;
uint32_t HCHEIGHT;
uint32_t NSYMSDECODED;
uint32_t SYMWIDTH, TOTWIDTH;
uint32_t HCFIRSTSYM;
uint32_t *SDNEWSYMWIDTHS = NULL;
int SBSYMCODELEN = 0;
Jbig2WordStream *ws = NULL;
Jbig2HuffmanState *hs = NULL;
Jbig2HuffmanTable *SDHUFFRDX = NULL;
Jbig2HuffmanTable *SBHUFFRSIZE = NULL;
Jbig2ArithState *as = NULL;
Jbig2ArithIntCtx *IADH = NULL;
Jbig2ArithIntCtx *IADW = NULL;
Jbig2ArithIntCtx *IAEX = NULL;
Jbig2ArithIntCtx *IAAI = NULL;
Jbig2ArithIaidCtx *IAID = NULL;
Jbig2ArithIntCtx *IARDX = NULL;
Jbig2ArithIntCtx *IARDY = NULL;
int code = 0;
Jbig2SymbolDict **refagg_dicts = NULL;
int n_refagg_dicts = 1;
Jbig2TextRegionParams *tparams = NULL;
/* 6.5.5 (3) */
HCHEIGHT = 0;
NSYMSDECODED = 0;
ws = jbig2_word_stream_buf_new(ctx, data, size);
if (ws == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate ws in jbig2_decode_symbol_dict");
return NULL;
}
as = jbig2_arith_new(ctx, ws);
if (as == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate as in jbig2_decode_symbol_dict");
jbig2_word_stream_buf_free(ctx, ws);
return NULL;
}
if (!params->SDHUFF) {
IADH = jbig2_arith_int_ctx_new(ctx);
IADW = jbig2_arith_int_ctx_new(ctx);
IAEX = jbig2_arith_int_ctx_new(ctx);
IAAI = jbig2_arith_int_ctx_new(ctx);
if ((IADH == NULL) || (IADW == NULL) || (IAEX == NULL) || (IAAI == NULL)) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap");
goto cleanup1;
}
if (params->SDREFAGG) {
int64_t tmp = params->SDNUMINSYMS + params->SDNUMNEWSYMS;
for (SBSYMCODELEN = 0; ((int64_t) 1 << SBSYMCODELEN) < tmp; SBSYMCODELEN++);
IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN);
IARDX = jbig2_arith_int_ctx_new(ctx);
IARDY = jbig2_arith_int_ctx_new(ctx);
if ((IAID == NULL) || (IARDX == NULL) || (IARDY == NULL)) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap");
goto cleanup2;
}
}
} else {
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "huffman coded symbol dictionary");
hs = jbig2_huffman_new(ctx, ws);
SDHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O);
SBHUFFRSIZE = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_A);
if ((hs == NULL) || (SDHUFFRDX == NULL) || (SBHUFFRSIZE == NULL)) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "failed to allocate storage for symbol bitmap");
goto cleanup2;
}
if (!params->SDREFAGG) {
SDNEWSYMWIDTHS = jbig2_new(ctx, uint32_t, params->SDNUMNEWSYMS);
if (SDNEWSYMWIDTHS == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate storage for (%u) symbol widths", params->SDNUMNEWSYMS);
goto cleanup2;
}
}
}
SDNEWSYMS = jbig2_sd_new(ctx, params->SDNUMNEWSYMS);
if (SDNEWSYMS == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "could not allocate storage for (%u) new symbols", params->SDNUMNEWSYMS);
goto cleanup2;
}
/* 6.5.5 (4a) */
while (NSYMSDECODED < params->SDNUMNEWSYMS) {
int32_t HCDH, DW;
/* 6.5.6 */
if (params->SDHUFF) {
HCDH = jbig2_huffman_get(hs, params->SDHUFFDH, &code);
} else {
code = jbig2_arith_int_decode(IADH, as, &HCDH);
}
if (code != 0) {
jbig2_error(ctx, JBIG2_SEVERITY_WARNING, segment->number, "error or OOB decoding height class delta (%d)\n", code);
}
if (!params->SDHUFF && jbig2_arith_has_reached_marker(as)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "prevent DOS while decoding height classes");
goto cleanup2;
}
/* 6.5.5 (4b) */
HCHEIGHT = HCHEIGHT + HCDH;
SYMWIDTH = 0;
TOTWIDTH = 0;
HCFIRSTSYM = NSYMSDECODED;
if ((int32_t) HCHEIGHT < 0) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Invalid HCHEIGHT value");
goto cleanup2;
}
#ifdef JBIG2_DEBUG
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "HCHEIGHT = %d", HCHEIGHT);
#endif
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "decoding height class %d with %d syms decoded", HCHEIGHT, NSYMSDECODED);
for (;;) {
/* 6.5.7 */
if (params->SDHUFF) {
DW = jbig2_huffman_get(hs, params->SDHUFFDW, &code);
} else {
code = jbig2_arith_int_decode(IADW, as, &DW);
}
if (code < 0)
goto cleanup4;
/* 6.5.5 (4c.i) */
if (code == 1) {
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, " OOB signals end of height class %d", HCHEIGHT);
break;
}
/* check for broken symbol table */
if (NSYMSDECODED >= params->SDNUMNEWSYMS) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "No OOB signalling end of height class %d", HCHEIGHT);
goto cleanup4;
}
SYMWIDTH = SYMWIDTH + DW;
TOTWIDTH = TOTWIDTH + SYMWIDTH;
if ((int32_t) SYMWIDTH < 0) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Invalid SYMWIDTH value (%d) at symbol %d", SYMWIDTH, NSYMSDECODED + 1);
goto cleanup4;
}
#ifdef JBIG2_DEBUG
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "SYMWIDTH = %d TOTWIDTH = %d", SYMWIDTH, TOTWIDTH);
#endif
/* 6.5.5 (4c.ii) */
if (!params->SDHUFF || params->SDREFAGG) {
#ifdef JBIG2_DEBUG
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "SDHUFF = %d; SDREFAGG = %d", params->SDHUFF, params->SDREFAGG);
#endif
/* 6.5.8 */
if (!params->SDREFAGG) {
Jbig2GenericRegionParams region_params;
int sdat_bytes;
Jbig2Image *image;
/* Table 16 */
region_params.MMR = 0;
region_params.GBTEMPLATE = params->SDTEMPLATE;
region_params.TPGDON = 0;
region_params.USESKIP = 0;
sdat_bytes = params->SDTEMPLATE == 0 ? 8 : 2;
memcpy(region_params.gbat, params->sdat, sdat_bytes);
image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT);
if (image == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate image in jbig2_decode_symbol_dict");
goto cleanup4;
}
code = jbig2_decode_generic_region(ctx, segment, &region_params, as, image, GB_stats);
if (code < 0) {
jbig2_image_release(ctx, image);
goto cleanup4;
}
SDNEWSYMS->glyphs[NSYMSDECODED] = image;
} else {
/* 6.5.8.2 refinement/aggregate symbol */
uint32_t REFAGGNINST;
if (params->SDHUFF) {
REFAGGNINST = jbig2_huffman_get(hs, params->SDHUFFAGGINST, &code);
} else {
code = jbig2_arith_int_decode(IAAI, as, (int32_t *) & REFAGGNINST);
}
if (code || (int32_t) REFAGGNINST <= 0) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "invalid number of symbols or OOB in aggregate glyph");
goto cleanup4;
}
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "aggregate symbol coding (%d instances)", REFAGGNINST);
if (REFAGGNINST > 1) {
Jbig2Image *image;
uint32_t i;
if (tparams == NULL) {
/* First time through, we need to initialise the */
/* various tables for Huffman or adaptive encoding */
/* as well as the text region parameters structure */
refagg_dicts = jbig2_new(ctx, Jbig2SymbolDict *, n_refagg_dicts);
if (refagg_dicts == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory allocating dictionary array");
goto cleanup4;
}
refagg_dicts[0] = jbig2_sd_new(ctx, params->SDNUMINSYMS + params->SDNUMNEWSYMS);
if (refagg_dicts[0] == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory allocating symbol dictionary");
goto cleanup4;
}
for (i = 0; i < params->SDNUMINSYMS; i++) {
refagg_dicts[0]->glyphs[i] = jbig2_image_clone(ctx, params->SDINSYMS->glyphs[i]);
}
tparams = jbig2_new(ctx, Jbig2TextRegionParams, 1);
if (tparams == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating text region params");
goto cleanup4;
}
if (!params->SDHUFF) {
/* Values from Table 17, section 6.5.8.2 (2) */
tparams->IADT = jbig2_arith_int_ctx_new(ctx);
tparams->IAFS = jbig2_arith_int_ctx_new(ctx);
tparams->IADS = jbig2_arith_int_ctx_new(ctx);
tparams->IAIT = jbig2_arith_int_ctx_new(ctx);
/* Table 31 */
for (SBSYMCODELEN = 0; (1 << SBSYMCODELEN) < (int)(params->SDNUMINSYMS + params->SDNUMNEWSYMS); SBSYMCODELEN++);
tparams->IAID = jbig2_arith_iaid_ctx_new(ctx, SBSYMCODELEN);
tparams->IARI = jbig2_arith_int_ctx_new(ctx);
tparams->IARDW = jbig2_arith_int_ctx_new(ctx);
tparams->IARDH = jbig2_arith_int_ctx_new(ctx);
tparams->IARDX = jbig2_arith_int_ctx_new(ctx);
tparams->IARDY = jbig2_arith_int_ctx_new(ctx);
} else {
tparams->SBHUFFFS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_F); /* Table B.6 */
tparams->SBHUFFDS = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_H); /* Table B.8 */
tparams->SBHUFFDT = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_K); /* Table B.11 */
tparams->SBHUFFRDW = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */
tparams->SBHUFFRDH = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */
tparams->SBHUFFRDX = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */
tparams->SBHUFFRDY = jbig2_build_huffman_table(ctx, &jbig2_huffman_params_O); /* Table B.15 */
}
tparams->SBHUFF = params->SDHUFF;
tparams->SBREFINE = 1;
tparams->SBSTRIPS = 1;
tparams->SBDEFPIXEL = 0;
tparams->SBCOMBOP = JBIG2_COMPOSE_OR;
tparams->TRANSPOSED = 0;
tparams->REFCORNER = JBIG2_CORNER_TOPLEFT;
tparams->SBDSOFFSET = 0;
tparams->SBRTEMPLATE = params->SDRTEMPLATE;
}
tparams->SBNUMINSTANCES = REFAGGNINST;
image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT);
if (image == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating symbol image");
goto cleanup4;
}
/* multiple symbols are handled as a text region */
jbig2_decode_text_region(ctx, segment, tparams, (const Jbig2SymbolDict * const *)refagg_dicts,
n_refagg_dicts, image, data, size, GR_stats, as, ws);
SDNEWSYMS->glyphs[NSYMSDECODED] = image;
refagg_dicts[0]->glyphs[params->SDNUMINSYMS + NSYMSDECODED] = jbig2_image_clone(ctx, SDNEWSYMS->glyphs[NSYMSDECODED]);
} else {
/* 6.5.8.2.2 */
/* bool SBHUFF = params->SDHUFF; */
Jbig2RefinementRegionParams rparams;
Jbig2Image *image;
uint32_t ID;
int32_t RDX, RDY;
int BMSIZE = 0;
uint32_t ninsyms = params->SDNUMINSYMS;
int code1 = 0;
int code2 = 0;
int code3 = 0;
int code4 = 0;
/* 6.5.8.2.2 (2, 3, 4, 5) */
if (params->SDHUFF) {
ID = jbig2_huffman_get_bits(hs, SBSYMCODELEN, &code4);
RDX = jbig2_huffman_get(hs, SDHUFFRDX, &code1);
RDY = jbig2_huffman_get(hs, SDHUFFRDX, &code2);
BMSIZE = jbig2_huffman_get(hs, SBHUFFRSIZE, &code3);
jbig2_huffman_skip(hs);
} else {
code1 = jbig2_arith_iaid_decode(IAID, as, (int32_t *) & ID);
code2 = jbig2_arith_int_decode(IARDX, as, &RDX);
code3 = jbig2_arith_int_decode(IARDY, as, &RDY);
}
if ((code1 < 0) || (code2 < 0) || (code3 < 0) || (code4 < 0)) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode data");
goto cleanup4;
}
if (ID >= ninsyms + NSYMSDECODED) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "refinement references unknown symbol %d", ID);
goto cleanup4;
}
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number,
"symbol is a refinement of id %d with the " "refinement applied at (%d,%d)", ID, RDX, RDY);
image = jbig2_image_new(ctx, SYMWIDTH, HCHEIGHT);
if (image == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "Out of memory creating symbol image");
goto cleanup4;
}
/* Table 18 */
rparams.GRTEMPLATE = params->SDRTEMPLATE;
rparams.reference = (ID < ninsyms) ? params->SDINSYMS->glyphs[ID] : SDNEWSYMS->glyphs[ID - ninsyms];
/* SumatraPDF: fail on missing glyphs */
if (rparams.reference == NULL) {
code = jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "missing glyph %d/%d!", ID, ninsyms);
jbig2_image_release(ctx, image);
goto cleanup4;
}
rparams.DX = RDX;
rparams.DY = RDY;
rparams.TPGRON = 0;
memcpy(rparams.grat, params->sdrat, 4);
code = jbig2_decode_refinement_region(ctx, segment, &rparams, as, image, GR_stats);
if (code < 0)
goto cleanup4;
SDNEWSYMS->glyphs[NSYMSDECODED] = image;
/* 6.5.8.2.2 (7) */
if (params->SDHUFF) {
if (BMSIZE == 0)
BMSIZE = image->height * image->stride;
jbig2_huffman_advance(hs, BMSIZE);
}
}
}
#ifdef OUTPUT_PBM
{
char name[64];
FILE *out;
snprintf(name, 64, "sd.%04d.%04d.pbm", segment->number, NSYMSDECODED);
out = fopen(name, "wb");
jbig2_image_write_pbm(SDNEWSYMS->glyphs[NSYMSDECODED], out);
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "writing out glyph as '%s' ...", name);
fclose(out);
}
#endif
}
/* 6.5.5 (4c.iii) */
if (params->SDHUFF && !params->SDREFAGG) {
SDNEWSYMWIDTHS[NSYMSDECODED] = SYMWIDTH;
}
/* 6.5.5 (4c.iv) */
NSYMSDECODED = NSYMSDECODED + 1;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number, "decoded symbol %u of %u (%ux%u)", NSYMSDECODED, params->SDNUMNEWSYMS, SYMWIDTH, HCHEIGHT);
} /* end height class decode loop */
/* 6.5.5 (4d) */
if (params->SDHUFF && !params->SDREFAGG) {
/* 6.5.9 */
Jbig2Image *image;
uint32_t BMSIZE = jbig2_huffman_get(hs, params->SDHUFFBMSIZE, &code);
uint32_t j;
int x;
if (code) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "error decoding size of collective bitmap!");
goto cleanup4;
}
/* skip any bits before the next byte boundary */
jbig2_huffman_skip(hs);
image = jbig2_image_new(ctx, TOTWIDTH, HCHEIGHT);
if (image == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "could not allocate collective bitmap image!");
goto cleanup4;
}
if (BMSIZE == 0) {
/* if BMSIZE == 0 bitmap is uncompressed */
const byte *src = data + jbig2_huffman_offset(hs);
const int stride = (image->width >> 3) + ((image->width & 7) ? 1 : 0);
byte *dst = image->data;
/* SumatraPDF: prevent read access violation */
if ((size - jbig2_huffman_offset(hs) < image->height * stride) || (size < jbig2_huffman_offset(hs))) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "not enough data for decoding (%d/%d)", image->height * stride,
size - jbig2_huffman_offset(hs));
jbig2_image_release(ctx, image);
goto cleanup4;
}
BMSIZE = image->height * stride;
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number,
"reading %dx%d uncompressed bitmap" " for %d symbols (%d bytes)", image->width, image->height, NSYMSDECODED - HCFIRSTSYM, BMSIZE);
for (j = 0; j < image->height; j++) {
memcpy(dst, src, stride);
dst += image->stride;
src += stride;
}
} else {
Jbig2GenericRegionParams rparams;
/* SumatraPDF: prevent read access violation */
if (size - jbig2_huffman_offset(hs) < BMSIZE) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "not enough data for decoding (%d/%d)", BMSIZE, size - jbig2_huffman_offset(hs));
jbig2_image_release(ctx, image);
goto cleanup4;
}
jbig2_error(ctx, JBIG2_SEVERITY_DEBUG, segment->number,
"reading %dx%d collective bitmap for %d symbols (%d bytes)", image->width, image->height, NSYMSDECODED - HCFIRSTSYM, BMSIZE);
rparams.MMR = 1;
code = jbig2_decode_generic_mmr(ctx, segment, &rparams, data + jbig2_huffman_offset(hs), BMSIZE, image);
if (code) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "error decoding MMR bitmap image!");
jbig2_image_release(ctx, image);
goto cleanup4;
}
}
/* advance past the data we've just read */
jbig2_huffman_advance(hs, BMSIZE);
/* copy the collective bitmap into the symbol dictionary */
x = 0;
for (j = HCFIRSTSYM; j < NSYMSDECODED; j++) {
Jbig2Image *glyph;
glyph = jbig2_image_new(ctx, SDNEWSYMWIDTHS[j], HCHEIGHT);
if (glyph == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to copy the collective bitmap into symbol dictionary");
jbig2_image_release(ctx, image);
goto cleanup4;
}
jbig2_image_compose(ctx, glyph, image, -x, 0, JBIG2_COMPOSE_REPLACE);
x += SDNEWSYMWIDTHS[j];
SDNEWSYMS->glyphs[j] = glyph;
}
jbig2_image_release(ctx, image);
}
} /* end of symbol decode loop */
/* 6.5.10 */
SDEXSYMS = jbig2_sd_new(ctx, params->SDNUMEXSYMS);
if (SDEXSYMS == NULL) {
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to allocate symbols exported from symbols dictionary");
goto cleanup4;
} else {
uint32_t i = 0;
uint32_t j = 0;
uint32_t k;
int exflag = 0;
uint32_t limit = params->SDNUMINSYMS + params->SDNUMNEWSYMS;
uint32_t exrunlength;
int zerolength = 0;
while (i < limit) {
if (params->SDHUFF)
exrunlength = jbig2_huffman_get(hs, SBHUFFRSIZE, &code);
else
code = jbig2_arith_int_decode(IAEX, as, (int32_t *)&exrunlength);
/* prevent infinite loop */
zerolength = exrunlength > 0 ? 0 : zerolength + 1;
if (code || (exrunlength > limit - i) || (zerolength > 4) || (exflag && (exrunlength + j > params->SDNUMEXSYMS))) {
if (code)
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "failed to decode exrunlength for exported symbols");
else if (exrunlength <= 0)
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number, "runlength too small in export symbol table (%d <= 0)\n", exrunlength);
else
jbig2_error(ctx, JBIG2_SEVERITY_FATAL, segment->number,
"runlength too large in export symbol table (%d > %d - %d)\n", exrunlength, params->SDNUMEXSYMS, j);
/* skip to the cleanup code and return SDEXSYMS = NULL */
jbig2_sd_release(ctx, SDEXSYMS);
SDEXSYMS = NULL;
break;
}
for (k = 0; k < exrunlength; k++) {
if (exflag) {
SDEXSYMS->glyphs[j++] = (i < params->SDNUMINSYMS) ?
jbig2_image_clone(ctx, params->SDINSYMS->glyphs[i]) : jbig2_image_clone(ctx, SDNEWSYMS->glyphs[i - params->SDNUMINSYMS]);
}
i++;
}
exflag = !exflag;
}
}
cleanup4:
if (tparams != NULL) {
if (!params->SDHUFF) {
jbig2_arith_int_ctx_free(ctx, tparams->IADT);
jbig2_arith_int_ctx_free(ctx, tparams->IAFS);
jbig2_arith_int_ctx_free(ctx, tparams->IADS);
jbig2_arith_int_ctx_free(ctx, tparams->IAIT);
jbig2_arith_iaid_ctx_free(ctx, tparams->IAID);
jbig2_arith_int_ctx_free(ctx, tparams->IARI);
jbig2_arith_int_ctx_free(ctx, tparams->IARDW);
jbig2_arith_int_ctx_free(ctx, tparams->IARDH);
jbig2_arith_int_ctx_free(ctx, tparams->IARDX);
jbig2_arith_int_ctx_free(ctx, tparams->IARDY);
} else {
jbig2_release_huffman_table(ctx, tparams->SBHUFFFS);
jbig2_release_huffman_table(ctx, tparams->SBHUFFDS);
jbig2_release_huffman_table(ctx, tparams->SBHUFFDT);
jbig2_release_huffman_table(ctx, tparams->SBHUFFRDX);
jbig2_release_huffman_table(ctx, tparams->SBHUFFRDY);
jbig2_release_huffman_table(ctx, tparams->SBHUFFRDW);
jbig2_release_huffman_table(ctx, tparams->SBHUFFRDH);
}
jbig2_free(ctx->allocator, tparams);
}
if (refagg_dicts != NULL) {
if (refagg_dicts[0] != NULL)
jbig2_sd_release(ctx, refagg_dicts[0]);
jbig2_free(ctx->allocator, refagg_dicts);
}
cleanup2:
jbig2_sd_release(ctx, SDNEWSYMS);
if (params->SDHUFF && !params->SDREFAGG) {
jbig2_free(ctx->allocator, SDNEWSYMWIDTHS);
}
jbig2_release_huffman_table(ctx, SDHUFFRDX);
jbig2_release_huffman_table(ctx, SBHUFFRSIZE);
jbig2_huffman_free(ctx, hs);
jbig2_arith_iaid_ctx_free(ctx, IAID);
jbig2_arith_int_ctx_free(ctx, IARDX);
jbig2_arith_int_ctx_free(ctx, IARDY);
cleanup1:
jbig2_word_stream_buf_free(ctx, ws);
jbig2_free(ctx->allocator, as);
jbig2_arith_int_ctx_free(ctx, IADH);
jbig2_arith_int_ctx_free(ctx, IADW);
jbig2_arith_int_ctx_free(ctx, IAEX);
jbig2_arith_int_ctx_free(ctx, IAAI);
return SDEXSYMS;
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-476'], 'message': 'Bug 697934: Fix SEGV due to error code being ignored.
The return code from jbig2_decode_text_region was being ignored so the
code continued to try and parse the invalid file using incomplete/empty
structures.'</s> |
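For reference, a minimal sketch of the fix this message describes, reusing the local names and cleanup label from the sample above; the upstream patch may differ in detail. The point is simply that the return code is captured and acted on instead of being discarded:

code = jbig2_decode_text_region(ctx, segment, tparams, (const Jbig2SymbolDict * const *)refagg_dicts,
                                n_refagg_dicts, image, data, size, GR_stats, as, ws);
if (code < 0) {
    jbig2_image_release(ctx, image); /* drop the partially decoded image */
    goto cleanup4;                   /* stop instead of parsing on with incomplete structures */
}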
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
int completion)
{
int dir;
size_t len = 0;
const char *str = NULL;
int pid;
int ret;
int i;
USBDevice *dev;
USBEndpoint *ep;
struct ohci_iso_td iso_td;
uint32_t addr;
uint16_t starting_frame;
int16_t relative_frame_number;
int frame_count;
uint32_t start_offset, next_offset, end_offset = 0;
uint32_t start_addr, end_addr;
addr = ed->head & OHCI_DPTR_MASK;
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
return 0;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
frame_count = OHCI_BM(iso_td.flags, TD_FC);
relative_frame_number = USUB(ohci->frame_number, starting_frame);
trace_usb_ohci_iso_td_head(
ed->head & OHCI_DPTR_MASK, ed->tail & OHCI_DPTR_MASK,
iso_td.flags, iso_td.bp, iso_td.next, iso_td.be,
ohci->frame_number, starting_frame,
frame_count, relative_frame_number);
trace_usb_ohci_iso_td_head_offset(
iso_td.offset[0], iso_td.offset[1],
iso_td.offset[2], iso_td.offset[3],
iso_td.offset[4], iso_td.offset[5],
iso_td.offset[6], iso_td.offset[7]);
if (relative_frame_number < 0) {
trace_usb_ohci_iso_td_relative_frame_number_neg(relative_frame_number);
return 1;
} else if (relative_frame_number > frame_count) {
/* ISO TD expired - retire the TD to the Done Queue and continue with
the next ISO TD of the same ED */
trace_usb_ohci_iso_td_relative_frame_number_big(relative_frame_number,
frame_count);
OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_DATAOVERRUN);
ed->head &= ~OHCI_DPTR_MASK;
ed->head |= (iso_td.next & OHCI_DPTR_MASK);
iso_td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(iso_td.flags, TD_DI);
if (i < ohci->done_count)
ohci->done_count = i;
if (ohci_put_iso_td(ohci, addr, &iso_td)) {
ohci_die(ohci);
return 1;
}
return 0;
}
dir = OHCI_BM(ed->flags, ED_D);
switch (dir) {
case OHCI_TD_DIR_IN:
str = "in";
pid = USB_TOKEN_IN;
break;
case OHCI_TD_DIR_OUT:
str = "out";
pid = USB_TOKEN_OUT;
break;
case OHCI_TD_DIR_SETUP:
str = "setup";
pid = USB_TOKEN_SETUP;
break;
default:
trace_usb_ohci_iso_td_bad_direction(dir);
return 1;
}
if (!iso_td.bp || !iso_td.be) {
trace_usb_ohci_iso_td_bad_bp_be(iso_td.bp, iso_td.be);
return 1;
}
start_offset = iso_td.offset[relative_frame_number];
next_offset = iso_td.offset[relative_frame_number + 1];
if (!(OHCI_BM(start_offset, TD_PSW_CC) & 0xe) ||
((relative_frame_number < frame_count) &&
!(OHCI_BM(next_offset, TD_PSW_CC) & 0xe))) {
trace_usb_ohci_iso_td_bad_cc_not_accessed(start_offset, next_offset);
return 1;
}
if ((relative_frame_number < frame_count) && (start_offset > next_offset)) {
trace_usb_ohci_iso_td_bad_cc_overrun(start_offset, next_offset);
return 1;
}
if ((start_offset & 0x1000) == 0) {
start_addr = (iso_td.bp & OHCI_PAGE_MASK) |
(start_offset & OHCI_OFFSET_MASK);
} else {
start_addr = (iso_td.be & OHCI_PAGE_MASK) |
(start_offset & OHCI_OFFSET_MASK);
}
if (relative_frame_number < frame_count) {
end_offset = next_offset - 1;
if ((end_offset & 0x1000) == 0) {
end_addr = (iso_td.bp & OHCI_PAGE_MASK) |
(end_offset & OHCI_OFFSET_MASK);
} else {
end_addr = (iso_td.be & OHCI_PAGE_MASK) |
(end_offset & OHCI_OFFSET_MASK);
}
} else {
/* Last packet in the ISO TD */
end_addr = iso_td.be;
}
if ((start_addr & OHCI_PAGE_MASK) != (end_addr & OHCI_PAGE_MASK)) {
len = (end_addr & OHCI_OFFSET_MASK) + 0x1001
- (start_addr & OHCI_OFFSET_MASK);
} else {
len = end_addr - start_addr + 1;
}
if (len && dir != OHCI_TD_DIR_IN) {
if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len,
DMA_DIRECTION_TO_DEVICE)) {
ohci_die(ohci);
return 1;
}
}
if (!completion) {
bool int_req = relative_frame_number == frame_count &&
OHCI_BM(iso_td.flags, TD_DI) == 0;
dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA));
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, false, int_req);
usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, len);
usb_handle_packet(dev, &ohci->usb_packet);
if (ohci->usb_packet.status == USB_RET_ASYNC) {
usb_device_flush_ep_queue(dev, ep);
return 1;
}
}
if (ohci->usb_packet.status == USB_RET_SUCCESS) {
ret = ohci->usb_packet.actual_length;
} else {
ret = ohci->usb_packet.status;
}
trace_usb_ohci_iso_td_so(start_offset, end_offset, start_addr, end_addr,
str, len, ret);
/* Writeback */
if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) {
/* IN transfer succeeded */
if (ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret,
DMA_DIRECTION_FROM_DEVICE)) {
ohci_die(ohci);
return 1;
}
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_NOERROR);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, ret);
} else if (dir == OHCI_TD_DIR_OUT && ret == len) {
/* OUT transfer succeeded */
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_NOERROR);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, 0);
} else {
if (ret > (ssize_t) len) {
trace_usb_ohci_iso_td_data_overrun(ret, len);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_DATAOVERRUN);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
len);
} else if (ret >= 0) {
trace_usb_ohci_iso_td_data_underrun(ret);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_DATAUNDERRUN);
} else {
switch (ret) {
case USB_RET_IOERROR:
case USB_RET_NODEV:
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_DEVICENOTRESPONDING);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
0);
break;
case USB_RET_NAK:
case USB_RET_STALL:
trace_usb_ohci_iso_td_nak(ret);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_STALL);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE,
0);
break;
default:
trace_usb_ohci_iso_td_bad_response(ret);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_UNDEXPETEDPID);
break;
}
}
}
if (relative_frame_number == frame_count) {
/* Last data packet of ISO TD - retire the TD to the Done Queue */
OHCI_SET_BM(iso_td.flags, TD_CC, OHCI_CC_NOERROR);
ed->head &= ~OHCI_DPTR_MASK;
ed->head |= (iso_td.next & OHCI_DPTR_MASK);
iso_td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(iso_td.flags, TD_DI);
if (i < ohci->done_count)
ohci->done_count = i;
}
if (ohci_put_iso_td(ohci, addr, &iso_td)) {
ohci_die(ohci);
}
return 1;
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-835'], 'message': 'usb: ohci: fix error return code in servicing iso td
It should return 1 if an error occurs when reading iso td.
This will avoid an infinite loop issue in ohci_service_ed_list.
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]>'</s> |
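A minimal sketch of that fix, assuming only the error branch of the ISO TD read changes and everything else in the sample stays as is:

if (ohci_read_iso_td(ohci, addr, &iso_td)) {
    trace_usb_ohci_iso_td_read_failed(addr);
    ohci_die(ohci);
    return 1; /* was 0: a non-zero result stops ohci_service_ed_list
                 from re-servicing the same broken TD forever */
}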
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
int i;
dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
map->count,vma->vm_start,vma->vm_end);
map->count--;
if (0 == map->count) {
dprintk(1,"munmap %p q=%p\n",map,q);
mutex_lock(&q->lock);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->map != map)
continue;
q->ops->buf_release(q,q->bufs[i]);
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
mutex_unlock(&q->lock);
kfree(map);
}
return;
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'V4L/DVB (6751): V4L: Memory leak! Fix count in videobuf-vmalloc mmap
This is a pretty serious bug. map->count is never initialized after the
call to kmalloc making the count start at some random trash value. The
end result is leaking videobufs.
Also, fix up the debug statements to print unsigned values.
Pushed to http://ifup.org/hg/v4l-dvb too
Signed-off-by: Brandon Philips <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>'</s> |
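The allocation site is outside the sample, so the following is a hypothetical sketch of the initialization the message calls for; the surrounding mmap handler and its locals are assumptions:

/* hypothetical videobuf_mmap_mapper() excerpt */
map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (NULL == map)
    return -ENOMEM;
map->count = 1; /* previously never set: the refcount started as heap garbage */
map->q = q;
vma->vm_private_data = map;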
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: void ahci_uninit(AHCIState *s)
{
g_free(s->dev);
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-772', 'CWE-401'], 'message': 'ide: ahci: call cleanup function in ahci unit
This can avoid memory leak when hotunplug the ahci device.
Signed-off-by: Li Qiang <[email protected]>
Message-id: [email protected]
Signed-off-by: John Snow <[email protected]>'</s> |
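A sketch of the cleanup the message describes, assuming this QEMU tree has an s->ports count and a per-interface ide_exit() teardown hook; both names are assumptions here:

void ahci_uninit(AHCIState *s)
{
    int i, j;
    for (i = 0; i < s->ports; i++) {    /* s->ports: assumed field */
        AHCIDevice *ad = &s->dev[i];
        for (j = 0; j < 2; j++) {
            ide_exit(&ad->port.ifs[j]); /* release per-IDE state before freeing the array */
        }
    }
    g_free(s->dev);
}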
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static void xhci_kick_epctx(XHCIEPContext *epctx, unsigned int streamid)
{
XHCIState *xhci = epctx->xhci;
XHCIStreamContext *stctx;
XHCITransfer *xfer;
XHCIRing *ring;
USBEndpoint *ep = NULL;
uint64_t mfindex;
int length;
int i;
trace_usb_xhci_ep_kick(epctx->slotid, epctx->epid, streamid);
/* If the device has been detached, but the guest has not noticed this
yet the 2 above checks will succeed, but we must NOT continue */
if (!xhci->slots[epctx->slotid - 1].uport ||
!xhci->slots[epctx->slotid - 1].uport->dev ||
!xhci->slots[epctx->slotid - 1].uport->dev->attached) {
return;
}
if (epctx->retry) {
XHCITransfer *xfer = epctx->retry;
trace_usb_xhci_xfer_retry(xfer);
assert(xfer->running_retry);
if (xfer->timed_xfer) {
/* time to kick the transfer? */
mfindex = xhci_mfindex_get(xhci);
xhci_check_intr_iso_kick(xhci, xfer, epctx, mfindex);
if (xfer->running_retry) {
return;
}
xfer->timed_xfer = 0;
xfer->running_retry = 1;
}
if (xfer->iso_xfer) {
/* retry iso transfer */
if (xhci_setup_packet(xfer) < 0) {
return;
}
usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
assert(xfer->packet.status != USB_RET_NAK);
xhci_try_complete_packet(xfer);
} else {
/* retry nak'ed transfer */
if (xhci_setup_packet(xfer) < 0) {
return;
}
usb_handle_packet(xfer->packet.ep->dev, &xfer->packet);
if (xfer->packet.status == USB_RET_NAK) {
return;
}
xhci_try_complete_packet(xfer);
}
assert(!xfer->running_retry);
if (xfer->complete) {
xhci_ep_free_xfer(epctx->retry);
}
epctx->retry = NULL;
}
if (epctx->state == EP_HALTED) {
DPRINTF("xhci: ep halted, not running schedule\n");
return;
}
if (epctx->nr_pstreams) {
uint32_t err;
stctx = xhci_find_stream(epctx, streamid, &err);
if (stctx == NULL) {
return;
}
ring = &stctx->ring;
xhci_set_ep_state(xhci, epctx, stctx, EP_RUNNING);
} else {
ring = &epctx->ring;
streamid = 0;
xhci_set_ep_state(xhci, epctx, NULL, EP_RUNNING);
}
assert(ring->dequeue != 0);
while (1) {
length = xhci_ring_chain_length(xhci, ring);
if (length <= 0) {
break;
}
xfer = xhci_ep_alloc_xfer(epctx, length);
if (xfer == NULL) {
break;
}
for (i = 0; i < length; i++) {
TRBType type;
type = xhci_ring_fetch(xhci, ring, &xfer->trbs[i], NULL);
assert(type);
}
xfer->streamid = streamid;
if (epctx->epid == 1) {
xhci_fire_ctl_transfer(xhci, xfer);
} else {
xhci_fire_transfer(xhci, xfer, epctx);
}
if (xfer->complete) {
xhci_ep_free_xfer(xfer);
xfer = NULL;
}
if (epctx->state == EP_HALTED) {
break;
}
if (xfer != NULL && xfer->running_retry) {
DPRINTF("xhci: xfer nacked, stopping schedule\n");
epctx->retry = xfer;
break;
}
}
ep = xhci_epid_to_usbep(epctx);
if (ep) {
usb_device_flush_ep_queue(ep->dev, ep);
}
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-835'], 'message': 'xhci: guard xhci_kick_epctx against recursive calls
Track xhci_kick_epctx processing being active in a variable. Check the
variable before calling xhci_kick_epctx from xhci_kick_ep. Add an
assert to make sure we don't call recursively into xhci_kick_epctx.
Cc: [email protected]
Fixes: 94b037f2a451b3dc855f9f2c346e5049a361bd55
Reported-by: Fabian Lesniak <[email protected]>
Signed-off-by: Gerd Hoffmann <[email protected]>
Message-id: [email protected]
Message-id: [email protected]'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: bool TestRandomPool()
{
std::cout << "\nTesting RandomPool generator...\n\n";
bool pass=true, fail;
{
RandomPool prng;
static const unsigned int ENTROPY_SIZE = 32;
MeterFilter meter(new Redirector(TheBitBucket()));
RandomNumberSource test(prng, 100000, true, new Deflator(new Redirector(meter)));
fail = false;
if (meter.GetTotalBytes() < 100000)
fail = true;
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " 100000 generated bytes compressed to " << meter.GetTotalBytes() << " bytes by DEFLATE\n";
try
{
fail = false;
prng.DiscardBytes(100000);
}
catch (const Exception&)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " discarded 10000 bytes" << std::endl;
try
{
fail = false;
if(prng.CanIncorporateEntropy())
{
SecByteBlock entropy(ENTROPY_SIZE);
GlobalRNG().GenerateBlock(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
}
}
catch (const Exception& /*ex*/)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " IncorporateEntropy with " << 4*ENTROPY_SIZE << " bytes\n";
try
{
// Miscellaneous for code coverage
(void)prng.AlgorithmName(); // "unknown"
word32 result = prng.GenerateWord32();
result = prng.GenerateWord32((result & 0xff), 0xffffffff - (result & 0xff));
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 4);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 3);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 2);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 1);
}
catch (const Exception&)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " GenerateWord32 and Crop\n";
}
#if !defined(NO_OS_DEPENDENCE)
std::cout << "\nTesting AutoSeeded RandomPool generator...\n\n";
{
AutoSeededRandomPool prng;
static const unsigned int ENTROPY_SIZE = 32;
MeterFilter meter(new Redirector(TheBitBucket()));
RandomNumberSource test(prng, 100000, true, new Deflator(new Redirector(meter)));
fail = false;
if (meter.GetTotalBytes() < 100000)
fail = true;
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " 100000 generated bytes compressed to " << meter.GetTotalBytes() << " bytes by DEFLATE\n";
try
{
fail = false;
prng.DiscardBytes(100000);
}
catch (const Exception&)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " discarded 10000 bytes" << std::endl;
try
{
fail = false;
if(prng.CanIncorporateEntropy())
{
SecByteBlock entropy(ENTROPY_SIZE);
GlobalRNG().GenerateBlock(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
prng.IncorporateEntropy(entropy, entropy.SizeInBytes());
}
}
catch (const Exception& /*ex*/)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " IncorporateEntropy with " << 4*ENTROPY_SIZE << " bytes\n";
try
{
// Miscellaneous for code coverage
fail = false;
(void)prng.AlgorithmName(); // "unknown"
word32 result = prng.GenerateWord32();
result = prng.GenerateWord32((result & 0xff), 0xffffffff - (result & 0xff));
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 4);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 3);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 2);
prng.GenerateBlock(reinterpret_cast<byte*>(&result), 1);
}
catch (const Exception&)
{
fail = true;
}
pass &= !fail;
if (fail)
std::cout << "FAILED:";
else
std::cout << "passed:";
std::cout << " GenerateWord32 and Crop\n";
}
#endif
std::cout.flush();
return pass;
}
; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190', 'CWE-125'], 'message': 'Add Inflator::BadDistanceErr exception (Issue 414)
The improved validation and exception clears the Address Sanitizer and Undefined Behavior Sanitizer findings'</s>
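The fix lives in the Inflator rather than in this test driver, so this is only a generic sketch of the validation the message implies; every identifier below is hypothetical, and Crypto++ itself signals the condition by throwing Inflator::BadDistanceErr:

/* reject a DEFLATE distance that reaches outside the bytes actually
   written to the sliding window, before it is used as an index */
if (distance == 0 || distance > bytes_in_window)
    return BAD_DISTANCE; /* hypothetical error code; the library throws instead */
copy_from_window(out, window, distance, match_length); /* hypothetical helper */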
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static Image *ReadICONImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
IconFile
icon_file;
IconInfo
icon_info;
Image
*image;
MagickBooleanType
status;
MagickSizeType
extent;
register ssize_t
i,
x;
register Quantum
*q;
register unsigned char
*p;
size_t
bit,
byte,
bytes_per_line,
one,
scanline_pad;
ssize_t
count,
offset,
y;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"%s",image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
icon_file.reserved=(short) ReadBlobLSBShort(image);
icon_file.resource_type=(short) ReadBlobLSBShort(image);
icon_file.count=(short) ReadBlobLSBShort(image);
if ((icon_file.reserved != 0) ||
((icon_file.resource_type != 1) && (icon_file.resource_type != 2)) ||
(icon_file.count > MaxIcons))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
extent=0;
for (i=0; i < icon_file.count; i++)
{
icon_file.directory[i].width=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].height=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].colors=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].reserved=(unsigned char) ReadBlobByte(image);
icon_file.directory[i].planes=(unsigned short) ReadBlobLSBShort(image);
icon_file.directory[i].bits_per_pixel=(unsigned short)
ReadBlobLSBShort(image);
icon_file.directory[i].size=ReadBlobLSBLong(image);
icon_file.directory[i].offset=ReadBlobLSBLong(image);
if (EOFBlob(image) != MagickFalse)
break;
extent=MagickMax(extent,icon_file.directory[i].size);
}
if ((EOFBlob(image) != MagickFalse) || (extent > GetBlobSize(image)))
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
one=1;
for (i=0; i < icon_file.count; i++)
{
/*
Verify Icon identifier.
*/
offset=(ssize_t) SeekBlob(image,(MagickOffsetType)
icon_file.directory[i].offset,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.size=ReadBlobLSBLong(image);
icon_info.width=(unsigned char) ReadBlobLSBSignedLong(image);
icon_info.height=(unsigned char) (ReadBlobLSBSignedLong(image)/2);
icon_info.planes=ReadBlobLSBShort(image);
icon_info.bits_per_pixel=ReadBlobLSBShort(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (((icon_info.planes == 18505) && (icon_info.bits_per_pixel == 21060)) ||
(icon_info.size == 0x474e5089))
{
Image
*icon_image;
ImageInfo
*read_info;
size_t
length;
unsigned char
*png;
/*
Icon image encoded as a compressed PNG image.
*/
length=icon_file.directory[i].size;
if ((length < 16) || (~length < 16))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
png=(unsigned char *) AcquireQuantumMemory(length+16,sizeof(*png));
if (png == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) CopyMagickMemory(png,"\211PNG\r\n\032\n\000\000\000\015",12);
png[12]=(unsigned char) icon_info.planes;
png[13]=(unsigned char) (icon_info.planes >> 8);
png[14]=(unsigned char) icon_info.bits_per_pixel;
png[15]=(unsigned char) (icon_info.bits_per_pixel >> 8);
count=ReadBlob(image,length-16,png+16);
icon_image=(Image *) NULL;
if (count > 0)
{
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->magick,"PNG",MagickPathExtent);
icon_image=BlobToImage(read_info,png,length+16,exception);
read_info=DestroyImageInfo(read_info);
}
png=(unsigned char *) RelinquishMagickMemory(png);
if (icon_image == (Image *) NULL)
{
if (count != (ssize_t) (length-16))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
image=DestroyImageList(image);
return((Image *) NULL);
}
DestroyBlob(icon_image);
icon_image->blob=ReferenceBlob(image->blob);
ReplaceImageInList(&image,icon_image);
}
else
{
if (icon_info.bits_per_pixel > 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
icon_info.compression=ReadBlobLSBLong(image);
icon_info.image_size=ReadBlobLSBLong(image);
icon_info.x_pixels=ReadBlobLSBLong(image);
icon_info.y_pixels=ReadBlobLSBLong(image);
icon_info.number_colors=ReadBlobLSBLong(image);
icon_info.colors_important=ReadBlobLSBLong(image);
image->alpha_trait=BlendPixelTrait;
image->columns=(size_t) icon_file.directory[i].width;
if ((ssize_t) image->columns > icon_info.width)
image->columns=(size_t) icon_info.width;
if (image->columns == 0)
image->columns=256;
image->rows=(size_t) icon_file.directory[i].height;
if ((ssize_t) image->rows > icon_info.height)
image->rows=(size_t) icon_info.height;
if (image->rows == 0)
image->rows=256;
image->depth=icon_info.bits_per_pixel;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" scene = %.20g",(double) i);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" size = %.20g",(double) icon_info.size);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" width = %.20g",(double) icon_file.directory[i].width);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" height = %.20g",(double) icon_file.directory[i].height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" colors = %.20g",(double ) icon_info.number_colors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" planes = %.20g",(double) icon_info.planes);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" bpp = %.20g",(double) icon_info.bits_per_pixel);
}
if ((icon_info.number_colors != 0) || (icon_info.bits_per_pixel <= 16U))
{
image->storage_class=PseudoClass;
image->colors=icon_info.number_colors;
if (image->colors == 0)
image->colors=one << icon_info.bits_per_pixel;
}
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
unsigned char
*icon_colormap;
/*
Read Icon raster colormap.
*/
if (AcquireImageColormap(image,image->colors,exception) ==
MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
icon_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4UL*sizeof(*icon_colormap));
if (icon_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) (4*image->colors),icon_colormap);
if (count != (ssize_t) (4*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=icon_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].green=(Quantum) ScaleCharToQuantum(*p++);
image->colormap[i].red=(Quantum) ScaleCharToQuantum(*p++);
p++;
}
icon_colormap=(unsigned char *) RelinquishMagickMemory(icon_colormap);
}
/*
Convert Icon raster image to pixel packets.
*/
if ((image_info->ping != MagickFalse) &&
(image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
bytes_per_line=(((image->columns*icon_info.bits_per_pixel)+31) &
~31) >> 3;
(void) bytes_per_line;
scanline_pad=((((image->columns*icon_info.bits_per_pixel)+31) & ~31)-
(image->columns*icon_info.bits_per_pixel)) >> 3;
switch (icon_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) (image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelIndex(image,((byte & (0x80 >> bit)) != 0 ? 0x01 :
0x00),q);
q+=GetPixelChannels(image);
}
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 4:
{
/*
Read 4-bit Icon scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
SetPixelIndex(image,((byte) & 0xf),q);
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,((byte >> 4) & 0xf),q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
SetPixelIndex(image,(Quantum) byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 16:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte=(size_t) ReadBlobByte(image);
byte|=(size_t) (ReadBlobByte(image) << 8);
SetPixelIndex(image,(Quantum) byte,q);
q+=GetPixelChannels(image);
}
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
case 32:
{
/*
Convert DirectColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
if (icon_info.bits_per_pixel == 32)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)),q);
q+=GetPixelChannels(image);
}
if (icon_info.bits_per_pixel == 24)
for (x=0; x < (ssize_t) scanline_pad; x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,image->rows-y-1,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if ((image_info->ping == MagickFalse) &&
(icon_info.bits_per_pixel <= 16))
(void) SyncImage(image,exception);
if (icon_info.bits_per_pixel != 32)
{
/*
Read the ICON alpha mask.
*/
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < 8; bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 8) != 0)
{
byte=(size_t) ReadBlobByte(image);
for (bit=0; bit < (image->columns % 8); bit++)
{
SetPixelAlpha(image,(((byte & (0x80 >> bit)) != 0) ?
TransparentAlpha : OpaqueAlpha),q);
q+=GetPixelChannels(image);
}
}
if ((image->columns % 32) != 0)
for (x=0; x < (ssize_t) ((32-(image->columns % 32))/8); x++)
(void) ReadBlobByte(image);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,
"UnexpectedEndOfFile",image->filename);
break;
}
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if (i < (ssize_t) (icon_file.count-1))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
} ; Sample JSON Format Conformity; Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-772'], 'message': 'Fixed memory leak reported in #457.'</s> |
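The one-line message does not say which allocation leaked, so the following is only a generic sketch of the shape such a fix takes in a reader like this one: intermediates are released on every early-exit path before the exception macro fires. All identifiers here are hypothetical, and which buffer issue #457 actually covered is an assumption:

buffer = (unsigned char *) AcquireQuantumMemory(length, sizeof(*buffer)); /* hypothetical */
if (decode_failed)
  {
    buffer = (unsigned char *) RelinquishMagickMemory(buffer); /* release before the throw */
    ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile");
  }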
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
cache_filename[MagickPathExtent],
id[MagickPathExtent],
keyword[MagickPathExtent],
*options;
const unsigned char
*p;
GeometryInfo
geometry_info;
Image
*image;
int
c;
LinkedListInfo
*profiles;
MagickBooleanType
status;
MagickOffsetType
offset;
MagickStatusType
flags;
register ssize_t
i;
size_t
depth,
length;
ssize_t
count;
StringInfo
*profile;
unsigned int
signature;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(cache_filename,image->filename,MagickPathExtent);
AppendImageFormat("cache",cache_filename);
c=ReadBlobByte(image);
if (c == EOF)
{
image=DestroyImage(image);
return((Image *) NULL);
}
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
offset=0;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
profiles=(LinkedListInfo *) NULL;
length=MagickPathExtent;
options=AcquireString((char *) NULL);
signature=GetMagickSignature((const StringInfo *) NULL);
image->depth=8;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MagickPathExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MagickPathExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment,exception);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
length=MagickPathExtent;
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MagickPathExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MagickPathExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'a':
case 'A':
{
if (LocaleCompare(keyword,"alpha-trait") == 0)
{
ssize_t
alpha_trait;
alpha_trait=ParseCommandOption(MagickPixelTraitOptions,
MagickFalse,options);
if (alpha_trait < 0)
break;
image->alpha_trait=(PixelTrait) alpha_trait;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->background_color,exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->border_color,exception);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
image->colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
if (LocaleCompare(keyword,"error") == 0)
{
image->error.mean_error_per_pixel=StringToDouble(options,
(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MagickPathExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"magick-signature") == 0)
{
signature=(unsigned int) StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"mattecolor") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->matte_color,exception);
break;
}
if (LocaleCompare(keyword,"maximum-error") == 0)
{
image->error.normalized_maximum_error=StringToDouble(
options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"mean-error") == 0)
{
image->error.normalized_mean_error=StringToDouble(options,
(char **) NULL);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'n':
case 'N':
{
if (LocaleCompare(keyword,"number-channels") == 0)
{
image->number_channels=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"number-meta-channels") == 0)
{
image->number_meta_channels=StringToUnsignedLong(options);
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile,exception);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
if ((flags & SigmaValue) != 0)
image->chromaticity.red_primary.y=geometry_info.sigma;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,
MagickFalse,options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options,exception);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"MagickCache") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) || (image->columns == 0) ||
(image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (signature != GetMagickSignature((const StringInfo *) NULL))
ThrowReaderException(CacheError,"IncompatibleAPI");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MagickPathExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MagickPathExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MagickPathExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
register unsigned char
*p;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
sizeof(*image->colormap));
if (image->colormap == (PixelInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
if (count != (ssize_t) (packet_size*image->colors))
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
p=colormap;
switch (depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit");
/*
Attach persistent pixel cache.
*/
status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception);
if (status == MagickFalse)
ThrowReaderException(CacheError,"UnableToPersistPixelCache");
/*
Proceed to next image.
*/
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-772'], 'message': 'Fixed memory leak reported in #458.'</s> |
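Note on the CWE-772 answer above: the ThrowReaderException() paths in the colormap branch return from the reader without releasing buffers allocated earlier in the same scope, so colormap (and options on other paths) leaks on malformed input. A minimal sketch of the pattern and the shape of the fix, reusing the memory helpers already present in the sample; this is an illustration, not the verbatim upstream patch:

colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
  packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
  ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
if (count != (ssize_t) (packet_size*image->colors))
  {
    /* Free before bailing out: the macro returns from the reader, so
       without this the allocation above becomes unreachable (CWE-772). */
    colormap=(unsigned char *) RelinquishMagickMemory(colormap);
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
  }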
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code:
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, void *yyscanner, HEX_LEX_ENVIRONMENT *lex_env)
{
YYUSE (yyvaluep);
YYUSE (yyscanner);
YYUSE (lex_env);
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
switch (yytype)
{
case 16: /* tokens */
#line 94 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1023 "hex_grammar.c" /* yacc.c:1257 */
break;
case 17: /* token_sequence */
#line 95 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1029 "hex_grammar.c" /* yacc.c:1257 */
break;
case 18: /* token_or_range */
#line 96 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1035 "hex_grammar.c" /* yacc.c:1257 */
break;
case 19: /* token */
#line 97 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1041 "hex_grammar.c" /* yacc.c:1257 */
break;
case 21: /* range */
#line 100 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1047 "hex_grammar.c" /* yacc.c:1257 */
break;
case 22: /* alternatives */
#line 99 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1053 "hex_grammar.c" /* yacc.c:1257 */
break;
case 23: /* byte */
#line 98 "hex_grammar.y" /* yacc.c:1257 */
{ yr_re_node_destroy(((*yyvaluep).re_node)); }
#line 1059 "hex_grammar.c" /* yacc.c:1257 */
break;
default:
break;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-674', 'CWE-787'], 'message': 'Fix issue #674 for hex strings.'</s> |
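Note on the CWE-674 answer above: yydestruct() itself is generated bison boilerplate; the underlying flaw is that the hex-string grammar lets alternatives nest without bound, so a crafted rule exhausts the parser stack. A hedged sketch of the usual remedy, a depth counter checked in the alternation rule; the cap value and the inside_or field name are illustrative assumptions, not necessarily yara's exact identifiers:

#define MAX_HEX_STRING_NESTING 16  /* illustrative cap, not yara's value */

/* In the semantic action for '(' alternatives ')': refuse to nest any
   deeper instead of recursing until the stack is exhausted. */
lex_env->inside_or++;
if (lex_env->inside_or > MAX_HEX_STRING_NESTING)
  {
    yyerror(yyscanner, lex_env, "hex string too deeply nested");
    YYABORT;
  }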
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: _gcry_ecc_eddsa_sign (gcry_mpi_t input, ECC_secret_key *skey,
gcry_mpi_t r_r, gcry_mpi_t s, int hashalgo, gcry_mpi_t pk)
{
int rc;
mpi_ec_t ctx = NULL;
int b;
unsigned int tmp;
unsigned char *digest = NULL;
gcry_buffer_t hvec[3];
const void *mbuf;
size_t mlen;
unsigned char *rawmpi = NULL;
unsigned int rawmpilen;
unsigned char *encpk = NULL; /* Encoded public key. */
unsigned int encpklen;
mpi_point_struct I; /* Intermediate value. */
mpi_point_struct Q; /* Public key. */
gcry_mpi_t a, x, y, r;
memset (hvec, 0, sizeof hvec);
if (!mpi_is_opaque (input))
return GPG_ERR_INV_DATA;
/* Initialize some helpers. */
point_init (&I);
point_init (&Q);
a = mpi_snew (0);
x = mpi_new (0);
y = mpi_new (0);
r = mpi_new (0);
ctx = _gcry_mpi_ec_p_internal_new (skey->E.model, skey->E.dialect, 0,
skey->E.p, skey->E.a, skey->E.b);
b = (ctx->nbits+7)/8;
if (b != 256/8) {
rc = GPG_ERR_INTERNAL; /* We only support 256 bit. */
goto leave;
}
rc = _gcry_ecc_eddsa_compute_h_d (&digest, skey->d, ctx);
if (rc)
goto leave;
_gcry_mpi_set_buffer (a, digest, 32, 0);
/* Compute the public key if it has not been supplied as optional
parameter. */
if (pk)
{
rc = _gcry_ecc_eddsa_decodepoint (pk, ctx, &Q, &encpk, &encpklen);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printhex ("* e_pk", encpk, encpklen);
if (!_gcry_mpi_ec_curve_point (&Q, ctx))
{
rc = GPG_ERR_BROKEN_PUBKEY;
goto leave;
}
}
else
{
_gcry_mpi_ec_mul_point (&Q, a, &skey->E.G, ctx);
rc = _gcry_ecc_eddsa_encodepoint (&Q, ctx, x, y, 0, &encpk, &encpklen);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printhex (" e_pk", encpk, encpklen);
}
/* Compute R. */
mbuf = mpi_get_opaque (input, &tmp);
mlen = (tmp +7)/8;
if (DBG_CIPHER)
log_printhex (" m", mbuf, mlen);
hvec[0].data = digest;
hvec[0].off = 32;
hvec[0].len = 32;
hvec[1].data = (char*)mbuf;
hvec[1].len = mlen;
rc = _gcry_md_hash_buffers (hashalgo, 0, digest, hvec, 2);
if (rc)
goto leave;
reverse_buffer (digest, 64);
if (DBG_CIPHER)
log_printhex (" r", digest, 64);
_gcry_mpi_set_buffer (r, digest, 64, 0);
_gcry_mpi_ec_mul_point (&I, r, &skey->E.G, ctx);
if (DBG_CIPHER)
log_printpnt (" r", &I, ctx);
/* Convert R into affine coordinates and apply encoding. */
rc = _gcry_ecc_eddsa_encodepoint (&I, ctx, x, y, 0, &rawmpi, &rawmpilen);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printhex (" e_r", rawmpi, rawmpilen);
/* S = r + a * H(encodepoint(R) + encodepoint(pk) + m) mod n */
hvec[0].data = rawmpi; /* (this is R) */
hvec[0].off = 0;
hvec[0].len = rawmpilen;
hvec[1].data = encpk;
hvec[1].off = 0;
hvec[1].len = encpklen;
hvec[2].data = (char*)mbuf;
hvec[2].off = 0;
hvec[2].len = mlen;
rc = _gcry_md_hash_buffers (hashalgo, 0, digest, hvec, 3);
if (rc)
goto leave;
/* No more need for RAWMPI thus we now transfer it to R_R. */
mpi_set_opaque (r_r, rawmpi, rawmpilen*8);
rawmpi = NULL;
reverse_buffer (digest, 64);
if (DBG_CIPHER)
log_printhex (" H(R+)", digest, 64);
_gcry_mpi_set_buffer (s, digest, 64, 0);
mpi_mulm (s, s, a, skey->E.n);
mpi_addm (s, s, r, skey->E.n);
rc = eddsa_encodempi (s, b, &rawmpi, &rawmpilen);
if (rc)
goto leave;
if (DBG_CIPHER)
log_printhex (" e_s", rawmpi, rawmpilen);
mpi_set_opaque (s, rawmpi, rawmpilen*8);
rawmpi = NULL;
rc = 0;
leave:
_gcry_mpi_release (a);
_gcry_mpi_release (x);
_gcry_mpi_release (y);
_gcry_mpi_release (r);
xfree (digest);
_gcry_mpi_ec_free (ctx);
point_free (&I);
point_free (&Q);
xfree (encpk);
xfree (rawmpi);
return rc;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'ecc: Store EdDSA session key in secure memory.
* cipher/ecc-eddsa.c (_gcry_ecc_eddsa_sign): use mpi_snew to allocate
session key.
--
An attacker who learns the EdDSA session key from side-channel
observation during the signing process can easily recover the long-
term secret key. Storing the session key in secure memory ensures that
constant time point operations are used in the MPI library.
Signed-off-by: Jo Van Bulck <[email protected]>'</s> |
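The libgcrypt fix above amounts to a one-line allocation change: per the commit message, the per-signature scalar r must be flagged as secure so the MPI layer uses its constant-time paths during the scalar multiplication. A minimal before/after sketch with the allocators already visible in the sample:

/* Vulnerable: session key r lives in ordinary memory, so point
   multiplication with it may take data-dependent time; observing that
   timing lets an attacker reconstruct the long-term key. */
r = mpi_new (0);

/* Fixed: secure memory makes _gcry_mpi_ec_mul_point() select
   constant-time operations for this MPI. */
r = mpi_snew (0);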
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
edge_connection_t *conn,
crypt_path_t *layer_hint)
{
static int num_seen=0;
relay_header_t rh;
unsigned domain = layer_hint?LD_APP:LD_EXIT;
int reason;
int optimistic_data = 0; /* Set to 1 if we receive data on a stream
* that's in the EXIT_CONN_STATE_RESOLVING
* or EXIT_CONN_STATE_CONNECTING states. */
tor_assert(cell);
tor_assert(circ);
relay_header_unpack(&rh, cell->payload);
// log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
num_seen++;
log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
num_seen, rh.command, rh.stream_id);
if (rh.length > RELAY_PAYLOAD_SIZE) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"Relay cell length field too long. Closing circuit.");
return - END_CIRC_REASON_TORPROTOCOL;
}
if (rh.stream_id == 0) {
switch (rh.command) {
case RELAY_COMMAND_BEGIN:
case RELAY_COMMAND_CONNECTED:
case RELAY_COMMAND_DATA:
case RELAY_COMMAND_END:
case RELAY_COMMAND_RESOLVE:
case RELAY_COMMAND_RESOLVED:
case RELAY_COMMAND_BEGIN_DIR:
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero "
"stream_id. Dropping.", (int)rh.command);
return 0;
default:
;
}
}
/* either conn is NULL, in which case we've got a control cell, or else
* conn points to the recognized stream. */
if (conn && !connection_state_is_open(TO_CONN(conn))) {
if (conn->base_.type == CONN_TYPE_EXIT &&
(conn->base_.state == EXIT_CONN_STATE_CONNECTING ||
conn->base_.state == EXIT_CONN_STATE_RESOLVING) &&
rh.command == RELAY_COMMAND_DATA) {
/* Allow DATA cells to be delivered to an exit node in state
* EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
* This speeds up HTTP, for example. */
optimistic_data = 1;
} else {
return connection_edge_process_relay_cell_not_open(
&rh, cell, circ, conn, layer_hint);
}
}
switch (rh.command) {
case RELAY_COMMAND_DROP:
// log_info(domain,"Got a relay-level padding cell. Dropping.");
return 0;
case RELAY_COMMAND_BEGIN:
case RELAY_COMMAND_BEGIN_DIR:
if (layer_hint &&
circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
log_fn(LOG_PROTOCOL_WARN, LD_APP,
"Relay begin request unsupported at AP. Dropping.");
return 0;
}
if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED &&
layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) {
log_fn(LOG_PROTOCOL_WARN, LD_APP,
"Relay begin request to Hidden Service "
"from intermediary node. Dropping.");
return 0;
}
if (conn) {
log_fn(LOG_PROTOCOL_WARN, domain,
"Begin cell for known stream. Dropping.");
return 0;
}
if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
/* Assign this circuit and its app-ward OR connection a unique ID,
* so that we can measure download times. The local edge and dir
* connection will be assigned the same ID when they are created
* and linked. */
static uint64_t next_id = 0;
circ->dirreq_id = ++next_id;
TO_OR_CIRCUIT(circ)->p_chan->dirreq_id = circ->dirreq_id;
}
return connection_exit_begin_conn(cell, circ);
case RELAY_COMMAND_DATA:
++stats_n_data_cells_received;
if (( layer_hint && --layer_hint->deliver_window < 0) ||
(!layer_hint && --circ->deliver_window < 0)) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"(relay data) circ deliver_window below 0. Killing.");
if (conn) {
/* XXXX Do we actually need to do this? Will killing the circuit
* not send an END and mark the stream for close as appropriate? */
connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
connection_mark_for_close(TO_CONN(conn));
}
return -END_CIRC_REASON_TORPROTOCOL;
}
log_debug(domain,"circ deliver_window now %d.", layer_hint ?
layer_hint->deliver_window : circ->deliver_window);
circuit_consider_sending_sendme(circ, layer_hint);
if (!conn) {
log_info(domain,"data cell dropped, unknown stream (streamid %d).",
rh.stream_id);
return 0;
}
if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"(relay data) conn deliver_window below 0. Killing.");
return -END_CIRC_REASON_TORPROTOCOL;
}
stats_n_data_bytes_received += rh.length;
connection_write_to_buf((char*)(cell->payload + RELAY_HEADER_SIZE),
rh.length, TO_CONN(conn));
if (!optimistic_data) {
/* Only send a SENDME if we're not getting optimistic data; otherwise
* a SENDME could arrive before the CONNECTED.
*/
connection_edge_consider_sending_sendme(conn);
}
return 0;
case RELAY_COMMAND_END:
reason = rh.length > 0 ?
get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
if (!conn) {
log_info(domain,"end cell (%s) dropped, unknown stream.",
stream_end_reason_to_string(reason));
return 0;
}
/* XXX add to this log_fn the exit node's nickname? */
log_info(domain,TOR_SOCKET_T_FORMAT": end cell (%s) for stream %d. "
"Removing stream.",
conn->base_.s,
stream_end_reason_to_string(reason),
conn->stream_id);
if (conn->base_.type == CONN_TYPE_AP) {
entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
if (entry_conn->socks_request &&
!entry_conn->socks_request->has_finished)
log_warn(LD_BUG,
"open stream hasn't sent socks answer yet? Closing.");
}
/* We just *got* an end; no reason to send one. */
conn->edge_has_sent_end = 1;
if (!conn->end_reason)
conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
if (!conn->base_.marked_for_close) {
/* only mark it if not already marked. it's possible to
* get the 'end' right around when the client hangs up on us. */
connection_mark_and_flush(TO_CONN(conn));
}
return 0;
case RELAY_COMMAND_EXTEND:
case RELAY_COMMAND_EXTEND2: {
static uint64_t total_n_extend=0, total_nonearly=0;
total_n_extend++;
if (rh.stream_id) {
log_fn(LOG_PROTOCOL_WARN, domain,
"'extend' cell received for non-zero stream. Dropping.");
return 0;
}
if (cell->command != CELL_RELAY_EARLY &&
!networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) {
#define EARLY_WARNING_INTERVAL 3600
static ratelim_t early_warning_limit =
RATELIM_INIT(EARLY_WARNING_INTERVAL);
char *m;
if (cell->command == CELL_RELAY) {
++total_nonearly;
if ((m = rate_limit_log(&early_warning_limit, approx_time()))) {
double percentage = ((double)total_nonearly)/total_n_extend;
percentage *= 100;
log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, "
"but not via RELAY_EARLY. Dropping.%s", m);
log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of "
"all EXTEND cells for this reason)", percentage);
tor_free(m);
}
} else {
log_fn(LOG_WARN, domain,
"EXTEND cell received, in a cell with type %d! Dropping.",
cell->command);
}
return 0;
}
return circuit_extend(cell, circ);
}
case RELAY_COMMAND_EXTENDED:
case RELAY_COMMAND_EXTENDED2:
if (!layer_hint) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"'extended' unsupported at non-origin. Dropping.");
return 0;
}
log_debug(domain,"Got an extended cell! Yay.");
{
extended_cell_t extended_cell;
if (extended_cell_parse(&extended_cell, rh.command,
(const uint8_t*)cell->payload+RELAY_HEADER_SIZE,
rh.length)<0) {
log_warn(LD_PROTOCOL,
"Can't parse EXTENDED cell; killing circuit.");
return -END_CIRC_REASON_TORPROTOCOL;
}
if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ),
&extended_cell.created_cell)) < 0) {
log_warn(domain,"circuit_finish_handshake failed.");
return reason;
}
}
if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) {
log_info(domain,"circuit_send_next_onion_skin() failed.");
return reason;
}
return 0;
case RELAY_COMMAND_TRUNCATE:
if (layer_hint) {
log_fn(LOG_PROTOCOL_WARN, LD_APP,
"'truncate' unsupported at origin. Dropping.");
return 0;
}
if (circ->n_hop) {
if (circ->n_chan)
log_warn(LD_BUG, "n_chan and n_hop set on the same circuit!");
extend_info_free(circ->n_hop);
circ->n_hop = NULL;
tor_free(circ->n_chan_create_cell);
circuit_set_state(circ, CIRCUIT_STATE_OPEN);
}
if (circ->n_chan) {
uint8_t trunc_reason = get_uint8(cell->payload + RELAY_HEADER_SIZE);
circuit_clear_cell_queue(circ, circ->n_chan);
channel_send_destroy(circ->n_circ_id, circ->n_chan,
trunc_reason);
circuit_set_n_circid_chan(circ, 0, NULL);
}
log_debug(LD_EXIT, "Processed 'truncate', replying.");
{
char payload[1];
payload[0] = (char)END_CIRC_REASON_REQUESTED;
relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
payload, sizeof(payload), NULL);
}
return 0;
case RELAY_COMMAND_TRUNCATED:
if (!layer_hint) {
log_fn(LOG_PROTOCOL_WARN, LD_EXIT,
"'truncated' unsupported at non-origin. Dropping.");
return 0;
}
circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint,
get_uint8(cell->payload + RELAY_HEADER_SIZE));
return 0;
case RELAY_COMMAND_CONNECTED:
if (conn) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"'connected' unsupported while open. Closing circ.");
return -END_CIRC_REASON_TORPROTOCOL;
}
log_info(domain,
"'connected' received, no conn attached anymore. Ignoring.");
return 0;
case RELAY_COMMAND_SENDME:
if (!rh.stream_id) {
if (layer_hint) {
if (layer_hint->package_window + CIRCWINDOW_INCREMENT >
CIRCWINDOW_START_MAX) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"Unexpected sendme cell from exit relay. "
"Closing circ.");
return -END_CIRC_REASON_TORPROTOCOL;
}
layer_hint->package_window += CIRCWINDOW_INCREMENT;
log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
layer_hint->package_window);
circuit_resume_edge_reading(circ, layer_hint);
} else {
if (circ->package_window + CIRCWINDOW_INCREMENT >
CIRCWINDOW_START_MAX) {
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"Unexpected sendme cell from client. "
"Closing circ (window %d).",
circ->package_window);
return -END_CIRC_REASON_TORPROTOCOL;
}
circ->package_window += CIRCWINDOW_INCREMENT;
log_debug(LD_APP,
"circ-level sendme at non-origin, packagewindow %d.",
circ->package_window);
circuit_resume_edge_reading(circ, layer_hint);
}
return 0;
}
if (!conn) {
log_info(domain,"sendme cell dropped, unknown stream (streamid %d).",
rh.stream_id);
return 0;
}
conn->package_window += STREAMWINDOW_INCREMENT;
log_debug(domain,"stream-level sendme, packagewindow now %d.",
conn->package_window);
if (circuit_queue_streams_are_blocked(circ)) {
/* Still waiting for queue to flush; don't touch conn */
return 0;
}
connection_start_reading(TO_CONN(conn));
/* handle whatever might still be on the inbuf */
if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
/* (We already sent an end cell if possible) */
connection_mark_for_close(TO_CONN(conn));
return 0;
}
return 0;
case RELAY_COMMAND_RESOLVE:
if (layer_hint) {
log_fn(LOG_PROTOCOL_WARN, LD_APP,
"resolve request unsupported at AP; dropping.");
return 0;
} else if (conn) {
log_fn(LOG_PROTOCOL_WARN, domain,
"resolve request for known stream; dropping.");
return 0;
} else if (circ->purpose != CIRCUIT_PURPOSE_OR) {
log_fn(LOG_PROTOCOL_WARN, domain,
"resolve request on circ with purpose %d; dropping",
circ->purpose);
return 0;
}
connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ));
return 0;
case RELAY_COMMAND_RESOLVED:
if (conn) {
log_fn(LOG_PROTOCOL_WARN, domain,
"'resolved' unsupported while open. Closing circ.");
return -END_CIRC_REASON_TORPROTOCOL;
}
log_info(domain,
"'resolved' received, no conn attached anymore. Ignoring.");
return 0;
case RELAY_COMMAND_ESTABLISH_INTRO:
case RELAY_COMMAND_ESTABLISH_RENDEZVOUS:
case RELAY_COMMAND_INTRODUCE1:
case RELAY_COMMAND_INTRODUCE2:
case RELAY_COMMAND_INTRODUCE_ACK:
case RELAY_COMMAND_RENDEZVOUS1:
case RELAY_COMMAND_RENDEZVOUS2:
case RELAY_COMMAND_INTRO_ESTABLISHED:
case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
rend_process_relay_cell(circ, layer_hint,
rh.command, rh.length,
cell->payload+RELAY_HEADER_SIZE);
return 0;
}
log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
"Received unknown relay command %d. Perhaps the other side is using "
"a newer version of Tor? Dropping.",
rh.command);
return 0; /* for forward compatibility, don't kill the circuit */
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200', 'CWE-617'], 'message': 'TROVE-2017-005: Fix assertion failure in connection_edge_process_relay_cell
On an hidden service rendezvous circuit, a BEGIN_DIR could be sent
(maliciously) which would trigger a tor_assert() because
connection_edge_process_relay_cell() thought that the circuit is an
or_circuit_t but is an origin circuit in reality.
Fixes #22494
Reported-by: Roger Dingledine <[email protected]>
Signed-off-by: David Goulet <[email protected]>'</s> |
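For TROVE-2017-005 above, the crash path is the RELAY_COMMAND_BEGIN_DIR branch: TO_OR_CIRCUIT(circ) tor_assert()s on the circuit magic, so a BEGIN_DIR cell delivered on an origin-side rendezvous circuit aborts the whole process. A sketch of the shape of guard the fix puts before that cast; the exact condition and log wording in Tor's patch may differ:

/* Placed immediately before the existing dirreq_id block: */
if (rh.command == RELAY_COMMAND_BEGIN_DIR &&
    CIRCUIT_IS_ORIGIN(circ)) {
  /* TO_OR_CIRCUIT() below tor_assert()s on the circuit magic;
   * letting an origin circuit through kills the daemon. */
  log_fn(LOG_PROTOCOL_WARN, domain,
         "BEGIN_DIR cell on an origin circuit. Dropping.");
  return 0;
}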
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if D cache aliases.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
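Since the same commit message answers the next several samples, the core mechanism it describes is sketched once here: a tunable gap plus a helper that address-space searches consult instead of the raw vm_start. This follows the actual patch closely but is simplified for illustration:

/* Guard gap below a VM_GROWSDOWN stack; overridable in pages with the
 * stack_guard_gap= kernel command line option. */
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;  /* 1MB with 4kB pages */

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)  /* subtraction wrapped */
			vm_start = 0;
	}
	return vm_start;
}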
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/*
* Adjust search limits by the desired length.
* See implementation comment at top of unmapped_area().
*/
gap_end = info->high_limit;
if (gap_end < length)
return -ENOMEM;
high_limit = gap_end - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit)
goto found_highest;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
return -ENOMEM;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
return -ENOMEM;
while (true) {
/* Visit right subtree if it looks promising */
gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
check_current:
/* Check if current node has a suitable gap */
gap_end = vma->vm_start;
if (gap_end < low_limit)
return -ENOMEM;
if (gap_start <= high_limit && gap_end - gap_start >= length)
goto found;
/* Visit left subtree if it looks promising */
if (vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
return -ENOMEM;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
gap_start = vma->vm_prev ?
vma->vm_prev->vm_end : 0;
goto check_current;
}
}
}
found:
/* We found a suitable gap. Clip it with the original high_limit. */
if (gap_end > info->high_limit)
gap_end = info->high_limit;
found_highest:
/* Compute highest gap address at the desired alignment */
gap_end -= info->length;
gap_end -= (gap_end - info->align_offset) & info->align_mask;
VM_BUG_ON(gap_end < info->low_limit);
VM_BUG_ON(gap_end < gap_start);
return gap_end;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
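For architectures whose stacks grow upward (VM_GROWSUP, e.g. parisc) the patch mirrors the helper on the other edge of the vma; a sketch of the counterpart, again simplified from the patch:

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)  /* addition wrapped */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}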
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static inline int stack_guard_page_end(struct vm_area_struct *vma,
unsigned long addr)
{
return (vma->vm_flags & VM_GROWSUP) &&
(vma->vm_end == addr) &&
!vma_growsup(vma->vm_next, addr);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
/*
* We implement the search by looking for an rbtree node that
* immediately follows a suitable gap. That is,
* - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
* - gap_end = vma->vm_start >= info->low_limit + length;
* - gap_end - gap_start >= length
*/
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long length, low_limit, high_limit, gap_start, gap_end;
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
/* Adjust search limits by the desired length */
if (info->high_limit < length)
return -ENOMEM;
high_limit = info->high_limit - length;
if (info->low_limit > high_limit)
return -ENOMEM;
low_limit = info->low_limit + length;
/* Check if rbtree root looks promising */
if (RB_EMPTY_ROOT(&mm->mm_rb))
goto check_highest;
vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
if (vma->rb_subtree_gap < length)
goto check_highest;
while (true) {
/* Visit left subtree if it looks promising */
gap_end = vma->vm_start;
if (gap_end >= low_limit && vma->vm_rb.rb_left) {
struct vm_area_struct *left =
rb_entry(vma->vm_rb.rb_left,
struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
}
}
gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
check_current:
/* Check if current node has a suitable gap */
if (gap_start > high_limit)
return -ENOMEM;
if (gap_end >= low_limit && gap_end - gap_start >= length)
goto found;
/* Visit right subtree if it looks promising */
if (vma->vm_rb.rb_right) {
struct vm_area_struct *right =
rb_entry(vma->vm_rb.rb_right,
struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
}
}
/* Go back up the rbtree to find next candidate node */
while (true) {
struct rb_node *prev = &vma->vm_rb;
if (!rb_parent(prev))
goto check_highest;
vma = rb_entry(rb_parent(prev),
struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_left) {
gap_start = vma->vm_prev->vm_end;
gap_end = vma->vm_start;
goto check_current;
}
}
}
check_highest:
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
if (gap_start > high_limit)
return -ENOMEM;
found:
/* We found a suitable gap. Clip it with the original low_limit. */
if (gap_start < info->low_limit)
gap_start = info->low_limit;
/* Adjust gap address to the desired alignment */
gap_start += (info->align_offset - gap_start) & info->align_mask;
VM_BUG_ON(gap_start + info->length > info->high_limit);
VM_BUG_ON(gap_start + info->length > gap_end);
return gap_start;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
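Tying the helpers back to the unmapped_area() sample above: after the patch the gap bounds come from the guard-adjusted vma edges rather than the raw ones, so a new mapping can no longer be placed flush against a stack. Roughly, inside the search loop (simplified):

gap_end = vm_start_gap(vma);                 /* was: vma->vm_start */
/* ... left/right subtree descent unchanged ... */
gap_start = vma->vm_prev ?
            vm_end_gap(vma->vm_prev) : 0;    /* was: vma->vm_prev->vm_end */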
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int do_anonymous_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mem_cgroup *memcg;
struct page *page;
pte_t entry;
/* File mapping without ->vm_ops ? */
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, vmf->address) < 0)
return VM_FAULT_SIGSEGV;
/*
* Use pte_alloc() instead of pte_alloc_map(). We can't run
* pte_offset_map() on pmds where a huge pmd might be created
* from a different thread.
*
* pte_alloc_map() is safe to use under down_write(mmap_sem) or when
* parallel threads are excluded by other means.
*
* Here we only have down_read(mmap_sem).
*/
if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
return VM_FAULT_OOM;
/* See the comment in pte_alloc_one_map() */
if (unlikely(pmd_trans_unstable(vmf->pmd)))
return 0;
/* Use the zero-page for reads */
if (!(vmf->flags & FAULT_FLAG_WRITE) &&
!mm_forbids_zeropage(vma->vm_mm)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
vma->vm_page_prot));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (!pte_none(*vmf->pte))
goto unlock;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
goto setpte;
}
/* Allocate our own private page. */
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
if (!page)
goto oom;
if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
goto oom_free_page;
/*
* The memory barrier inside __SetPageUptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
entry = pte_mkwrite(pte_mkdirty(entry));
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (!pte_none(*vmf->pte))
goto release;
/* Deliver the page fault to userland, check inside PT lock */
if (userfaultfd_missing(vma)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
return handle_userfault(vmf, VM_UFFD_MISSING);
}
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
setpte:
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
release:
mem_cgroup_cancel_charge(page, memcg, false);
put_page(page);
goto unlock;
oom_free_page:
put_page(page);
oom:
return VM_FAULT_OOM;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
bool start_changed = false, end_changed = false;
long adjust_next = 0;
int remove_next = 0;
if (next && !insert) {
struct vm_area_struct *exporter = NULL, *importer = NULL;
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
* perhaps the one after too (mprotect case 6).
* The only other cases that gets here are
* case 1, case 7 and case 8.
*/
if (next == expand) {
/*
* The only case where we don't expand "vma"
* and we expand "next" instead is case 8.
*/
VM_WARN_ON(end != next->vm_end);
/*
* remove_next == 3 means we're
* removing "vma" and that to do so we
* swapped "vma" and "next".
*/
remove_next = 3;
VM_WARN_ON(file != next->vm_file);
swap(vma, next);
} else {
VM_WARN_ON(expand != vma);
/*
* case 1, 6, 7, remove_next == 2 is case 6,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
VM_WARN_ON(remove_next == 2 &&
end != next->vm_next->vm_end);
VM_WARN_ON(remove_next == 1 &&
end != next->vm_end);
/* trim end to next, for case 6 first pass */
end = next->vm_end;
}
exporter = next;
importer = vma;
/*
* If next doesn't have anon_vma, import from vma after
* next, if the vma overlaps with it.
*/
if (remove_next == 2 && !next->anon_vma)
exporter = next->vm_next;
} else if (end > next->vm_start) {
/*
* vma expands, overlapping part of the next:
* mprotect case 5 shifting the boundary up.
*/
adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
exporter = next;
importer = vma;
VM_WARN_ON(expand != importer);
} else if (end < vma->vm_end) {
/*
* vma shrinks, and !insert tells it's not
* split_vma inserting another: so it must be
* mprotect case 4 shifting the boundary down.
*/
adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
exporter = vma;
importer = next;
VM_WARN_ON(expand != importer);
}
/*
* Easily overlooked: when mprotect shifts the boundary,
* make sure the expanding vma has anon_vma set if the
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
if (error)
return error;
}
}
again:
vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
uprobe_munmap(vma, vma->vm_start, vma->vm_end);
if (adjust_next)
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
if (insert) {
/*
* Put into interval tree now, so instantiated pages
* are visible to arm/parisc __flush_dcache_page
* throughout; but we cannot insert into address
* space until vma start or end is updated.
*/
__vma_link_file(insert);
}
}
anon_vma = vma->anon_vma;
if (!anon_vma && adjust_next)
anon_vma = next->anon_vma;
if (anon_vma) {
VM_WARN_ON(adjust_next && next->anon_vma &&
anon_vma != next->anon_vma);
anon_vma_lock_write(anon_vma);
anon_vma_interval_tree_pre_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_pre_update_vma(next);
}
if (root) {
flush_dcache_mmap_lock(mapping);
vma_interval_tree_remove(vma, root);
if (adjust_next)
vma_interval_tree_remove(next, root);
}
if (start != vma->vm_start) {
vma->vm_start = start;
start_changed = true;
}
if (end != vma->vm_end) {
vma->vm_end = end;
end_changed = true;
}
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next << PAGE_SHIFT;
next->vm_pgoff += adjust_next;
}
if (root) {
if (adjust_next)
vma_interval_tree_insert(next, root);
vma_interval_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
if (remove_next) {
/*
* vma_merge has merged next into vma, and needs
* us to remove next before dropping the locks.
*/
if (remove_next != 3)
__vma_unlink_prev(mm, next, vma);
else
/*
* vma is not before next if they've been
* swapped.
*
* pre-swap() next->vm_start was reduced so
* tell validate_mm_rb to ignore pre-swap()
* "next" (which is stored in post-swap()
* "vma").
*/
__vma_unlink_common(mm, next, NULL, false, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
__insert_vm_struct(mm, insert);
} else {
if (start_changed)
vma_gap_update(vma);
if (end_changed) {
if (!next)
mm->highest_vm_end = end;
else if (!adjust_next)
vma_gap_update(next);
}
}
if (anon_vma) {
anon_vma_interval_tree_post_update_vma(vma);
if (adjust_next)
anon_vma_interval_tree_post_update_vma(next);
anon_vma_unlock_write(anon_vma);
}
if (mapping)
i_mmap_unlock_write(mapping);
if (root) {
uprobe_mmap(vma);
if (adjust_next)
uprobe_mmap(next);
}
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
fput(file);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
* up the code too much to do both in one go.
*/
if (remove_next != 3) {
/*
* If "next" was removed and vma->vm_end was
* expanded (up) over it, in turn
* "next->vm_prev->vm_end" changed and the
* "vma->vm_next" gap must be updated.
*/
next = vma->vm_next;
} else {
/*
* For the scope of the comment "next" and
* "vma" considered pre-swap(): if "vma" was
* removed, next->vm_start was expanded (down)
* over it and the "next" gap must be updated.
* Because of the swap() the post-swap() "vma"
* actually points to pre-swap() "next"
* (post-swap() "next" as opposed is now a
* dangling pointer).
*/
next = vma;
}
if (remove_next == 2) {
remove_next = 1;
end = next->vm_end;
goto again;
}
else if (next)
vma_gap_update(next);
else {
/*
* If remove_next == 2 we obviously can't
* reach this path.
*
* If remove_next == 3 we can't reach this
* path because pre-swap() next is always not
* NULL. pre-swap() "next" is not being
* removed and its next->vm_end is not altered
* (and furthermore "end" already matches
* next->vm_end in remove_next == 3).
*
* We reach this only in the remove_next == 1
* case if the "next" vma that was removed was
* the highest vma of the mm. However in such
* case next->vm_end == "end" and the extended
* "vma" has vma->vm_end == next->vm_end so
* mm->highest_vm_end doesn't need any update
* in remove_next == 1 case.
*/
VM_WARN_ON(mm->highest_vm_end != end);
}
}
if (insert && file)
uprobe_mmap(insert);
validate_mm(mm);
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
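Context for the commit message above: the fix replaces raw vm_start/vm_end comparisons with gap-aware helpers. Below is a minimal sketch of vm_start_gap() and vm_end_gap() as the message describes them, assuming a global stack_guard_gap measured in bytes; the helper bodies are reconstructed from that description, not quoted from this dataset's code.
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;
	/* A stack growing down keeps its guard gap below vm_start. */
	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start) /* clamp on underflow */
			vm_start = 0;
	}
	return vm_start;
}
static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;
	/* A stack growing up keeps its guard gap above vm_end. */
	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end) /* clamp on overflow */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}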
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
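The hugetlb_get_unmapped_area() code above shows the exact pattern the commit message targets: find_vma() followed by an "addr + len <= vma->vm_start" comparison. A hedged sketch of the gap-aware form of that check, wrapped in a helper whose name (fits_before_guard_gap) is illustrative and not taken from the patch:
/* Illustrative only: does [addr, addr + len) stay clear of the next
 * vma's stack guard gap? Assumes vm_start_gap() from the sketch above. */
static bool fits_before_guard_gap(struct mm_struct *mm, unsigned long addr,
				  unsigned long len)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	return TASK_SIZE - len >= addr &&
	       (!vma || addr + len <= vm_start_gap(vma));
}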
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: radix__arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long addr0,
const unsigned long len,
const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
if (unlikely(addr > mm->context.addr_limit &&
mm->context.addr_limit != TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;
/* requested length too big for entire address space */
if (len > mm->task_size - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
info.align_mask = 0;
if (addr > DEFAULT_MAP_WINDOW)
info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
mm->context.addr_limit = TASK_SIZE;
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > mm->task_size)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
/*
* We are always doing a topdown search here. Slice code
* does that too.
*/
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
if (addr > DEFAULT_MAP_WINDOW)
info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
return vm_unmapped_area(&info);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
if (len > task_size)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp);
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
if (addr) {
if (do_color_align && last_mmap)
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
goto found_addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit();
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
{
address &= PAGE_MASK;
if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
struct vm_area_struct *prev = vma->vm_prev;
/*
* Is there a mapping abutting this one below?
*
* That's only ok if it's the same stack mapping
* that has gotten split..
*/
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
/* As VM_GROWSDOWN but s/below/above/ */
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
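check_stack_guard_page() above is the single-page logic the patch deletes; the equivalent enforcement moves into the expansion paths themselves. A sketch of the check at the top of expand_upwards(), reconstructed from the commit message's description (the precise flag tests in the merged patch may differ):
	/* Sketch: refuse to grow vma->vm_end up to 'address' if that would
	 * leave less than stack_guard_gap before the next mapping. */
	unsigned long gap_addr;
	struct vm_area_struct *next;
	address &= PAGE_MASK;
	address += PAGE_SIZE;
	if (!address)
		return -ENOMEM; /* wrapped past the top of the address space */
	gap_addr = address + stack_guard_gap;
	if (gap_addr < address)
		gap_addr = TASK_SIZE; /* clamp on overflow */
	next = vma->vm_next;
	if (next && next->vm_start < gap_addr) {
		/* Only another upward-growing stack may sit inside the gap. */
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
	}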
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
struct rb_node **rb_link, struct rb_node *rb_parent)
{
/* Update tracking information for the gap following the new vma. */
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = vma->vm_end;
/*
* vma->vm_prev wasn't known when we followed the rbtree to find the
* correct insertion point for that vma. As a result, we could not
* update the vma vm_rb parents rb_subtree_gap values on the way down.
* So, we first insert the vma with a zero rb_subtree_gap value
* (to be consistent with what we did on the way down), and then
* immediately update the gap to the correct value. Finally we
* rebalance the rbtree after all augmented values have been set.
*/
rb_link_node(&vma->vm_rb, rb_parent, rb_link);
vma->rb_subtree_gap = 0;
vma_gap_update(vma);
vma_rb_insert(vma, &mm->mm_rb);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
unsigned long address, unsigned int *flags, int *nonblocking)
{
unsigned int fault_flags = 0;
int ret;
/* mlock all present pages, but do not fault in new pages */
if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
return -ENOENT;
/* For mm_populate(), just skip the stack guard page. */
if ((*flags & FOLL_POPULATE) &&
(stack_guard_page_start(vma, address) ||
stack_guard_page_end(vma, address + PAGE_SIZE)))
return -ENOENT;
if (*flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (*flags & FOLL_REMOTE)
fault_flags |= FAULT_FLAG_REMOTE;
if (nonblocking)
fault_flags |= FAULT_FLAG_ALLOW_RETRY;
if (*flags & FOLL_NOWAIT)
fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
if (*flags & FOLL_TRIED) {
VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
fault_flags |= FAULT_FLAG_TRIED;
}
ret = handle_mm_fault(vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
int err = vm_fault_to_errno(ret, *flags);
if (err)
return err;
BUG();
}
if (tsk) {
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
}
if (ret & VM_FAULT_RETRY) {
if (nonblocking)
*nonblocking = 0;
return -EBUSY;
}
/*
* The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
* necessary, even if maybe_mkwrite decided not to set pte_write. We
* can thus safely do subsequent page lookups as if they were reads.
* But only do so when looping for pte_write is futile: in some cases
* userspace may also be wanting to write to the gotten user page,
* which a read fault here might prevent (a readonly page might get
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
*flags |= FOLL_COW;
return 0;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
struct vm_area_struct *next;
address &= PAGE_MASK;
next = vma->vm_next;
if (next && next->vm_start == address + PAGE_SIZE) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
}
return expand_upwards(vma, address);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
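With the gap checks folded into the expansion functions, expand_stack() no longer needs to inspect the neighbouring vma at all. A sketch of the reduced CONFIG_STACK_GROWSUP variant (reconstructed; the non-GROWSUP build would delegate to expand_downwards() the same way):
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	/* Neighbour and guard-gap validation now happens inside
	 * expand_upwards() itself. */
	return expand_upwards(vma, address);
}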
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
struct mm_struct *mm = vma->vm_mm;
struct file *file = vma->vm_file;
struct proc_maps_private *priv = m->private;
vm_flags_t flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
unsigned long start, end;
dev_t dev = 0;
const char *name = NULL;
if (file) {
struct inode *inode = file_inode(vma->vm_file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
if (stack_guard_page_start(vma, start))
start += PAGE_SIZE;
end = vma->vm_end;
if (stack_guard_page_end(vma, end))
end -= PAGE_SIZE;
seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
start,
end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
pgoff,
MAJOR(dev), MINOR(dev), ino);
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
*/
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "\n");
goto done;
}
if (vma->vm_ops && vma->vm_ops->name) {
name = vma->vm_ops->name(vma);
if (name)
goto done;
}
name = arch_vma_name(vma);
if (!name) {
if (!mm) {
name = "[vdso]";
goto done;
}
if (vma->vm_start <= mm->brk &&
vma->vm_end >= mm->start_brk) {
name = "[heap]";
goto done;
}
if (is_stack(priv, vma))
name = "[stack]";
}
done:
if (name) {
seq_pad(m, ' ');
seq_puts(m, name);
}
seq_putc(m, '\n');
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* for MAP_32BIT mappings we force the legacy mmap base */
if (!in_compat_syscall() && (flags & MAP_32BIT))
goto bottomup;
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (filp) {
info.align_mask = get_align_mask();
info.align_offset += get_align_bits();
}
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
VM_BUG_ON(addr != -ENOMEM);
bottomup:
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
unsigned long min_brk;
bool populate;
LIST_HEAD(uf);
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
#ifdef CONFIG_COMPAT_BRK
/*
* CONFIG_COMPAT_BRK can still be overridden by setting
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
if (current->brk_randomized)
min_brk = mm->start_brk;
else
min_brk = mm->end_data;
#else
min_brk = mm->start_brk;
#endif
if (brk < min_brk)
goto out;
/*
* Check against rlimit here. If this check is done later after the test
* of oldbrk with newbrk then it can escape the test and let the data
* segment grow beyond its set limit in the case where the limit is
* not page aligned -Ram Gupta
*/
if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
mm->end_data, mm->start_data))
goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk)
goto set_brk;
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
if (!do_munmap(mm, newbrk, oldbrk-newbrk, &uf))
goto set_brk;
goto out;
}
/* Check against existing mmap mappings. */
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out;
/* Ok, looks good - let it rip. */
if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
goto out;
set_brk:
mm->brk = brk;
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
up_write(&mm->mmap_sem);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
return brk;
out:
retval = mm->brk;
up_write(&mm->mmap_sem);
return retval;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
#ifdef CONFIG_64BIT
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
#endif
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
last_mmap = GET_LAST_MMAP(filp);
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
/* requesting a specific address */
if (addr) {
if (do_color_align && last_mmap)
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
goto found_addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
goto found_addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
found_addr:
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
return addr;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
int do_color_align;
struct vm_unmapped_area_info info;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
if (addr) {
if (do_color_align)
addr = COLOR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
VM_BUG_ON(addr != -ENOMEM);
info.low_limit = VA_EXCLUDE_END;
info.high_limit = task_size;
addr = vm_unmapped_area(&info);
}
return addr;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX],
which is 256kB, or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries with the default
unlimited stack size limit, because those applications can be
tricked into consuming a large portion of the stack, and a single glibc
call could then jump over the guard page. These attacks are not
theoretical, unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somewhat inherent, but it should reduce the attack surface a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation-wise, first delete all the old code for the stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping the gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
/* Guard against wrapping around to address 0. */
if (address < PAGE_ALIGN(address+4))
address = PAGE_ALIGN(address+4);
else
return -ENOMEM;
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
unsigned long size, grow;
size = address - vma->vm_start;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
error = -ENOMEM;
if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
* against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
if (vma->vm_next)
vma_gap_update(vma->vm_next);
else
mm->highest_vm_end = address;
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
}
}
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
} ; Sample JSON Format Conformity. Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
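The stack_guard_gap command-line option mentioned above (gap size in page units) can be wired up with an early parameter parser; a sketch under those assumptions, with a 256-page default (1MB on 4k-page systems):
/* Sketch: default gap of 256 pages == 1MB with 4k pages. */
unsigned long stack_guard_gap = 256UL << PAGE_SHIFT;

static int __init cmdline_parse_stack_guard_gap(char *p)
{
    unsigned long val;
    char *endptr;

    val = simple_strtoul(p, &endptr, 10);
    if (!*endptr)   /* accept only a well-formed number of pages */
        stack_guard_gap = val << PAGE_SHIFT;
    return 0;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);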
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
int do_color_align;
struct vm_unmapped_area_info info;
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
return -EINVAL;
return addr;
}
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
/* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
return vm_unmapped_area(&info);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
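In arch_get_unmapped_area() paths like the one quoted above, respecting the gap is a one-line change at the probe of a caller-requested address: compare against vm_start_gap(vma) rather than vma->vm_start. Sketched, assuming the helper from the message:
/* Sketch: leave room for a neighbouring stack's guard gap when
 * checking whether a requested address fits. */
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
    (!vma || addr + len <= vm_start_gap(vma)))
    return addr;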
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if either the I or D
* caches alias.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
/* requesting a specific address */
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = FIRST_USER_ADDRESS;
info.high_limit = mm->mmap_base;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': 'mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs like gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunately.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>'</s> |
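On the growth side, the same gap has to be enforced before a stack vma expands toward its neighbour. A sketch of the VM_GROWSUP check in expand_upwards(), assuming the names used elsewhere in the message (illustrative, not the exact patch):
/* Sketch: refuse to grow an upward stack into the next vma's gap. */
address &= PAGE_MASK;
address += PAGE_SIZE;
if (!address)
    return -ENOMEM;
gap_addr = address + stack_guard_gap;
if (gap_addr < address)          /* overflow past the top of memory */
    return -ENOMEM;
next = vma->vm_next;
if (next && next->vm_start < gap_addr) {
    if (!(next->vm_flags & VM_GROWSUP))
        return -ENOMEM;
    /* two adjacent growsup vmas may legitimately stay back to back */
}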
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
const ssize_t type,const PSDCompressionType compression,
const size_t compact_size,ExceptionInfo *exception)
{
MagickBooleanType
status;
register unsigned char
*p;
size_t
count,
length,
packet_size,
row_size;
ssize_t
y;
unsigned char
*compact_pixels,
*pixels;
z_stream
stream;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is ZIP compressed");
compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
sizeof(*compact_pixels));
if (compact_pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
packet_size=GetPSDPacketSize(image);
row_size=image->columns*packet_size;
count=image->rows*row_size;
pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
(void) ReadBlob(image,compact_size,compact_pixels);
stream.next_in=(Bytef *)compact_pixels;
stream.avail_in=(uInt) compact_size;
stream.next_out=(Bytef *)pixels;
stream.avail_out=(uInt) count;
if (inflateInit(&stream) == Z_OK)
{
int
ret;
while (stream.avail_out > 0)
{
ret=inflate(&stream,Z_SYNC_FLUSH);
if ((ret != Z_OK) && (ret != Z_STREAM_END))
{
compact_pixels=(unsigned char *) RelinquishMagickMemory(
compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(MagickFalse);
}
}
}
if (compression == ZipWithPrediction)
{
p=pixels;
while (count > 0)
{
length=image->columns;
while (--length)
{
if (packet_size == 2)
{
p[2]+=p[0]+((p[1]+p[3]) >> 8);
p[3]+=p[1];
}
else
*(p+1)+=*p;
p+=packet_size;
}
p+=packet_size;
count-=row_size;
}
}
status=MagickTrue;
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
if (status == MagickFalse)
break;
p+=row_size;
}
compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-772'], 'message': 'Fixed memory leak reported in #462.'</s> |
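The CWE-772 label above points at resources that are never released: in the quoted reader the pixel buffers are freed, but once inflateInit() succeeds nothing ever calls inflateEnd(), and the early-return path drops the zlib state entirely. One plausible shape for the fix (a sketch only; the project's actual patch for #462 may differ in detail):
if (inflateInit(&stream) == Z_OK)
  {
    int
      ret = Z_OK;

    while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          break;                        /* stop, but still clean up below */
      }
    (void) inflateEnd(&stream);         /* release zlib state on every path */
    if ((ret != Z_OK) && (ret != Z_STREAM_END))
      {
        compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
        pixels=(unsigned char *) RelinquishMagickMemory(pixels);
        return(MagickFalse);
      }
  }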
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static Image *ReadMIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define BZipMaxExtent(x) ((x)+((x)/100)+600)
#define LZMAMaxExtent(x) ((x)+((x)/3)+128)
#define ZipMaxExtent(x) ((x)+(((x)+7) >> 3)+(((x)+63) >> 6)+11)
#if defined(MAGICKCORE_BZLIB_DELEGATE)
bz_stream
bzip_info;
#endif
char
id[MagickPathExtent],
keyword[MagickPathExtent],
*options;
const unsigned char
*p;
double
version;
GeometryInfo
geometry_info;
Image
*image;
int
c;
LinkedListInfo
*profiles;
#if defined(MAGICKCORE_LZMA_DELEGATE)
lzma_stream
initialize_lzma = LZMA_STREAM_INIT,
lzma_info;
lzma_allocator
allocator;
#endif
MagickBooleanType
status;
PixelInfo
pixel;
MagickStatusType
flags;
QuantumFormatType
quantum_format;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
compress_extent,
length,
packet_size;
ssize_t
count;
unsigned char
*compress_pixels,
*pixels;
size_t
colors;
ssize_t
y;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
z_stream
zip_info;
#endif
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Decode image header; header terminates one character beyond a ':'.
*/
c=ReadBlobByte(image);
if (c == EOF)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
version=0.0;
(void) version;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
length=MagickPathExtent;
options=AcquireString((char *) NULL);
quantum_format=UndefinedQuantumFormat;
profiles=(LinkedListInfo *) NULL;
colors=0;
image->depth=8UL;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MagickPathExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MagickPathExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment,exception);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
length=MagickPathExtent;
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MagickPathExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while ((isspace((int) ((unsigned char) c)) != 0) && (c != EOF))
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MagickPathExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'a':
case 'A':
{
if (LocaleCompare(keyword,"alpha-trait") == 0)
{
ssize_t
alpha_trait;
alpha_trait=ParseCommandOption(MagickPixelTraitOptions,
MagickFalse,options);
if (alpha_trait < 0)
break;
image->alpha_trait=(PixelTrait) alpha_trait;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->background_color,exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->border_color,exception);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"gravity") == 0)
{
ssize_t
gravity;
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
options);
if (gravity < 0)
break;
image->gravity=(GravityType) gravity;
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MagickPathExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->alpha_trait=matte == 0 ? UndefinedPixelTrait :
BlendPixelTrait;
break;
}
if (LocaleCompare(keyword,"mattecolor") == 0)
{
(void) QueryColorCompliance(options,AllCompliance,
&image->matte_color,exception);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
StringInfo
*profile;
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile,exception);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
if ((LocaleCompare(keyword,"quantum-format") == 0) ||
(LocaleCompare(keyword,"quantum:format") == 0))
{
ssize_t
format;
format=ParseCommandOption(MagickQuantumFormatOptions,
MagickFalse,options);
if (format < 0)
break;
quantum_format=(QuantumFormatType) format;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=
image->chromaticity.red_primary.x;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,
MagickFalse,options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'v':
case 'V':
{
if (LocaleCompare(keyword,"version") == 0)
{
version=StringToDouble(options,(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options,exception);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options,exception);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"ImageMagick") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->compression == UndefinedCompression) ||
(image->colorspace == UndefinedColorspace) ||
(image->columns == 0) || (image->rows == 0))
{
if (image->previous == (Image *) NULL)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageError,"ImproperImageHeader","`%s'",image->filename);
break;
}
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MagickPathExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MagickPathExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MagickPathExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
register unsigned char
*p;
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
(void) count;
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
image->depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
status=AcquireImageColormap(image,colors != 0 ? colors : 256,exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*image->depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
p=colormap;
switch (image->depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Allocate image pixels.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (quantum_format != UndefinedQuantumFormat)
{
status=SetQuantumFormat(image,quantum_info,quantum_format);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packet_size=(size_t) (quantum_info->depth/8);
if (image->storage_class == DirectClass)
packet_size=(size_t) (3*quantum_info->depth/8);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
packet_size=quantum_info->depth/8;
if (image->alpha_trait != UndefinedPixelTrait)
packet_size+=quantum_info->depth/8;
if (image->colorspace == CMYKColorspace)
packet_size+=quantum_info->depth/8;
if (image->compression == RLECompression)
packet_size++;
compress_extent=MagickMax(MagickMax(BZipMaxExtent(packet_size*
image->columns),LZMAMaxExtent(packet_size*image->columns)),
ZipMaxExtent(packet_size*image->columns));
compress_pixels=(unsigned char *) AcquireQuantumMemory(compress_extent,
sizeof(*compress_pixels));
if (compress_pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Read image pixels.
*/
quantum_type=RGBQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=RGBAQuantum;
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=CMYKAQuantum;
}
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
quantum_type=GrayQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=GrayAlphaQuantum;
}
if (image->storage_class == PseudoClass)
{
quantum_type=IndexQuantum;
if (image->alpha_trait != UndefinedPixelTrait)
quantum_type=IndexAlphaQuantum;
}
status=MagickTrue;
GetPixelInfo(image,&pixel);
#if defined(MAGICKCORE_BZLIB_DELEGATE)
(void) ResetMagickMemory(&bzip_info,0,sizeof(bzip_info));
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
(void) ResetMagickMemory(&allocator,0,sizeof(allocator));
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
(void) ResetMagickMemory(&zip_info,0,sizeof(zip_info));
#endif
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
bzip_info.bzalloc=AcquireBZIPMemory;
bzip_info.bzfree=RelinquishBZIPMemory;
bzip_info.opaque=(void *) NULL;
code=BZ2_bzDecompressInit(&bzip_info,(int) image_info->verbose,
MagickFalse);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
code=lzma_auto_decoder(&lzma_info,-1,0);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque=(voidpf) NULL;
code=inflateInit(&zip_info);
if (code != Z_OK)
status=MagickFalse;
break;
}
#endif
case RLECompression:
break;
default:
break;
}
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
bzip_info.next_out=(char *) pixels;
bzip_info.avail_out=(unsigned int) (packet_size*image->columns);
do
{
int
code;
if (bzip_info.avail_in == 0)
{
bzip_info.next_in=(char *) compress_pixels;
length=(size_t) BZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
if (length > compress_extent)
{
(void) BZ2_bzDecompressEnd(&bzip_info);
ThrowReaderException(CorruptImageError,
"UnableToReadImageData");
}
bzip_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) bzip_info.next_in);
}
code=BZ2_bzDecompress(&bzip_info);
if (code < 0)
{
status=MagickFalse;
break;
}
if (code == BZ_STREAM_END)
break;
} while (bzip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
lzma_info.next_out=pixels;
lzma_info.avail_out=packet_size*image->columns;
do
{
int
code;
if (lzma_info.avail_in == 0)
{
lzma_info.next_in=compress_pixels;
length=(size_t) ReadBlobMSBLong(image);
if (length > compress_extent)
{
lzma_end(&lzma_info);
ThrowReaderException(CorruptImageError,
"UnableToReadImageData");
}
lzma_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) lzma_info.next_in);
}
code=lzma_code(&lzma_info,LZMA_RUN);
if (code < 0)
{
status=MagickFalse;
break;
}
if (code == LZMA_STREAM_END)
break;
} while (lzma_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
zip_info.next_out=pixels;
zip_info.avail_out=(uInt) (packet_size*image->columns);
do
{
int
code;
if (zip_info.avail_in == 0)
{
zip_info.next_in=compress_pixels;
length=(size_t) ZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
if (length > compress_extent)
{
(void) inflateEnd(&zip_info);
ThrowReaderException(CorruptImageError,
"UnableToReadImageData");
}
zip_info.avail_in=(unsigned int) ReadBlob(image,length,
zip_info.next_in);
}
code=inflate(&zip_info,Z_SYNC_FLUSH);
if (code < 0)
{
status=MagickFalse;
break;
}
if (code == Z_STREAM_END)
break;
} while (zip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
case RLECompression:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (length == 0)
{
count=ReadBlob(image,packet_size,pixels);
PushRunlengthPacket(image,pixels,&length,&pixel,exception);
}
length--;
if (image->storage_class == PseudoClass)
SetPixelIndex(image,ClampToQuantum(pixel.index),q);
else
{
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,ClampToQuantum(pixel.black),q);
}
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
break;
}
default:
{
count=ReadBlob(image,packet_size*image->columns,pixels);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType)
bzip_info.avail_in),SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=BZ2_bzDecompressEnd(&bzip_info);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
code=lzma_code(&lzma_info,LZMA_FINISH);
if ((code != LZMA_STREAM_END) && (code != LZMA_OK))
status=MagickFalse;
lzma_end(&lzma_info);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) zip_info.avail_in),
SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=inflateEnd(&zip_info);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
default:
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels);
if (((y != (ssize_t) image->rows)) || (status == MagickFalse))
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-617'], 'message': 'https://github.com/ImageMagick/ImageMagick/issues/500'</s> |
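CWE-617 flags a reachable assertion: header fields read straight from the file flow into code that asserts instead of failing gracefully. A defensive sketch for a reader like the one above is to validate the parsed depth before it reaches assert()-guarded quantum code (illustrative only; the actual fix for issue #500 may differ):
/* Sketch: reject depths the decoder cannot represent instead of
 * letting them trip an assertion deeper in the pixel pipeline. */
if ((image->depth != 8) && (image->depth != 16) && (image->depth != 32))
  ThrowReaderException(CorruptImageError,"ImageDepthNotSupported");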
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
struct vmw_surface *srf;
struct vmw_resource *res;
struct vmw_resource *tmp;
union drm_vmw_gb_surface_create_arg *arg =
(union drm_vmw_gb_surface_create_arg *)data;
struct drm_vmw_gb_surface_create_req *req = &arg->req;
struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
uint32_t backup_handle;
if (req->multisample_count != 0)
return -EINVAL;
if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
size = vmw_user_surface_size + 128;
/* Define a surface based on the parameters. */
ret = vmw_surface_gb_priv_define(dev,
size,
req->svga3d_flags,
req->format,
req->drm_surface_flags & drm_vmw_surface_flag_scanout,
req->mip_levels,
req->multisample_count,
req->array_size,
req->base_size,
&srf);
if (unlikely(ret != 0))
return ret;
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
res = &user_srf->srf.res;
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n");
vmw_dmabuf_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
res->backup_size,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
&res->backup,
&user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
&vmw_user_surface_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res);
goto out_unlock;
}
rep->handle = user_srf->prime.base.hash.key;
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->backup->base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
rep->buffer_handle = SVGA3D_INVALID_ID;
}
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'drm/vmwgfx: Make sure backup_handle is always valid
When vmw_gb_surface_define_ioctl() is called with an existing buffer,
we end up returning an uninitialized variable in the backup_handle.
The fix is to first initialize backup_handle to 0 just to be sure, and
second, when a user-provided buffer is found, we will use the
req->buffer_handle as the backup_handle.
Cc: <[email protected]>
Reported-by: Murray McAllister <[email protected]>
Signed-off-by: Sinclair Yeh <[email protected]>
Reviewed-by: Deepak Rawat <[email protected]>'</s> |
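The fix the message describes is mechanical and worth seeing in shape: initialize backup_handle before any branch, and set it from req->buffer_handle only after a user-supplied buffer passes validation, so no stack garbage can reach the reply. A patch-shaped sketch of the lookup branch:
uint32_t backup_handle = 0;     /* never copied out uninitialized */

if (req->buffer_handle != SVGA3D_INVALID_ID) {
    ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                 &res->backup,
                                 &user_srf->backup_base);
    if (ret == 0) {
        if (res->backup->base.num_pages * PAGE_SIZE <
            res->backup_size) {
            DRM_ERROR("Surface backup buffer is too small.\n");
            vmw_dmabuf_unreference(&res->backup);
            ret = -EINVAL;
            goto out_unlock;
        }
        backup_handle = req->buffer_handle;   /* validated user buffer */
    }
}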
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: TProtocol(boost::shared_ptr<TTransport> ptrans)
: ptrans_(ptrans), recursion_depth_(0), recursion_limit_(DEFAULT_RECURSION_LIMIT) {} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: virtual uint32_t skip_virt(TType type) { return ::apache::thrift::protocol::skip(*this, type); } ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: virtual ~TProtocolFactory() {} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: void t_cpp_generator::generate_struct_reader(ofstream& out, t_struct* tstruct, bool pointers) {
if (gen_templates_) {
out << indent() << "template <class Protocol_>" << endl << indent() << "uint32_t "
<< tstruct->get_name() << "::read(Protocol_* iprot) {" << endl;
} else {
indent(out) << "uint32_t " << tstruct->get_name()
<< "::read(::apache::thrift::protocol::TProtocol* iprot) {" << endl;
}
indent_up();
const vector<t_field*>& fields = tstruct->get_members();
vector<t_field*>::const_iterator f_iter;
// Declare stack tmp variables
out << endl << indent() << "uint32_t xfer = 0;" << endl << indent() << "std::string fname;"
<< endl << indent() << "::apache::thrift::protocol::TType ftype;" << endl << indent()
<< "int16_t fid;" << endl << endl << indent() << "xfer += iprot->readStructBegin(fname);"
<< endl << endl << indent() << "using ::apache::thrift::protocol::TProtocolException;" << endl
<< endl;
// Required variables aren't in __isset, so we need tmp vars to check them.
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED)
indent(out) << "bool isset_" << (*f_iter)->get_name() << " = false;" << endl;
}
out << endl;
// Loop over reading in fields
indent(out) << "while (true)" << endl;
scope_up(out);
// Read beginning field marker
indent(out) << "xfer += iprot->readFieldBegin(fname, ftype, fid);" << endl;
// Check for field STOP marker
out << indent() << "if (ftype == ::apache::thrift::protocol::T_STOP) {" << endl << indent()
<< " break;" << endl << indent() << "}" << endl;
if (fields.empty()) {
out << indent() << "xfer += iprot->skip(ftype);" << endl;
} else {
// Switch statement on the field we are reading
indent(out) << "switch (fid)" << endl;
scope_up(out);
// Generate deserialization code for known cases
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
indent(out) << "case " << (*f_iter)->get_key() << ":" << endl;
indent_up();
indent(out) << "if (ftype == " << type_to_enum((*f_iter)->get_type()) << ") {" << endl;
indent_up();
const char* isset_prefix = ((*f_iter)->get_req() != t_field::T_REQUIRED) ? "this->__isset."
: "isset_";
#if 0
// This code throws an exception if the same field is encountered twice.
// We've decided to leave it out for performance reasons.
// TODO(dreiss): Generate this code and "if" it out to make it easier
// for people recompiling thrift to include it.
out <<
indent() << "if (" << isset_prefix << (*f_iter)->get_name() << ")" << endl <<
indent() << " throw TProtocolException(TProtocolException::INVALID_DATA);" << endl;
#endif
if (pointers && !(*f_iter)->get_type()->is_xception()) {
generate_deserialize_field(out, *f_iter, "(*(this->", "))");
} else {
generate_deserialize_field(out, *f_iter, "this->");
}
out << indent() << isset_prefix << (*f_iter)->get_name() << " = true;" << endl;
indent_down();
out << indent() << "} else {" << endl << indent() << " xfer += iprot->skip(ftype);" << endl
<<
// TODO(dreiss): Make this an option when thrift structs
// have a common base class.
// indent() << " throw TProtocolException(TProtocolException::INVALID_DATA);" << endl <<
indent() << "}" << endl << indent() << "break;" << endl;
indent_down();
}
// In the default case we skip the field
out << indent() << "default:" << endl << indent() << " xfer += iprot->skip(ftype);" << endl
<< indent() << " break;" << endl;
scope_down(out);
} //!fields.empty()
// Read field end marker
indent(out) << "xfer += iprot->readFieldEnd();" << endl;
scope_down(out);
out << endl << indent() << "xfer += iprot->readStructEnd();" << endl;
// Throw if any required fields are missing.
// We do this after reading the struct end so that
// there might possibly be a chance of continuing.
out << endl;
for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) {
if ((*f_iter)->get_req() == t_field::T_REQUIRED)
out << indent() << "if (!isset_" << (*f_iter)->get_name() << ')' << endl << indent()
<< " throw TProtocolException(TProtocolException::INVALID_DATA);" << endl;
}
indent(out) << "return xfer;" << endl;
indent_down();
indent(out) << "}" << endl << endl;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20'], 'message': 'THRIFT-3231 CPP: Limit recursion depth to 64
Client: cpp
Patch: Ben Craig <[email protected]>'</s> |
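THRIFT-3231 caps skip() recursion at 64 so that deeply nested container types in hostile input cannot exhaust the stack. The pattern is language-neutral: bump a depth counter on entry, fail past the limit, always decrement on exit. A generic C sketch of the idea (type_is_container() and element_type() are hypothetical helpers; the real fix lives in the C++ TProtocol):
#define RECURSION_LIMIT 64

struct decoder {
    unsigned depth;                 /* current nesting while skipping */
};

static int skip_field(struct decoder *d, int type)
{
    int rc = 0;

    if (++d->depth > RECURSION_LIMIT)
        rc = -1;                    /* Thrift throws DEPTH_LIMIT here */
    else if (type_is_container(type))
        rc = skip_field(d, element_type(type));  /* bounded recursion */

    d->depth--;                     /* unwind on every path */
    return rc;
}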
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ares_parse_naptr_reply (const unsigned char *abuf, int alen,
struct ares_naptr_reply **naptr_out)
{
unsigned int qdcount, ancount, i;
const unsigned char *aptr, *vptr;
int status, rr_type, rr_class, rr_len;
long len;
char *hostname = NULL, *rr_name = NULL;
struct ares_naptr_reply *naptr_head = NULL;
struct ares_naptr_reply *naptr_last = NULL;
struct ares_naptr_reply *naptr_curr;
/* Set *naptr_out to NULL for all failure cases. */
*naptr_out = NULL;
/* Give up if abuf doesn't have room for a header. */
if (alen < HFIXEDSZ)
return ARES_EBADRESP;
/* Fetch the question and answer count from the header. */
qdcount = DNS_HEADER_QDCOUNT (abuf);
ancount = DNS_HEADER_ANCOUNT (abuf);
if (qdcount != 1)
return ARES_EBADRESP;
if (ancount == 0)
return ARES_ENODATA;
/* Expand the name from the question, and skip past the question. */
aptr = abuf + HFIXEDSZ;
status = ares_expand_name (aptr, abuf, alen, &hostname, &len);
if (status != ARES_SUCCESS)
return status;
if (aptr + len + QFIXEDSZ > abuf + alen)
{
ares_free (hostname);
return ARES_EBADRESP;
}
aptr += len + QFIXEDSZ;
/* Examine each answer resource record (RR) in turn. */
for (i = 0; i < ancount; i++)
{
/* Decode the RR up to the data field. */
status = ares_expand_name (aptr, abuf, alen, &rr_name, &len);
if (status != ARES_SUCCESS)
{
break;
}
aptr += len;
if (aptr + RRFIXEDSZ > abuf + alen)
{
status = ARES_EBADRESP;
break;
}
rr_type = DNS_RR_TYPE (aptr);
rr_class = DNS_RR_CLASS (aptr);
rr_len = DNS_RR_LEN (aptr);
aptr += RRFIXEDSZ;
if (aptr + rr_len > abuf + alen)
{
status = ARES_EBADRESP;
break;
}
/* RR must contain at least 7 bytes = 2 x int16 + 3 x name */
if (rr_len < 7)
{
status = ARES_EBADRESP;
break;
}
/* Check if we are really looking at a NAPTR record */
if (rr_class == C_IN && rr_type == T_NAPTR)
{
/* parse the NAPTR record itself */
/* Allocate storage for this NAPTR answer appending it to the list */
naptr_curr = ares_malloc_data(ARES_DATATYPE_NAPTR_REPLY);
if (!naptr_curr)
{
status = ARES_ENOMEM;
break;
}
if (naptr_last)
{
naptr_last->next = naptr_curr;
}
else
{
naptr_head = naptr_curr;
}
naptr_last = naptr_curr;
vptr = aptr;
naptr_curr->order = DNS__16BIT(vptr);
vptr += sizeof(unsigned short);
naptr_curr->preference = DNS__16BIT(vptr);
vptr += sizeof(unsigned short);
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->flags, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->service, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_string(vptr, abuf, alen, &naptr_curr->regexp, &len);
if (status != ARES_SUCCESS)
break;
vptr += len;
status = ares_expand_name(vptr, abuf, alen, &naptr_curr->replacement, &len);
if (status != ARES_SUCCESS)
break;
}
/* Don't lose memory in the next iteration */
ares_free (rr_name);
rr_name = NULL;
/* Move on to the next record */
aptr += rr_len;
}
if (hostname)
ares_free (hostname);
if (rr_name)
ares_free (rr_name);
/* clean up on error */
if (status != ARES_SUCCESS)
{
if (naptr_head)
ares_free_data (naptr_head);
return status;
}
/* everything looks fine, return the data */
*naptr_out = naptr_head;
return ARES_SUCCESS;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'ares_parse_naptr_reply: make buffer length check more accurate
9478908a490a6bf009ba58d81de8c1d06d50a117 introduced a length check
for records parsed by `ares_parse_naptr_reply()`. However, that
function is designed to parse replies which also contain non-NAPTR
records; for A records, the `rr_len > 7` check will fail as there
are only 4 bytes of payload.
In particular, parsing ANY replies for NAPTR records was broken
by that patch.
Fix that by moving the check into the case in which it is already
known that the record is a NAPTR record.'</s> |
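The correction is a relocation rather than new logic: the 7-byte minimum (2 x int16 + 3 x name) only holds for NAPTR payloads, so the check belongs inside the NAPTR branch, where it cannot misfire on shorter records such as A answers in an ANY reply. Sketch of the corrected shape:
/* Check if we are really looking at a NAPTR record */
if (rr_class == C_IN && rr_type == T_NAPTR)
  {
    /* RR must contain at least 7 bytes = 2 x int16 + 3 x name */
    if (rr_len < 7)
      {
        status = ARES_EBADRESP;
        break;
      }
    /* ... parse the NAPTR payload as before ... */
  }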
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
}
__err:
return err;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-200'], 'message': 'ALSA: timer: Fix missing queue indices reset at SNDRV_TIMER_IOCTL_SELECT
snd_timer_user_tselect() reallocates the queue buffer dynamically, but
it forgot to reset its indices. Since the read may happen
concurrently with ioctl and snd_timer_user_tselect() allocates the
buffer via kmalloc(), this may lead to the leak of uninitialized
kernel-space data, as spotted via KMSAN:
BUG: KMSAN: use of unitialized memory in snd_timer_user_read+0x6c4/0xa10
CPU: 0 PID: 1037 Comm: probe Not tainted 4.11.0-rc5+ #2739
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:16
dump_stack+0x143/0x1b0 lib/dump_stack.c:52
kmsan_report+0x12a/0x180 mm/kmsan/kmsan.c:1007
kmsan_check_memory+0xc2/0x140 mm/kmsan/kmsan.c:1086
copy_to_user ./arch/x86/include/asm/uaccess.h:725
snd_timer_user_read+0x6c4/0xa10 sound/core/timer.c:2004
do_loop_readv_writev fs/read_write.c:716
__do_readv_writev+0x94c/0x1380 fs/read_write.c:864
do_readv_writev fs/read_write.c:894
vfs_readv fs/read_write.c:908
do_readv+0x52a/0x5d0 fs/read_write.c:934
SYSC_readv+0xb6/0xd0 fs/read_write.c:1021
SyS_readv+0x87/0xb0 fs/read_write.c:1018
This patch adds the missing reset of queue indices. Together with the
previous fix for the ioctl/read race, we cover the whole problem.
Reported-by: Alexander Potapenko <[email protected]>
Tested-by: Alexander Potapenko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>'</s> |
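
A standalone C model of the one-line fix (the index field names mirror struct snd_timer_user in the kernel source; the allocator is simplified):

#include <stdlib.h>

struct user_queue {
    void *queue;                 /* reallocated on SNDRV_TIMER_IOCTL_SELECT */
    int qhead, qtail, qused;     /* ring-buffer indices read concurrently   */
    int queue_size;
};

static int reallocate_queue(struct user_queue *tu, size_t elem_size)
{
    free(tu->queue);
    tu->queue = calloc((size_t)tu->queue_size, elem_size);
    if (!tu->queue)
        return -1;
    tu->qhead = tu->qtail = tu->qused = 0;  /* the reset the patch adds */
    return 0;
}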
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock)
{
struct grub_ext2_data *data = node->data;
struct grub_ext2_inode *inode = &node->inode;
grub_disk_addr_t blknr = -1;
unsigned int blksz = EXT2_BLOCK_SIZE (data);
int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data);
if (inode->flags & grub_cpu_to_le32_compile_time (EXT4_EXTENTS_FLAG))
{
GRUB_PROPERLY_ALIGNED_ARRAY (buf, EXT2_BLOCK_SIZE(data));
struct grub_ext4_extent_header *leaf;
struct grub_ext4_extent *ext;
int i;
leaf = grub_ext4_find_leaf (data, buf,
(struct grub_ext4_extent_header *) inode->blocks.dir_blocks,
fileblock);
if (! leaf)
{
grub_error (GRUB_ERR_BAD_FS, "invalid extent");
return -1;
}
ext = (struct grub_ext4_extent *) (leaf + 1);
for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++)
{
if (fileblock < grub_le_to_cpu32 (ext[i].block))
break;
}
if (--i >= 0)
{
fileblock -= grub_le_to_cpu32 (ext[i].block);
if (fileblock >= grub_le_to_cpu16 (ext[i].len))
return 0;
else
{
grub_disk_addr_t start;
start = grub_le_to_cpu16 (ext[i].start_hi);
start = (start << 32) + grub_le_to_cpu32 (ext[i].start);
return fileblock + start;
}
}
else
{
grub_error (GRUB_ERR_BAD_FS, "something wrong with extent");
return -1;
}
}
/* Direct blocks. */
if (fileblock < INDIRECT_BLOCKS)
blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]);
/* Indirect. */
else if (fileblock < INDIRECT_BLOCKS + blksz / 4)
{
grub_uint32_t indir[blksz / 4];
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.indir_block))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]);
}
/* Double indirect. */
else if (fileblock < INDIRECT_BLOCKS
+ blksz / 4 * ((grub_disk_addr_t) blksz / 4 + 1))
{
int log_perblock = log2_blksz + 9 - 2;
grub_disk_addr_t rblock = fileblock - (INDIRECT_BLOCKS
+ blksz / 4);
grub_uint32_t indir[blksz / 4];
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.double_indir_block))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (indir[rblock >> log_perblock]))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
blknr = grub_le_to_cpu32 (indir[rblock & ((1 << log_perblock) - 1)]);
}
/* triple indirect. */
else if (fileblock < INDIRECT_BLOCKS + blksz / 4 * ((grub_disk_addr_t) blksz / 4 + 1)
+ ((grub_disk_addr_t) blksz / 4) * ((grub_disk_addr_t) blksz / 4)
* ((grub_disk_addr_t) blksz / 4 + 1))
{
int log_perblock = log2_blksz + 9 - 2;
grub_disk_addr_t rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4
* (blksz / 4 + 1));
grub_uint32_t indir[blksz / 4];
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (inode->blocks.triple_indir_block))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (indir[(rblock >> log_perblock) >> log_perblock]))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
if (grub_disk_read (data->disk,
((grub_disk_addr_t)
grub_le_to_cpu32 (indir[(rblock >> log_perblock) & ((1 << log_perblock) - 1)]))
<< log2_blksz,
0, blksz, indir))
return grub_errno;
blknr = grub_le_to_cpu32 (indir[rblock & ((1 << log_perblock) - 1)]);
}
else
{
grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET,
"ext2fs doesn't support quadruple indirect blocks");
}
return blknr;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': '* grub-core/fs/ext2.c: Remove variable length arrays.'</s> |
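
A short sketch of the change the message describes — moving the block-sized indirect table off the stack (grub_malloc/grub_free are stood in for by malloc/free here; error handling abbreviated):

#include <stdint.h>
#include <stdlib.h>

/* was: grub_uint32_t indir[blksz / 4];  -- a VLA whose size is taken
 * from on-disk data, so a crafted filesystem can exhaust the stack.
 * Allocating the table on the heap bounds stack use and lets the
 * failure be reported instead of crashing. */
static uint32_t *alloc_indirect_table(unsigned int blksz)
{
    uint32_t *indir = malloc(blksz);
    /* caller reads one block into indir, indexes it, then frees it */
    return indir;   /* NULL on allocation failure */
}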
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: grub_ext4_find_leaf (struct grub_ext2_data *data, grub_properly_aligned_t *buf,
struct grub_ext4_extent_header *ext_block,
grub_uint32_t fileblock)
{
struct grub_ext4_extent_idx *index;
while (1)
{
int i;
grub_disk_addr_t block;
index = (struct grub_ext4_extent_idx *) (ext_block + 1);
if (grub_le_to_cpu16(ext_block->magic) != EXT4_EXT_MAGIC)
return 0;
if (ext_block->depth == 0)
return ext_block;
for (i = 0; i < grub_le_to_cpu16 (ext_block->entries); i++)
{
if (fileblock < grub_le_to_cpu32(index[i].block))
break;
}
if (--i < 0)
return 0;
block = grub_le_to_cpu16 (index[i].leaf_hi);
block = (block << 32) | grub_le_to_cpu32 (index[i].leaf);
if (grub_disk_read (data->disk,
block << LOG2_EXT2_BLOCK_SIZE (data),
0, EXT2_BLOCK_SIZE(data), buf))
return 0;
ext_block = (struct grub_ext4_extent_header *) buf;
}
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-119'], 'message': '* grub-core/fs/ext2.c: Remove variable length arrays.'</s> |
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: ptp_unpack_OPL (PTPParams *params, unsigned char* data, MTPProperties **pprops, unsigned int len)
{
uint32_t prop_count = dtoh32a(data);
MTPProperties *props = NULL;
unsigned int offset = 0, i;
if (prop_count == 0) {
*pprops = NULL;
return 0;
}
ptp_debug (params ,"Unpacking MTP OPL, size %d (prop_count %d)", len, prop_count);
data += sizeof(uint32_t);
len -= sizeof(uint32_t);
props = malloc(prop_count * sizeof(MTPProperties));
if (!props) return 0;
for (i = 0; i < prop_count; i++) {
if (len <= 0) {
ptp_debug (params ,"short MTP Object Property List at property %d (of %d)", i, prop_count);
ptp_debug (params ,"device probably needs DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST_ALL", i);
ptp_debug (params ,"or even DEVICE_FLAG_BROKEN_MTPGETOBJPROPLIST", i);
qsort (props, i, sizeof(MTPProperties),_compare_func);
*pprops = props;
return i;
}
props[i].ObjectHandle = dtoh32a(data);
data += sizeof(uint32_t);
len -= sizeof(uint32_t);
props[i].property = dtoh16a(data);
data += sizeof(uint16_t);
len -= sizeof(uint16_t);
props[i].datatype = dtoh16a(data);
data += sizeof(uint16_t);
len -= sizeof(uint16_t);
offset = 0;
ptp_unpack_DPV(params, data, &offset, len, &props[i].propval, props[i].datatype);
data += offset;
len -= offset;
}
qsort (props, prop_count, sizeof(MTPProperties),_compare_func);
*pprops = props;
return prop_count;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-190'], 'message': 'check for an integer overflow in ptp_unpack_OPL'</s> |
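
A minimal sketch of the guard the message calls for (the exact upstream bound may differ; this shows the pattern of validating a wire-supplied count before it feeds an allocation):

#include <stdint.h>
#include <stdlib.h>

typedef struct { uint32_t handle; uint16_t property, datatype; } prop_t;

static prop_t *alloc_props(uint32_t prop_count)
{
    if (prop_count == 0 ||
        prop_count > SIZE_MAX / sizeof(prop_t))  /* multiplication would wrap */
        return NULL;
    return calloc(prop_count, sizeof(prop_t));
}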
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: static TPM_RC StartAuthSession(TSS2_SYS_CONTEXT *sapi_context, SESSION *session )
{
TPM_RC rval;
TPM2B_ENCRYPTED_SECRET key;
char label[] = "ATH";
UINT16 bytes;
int i;
key.t.size = 0;
if( session->nonceOlder.t.size == 0 )
{
/* this is an internal routine to TSS and should be removed */
session->nonceOlder.t.size = GetDigestSize( TPM_ALG_SHA1 );
for( i = 0; i < session->nonceOlder.t.size; i++ )
session->nonceOlder.t.buffer[i] = 0;
}
session->nonceNewer.t.size = session->nonceOlder.t.size;
rval = Tss2_Sys_StartAuthSession( sapi_context, session->tpmKey, session->bind, 0,
&( session->nonceOlder ), &( session->encryptedSalt ), session->sessionType,
&( session->symmetric ), session->authHash, &( session->sessionHandle ),
&( session->nonceNewer ), 0 );
if( rval == TPM_RC_SUCCESS )
{
if( session->tpmKey == TPM_RH_NULL )
session->salt.t.size = 0;
if( session->bind == TPM_RH_NULL )
session->authValueBind.t.size = 0;
if( session->tpmKey == TPM_RH_NULL && session->bind == TPM_RH_NULL )
{
session->sessionKey.b.size = 0;
}
else
{
// Generate the key used as input to the KDF.
bool result = string_bytes_concat_buffer( (TPM2B_MAX_BUFFER *)&key, &( session->authValueBind.b ) );
if (!result)
{
return TSS2_SYS_RC_BAD_VALUE;
}
result = string_bytes_concat_buffer( (TPM2B_MAX_BUFFER *)&key, &( session->salt.b ) );
if (!result)
{
return TSS2_SYS_RC_BAD_VALUE;
}
bytes = GetDigestSize( session->authHash );
if( key.t.size == 0 )
{
session->sessionKey.t.size = 0;
}
else
{
rval = tpm_kdfa(sapi_context, session->authHash, &(key.b), label, &( session->nonceNewer.b ),
&( session->nonceOlder.b ), bytes * 8, (TPM2B_MAX_BUFFER *)&( session->sessionKey ) );
}
if( rval != TPM_RC_SUCCESS )
{
return( TSS2_APP_RC_CREATE_SESSION_KEY_FAILED );
}
}
session->nonceTpmDecrypt.b.size = 0;
session->nonceTpmEncrypt.b.size = 0;
session->nvNameChanged = 0;
}
return rval;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-522', 'CWE-200'], 'message': 'kdfa: use openssl for hmac not tpm
While not reachable in the current code base tools, a potential
security bug lurked in tpm_kdfa().
If using that routine for an hmac authorization, the hmac was
calculated using the tpm. A user of an object wishing to
authenticate via hmac, would expect that the password is never
sent to the tpm. However, since the hmac calculation relies on
password, and is performed by the tpm, the password ends up
being sent in plain text to the tpm.
The fix is to use openssl to generate the hmac on the host.
Fixes: CVE-2017-7524
Signed-off-by: William Roberts <[email protected]>'</s> |
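
A hedged sketch of the remedy: compute the HMAC on the host with OpenSSL so the auth value never leaves the process (simplified — the real tpm_kdfa() wraps this primitive in a counter-mode KDF):

#include <openssl/evp.h>
#include <openssl/hmac.h>

static int host_hmac_sha256(const unsigned char *key, int key_len,
                            const unsigned char *data, size_t data_len,
                            unsigned char out[EVP_MAX_MD_SIZE],
                            unsigned int *out_len)
{
    /* The key (derived from the password) stays in host memory; nothing
     * secret is sent to the TPM just to drive its HMAC engine. */
    return HMAC(EVP_sha256(), key, key_len, data, data_len, out, out_len)
           ? 0 : -1;
}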
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: secret_core_crt (gcry_mpi_t M, gcry_mpi_t C,
gcry_mpi_t D, unsigned int Nlimbs,
gcry_mpi_t P, gcry_mpi_t Q, gcry_mpi_t U)
{
gcry_mpi_t m1 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t m2 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t h = mpi_alloc_secure ( Nlimbs + 1 );
/* m1 = c ^ (d mod (p-1)) mod p */
mpi_sub_ui ( h, P, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m1, C, h, P );
/* m2 = c ^ (d mod (q-1)) mod q */
mpi_sub_ui ( h, Q, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m2, C, h, Q );
/* h = u * ( m2 - m1 ) mod q */
mpi_sub ( h, m2, m1 );
if ( mpi_has_sign ( h ) )
mpi_add ( h, h, Q );
mpi_mulm ( h, U, h, Q );
/* m = m1 + h * p */
mpi_mul ( h, h, P );
mpi_add ( M, m1, h );
mpi_free ( h );
mpi_free ( m1 );
mpi_free ( m2 );
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-310'], 'message': 'rsa: Add exponent blinding.
* cipher/rsa.c (secret_core_crt): Blind secret D with randomized
nonce R for mpi_powm computation.
--
Co-authored-by: Werner Koch <[email protected]>
Signed-off-by: NIIBE Yutaka <[email protected]>
The paper describing attack: https://eprint.iacr.org/2017/627
Sliding right into disaster: Left-to-right sliding windows leak
by Daniel J. Bernstein and Joachim Breitner and Daniel Genkin and
Leon Groot Bruinderink and Nadia Heninger and Tanja Lange and
Christine van Vredendaal and Yuval Yarom
It is well known that constant-time implementations of modular
exponentiation cannot use sliding windows. However, software
libraries such as Libgcrypt, used by GnuPG, continue to use sliding
windows. It is widely believed that, even if the complete pattern of
squarings and multiplications is observed through a side-channel
attack, the number of exponent bits leaked is not sufficient to
carry out a full key-recovery attack against RSA. Specifically,
4-bit sliding windows leak only 40% of the bits, and 5-bit sliding
windows leak only 33% of the bits.
In this paper we demonstrate a complete break of RSA-1024 as
implemented in Libgcrypt. Our attack makes essential use of the fact
that Libgcrypt uses the left-to-right method for computing the
sliding-window expansion. We show for the first time that the
direction of the encoding matters: the pattern of squarings and
multiplications in left-to-right sliding windows leaks significantly
more information about exponent bits than for right-to-left. We show
how to incorporate this additional information into the
Heninger-Shacham algorithm for partial key reconstruction, and use
it to obtain very efficient full key recovery for RSA-1024. We also
provide strong evidence that the same attack works for RSA-2048 with
only moderately more computation.
Exponent blinding is a kind of workaround to add noise. Signal (leak)
is still there for non-constant-time implementation.'</s> |
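
A sketch of the blinding step in the style of the code above (the randomize helper and the 32-bit nonce width are assumptions, not quoted from the patch):

gcry_mpi_t e = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t r = mpi_alloc_secure ( 1 );
_gcry_mpi_randomize ( r, 32, GCRY_WEAK_RANDOM ); /* assumed helper/width */

mpi_sub_ui ( h, P, 1 );     /* h = p - 1                                */
mpi_fdiv_r ( e, D, h );     /* e = d mod (p - 1), as before             */
mpi_mul    ( r, r, h );     /* r = r * (p - 1)                          */
mpi_add    ( e, e, r );     /* e += r*(p-1): same result mod p, but the */
                            /* window pattern differs on every call     */
mpi_powm   ( m1, C, e, P );
mpi_free ( r );
mpi_free ( e );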
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: _gcry_mpi_powm (gcry_mpi_t res,
gcry_mpi_t base, gcry_mpi_t expo, gcry_mpi_t mod)
{
/* Pointer to the limbs of the arguments, their size and signs. */
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
int msign, bsign, rsign;
/* Flags telling the secure allocation status of the arguments. */
int esec, msec, bsec;
/* Size of the result including space for temporary values. */
mpi_size_t size;
/* Helper. */
int mod_shift_cnt;
int negative_result;
mpi_ptr_t mp_marker = NULL;
mpi_ptr_t bp_marker = NULL;
mpi_ptr_t ep_marker = NULL;
mpi_ptr_t xp_marker = NULL;
unsigned int mp_nlimbs = 0;
unsigned int bp_nlimbs = 0;
unsigned int ep_nlimbs = 0;
unsigned int xp_nlimbs = 0;
mpi_ptr_t precomp[SIZE_PRECOMP]; /* Pre-computed array: BASE^1, ^3, ^5, ... */
mpi_size_t precomp_size[SIZE_PRECOMP];
mpi_size_t W;
mpi_ptr_t base_u;
mpi_size_t base_u_size;
mpi_size_t max_u_size;
esize = expo->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
msign = mod->sign;
ep = expo->d;
MPN_NORMALIZE(ep, esize);
if (esize * BITS_PER_MPI_LIMB > 512)
W = 5;
else if (esize * BITS_PER_MPI_LIMB > 256)
W = 4;
else if (esize * BITS_PER_MPI_LIMB > 128)
W = 3;
else if (esize * BITS_PER_MPI_LIMB > 64)
W = 2;
else
W = 1;
esec = mpi_is_secure(expo);
msec = mpi_is_secure(mod);
bsec = mpi_is_secure(base);
rp = res->d;
if (!msize)
_gcry_divide_by_zero();
if (!esize)
{
/* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending
on if MOD equals 1. */
res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
if (res->nlimbs)
{
RESIZE_IF_NEEDED (res, 1);
rp = res->d;
rp[0] = 1;
}
res->sign = 0;
goto leave;
}
/* Normalize MOD (i.e. make its most significant bit set) as
required by mpn_divrem. This will make the intermediate values
in the calculation slightly larger, but the correct result is
obtained after a final reduction using the original MOD value. */
mp_nlimbs = msec? msize:0;
mp = mp_marker = mpi_alloc_limb_space(msize, msec);
count_leading_zeros (mod_shift_cnt, mod->d[msize-1]);
if (mod_shift_cnt)
_gcry_mpih_lshift (mp, mod->d, msize, mod_shift_cnt);
else
MPN_COPY( mp, mod->d, msize );
bsize = base->nlimbs;
bsign = base->sign;
if (bsize > msize)
{
/* The base is larger than the module. Reduce it.
Allocate (BSIZE + 1) with space for remainder and quotient.
(The quotient is (bsize - msize + 1) limbs.) */
bp_nlimbs = bsec ? (bsize + 1):0;
bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
MPN_COPY ( bp, base->d, bsize );
/* We don't care about the quotient, store it above the
* remainder, at BP + MSIZE. */
_gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize );
bsize = msize;
/* Canonicalize the base, since we are going to multiply with it
quite a few times. */
MPN_NORMALIZE( bp, bsize );
}
else
bp = base->d;
if (!bsize)
{
res->nlimbs = 0;
res->sign = 0;
goto leave;
}
/* Make BASE, EXPO not overlap with RES. We don't need to check MOD
because that has already been copied to the MP var. */
if ( rp == bp )
{
/* RES and BASE are identical. Allocate temp. space for BASE. */
gcry_assert (!bp_marker);
bp_nlimbs = bsec? bsize:0;
bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
MPN_COPY(bp, rp, bsize);
}
if ( rp == ep )
{
/* RES and EXPO are identical. Allocate temp. space for EXPO. */
ep_nlimbs = esec? esize:0;
ep = ep_marker = mpi_alloc_limb_space( esize, esec );
MPN_COPY(ep, rp, esize);
}
/* Copy base to the result. */
if (res->alloced < size)
{
mpi_resize (res, size);
rp = res->d;
}
/* Main processing. */
{
mpi_size_t i, j, k;
mpi_ptr_t xp;
mpi_size_t xsize;
int c;
mpi_limb_t e;
mpi_limb_t carry_limb;
struct karatsuba_ctx karactx;
mpi_ptr_t tp;
xp_nlimbs = msec? (2 * (msize + 1)):0;
xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );
memset( &karactx, 0, sizeof karactx );
negative_result = (ep[0] & 1) && bsign;
/* Precompute PRECOMP[], BASE^(2 * i + 1), BASE^1, ^3, ^5, ... */
if (W > 1) /* X := BASE^2 */
mul_mod (xp, &xsize, bp, bsize, bp, bsize, mp, msize, &karactx);
base_u = precomp[0] = mpi_alloc_limb_space (bsize, esec);
base_u_size = max_u_size = precomp_size[0] = bsize;
MPN_COPY (precomp[0], bp, bsize);
for (i = 1; i < (1 << (W - 1)); i++)
{ /* PRECOMP[i] = BASE^(2 * i + 1) */
if (xsize >= base_u_size)
mul_mod (rp, &rsize, xp, xsize, base_u, base_u_size,
mp, msize, &karactx);
else
mul_mod (rp, &rsize, base_u, base_u_size, xp, xsize,
mp, msize, &karactx);
base_u = precomp[i] = mpi_alloc_limb_space (rsize, esec);
base_u_size = precomp_size[i] = rsize;
if (max_u_size < base_u_size)
max_u_size = base_u_size;
MPN_COPY (precomp[i], rp, rsize);
}
base_u = mpi_alloc_limb_space (max_u_size, esec);
MPN_ZERO (base_u, max_u_size);
i = esize - 1;
/* Main loop.
Make the result be pointed to alternately by XP and RP. This
helps us avoid block copying, which would otherwise be
necessary with the overlap restrictions of
_gcry_mpih_divmod. With 50% probability the result after this
loop will be in the area originally pointed by RP (==RES->d),
and with 50% probability in the area originally pointed to by XP. */
rsign = 0;
if (W == 1)
{
rsize = bsize;
}
else
{
rsize = msize;
MPN_ZERO (rp, rsize);
}
MPN_COPY ( rp, bp, bsize );
e = ep[i];
count_leading_zeros (c, e);
e = (e << c) << 1;
c = BITS_PER_MPI_LIMB - 1 - c;
j = 0;
for (;;)
if (e == 0)
{
j += c;
if ( --i < 0 )
break;
e = ep[i];
c = BITS_PER_MPI_LIMB;
}
else
{
int c0;
mpi_limb_t e0;
count_leading_zeros (c0, e);
e = (e << c0);
c -= c0;
j += c0;
e0 = (e >> (BITS_PER_MPI_LIMB - W));
if (c >= W)
c0 = 0;
else
{
if ( --i < 0 )
{
e0 = (e >> (BITS_PER_MPI_LIMB - c));
j += c - W;
goto last_step;
}
else
{
c0 = c;
e = ep[i];
c = BITS_PER_MPI_LIMB;
e0 |= (e >> (BITS_PER_MPI_LIMB - (W - c0)));
}
}
e = e << (W - c0);
c -= (W - c0);
last_step:
count_trailing_zeros (c0, e0);
e0 = (e0 >> c0) >> 1;
/*
* base_u <= precomp[e0]
* base_u_size <= precomp_size[e0]
*/
base_u_size = 0;
for (k = 0; k < (1<< (W - 1)); k++)
{
struct gcry_mpi w, u;
w.alloced = w.nlimbs = precomp_size[k];
u.alloced = u.nlimbs = precomp_size[k];
w.sign = u.sign = 0;
w.flags = u.flags = 0;
w.d = base_u;
u.d = precomp[k];
mpi_set_cond (&w, &u, k == e0);
base_u_size |= ( precomp_size[k] & ((mpi_size_t)0 - (k == e0)) );
}
for (j += W - c0; j >= 0; j--)
{
mul_mod (xp, &xsize, rp, rsize,
j == 0 ? base_u : rp, j == 0 ? base_u_size : rsize,
mp, msize, &karactx);
tp = rp; rp = xp; xp = tp;
rsize = xsize;
}
j = c0;
if ( i < 0 )
break;
}
while (j--)
{
mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
tp = rp; rp = xp; xp = tp;
rsize = xsize;
}
/* We shifted MOD, the modulo reduction argument, left
MOD_SHIFT_CNT steps. Adjust the result by reducing it with the
original MOD.
Also make sure the result is put in RES->d (where it already
might be, see above). */
if ( mod_shift_cnt )
{
carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt);
rp = res->d;
if ( carry_limb )
{
rp[rsize] = carry_limb;
rsize++;
}
}
else if (res->d != rp)
{
MPN_COPY (res->d, rp, rsize);
rp = res->d;
}
if ( rsize >= msize )
{
_gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize);
rsize = msize;
}
/* Remove any leading zero words from the result. */
if ( mod_shift_cnt )
_gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt);
MPN_NORMALIZE (rp, rsize);
_gcry_mpih_release_karatsuba_ctx (&karactx );
for (i = 0; i < (1 << (W - 1)); i++)
_gcry_mpi_free_limb_space( precomp[i], esec ? precomp_size[i] : 0 );
_gcry_mpi_free_limb_space (base_u, esec ? max_u_size : 0);
}
/* Fixup for negative results. */
if ( negative_result && rsize )
{
if ( mod_shift_cnt )
_gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt);
_gcry_mpih_sub( rp, mp, msize, rp, rsize);
rsize = msize;
rsign = msign;
MPN_NORMALIZE(rp, rsize);
}
gcry_assert (res->d == rp);
res->nlimbs = rsize;
res->sign = rsign;
leave:
if (mp_marker)
_gcry_mpi_free_limb_space( mp_marker, mp_nlimbs );
if (bp_marker)
_gcry_mpi_free_limb_space( bp_marker, bp_nlimbs );
if (ep_marker)
_gcry_mpi_free_limb_space( ep_marker, ep_nlimbs );
if (xp_marker)
_gcry_mpi_free_limb_space( xp_marker, xp_nlimbs );
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-310'], 'message': 'Same computation for square and multiply.
* mpi/mpi-pow.c (_gcry_mpi_powm): Compare msize for max_u_size. Move
the assignment to base_u into the loop. Copy content refered by RP to
BASE_U except the last of the loop.
--
Signed-off-by: NIIBE Yutaka <[email protected]>'</s> |
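
A standalone model of the uniformity idea in the message (not the limb-level patch itself): whichever operand the exponent window selects, the same masked copy is performed, so squarings and multiplications touch memory identically:

#include <stddef.h>
#include <stdint.h>

static void ct_select_copy(uint64_t *dst, const uint64_t *a,
                           const uint64_t *b, size_t n, int pick_b)
{
    uint64_t mask = (uint64_t)0 - (uint64_t)(pick_b != 0); /* all-0s or all-1s */
    for (size_t i = 0; i < n; i++)
        dst[i] = (a[i] & ~mask) | (b[i] & mask);  /* no data-dependent branch */
}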
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
if (!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
}
return nfsd4_layout_ops[layout_type];
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20', 'CWE-129'], 'message': 'nfsd: fix undefined behavior in nfsd4_layout_verify
UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34
shift exponent 128 is too large for 32-bit type 'int'
Depending on compiler+architecture, this may cause the check for
layout_type to succeed for overly large values (which seems to be the
case with amd64). The large value will be later used in de-referencing
nfsd4_layout_ops for function pointers.
Reported-by: Jani Tuovila <[email protected]>
Signed-off-by: Ari Kauppi <[email protected]>
[[email protected]: use LAYOUT_TYPE_MAX instead of 32]
Cc: [email protected]
Reviewed-by: Dan Carpenter <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: J. Bruce Fields <[email protected]>'</s> |
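
A minimal sketch of the added bound (the value of LAYOUT_TYPE_MAX here is illustrative; the kernel uses its own enum bound): the index is range-checked before it is ever used as a shift amount:

#define LAYOUT_TYPE_MAX 6u   /* illustrative value */

static int layout_type_ok(unsigned int layout_type, unsigned int ex_layout_types)
{
    if (layout_type >= LAYOUT_TYPE_MAX)      /* the missing range check */
        return 0;                            /* avoids 1 << 128 (UB)    */
    return (ex_layout_types & (1u << layout_type)) != 0;
}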
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_layoutget *lgp)
{
struct xdr_stream *xdr = &resp->xdr;
const struct nfsd4_layout_ops *ops =
nfsd4_layout_ops[lgp->lg_layout_type];
__be32 *p;
dprintk("%s: err %d\n", __func__, nfserr);
if (nfserr)
goto out;
nfserr = nfserr_resource;
p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
if (!p)
goto out;
*p++ = cpu_to_be32(1); /* we always set return-on-close */
*p++ = cpu_to_be32(lgp->lg_sid.si_generation);
p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
sizeof(stateid_opaque_t));
*p++ = cpu_to_be32(1); /* we always return a single layout */
p = xdr_encode_hyper(p, lgp->lg_seg.offset);
p = xdr_encode_hyper(p, lgp->lg_seg.length);
*p++ = cpu_to_be32(lgp->lg_seg.iomode);
*p++ = cpu_to_be32(lgp->lg_layout_type);
nfserr = ops->encode_layoutget(xdr, lgp);
out:
kfree(lgp->lg_content);
return nfserr;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-20', 'CWE-129'], 'message': 'nfsd: encoders mustn't use unitialized values in error cases
In error cases, lgp->lg_layout_type may be out of bounds; so we
shouldn't be using it until after the check of nfserr.
This was seen to crash nfsd threads when the server receives a LAYOUTGET
request with a large layout type.
GETDEVICEINFO has the same problem.
Reported-by: Ari Kauppi <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: J. Bruce Fields <[email protected]>'</s> |
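
A standalone model of the reordering (types and names are illustrative): the table lookup with the possibly-unvalidated index happens only after the error check:

#include <stddef.h>

typedef int (*encode_fn)(void *xdr, void *args);

static int encode_with_ops(const encode_fn *ops, size_t nops, int nfserr,
                           unsigned int layout_type, void *xdr, void *args)
{
    if (nfserr)
        return nfserr;            /* layout_type may be garbage on errors */
    if (layout_type >= nops || !ops[layout_type])
        return -1;                /* defensive bound, illustrative        */
    return ops[layout_type](xdr, args);
}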
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: int bdecode(char const* start, char const* end, bdecode_node& ret
, error_code& ec, int* error_pos, int depth_limit, int token_limit)
{
ec.clear();
ret.clear();
if (end - start > bdecode_token::max_offset)
{
if (error_pos) *error_pos = 0;
ec = bdecode_errors::limit_exceeded;
return -1;
}
// this is the stack of bdecode_token indices, into m_tokens.
// sp is the stack pointer, as index into the array, stack
int sp = 0;
stack_frame* stack = TORRENT_ALLOCA(stack_frame, depth_limit);
char const* const orig_start = start;
if (start == end)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
while (start <= end)
{
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (sp >= depth_limit)
TORRENT_FAIL_BDECODE(bdecode_errors::depth_exceeded);
--token_limit;
if (token_limit < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
// look for a new token
const char t = *start;
const int current_frame = sp;
// if we're currently parsing a dictionary, assert that
// every other node is a string.
if (current_frame > 0
&& ret.m_tokens[stack[current_frame-1].token].type == bdecode_token::dict)
{
if (stack[current_frame-1].state == 0)
{
// the current parent is a dict and we are parsing a key.
// only allow a digit (for a string) or 'e' to terminate
if (!numeric(t) && t != 'e')
TORRENT_FAIL_BDECODE(bdecode_errors::expected_digit);
}
}
switch (t)
{
case 'd':
stack[sp++] = ret.m_tokens.size();
// we push it into the stack so that we know where to fill
// in the next_node field once we pop this node off the stack.
// i.e. get to the node following the dictionary in the buffer
ret.m_tokens.push_back(bdecode_token(start - orig_start
, bdecode_token::dict));
++start;
break;
case 'l':
stack[sp++] = ret.m_tokens.size();
// we push it into the stack so that we know where to fill
// in the next_node field once we pop this node off the stack.
// i.e. get to the node following the list in the buffer
ret.m_tokens.push_back(bdecode_token(start - orig_start
, bdecode_token::list));
++start;
break;
case 'i':
{
char const* int_start = start;
bdecode_errors::error_code_enum e = bdecode_errors::no_error;
// +1 here to point to the first digit, rather than 'i'
start = check_integer(start + 1, end, e);
if (e)
{
// in order to gracefully terminate the tree,
// make sure the end of the previous token is set correctly
if (error_pos) *error_pos = start - orig_start;
error_pos = NULL;
start = int_start;
TORRENT_FAIL_BDECODE(e);
}
ret.m_tokens.push_back(bdecode_token(int_start - orig_start
, 1, bdecode_token::integer, 1));
TORRENT_ASSERT(*start == 'e');
// skip 'e'
++start;
break;
}
case 'e':
{
// this is the end of a list or dict
if (sp == 0)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (sp > 0
&& ret.m_tokens[stack[sp-1].token].type == bdecode_token::dict
&& stack[sp-1].state == 1)
{
// this means we're parsing a dictionary and about to parse a
// value associated with a key. Instead, we got a termination
TORRENT_FAIL_BDECODE(bdecode_errors::expected_value);
}
// insert the end-of-sequence token
ret.m_tokens.push_back(bdecode_token(start - orig_start, 1
, bdecode_token::end));
// and back-patch the start of this sequence with the offset
// to the next token we'll insert
int top = stack[sp-1].token;
// subtract the token's own index, since this is a relative
// offset
if (ret.m_tokens.size() - top > bdecode_token::max_next_item)
TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
ret.m_tokens[top].next_item = ret.m_tokens.size() - top;
// and pop it from the stack.
--sp;
++start;
break;
}
default:
{
// this is the case for strings. The start character is any
// numeric digit
if (!numeric(t))
TORRENT_FAIL_BDECODE(bdecode_errors::expected_value);
boost::int64_t len = t - '0';
char const* str_start = start;
++start;
bdecode_errors::error_code_enum e = bdecode_errors::no_error;
start = parse_int(start, end, ':', len, e);
if (e)
TORRENT_FAIL_BDECODE(e);
// remaining buffer size excluding ':'
const ptrdiff_t buff_size = end - start - 1;
if (len > buff_size)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (len < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::overflow);
// skip ':'
++start;
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
// the bdecode_token only has 8 bits to keep the header size
// in. If it overflows, fail!
if (start - str_start - 2 > detail::bdecode_token::max_header)
TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
ret.m_tokens.push_back(bdecode_token(str_start - orig_start
, 1, bdecode_token::string, start - str_start));
start += len;
break;
}
}
if (current_frame > 0
&& ret.m_tokens[stack[current_frame-1].token].type == bdecode_token::dict)
{
// the next item we parse is the opposite
stack[current_frame-1].state = ~stack[current_frame-1].state;
}
// this terminates the top level node, we're done!
if (sp == 0) break;
}
done:
// if parse failed, sp will be greater than 1
// unwind the stack by inserting terminator to make whatever we have
// so far valid
while (sp > 0) {
TORRENT_ASSERT(ec);
--sp;
// we may need to insert a dummy token to properly terminate the tree,
// in case we just parsed a key to a dict and failed in the value
if (ret.m_tokens[stack[sp].token].type == bdecode_token::dict
&& stack[sp].state == 1)
{
// insert an empty dictionary as the value
ret.m_tokens.push_back(bdecode_token(start - orig_start
, 2, bdecode_token::dict));
ret.m_tokens.push_back(bdecode_token(start - orig_start
, bdecode_token::end));
}
int top = stack[sp].token;
TORRENT_ASSERT(ret.m_tokens.size() - top <= bdecode_token::max_next_item);
ret.m_tokens[top].next_item = ret.m_tokens.size() - top;
ret.m_tokens.push_back(bdecode_token(start - orig_start, 1, bdecode_token::end));
}
ret.m_tokens.push_back(bdecode_token(start - orig_start, 0
, bdecode_token::end));
ret.m_token_idx = 0;
ret.m_buffer = orig_start;
ret.m_buffer_size = start - orig_start;
ret.m_root_tokens = &ret.m_tokens[0];
return ec ? -1 : 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-125'], 'message': 'fix out-of-bounds read in bdecode
Fixes #2099'</s> |
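
The message is terse, so the following is only a plausible shape of such a guard (hedged; not the upstream diff): validate the declared string length against the bytes actually remaining before the cursor advances:

#include <stdint.h>

static int string_payload_fits(const char *start, const char *end,
                               int64_t len)
{
    if (len < 0)
        return 0;                 /* length parse overflowed            */
    if (len > end - start)
        return 0;                 /* would read past the buffer's end   */
    return 1;                     /* safe: start += len stays in bounds */
}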
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array.
Code: unicode_unfold_key(OnigCodePoint code)
{
static const struct ByUnfoldKey wordlist[] =
{
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0},
{0x1040a, 3267, 1},
{0x1e0a, 1727, 1},
{0x040a, 1016, 1},
{0x010a, 186, 1},
{0x1f0a, 2088, 1},
{0x2c0a, 2451, 1},
{0x0189, 619, 1},
{0x1f89, 134, 2},
{0x1f85, 154, 2},
{0x0389, 733, 1},
{0x03ff, 724, 1},
{0xab89, 1523, 1},
{0xab85, 1511, 1},
{0x10c89, 3384, 1},
{0x10c85, 3372, 1},
{0x1e84, 1911, 1},
{0x03f5, 752, 1},
{0x0184, 360, 1},
{0x1f84, 149, 2},
{0x2c84, 2592, 1},
{0x017d, 351, 1},
{0x1ff3, 96, 2},
{0xab84, 1508, 1},
{0xa784, 3105, 1},
{0x10c84, 3369, 1},
{0xab7d, 1487, 1},
{0xa77d, 1706, 1},
{0x1e98, 38, 2},
{0x0498, 1106, 1},
{0x0198, 375, 1},
{0x1f98, 169, 2},
{0x2c98, 2622, 1},
{0x0398, 762, 1},
{0xa684, 2940, 1},
{0xab98, 1568, 1},
{0xa798, 3123, 1},
{0x10c98, 3429, 1},
{0x050a, 1277, 1},
{0x1ffb, 2265, 1},
{0x1e96, 16, 2},
{0x0496, 1103, 1},
{0x0196, 652, 1},
{0x1f96, 199, 2},
{0x2c96, 2619, 1},
{0x0396, 756, 1},
{0xa698, 2970, 1},
{0xab96, 1562, 1},
{0xa796, 3120, 1},
{0x10c96, 3423, 1},
{0x1feb, 2259, 1},
{0x2ceb, 2736, 1},
{0x1e90, 1929, 1},
{0x0490, 1094, 1},
{0x0190, 628, 1},
{0x1f90, 169, 2},
{0x2c90, 2610, 1},
{0x0390, 25, 3},
{0xa696, 2967, 1},
{0xab90, 1544, 1},
{0xa790, 3114, 1},
{0x10c90, 3405, 1},
{0x01d7, 444, 1},
{0x1fd7, 31, 3},
{0x1ea6, 1947, 1},
{0x04a6, 1127, 1},
{0x01a6, 676, 1},
{0x1fa6, 239, 2},
{0x2ca6, 2643, 1},
{0x03a6, 810, 1},
{0xa690, 2958, 1},
{0xaba6, 1610, 1},
{0xa7a6, 3144, 1},
{0x10ca6, 3471, 1},
{0x1ea4, 1944, 1},
{0x04a4, 1124, 1},
{0x01a4, 390, 1},
{0x1fa4, 229, 2},
{0x2ca4, 2640, 1},
{0x03a4, 804, 1},
{0x10a6, 2763, 1},
{0xaba4, 1604, 1},
{0xa7a4, 3141, 1},
{0x10ca4, 3465, 1},
{0x1ea0, 1938, 1},
{0x04a0, 1118, 1},
{0x01a0, 384, 1},
{0x1fa0, 209, 2},
{0x2ca0, 2634, 1},
{0x03a0, 792, 1},
{0x10a4, 2757, 1},
{0xaba0, 1592, 1},
{0xa7a0, 3135, 1},
{0x10ca0, 3453, 1},
{0x1eb2, 1965, 1},
{0x04b2, 1145, 1},
{0x01b2, 694, 1},
{0x1fb2, 249, 2},
{0x2cb2, 2661, 1},
{0x03fd, 718, 1},
{0x10a0, 2745, 1},
{0xabb2, 1646, 1},
{0xa7b2, 703, 1},
{0x10cb2, 3507, 1},
{0x1eac, 1956, 1},
{0x04ac, 1136, 1},
{0x01ac, 396, 1},
{0x1fac, 229, 2},
{0x2cac, 2652, 1},
{0x0537, 1352, 1},
{0x10b2, 2799, 1},
{0xabac, 1628, 1},
{0xa7ac, 637, 1},
{0x10cac, 3489, 1},
{0x1eaa, 1953, 1},
{0x04aa, 1133, 1},
{0x00dd, 162, 1},
{0x1faa, 219, 2},
{0x2caa, 2649, 1},
{0x03aa, 824, 1},
{0x10ac, 2781, 1},
{0xabaa, 1622, 1},
{0xa7aa, 646, 1},
{0x10caa, 3483, 1},
{0x1ea8, 1950, 1},
{0x04a8, 1130, 1},
{0x020a, 517, 1},
{0x1fa8, 209, 2},
{0x2ca8, 2646, 1},
{0x03a8, 817, 1},
{0x10aa, 2775, 1},
{0xaba8, 1616, 1},
{0xa7a8, 3147, 1},
{0x10ca8, 3477, 1},
{0x1ea2, 1941, 1},
{0x04a2, 1121, 1},
{0x01a2, 387, 1},
{0x1fa2, 219, 2},
{0x2ca2, 2637, 1},
{0x118a6, 3528, 1},
{0x10a8, 2769, 1},
{0xaba2, 1598, 1},
{0xa7a2, 3138, 1},
{0x10ca2, 3459, 1},
{0x2ced, 2739, 1},
{0x1fe9, 2283, 1},
{0x1fe7, 47, 3},
{0x1eb0, 1962, 1},
{0x04b0, 1142, 1},
{0x118a4, 3522, 1},
{0x10a2, 2751, 1},
{0x2cb0, 2658, 1},
{0x03b0, 41, 3},
{0x1fe3, 41, 3},
{0xabb0, 1640, 1},
{0xa7b0, 706, 1},
{0x10cb0, 3501, 1},
{0x01d9, 447, 1},
{0x1fd9, 2277, 1},
{0x118a0, 3510, 1},
{0x00df, 24, 2},
{0x00d9, 150, 1},
{0xab77, 1469, 1},
{0x10b0, 2793, 1},
{0x1eae, 1959, 1},
{0x04ae, 1139, 1},
{0x01ae, 685, 1},
{0x1fae, 239, 2},
{0x2cae, 2655, 1},
{0x118b2, 3564, 1},
{0xab73, 1457, 1},
{0xabae, 1634, 1},
{0xab71, 1451, 1},
{0x10cae, 3495, 1},
{0x1e2a, 1775, 1},
{0x042a, 968, 1},
{0x012a, 234, 1},
{0x1f2a, 2130, 1},
{0x2c2a, 2547, 1},
{0x118ac, 3546, 1},
{0x10ae, 2787, 1},
{0x0535, 1346, 1},
{0xa72a, 2988, 1},
{0x1e9a, 0, 2},
{0x049a, 1109, 1},
{0xff37, 3225, 1},
{0x1f9a, 179, 2},
{0x2c9a, 2625, 1},
{0x039a, 772, 1},
{0x118aa, 3540, 1},
{0xab9a, 1574, 1},
{0xa79a, 3126, 1},
{0x10c9a, 3435, 1},
{0x1e94, 1935, 1},
{0x0494, 1100, 1},
{0x0194, 640, 1},
{0x1f94, 189, 2},
{0x2c94, 2616, 1},
{0x0394, 749, 1},
{0x118a8, 3534, 1},
{0xab94, 1556, 1},
{0xa69a, 2973, 1},
{0x10c94, 3417, 1},
{0x10402, 3243, 1},
{0x1e02, 1715, 1},
{0x0402, 992, 1},
{0x0102, 174, 1},
{0x0533, 1340, 1},
{0x2c02, 2427, 1},
{0x118a2, 3516, 1},
{0x052a, 1325, 1},
{0xa694, 2964, 1},
{0x1e92, 1932, 1},
{0x0492, 1097, 1},
{0x2165, 2307, 1},
{0x1f92, 179, 2},
{0x2c92, 2613, 1},
{0x0392, 742, 1},
{0x2161, 2295, 1},
{0xab92, 1550, 1},
{0xa792, 3117, 1},
{0x10c92, 3411, 1},
{0x118b0, 3558, 1},
{0x1f5f, 2199, 1},
{0x1e8e, 1926, 1},
{0x048e, 1091, 1},
{0x018e, 453, 1},
{0x1f8e, 159, 2},
{0x2c8e, 2607, 1},
{0x038e, 833, 1},
{0xa692, 2961, 1},
{0xab8e, 1538, 1},
{0x0055, 59, 1},
{0x10c8e, 3399, 1},
{0x1f5d, 2196, 1},
{0x212a, 27, 1},
{0x04cb, 1181, 1},
{0x01cb, 425, 1},
{0x1fcb, 2241, 1},
{0x118ae, 3552, 1},
{0x0502, 1265, 1},
{0x00cb, 111, 1},
{0xa68e, 2955, 1},
{0x1e8a, 1920, 1},
{0x048a, 1085, 1},
{0x018a, 622, 1},
{0x1f8a, 139, 2},
{0x2c8a, 2601, 1},
{0x038a, 736, 1},
{0x2c67, 2571, 1},
{0xab8a, 1526, 1},
{0x1e86, 1914, 1},
{0x10c8a, 3387, 1},
{0x0186, 616, 1},
{0x1f86, 159, 2},
{0x2c86, 2595, 1},
{0x0386, 727, 1},
{0xff35, 3219, 1},
{0xab86, 1514, 1},
{0xa786, 3108, 1},
{0x10c86, 3375, 1},
{0xa68a, 2949, 1},
{0x0555, 1442, 1},
{0x1ebc, 1980, 1},
{0x04bc, 1160, 1},
{0x01bc, 411, 1},
{0x1fbc, 62, 2},
{0x2cbc, 2676, 1},
{0x1f5b, 2193, 1},
{0xa686, 2943, 1},
{0xabbc, 1676, 1},
{0x1eb8, 1974, 1},
{0x04b8, 1154, 1},
{0x01b8, 408, 1},
{0x1fb8, 2268, 1},
{0x2cb8, 2670, 1},
{0x01db, 450, 1},
{0x1fdb, 2247, 1},
{0xabb8, 1664, 1},
{0x10bc, 2829, 1},
{0x00db, 156, 1},
{0x1eb6, 1971, 1},
{0x04b6, 1151, 1},
{0xff33, 3213, 1},
{0x1fb6, 58, 2},
{0x2cb6, 2667, 1},
{0xff2a, 3186, 1},
{0x10b8, 2817, 1},
{0xabb6, 1658, 1},
{0xa7b6, 3153, 1},
{0x10426, 3351, 1},
{0x1e26, 1769, 1},
{0x0426, 956, 1},
{0x0126, 228, 1},
{0x0053, 52, 1},
{0x2c26, 2535, 1},
{0x0057, 65, 1},
{0x10b6, 2811, 1},
{0x022a, 562, 1},
{0xa726, 2982, 1},
{0x1e2e, 1781, 1},
{0x042e, 980, 1},
{0x012e, 240, 1},
{0x1f2e, 2142, 1},
{0x2c2e, 2559, 1},
{0xffffffff, -1, 0},
{0x2167, 2313, 1},
{0xffffffff, -1, 0},
{0xa72e, 2994, 1},
{0x1e2c, 1778, 1},
{0x042c, 974, 1},
{0x012c, 237, 1},
{0x1f2c, 2136, 1},
{0x2c2c, 2553, 1},
{0x1f6f, 2223, 1},
{0x2c6f, 604, 1},
{0xabbf, 1685, 1},
{0xa72c, 2991, 1},
{0x1e28, 1772, 1},
{0x0428, 962, 1},
{0x0128, 231, 1},
{0x1f28, 2124, 1},
{0x2c28, 2541, 1},
{0xffffffff, -1, 0},
{0x0553, 1436, 1},
{0x10bf, 2838, 1},
{0xa728, 2985, 1},
{0x0526, 1319, 1},
{0x0202, 505, 1},
{0x1e40, 1808, 1},
{0x10424, 3345, 1},
{0x1e24, 1766, 1},
{0x0424, 950, 1},
{0x0124, 225, 1},
{0xffffffff, -1, 0},
{0x2c24, 2529, 1},
{0x052e, 1331, 1},
{0xa740, 3018, 1},
{0x118bc, 3594, 1},
{0xa724, 2979, 1},
{0x1ef2, 2061, 1},
{0x04f2, 1241, 1},
{0x01f2, 483, 1},
{0x1ff2, 257, 2},
{0x2cf2, 2742, 1},
{0x052c, 1328, 1},
{0x118b8, 3582, 1},
{0xa640, 2865, 1},
{0x10422, 3339, 1},
{0x1e22, 1763, 1},
{0x0422, 944, 1},
{0x0122, 222, 1},
{0x2126, 820, 1},
{0x2c22, 2523, 1},
{0x0528, 1322, 1},
{0x01f1, 483, 1},
{0x118b6, 3576, 1},
{0xa722, 2976, 1},
{0x03f1, 796, 1},
{0x1ebe, 1983, 1},
{0x04be, 1163, 1},
{0xfb02, 12, 2},
{0x1fbe, 767, 1},
{0x2cbe, 2679, 1},
{0x01b5, 405, 1},
{0x0540, 1379, 1},
{0xabbe, 1682, 1},
{0x0524, 1316, 1},
{0x00b5, 779, 1},
{0xabb5, 1655, 1},
{0x1eba, 1977, 1},
{0x04ba, 1157, 1},
{0x216f, 2337, 1},
{0x1fba, 2226, 1},
{0x2cba, 2673, 1},
{0x10be, 2835, 1},
{0x0051, 46, 1},
{0xabba, 1670, 1},
{0x10b5, 2808, 1},
{0x1e6e, 1878, 1},
{0x046e, 1055, 1},
{0x016e, 330, 1},
{0x1f6e, 2220, 1},
{0x2c6e, 664, 1},
{0x118bf, 3603, 1},
{0x0522, 1313, 1},
{0x10ba, 2823, 1},
{0xa76e, 3087, 1},
{0x1eb4, 1968, 1},
{0x04b4, 1148, 1},
{0x2c75, 2583, 1},
{0x1fb4, 50, 2},
{0x2cb4, 2664, 1},
{0xab75, 1463, 1},
{0x1ec2, 1989, 1},
{0xabb4, 1652, 1},
{0xa7b4, 3150, 1},
{0x1fc2, 253, 2},
{0x2cc2, 2685, 1},
{0x03c2, 800, 1},
{0x00c2, 83, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff26, 3174, 1},
{0x10b4, 2805, 1},
{0x1eca, 2001, 1},
{0x0551, 1430, 1},
{0x01ca, 425, 1},
{0x1fca, 2238, 1},
{0x2cca, 2697, 1},
{0x10c2, 2847, 1},
{0x00ca, 108, 1},
{0xff2e, 3198, 1},
{0x1e8c, 1923, 1},
{0x048c, 1088, 1},
{0x0226, 556, 1},
{0x1f8c, 149, 2},
{0x2c8c, 2604, 1},
{0x038c, 830, 1},
{0xffffffff, -1, 0},
{0xab8c, 1532, 1},
{0xff2c, 3192, 1},
{0x10c8c, 3393, 1},
{0x1ec4, 1992, 1},
{0x022e, 568, 1},
{0x01c4, 417, 1},
{0x1fc4, 54, 2},
{0x2cc4, 2688, 1},
{0xffffffff, -1, 0},
{0x00c4, 89, 1},
{0xff28, 3180, 1},
{0xa68c, 2952, 1},
{0x01cf, 432, 1},
{0x022c, 565, 1},
{0x118be, 3600, 1},
{0x03cf, 839, 1},
{0x00cf, 123, 1},
{0x118b5, 3573, 1},
{0xffffffff, -1, 0},
{0x10c4, 2853, 1},
{0x216e, 2334, 1},
{0x24cb, 2406, 1},
{0x0228, 559, 1},
{0xff24, 3168, 1},
{0xffffffff, -1, 0},
{0x118ba, 3588, 1},
{0x1efe, 2079, 1},
{0x04fe, 1259, 1},
{0x01fe, 499, 1},
{0x1e9e, 24, 2},
{0x049e, 1115, 1},
{0x03fe, 721, 1},
{0x1f9e, 199, 2},
{0x2c9e, 2631, 1},
{0x039e, 786, 1},
{0x0224, 553, 1},
{0xab9e, 1586, 1},
{0xa79e, 3132, 1},
{0x10c9e, 3447, 1},
{0x01f7, 414, 1},
{0x1ff7, 67, 3},
{0xff22, 3162, 1},
{0x03f7, 884, 1},
{0x118b4, 3570, 1},
{0x049c, 1112, 1},
{0x019c, 661, 1},
{0x1f9c, 189, 2},
{0x2c9c, 2628, 1},
{0x039c, 779, 1},
{0x24bc, 2361, 1},
{0xab9c, 1580, 1},
{0xa79c, 3129, 1},
{0x10c9c, 3441, 1},
{0x0222, 550, 1},
{0x1e7c, 1899, 1},
{0x047c, 1076, 1},
{0x1e82, 1908, 1},
{0x24b8, 2349, 1},
{0x0182, 357, 1},
{0x1f82, 139, 2},
{0x2c82, 2589, 1},
{0xab7c, 1484, 1},
{0xffffffff, -1, 0},
{0xab82, 1502, 1},
{0xa782, 3102, 1},
{0x10c82, 3363, 1},
{0x2c63, 1709, 1},
{0x24b6, 2343, 1},
{0x1e80, 1905, 1},
{0x0480, 1082, 1},
{0x1f59, 2190, 1},
{0x1f80, 129, 2},
{0x2c80, 2586, 1},
{0x0059, 71, 1},
{0xa682, 2937, 1},
{0xab80, 1496, 1},
{0xa780, 3099, 1},
{0x10c80, 3357, 1},
{0xffffffff, -1, 0},
{0x1e4c, 1826, 1},
{0x0145, 270, 1},
{0x014c, 279, 1},
{0x1f4c, 2184, 1},
{0x0345, 767, 1},
{0x0045, 12, 1},
{0x004c, 31, 1},
{0xa680, 2934, 1},
{0xa74c, 3036, 1},
{0x1e4a, 1823, 1},
{0x01d5, 441, 1},
{0x014a, 276, 1},
{0x1f4a, 2178, 1},
{0x03d5, 810, 1},
{0x00d5, 141, 1},
{0x004a, 24, 1},
{0x24bf, 2370, 1},
{0xa74a, 3033, 1},
{0xa64c, 2883, 1},
{0x1041c, 3321, 1},
{0x1e1c, 1754, 1},
{0x041c, 926, 1},
{0x011c, 213, 1},
{0x1f1c, 2118, 1},
{0x2c1c, 2505, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xa64a, 2880, 1},
{0x1041a, 3315, 1},
{0x1e1a, 1751, 1},
{0x041a, 920, 1},
{0x011a, 210, 1},
{0x1f1a, 2112, 1},
{0x2c1a, 2499, 1},
{0xabbd, 1679, 1},
{0x0545, 1394, 1},
{0x054c, 1415, 1},
{0x10418, 3309, 1},
{0x1e18, 1748, 1},
{0x0418, 914, 1},
{0x0118, 207, 1},
{0x1f18, 2106, 1},
{0x2c18, 2493, 1},
{0x10bd, 2832, 1},
{0x2163, 2301, 1},
{0x054a, 1409, 1},
{0x1040e, 3279, 1},
{0x1e0e, 1733, 1},
{0x040e, 1028, 1},
{0x010e, 192, 1},
{0x1f0e, 2100, 1},
{0x2c0e, 2463, 1},
{0x1efc, 2076, 1},
{0x04fc, 1256, 1},
{0x01fc, 496, 1},
{0x1ffc, 96, 2},
{0x051c, 1304, 1},
{0x1040c, 3273, 1},
{0x1e0c, 1730, 1},
{0x040c, 1022, 1},
{0x010c, 189, 1},
{0x1f0c, 2094, 1},
{0x2c0c, 2457, 1},
{0x1f6d, 2217, 1},
{0x2c6d, 607, 1},
{0x051a, 1301, 1},
{0x24be, 2367, 1},
{0x10408, 3261, 1},
{0x1e08, 1724, 1},
{0x0408, 1010, 1},
{0x0108, 183, 1},
{0x1f08, 2082, 1},
{0x2c08, 2445, 1},
{0x04c9, 1178, 1},
{0x0518, 1298, 1},
{0x1fc9, 2235, 1},
{0xffffffff, -1, 0},
{0x24ba, 2355, 1},
{0x00c9, 105, 1},
{0x10416, 3303, 1},
{0x1e16, 1745, 1},
{0x0416, 908, 1},
{0x0116, 204, 1},
{0x050e, 1283, 1},
{0x2c16, 2487, 1},
{0x10414, 3297, 1},
{0x1e14, 1742, 1},
{0x0414, 902, 1},
{0x0114, 201, 1},
{0x042b, 971, 1},
{0x2c14, 2481, 1},
{0x1f2b, 2133, 1},
{0x2c2b, 2550, 1},
{0xffffffff, -1, 0},
{0x050c, 1280, 1},
{0x10406, 3255, 1},
{0x1e06, 1721, 1},
{0x0406, 1004, 1},
{0x0106, 180, 1},
{0x13fb, 1697, 1},
{0x2c06, 2439, 1},
{0x24c2, 2379, 1},
{0x118bd, 3597, 1},
{0xffffffff, -1, 0},
{0x0508, 1274, 1},
{0x10404, 3249, 1},
{0x1e04, 1718, 1},
{0x0404, 998, 1},
{0x0104, 177, 1},
{0x1f95, 194, 2},
{0x2c04, 2433, 1},
{0x0395, 752, 1},
{0x24ca, 2403, 1},
{0xab95, 1559, 1},
{0x0531, 1334, 1},
{0x10c95, 3420, 1},
{0x0516, 1295, 1},
{0x1e6c, 1875, 1},
{0x046c, 1052, 1},
{0x016c, 327, 1},
{0x1f6c, 2214, 1},
{0x216d, 2331, 1},
{0x0514, 1292, 1},
{0x0245, 697, 1},
{0x024c, 598, 1},
{0xa76c, 3084, 1},
{0x10400, 3237, 1},
{0x1e00, 1712, 1},
{0x0400, 986, 1},
{0x0100, 171, 1},
{0x24c4, 2385, 1},
{0x2c00, 2421, 1},
{0x0506, 1271, 1},
{0x024a, 595, 1},
{0x1fab, 224, 2},
{0xa66c, 2931, 1},
{0x03ab, 827, 1},
{0x24cf, 2418, 1},
{0xabab, 1625, 1},
{0xa7ab, 631, 1},
{0x10cab, 3486, 1},
{0xffffffff, -1, 0},
{0x0504, 1268, 1},
{0xffffffff, -1, 0},
{0x021c, 544, 1},
{0x01a9, 679, 1},
{0x1fa9, 214, 2},
{0x10ab, 2778, 1},
{0x03a9, 820, 1},
{0x212b, 92, 1},
{0xaba9, 1619, 1},
{0x1e88, 1917, 1},
{0x10ca9, 3480, 1},
{0x021a, 541, 1},
{0x1f88, 129, 2},
{0x2c88, 2598, 1},
{0x0388, 730, 1},
{0x13fd, 1703, 1},
{0xab88, 1520, 1},
{0x10a9, 2772, 1},
{0x10c88, 3381, 1},
{0xffffffff, -1, 0},
{0x0218, 538, 1},
{0x0500, 1262, 1},
{0x1f4d, 2187, 1},
{0x01a7, 393, 1},
{0x1fa7, 244, 2},
{0x004d, 34, 1},
{0x03a7, 814, 1},
{0xa688, 2946, 1},
{0xaba7, 1613, 1},
{0x020e, 523, 1},
{0x10ca7, 3474, 1},
{0x1e6a, 1872, 1},
{0x046a, 1049, 1},
{0x016a, 324, 1},
{0x1f6a, 2208, 1},
{0xffffffff, -1, 0},
{0x216c, 2328, 1},
{0x10a7, 2766, 1},
{0x01d1, 435, 1},
{0xa76a, 3081, 1},
{0x020c, 520, 1},
{0x03d1, 762, 1},
{0x00d1, 129, 1},
{0x1e68, 1869, 1},
{0x0468, 1046, 1},
{0x0168, 321, 1},
{0x1f68, 2202, 1},
{0xffffffff, -1, 0},
{0xff31, 3207, 1},
{0xa66a, 2928, 1},
{0x0208, 514, 1},
{0xa768, 3078, 1},
{0x1e64, 1863, 1},
{0x0464, 1040, 1},
{0x0164, 315, 1},
{0x054d, 1418, 1},
{0x2c64, 673, 1},
{0xffffffff, -1, 0},
{0xff2b, 3189, 1},
{0xffffffff, -1, 0},
{0xa764, 3072, 1},
{0xa668, 2925, 1},
{0x0216, 535, 1},
{0xffffffff, -1, 0},
{0x118ab, 3543, 1},
{0x1e62, 1860, 1},
{0x0462, 1037, 1},
{0x0162, 312, 1},
{0x0214, 532, 1},
{0x2c62, 655, 1},
{0xa664, 2919, 1},
{0x1ed2, 2013, 1},
{0x04d2, 1193, 1},
{0xa762, 3069, 1},
{0x1fd2, 20, 3},
{0x2cd2, 2709, 1},
{0x118a9, 3537, 1},
{0x00d2, 132, 1},
{0x0206, 511, 1},
{0x10420, 3333, 1},
{0x1e20, 1760, 1},
{0x0420, 938, 1},
{0x0120, 219, 1},
{0xa662, 2916, 1},
{0x2c20, 2517, 1},
{0x1e60, 1856, 1},
{0x0460, 1034, 1},
{0x0160, 309, 1},
{0x0204, 508, 1},
{0x2c60, 2562, 1},
{0xffffffff, -1, 0},
{0x24bd, 2364, 1},
{0x216a, 2322, 1},
{0xa760, 3066, 1},
{0xffffffff, -1, 0},
{0xfb16, 125, 2},
{0x118a7, 3531, 1},
{0x1efa, 2073, 1},
{0x04fa, 1253, 1},
{0x01fa, 493, 1},
{0x1ffa, 2262, 1},
{0xfb14, 109, 2},
{0x03fa, 887, 1},
{0xa660, 2913, 1},
{0x2168, 2316, 1},
{0x01b7, 700, 1},
{0x1fb7, 10, 3},
{0x1f6b, 2211, 1},
{0x2c6b, 2577, 1},
{0x0200, 502, 1},
{0xabb7, 1661, 1},
{0xfb06, 29, 2},
{0x1e56, 1841, 1},
{0x2164, 2304, 1},
{0x0156, 294, 1},
{0x1f56, 62, 3},
{0x0520, 1310, 1},
{0x004f, 40, 1},
{0x0056, 62, 1},
{0x10b7, 2814, 1},
{0xa756, 3051, 1},
{0xfb04, 5, 3},
{0x1e78, 1893, 1},
{0x0478, 1070, 1},
{0x0178, 168, 1},
{0x1e54, 1838, 1},
{0x2162, 2298, 1},
{0x0154, 291, 1},
{0x1f54, 57, 3},
{0xab78, 1472, 1},
{0xa656, 2898, 1},
{0x0054, 56, 1},
{0x1e52, 1835, 1},
{0xa754, 3048, 1},
{0x0152, 288, 1},
{0x1f52, 52, 3},
{0x24c9, 2400, 1},
{0x1e32, 1787, 1},
{0x0052, 49, 1},
{0x0132, 243, 1},
{0xa752, 3045, 1},
{0xffffffff, -1, 0},
{0xfb00, 4, 2},
{0xa654, 2895, 1},
{0xffffffff, -1, 0},
{0xa732, 2997, 1},
{0x2160, 2292, 1},
{0x054f, 1424, 1},
{0x0556, 1445, 1},
{0x1e50, 1832, 1},
{0xa652, 2892, 1},
{0x0150, 285, 1},
{0x1f50, 84, 2},
{0x017b, 348, 1},
{0x1e4e, 1829, 1},
{0x0050, 43, 1},
{0x014e, 282, 1},
{0xa750, 3042, 1},
{0xab7b, 1481, 1},
{0xa77b, 3093, 1},
{0x004e, 37, 1},
{0x0554, 1439, 1},
{0xa74e, 3039, 1},
{0x1e48, 1820, 1},
{0xffffffff, -1, 0},
{0x216b, 2325, 1},
{0x1f48, 2172, 1},
{0xa650, 2889, 1},
{0x0552, 1433, 1},
{0x0048, 21, 1},
{0xffffffff, -1, 0},
{0xa748, 3030, 1},
{0xa64e, 2886, 1},
{0x0532, 1337, 1},
{0x1041e, 3327, 1},
{0x1e1e, 1757, 1},
{0x041e, 932, 1},
{0x011e, 216, 1},
{0x118b7, 3579, 1},
{0x2c1e, 2511, 1},
{0xffffffff, -1, 0},
{0xa648, 2877, 1},
{0x1ff9, 2253, 1},
{0xffffffff, -1, 0},
{0x03f9, 878, 1},
{0x0550, 1427, 1},
{0x10412, 3291, 1},
{0x1e12, 1739, 1},
{0x0412, 896, 1},
{0x0112, 198, 1},
{0x054e, 1421, 1},
{0x2c12, 2475, 1},
{0x10410, 3285, 1},
{0x1e10, 1736, 1},
{0x0410, 890, 1},
{0x0110, 195, 1},
{0xffffffff, -1, 0},
{0x2c10, 2469, 1},
{0x2132, 2289, 1},
{0x0548, 1403, 1},
{0x1ef8, 2070, 1},
{0x04f8, 1250, 1},
{0x01f8, 490, 1},
{0x1ff8, 2250, 1},
{0x0220, 381, 1},
{0x1ee2, 2037, 1},
{0x04e2, 1217, 1},
{0x01e2, 462, 1},
{0x1fe2, 36, 3},
{0x2ce2, 2733, 1},
{0x03e2, 857, 1},
{0x051e, 1307, 1},
{0x1ede, 2031, 1},
{0x04de, 1211, 1},
{0x01de, 456, 1},
{0xffffffff, -1, 0},
{0x2cde, 2727, 1},
{0x03de, 851, 1},
{0x00de, 165, 1},
{0x1f69, 2205, 1},
{0x2c69, 2574, 1},
{0x1eda, 2025, 1},
{0x04da, 1205, 1},
{0x0512, 1289, 1},
{0x1fda, 2244, 1},
{0x2cda, 2721, 1},
{0x03da, 845, 1},
{0x00da, 153, 1},
{0xffffffff, -1, 0},
{0x0510, 1286, 1},
{0x1ed8, 2022, 1},
{0x04d8, 1202, 1},
{0xffffffff, -1, 0},
{0x1fd8, 2274, 1},
{0x2cd8, 2718, 1},
{0x03d8, 842, 1},
{0x00d8, 147, 1},
{0x1ed6, 2019, 1},
{0x04d6, 1199, 1},
{0xffffffff, -1, 0},
{0x1fd6, 76, 2},
{0x2cd6, 2715, 1},
{0x03d6, 792, 1},
{0x00d6, 144, 1},
{0x1ec8, 1998, 1},
{0xffffffff, -1, 0},
{0x01c8, 421, 1},
{0x1fc8, 2232, 1},
{0x2cc8, 2694, 1},
{0xff32, 3210, 1},
{0x00c8, 102, 1},
{0x04c7, 1175, 1},
{0x01c7, 421, 1},
{0x1fc7, 15, 3},
{0x1ec0, 1986, 1},
{0x04c0, 1187, 1},
{0x00c7, 99, 1},
{0xffffffff, -1, 0},
{0x2cc0, 2682, 1},
{0x0179, 345, 1},
{0x00c0, 77, 1},
{0x0232, 574, 1},
{0x01b3, 402, 1},
{0x1fb3, 62, 2},
{0xab79, 1475, 1},
{0xa779, 3090, 1},
{0x10c7, 2859, 1},
{0xabb3, 1649, 1},
{0xa7b3, 3156, 1},
{0x1fa5, 234, 2},
{0x10c0, 2841, 1},
{0x03a5, 807, 1},
{0xffffffff, -1, 0},
{0xaba5, 1607, 1},
{0x01b1, 691, 1},
{0x10ca5, 3468, 1},
{0x10b3, 2802, 1},
{0x2169, 2319, 1},
{0x024e, 601, 1},
{0xabb1, 1643, 1},
{0xa7b1, 682, 1},
{0x10cb1, 3504, 1},
{0x10a5, 2760, 1},
{0xffffffff, -1, 0},
{0x01af, 399, 1},
{0x1faf, 244, 2},
{0xffffffff, -1, 0},
{0x0248, 592, 1},
{0x10b1, 2796, 1},
{0xabaf, 1637, 1},
{0x1fad, 234, 2},
{0x10caf, 3498, 1},
{0x04cd, 1184, 1},
{0x01cd, 429, 1},
{0xabad, 1631, 1},
{0xa7ad, 658, 1},
{0x10cad, 3492, 1},
{0x00cd, 117, 1},
{0x10af, 2790, 1},
{0x021e, 547, 1},
{0x1fa3, 224, 2},
{0xffffffff, -1, 0},
{0x03a3, 800, 1},
{0x10ad, 2784, 1},
{0xaba3, 1601, 1},
{0xffffffff, -1, 0},
{0x10ca3, 3462, 1},
{0x10cd, 2862, 1},
{0x1fa1, 214, 2},
{0x24b7, 2346, 1},
{0x03a1, 796, 1},
{0x0212, 529, 1},
{0xaba1, 1595, 1},
{0x10a3, 2754, 1},
{0x10ca1, 3456, 1},
{0x01d3, 438, 1},
{0x1fd3, 25, 3},
{0x0210, 526, 1},
{0xffffffff, -1, 0},
{0x00d3, 135, 1},
{0x1e97, 34, 2},
{0x10a1, 2748, 1},
{0x0197, 649, 1},
{0x1f97, 204, 2},
{0xffffffff, -1, 0},
{0x0397, 759, 1},
{0x1041d, 3324, 1},
{0xab97, 1565, 1},
{0x041d, 929, 1},
{0x10c97, 3426, 1},
{0x1f1d, 2121, 1},
{0x2c1d, 2508, 1},
{0x1e72, 1884, 1},
{0x0472, 1061, 1},
{0x0172, 336, 1},
{0x118b3, 3567, 1},
{0x2c72, 2580, 1},
{0x0372, 712, 1},
{0x1041b, 3318, 1},
{0xab72, 1454, 1},
{0x041b, 923, 1},
{0x118a5, 3525, 1},
{0x1f1b, 2115, 1},
{0x2c1b, 2502, 1},
{0x1e70, 1881, 1},
{0x0470, 1058, 1},
{0x0170, 333, 1},
{0x118b1, 3561, 1},
{0x2c70, 610, 1},
{0x0370, 709, 1},
{0x1e46, 1817, 1},
{0xab70, 1448, 1},
{0x1e66, 1866, 1},
{0x0466, 1043, 1},
{0x0166, 318, 1},
{0x1e44, 1814, 1},
{0x0046, 15, 1},
{0x118af, 3555, 1},
{0xa746, 3027, 1},
{0xffffffff, -1, 0},
{0xa766, 3075, 1},
{0x0044, 9, 1},
{0x118ad, 3549, 1},
{0xa744, 3024, 1},
{0x1e7a, 1896, 1},
{0x047a, 1073, 1},
{0x1e3a, 1799, 1},
{0xffffffff, -1, 0},
{0xa646, 2874, 1},
{0x1f3a, 2154, 1},
{0xa666, 2922, 1},
{0xab7a, 1478, 1},
{0x118a3, 3519, 1},
{0xa644, 2871, 1},
{0xa73a, 3009, 1},
{0xffffffff, -1, 0},
{0x1ef4, 2064, 1},
{0x04f4, 1244, 1},
{0x01f4, 487, 1},
{0x1ff4, 101, 2},
{0x118a1, 3513, 1},
{0x03f4, 762, 1},
{0x1eec, 2052, 1},
{0x04ec, 1232, 1},
{0x01ec, 477, 1},
{0x1fec, 2286, 1},
{0x0546, 1397, 1},
{0x03ec, 872, 1},
{0xffffffff, -1, 0},
{0x013f, 261, 1},
{0x1f3f, 2169, 1},
{0x0544, 1391, 1},
{0x1eea, 2049, 1},
{0x04ea, 1229, 1},
{0x01ea, 474, 1},
{0x1fea, 2256, 1},
{0xffffffff, -1, 0},
{0x03ea, 869, 1},
{0x1ee8, 2046, 1},
{0x04e8, 1226, 1},
{0x01e8, 471, 1},
{0x1fe8, 2280, 1},
{0x053a, 1361, 1},
{0x03e8, 866, 1},
{0x1ee6, 2043, 1},
{0x04e6, 1223, 1},
{0x01e6, 468, 1},
{0x1fe6, 88, 2},
{0x1f4b, 2181, 1},
{0x03e6, 863, 1},
{0x1e5e, 1853, 1},
{0x004b, 27, 1},
{0x015e, 306, 1},
{0x2166, 2310, 1},
{0x1ee4, 2040, 1},
{0x04e4, 1220, 1},
{0x01e4, 465, 1},
{0x1fe4, 80, 2},
{0xa75e, 3063, 1},
{0x03e4, 860, 1},
{0x1ee0, 2034, 1},
{0x04e0, 1214, 1},
{0x01e0, 459, 1},
{0x053f, 1376, 1},
{0x2ce0, 2730, 1},
{0x03e0, 854, 1},
{0x1edc, 2028, 1},
{0x04dc, 1208, 1},
{0xa65e, 2910, 1},
{0xffffffff, -1, 0},
{0x2cdc, 2724, 1},
{0x03dc, 848, 1},
{0x00dc, 159, 1},
{0x1ed0, 2010, 1},
{0x04d0, 1190, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0x2cd0, 2706, 1},
{0x03d0, 742, 1},
{0x00d0, 126, 1},
{0x1ecc, 2004, 1},
{0x054b, 1412, 1},
{0xffffffff, -1, 0},
{0x1fcc, 71, 2},
{0x2ccc, 2700, 1},
{0x1ec6, 1995, 1},
{0x00cc, 114, 1},
{0xffffffff, -1, 0},
{0x1fc6, 67, 2},
{0x2cc6, 2691, 1},
{0x24c8, 2397, 1},
{0x00c6, 96, 1},
{0x04c5, 1172, 1},
{0x01c5, 417, 1},
{0xffffffff, -1, 0},
{0x1fbb, 2229, 1},
{0x24c7, 2394, 1},
{0x00c5, 92, 1},
{0x1fb9, 2271, 1},
{0xabbb, 1673, 1},
{0x24c0, 2373, 1},
{0x04c3, 1169, 1},
{0xabb9, 1667, 1},
{0x1fc3, 71, 2},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0x00c3, 86, 1},
{0x10c5, 2856, 1},
{0x10bb, 2826, 1},
{0x1ed4, 2016, 1},
{0x04d4, 1196, 1},
{0x10b9, 2820, 1},
{0x13fc, 1700, 1},
{0x2cd4, 2712, 1},
{0x0246, 589, 1},
{0x00d4, 138, 1},
{0x10c3, 2850, 1},
{0xffffffff, -1, 0},
{0xff3a, 3234, 1},
{0x0244, 688, 1},
{0x019f, 670, 1},
{0x1f9f, 204, 2},
{0xffffffff, -1, 0},
{0x039f, 789, 1},
{0xffffffff, -1, 0},
{0xab9f, 1589, 1},
{0xffffffff, -1, 0},
{0x10c9f, 3450, 1},
{0x019d, 667, 1},
{0x1f9d, 194, 2},
{0x023a, 2565, 1},
{0x039d, 783, 1},
{0x1e5a, 1847, 1},
{0xab9d, 1583, 1},
{0x015a, 300, 1},
{0x10c9d, 3444, 1},
{0x1e9b, 1856, 1},
{0x24cd, 2412, 1},
{0x005a, 74, 1},
{0x1f9b, 184, 2},
{0xa75a, 3057, 1},
{0x039b, 776, 1},
{0x1ece, 2007, 1},
{0xab9b, 1577, 1},
{0x1e99, 42, 2},
{0x10c9b, 3438, 1},
{0x2cce, 2703, 1},
{0x1f99, 174, 2},
{0x00ce, 120, 1},
{0x0399, 767, 1},
{0xa65a, 2904, 1},
{0xab99, 1571, 1},
{0xffffffff, -1, 0},
{0x10c99, 3432, 1},
{0x0193, 634, 1},
{0x1f93, 184, 2},
{0x1e58, 1844, 1},
{0x0393, 746, 1},
{0x0158, 297, 1},
{0xab93, 1553, 1},
{0xffffffff, -1, 0},
{0x10c93, 3414, 1},
{0x0058, 68, 1},
{0x042d, 977, 1},
{0xa758, 3054, 1},
{0x1f2d, 2139, 1},
{0x2c2d, 2556, 1},
{0x118bb, 3591, 1},
{0x0191, 369, 1},
{0x1f91, 174, 2},
{0x118b9, 3585, 1},
{0x0391, 739, 1},
{0xffffffff, -1, 0},
{0xab91, 1547, 1},
{0xa658, 2901, 1},
{0x10c91, 3408, 1},
{0x018f, 625, 1},
{0x1f8f, 164, 2},
{0xffffffff, -1, 0},
{0x038f, 836, 1},
{0xffffffff, -1, 0},
{0xab8f, 1541, 1},
{0xffffffff, -1, 0},
{0x10c8f, 3402, 1},
{0x018b, 366, 1},
{0x1f8b, 144, 2},
{0xffffffff, -1, 0},
{0x0187, 363, 1},
{0x1f87, 164, 2},
{0xab8b, 1529, 1},
{0xa78b, 3111, 1},
{0x10c8b, 3390, 1},
{0xab87, 1517, 1},
{0x04c1, 1166, 1},
{0x10c87, 3378, 1},
{0x1e7e, 1902, 1},
{0x047e, 1079, 1},
{0xffffffff, -1, 0},
{0x00c1, 80, 1},
{0x2c7e, 580, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xab7e, 1490, 1},
{0xa77e, 3096, 1},
{0x1e76, 1890, 1},
{0x0476, 1067, 1},
{0x0176, 342, 1},
{0x1e42, 1811, 1},
{0x10c1, 2844, 1},
{0x0376, 715, 1},
{0x1e36, 1793, 1},
{0xab76, 1466, 1},
{0x0136, 249, 1},
{0x0042, 3, 1},
{0x1e3e, 1805, 1},
{0xa742, 3021, 1},
{0x1e38, 1796, 1},
{0x1f3e, 2166, 1},
{0xa736, 3003, 1},
{0x1f38, 2148, 1},
{0xffffffff, -1, 0},
{0x0587, 105, 2},
{0xa73e, 3015, 1},
{0xffffffff, -1, 0},
{0xa738, 3006, 1},
{0xa642, 2868, 1},
{0x1e5c, 1850, 1},
{0x1e34, 1790, 1},
{0x015c, 303, 1},
{0x0134, 246, 1},
{0x1ef6, 2067, 1},
{0x04f6, 1247, 1},
{0x01f6, 372, 1},
{0x1ff6, 92, 2},
{0xa75c, 3060, 1},
{0xa734, 3000, 1},
{0x1ef0, 2058, 1},
{0x04f0, 1238, 1},
{0x01f0, 20, 2},
{0xffffffff, -1, 0},
{0x1e30, 1784, 1},
{0x03f0, 772, 1},
{0x0130, 261, 2},
{0x0542, 1385, 1},
{0xa65c, 2907, 1},
{0x1f83, 144, 2},
{0x0536, 1349, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xab83, 1505, 1},
{0x053e, 1373, 1},
{0x10c83, 3366, 1},
{0x0538, 1355, 1},
{0x1eee, 2055, 1},
{0x04ee, 1235, 1},
{0x01ee, 480, 1},
{0x1f8d, 154, 2},
{0xffffffff, -1, 0},
{0x03ee, 875, 1},
{0xffffffff, -1, 0},
{0xab8d, 1535, 1},
{0xa78d, 643, 1},
{0x10c8d, 3396, 1},
{0x0534, 1343, 1},
{0x0181, 613, 1},
{0x1f81, 134, 2},
{0x013d, 258, 1},
{0x1f3d, 2163, 1},
{0xffffffff, -1, 0},
{0xab81, 1499, 1},
{0x017f, 52, 1},
{0x10c81, 3360, 1},
{0x2c7f, 583, 1},
{0x037f, 881, 1},
{0xff2d, 3195, 1},
{0xab7f, 1493, 1},
{0x1e74, 1887, 1},
{0x0474, 1064, 1},
{0x0174, 339, 1},
{0x1e3c, 1802, 1},
{0x0149, 46, 2},
{0x1f49, 2175, 1},
{0x1f3c, 2160, 1},
{0xab74, 1460, 1},
{0x0049, 3606, 1},
{0x0143, 267, 1},
{0x24cc, 2409, 1},
{0xa73c, 3012, 1},
{0xffffffff, -1, 0},
{0x0043, 6, 1},
{0x0141, 264, 1},
{0x24c6, 2391, 1},
{0x013b, 255, 1},
{0x1f3b, 2157, 1},
{0x0041, 0, 1},
{0x0139, 252, 1},
{0x1f39, 2151, 1},
{0x24c5, 2388, 1},
{0x24bb, 2358, 1},
{0x13fa, 1694, 1},
{0x053d, 1370, 1},
{0x24b9, 2352, 1},
{0x0429, 965, 1},
{0x2183, 2340, 1},
{0x1f29, 2127, 1},
{0x2c29, 2544, 1},
{0x24c3, 2382, 1},
{0x10427, 3354, 1},
{0x10425, 3348, 1},
{0x0427, 959, 1},
{0x0425, 953, 1},
{0xffffffff, -1, 0},
{0x2c27, 2538, 1},
{0x2c25, 2532, 1},
{0x0549, 1406, 1},
{0x053c, 1367, 1},
{0x10423, 3342, 1},
{0xffffffff, -1, 0},
{0x0423, 947, 1},
{0x0543, 1388, 1},
{0xffffffff, -1, 0},
{0x2c23, 2526, 1},
{0xff36, 3222, 1},
{0xffffffff, -1, 0},
{0x0541, 1382, 1},
{0x10421, 3336, 1},
{0x053b, 1364, 1},
{0x0421, 941, 1},
{0xff38, 3228, 1},
{0x0539, 1358, 1},
{0x2c21, 2520, 1},
{0x10419, 3312, 1},
{0x10417, 3306, 1},
{0x0419, 917, 1},
{0x0417, 911, 1},
{0x1f19, 2109, 1},
{0x2c19, 2496, 1},
{0x2c17, 2490, 1},
{0x023e, 2568, 1},
{0xff34, 3216, 1},
{0x10415, 3300, 1},
{0x10413, 3294, 1},
{0x0415, 905, 1},
{0x0413, 899, 1},
{0xffffffff, -1, 0},
{0x2c15, 2484, 1},
{0x2c13, 2478, 1},
{0xffffffff, -1, 0},
{0x24ce, 2415, 1},
{0x1040f, 3282, 1},
{0xffffffff, -1, 0},
{0x040f, 1031, 1},
{0xff30, 3204, 1},
{0x1f0f, 2103, 1},
{0x2c0f, 2466, 1},
{0x1040d, 3276, 1},
{0xffffffff, -1, 0},
{0x040d, 1025, 1},
{0x0147, 273, 1},
{0x1f0d, 2097, 1},
{0x2c0d, 2460, 1},
{0x1040b, 3270, 1},
{0x0047, 18, 1},
{0x040b, 1019, 1},
{0x0230, 571, 1},
{0x1f0b, 2091, 1},
{0x2c0b, 2454, 1},
{0x10409, 3264, 1},
{0x10405, 3252, 1},
{0x0409, 1013, 1},
{0x0405, 1001, 1},
{0x1f09, 2085, 1},
{0x2c09, 2448, 1},
{0x2c05, 2436, 1},
{0x10403, 3246, 1},
{0x10401, 3240, 1},
{0x0403, 995, 1},
{0x0401, 989, 1},
{0xffffffff, -1, 0},
{0x2c03, 2430, 1},
{0x2c01, 2424, 1},
{0x13f9, 1691, 1},
{0x042f, 983, 1},
{0xffffffff, -1, 0},
{0x1f2f, 2145, 1},
{0x1041f, 3330, 1},
{0xffffffff, -1, 0},
{0x041f, 935, 1},
{0x023d, 378, 1},
{0x10411, 3288, 1},
{0x2c1f, 2514, 1},
{0x0411, 893, 1},
{0x0547, 1400, 1},
{0xffffffff, -1, 0},
{0x2c11, 2472, 1},
{0x10407, 3258, 1},
{0xffffffff, -1, 0},
{0x0407, 1007, 1},
{0x24c1, 2376, 1},
{0xffffffff, -1, 0},
{0x2c07, 2442, 1},
{0xffffffff, -1, 0},
{0x13f8, 1688, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff39, 3231, 1},
{0xffffffff, -1, 0},
{0x0243, 354, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0x0241, 586, 1},
{0xff29, 3183, 1},
{0x023b, 577, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff27, 3177, 1},
{0xff25, 3171, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff23, 3165, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff21, 3159, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0},
{0xfb17, 117, 2},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xff2f, 3201, 1},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xfb15, 113, 2},
{0xfb13, 121, 2},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xffffffff, -1, 0},
{0xfb05, 29, 2},
{0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0}, {0xffffffff, -1, 0},
{0xfb03, 0, 3},
{0xfb01, 8, 2}
};
if (0 == 0)
{
int key = hash(&code);
if (key <= MAX_HASH_VALUE && key >= 0)
{
OnigCodePoint gcode = wordlist[key].code;
if (code == gcode)
return &wordlist[key];
}
}
return 0;
} ; Sample JSON Format Conformity Expected output: "examples" => "cwe": "xxx"; "message": "xxx"
[/INST] 'cwe': ['CWE-787'], 'message': 'fix #56 : return invalid result for codepoint 0xFFFFFFFF'</s> |
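
Editor's note on the weakness above: the `wordlist` table pads unused hash slots with `{0xffffffff, -1, 0}` filler entries. A lookup for the out-of-range codepoint 0xFFFFFFFF can hash onto one of those padding slots, where the `code == gcode` comparison succeeds against the filler and the function returns a pointer to an entry carrying an invalid index of -1, which callers may then use for further table accesses. Below is a minimal sketch of a guarded lookup; it reuses `hash`, `wordlist`, and `MAX_HASH_VALUE` from the snippet above, and the `index >= 0` check is an assumption based on the fillers carrying -1 in their second field (named `index` here by convention of similar gperf-generated tables), not a verbatim copy of the upstream patch for issue #56.

    /* Sketch of a guarded lookup (assumed shape of the fix):
       reject filler slots so that codepoint 0xFFFFFFFF can no
       longer alias a padding entry. */
    int key = hash(&code);
    if (key >= 0 && key <= MAX_HASH_VALUE)
    {
        /* Genuine entries always have index >= 0; fillers carry -1. */
        if (code == wordlist[key].code && wordlist[key].index >= 0)
            return &wordlist[key];
    }
    return 0; /* not found, or the slot was padding */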