vhost: fix OOB in get_rx_bufs()
author Jason Wang <jasowang@redhat.com>
Mon, 28 Jan 2019 07:05:05 +0000 (15:05 +0800)
committer David S. Miller <davem@davemloft.net>
Tue, 29 Jan 2019 06:53:09 +0000 (22:53 -0800)
Since batched used ring updating was introduced in commit c5c6fa6154ba
("vhost_net: batch used ring update in rx"), we batch heads in
vq->heads for more than one packet. But the quota passed to
get_rx_bufs() was not correctly limited, which can result in an OOB
write in vq->heads.

        headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
                    vhost_len, &in, vq_log, &log,
                    likely(mergeable) ? UIO_MAXIOV : 1);

UIO_MAXIOV was still used as the quota, which is wrong since there may
already be batched used heads in vq->heads. This causes an OOB write if
the next buffer needs more than 960 (1024 (UIO_MAXIOV) - 64
(VHOST_NET_BATCH)) heads after we've already batched 64
(VHOST_NET_BATCH) heads:
=============================================================================
BUG kmalloc-8k (Tainted: G    B            ): Redzone overwritten
-----------------------------------------------------------------------------

INFO: 0x00000000fd93b7a2-0x00000000f0713384. First byte 0xa9 instead of 0xcc
INFO: Allocated in alloc_pd+0x22/0x60 age=3933677 cpu=2 pid=2674
    kmem_cache_alloc_trace+0xbb/0x140
    alloc_pd+0x22/0x60
    gen8_ppgtt_create+0x11d/0x5f0
    i915_ppgtt_create+0x16/0x80
    i915_gem_create_context+0x248/0x390
    i915_gem_context_create_ioctl+0x4b/0xe0
    drm_ioctl_kernel+0xa5/0xf0
    drm_ioctl+0x2ed/0x3a0
    do_vfs_ioctl+0x9f/0x620
    ksys_ioctl+0x6b/0x80
    __x64_sys_ioctl+0x11/0x20
    do_syscall_64+0x43/0xf0
    entry_SYSCALL_64_after_hwframe+0x44/0xa9
INFO: Slab 0x00000000d13e87af objects=3 used=3 fp=0x          (null) flags=0x200000000010201
INFO: Object 0x0000000003278802 @offset=17064 fp=0x00000000e2e6652b
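
To make the failure mode concrete, here is a minimal sketch in plain C
(illustrative only, not kernel code; the helper name is made up):
get_rx_bufs() writes headcount entries starting at vq->heads + done_idx,
while vq->heads only held UIO_MAXIOV entries, so any already-batched
heads shrink the space that is actually left:

    #define UIO_MAXIOV      1024    /* old size of vq->heads */
    #define VHOST_NET_BATCH 64      /* max heads batched before a flush */

    /* get_rx_bufs() fills entries [done_idx, done_idx + headcount) */
    static int heads_write_oob(int done_idx, int headcount)
    {
            return done_idx + headcount > UIO_MAXIOV;
    }

    /* heads_write_oob(64, 961) == 1: with a full batch pending, a
     * buffer needing more than 960 heads runs off the end of the array.
     */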

Fix this by allocating UIO_MAXIOV + VHOST_NET_BATCH iovs for vhost-net.
This is done by passing the limit through vhost_dev_init(), so that
set_owner can allocate the iovs in a per-device manner.
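
With this limit the worst case derived from the figures above stays in
bounds: at most VHOST_NET_BATCH (64) heads can already be batched when
get_rx_bufs() is called, and get_rx_bufs() returns at most UIO_MAXIOV
(1024) heads, so the highest index written into vq->heads is
64 + 1024 - 1 = 1087, the last valid slot of an array sized
UIO_MAXIOV + VHOST_NET_BATCH = 1088.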

This fixes CVE-2018-16880.

Fixes: c5c6fa6154ba ("vhost_net: batch used ring update in rx")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/vhost/vsock.c

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index bca86bf7189fa7a14d6e1133c3623cf70869df2c..df51a35cf537c96caf570d0ce06dbbc004b61af1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
                n->vqs[i].rx_ring = NULL;
                vhost_net_buf_init(&n->vqs[i].rxq);
        }
-       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+       vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+                      UIO_MAXIOV + VHOST_NET_BATCH);
 
        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 344684f3e2e4a85836d021f2abe0c66d30fd2160..23593cb23dd0229abdbbef2c7178d3ba487b383a 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
-       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+       vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
        vhost_scsi_init_inflight(vs, NULL);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 15a216cdd507772bc8bbb2539755250aa8001fce..24a129fcdd61b38bd8bb5045d22dd4ca0884a59f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
                vq->indirect = kmalloc_array(UIO_MAXIOV,
                                             sizeof(*vq->indirect),
                                             GFP_KERNEL);
-               vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+               vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
                                        GFP_KERNEL);
-               vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+               vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
                                          GFP_KERNEL);
                if (!vq->indirect || !vq->log || !vq->heads)
                        goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-                   struct vhost_virtqueue **vqs, int nvqs)
+                   struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
        struct vhost_virtqueue *vq;
        int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
        dev->iotlb = NULL;
        dev->mm = NULL;
        dev->worker = NULL;
+       dev->iov_limit = iov_limit;
        init_llist_head(&dev->work_list);
        init_waitqueue_head(&dev->wait);
        INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1b675dad5e0583607253db48c0bfe95d2a41ba7c..9490e7ddb3404891515908cb8e67d74563640ebd 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
        struct list_head read_list;
        struct list_head pending_list;
        wait_queue_head_t wait;
+       int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+                   int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3fbc068eaa9b6c7745562a6a48560dc44104fd6f..bb5fc0e9fbc2d7cbb9948afa207453694f092877 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+       vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);