Commit f1f9e6c5 authored by Greg Kurz, committed by Michael S. Tsirkin
Browse files

vhost: adapt vhost_verify_ring_mappings() to virtio 1 ring layout



With virtio 1, the vring layout is split in 3 separate regions of
contiguous memory for the descriptor table, the available ring and the
used ring, as opposed with legacy virtio which uses a single region.

In case of memory re-mapping, the code ensures it doesn't affect the
vring mapping. This is done in vhost_verify_ring_mappings() which assumes
the device is legacy.

This patch changes vhost_verify_ring_mappings() to check the mappings of
each part of the vring separately.

This works for legacy mappings as well.

Cc: qemu-stable@nongnu.org
Signed-off-by: Greg Kurz <groug@kaod.org>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent cb88ebd7
Loading
Loading
Loading
Loading
+60 −19
Original line number Diff line number Diff line
@@ -421,32 +421,73 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,

static int vhost_verify_ring_part_mapping(void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
    hwaddr l;
    void *p;
    int r = 0;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            error_report("Unable to map ring buffer for ring %d", i);
    l = part_size;
    p = cpu_physical_memory_map(part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
        if (p != vq->ring) {
            error_report("Ring buffer relocated for ring %d", i);
    if (p != part) {
        r = -EBUSY;
    }
    cpu_physical_memory_unmap(p, l, 0, 0);
    return r;
}

/*
 * Verify that none of the three vring parts (descriptor table, available
 * ring, used ring) of any virtqueue is disturbed by a change to guest
 * memory [start_addr, start_addr + size).  Works for legacy virtio too,
 * where the three parts happen to live in one contiguous region.
 *
 * Returns 0 on success, or the first error reported by
 * vhost_verify_ring_part_mapping() (-ENOMEM / -EBUSY), after logging
 * which part of which ring failed.
 */
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j = 0;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        /* BUGFIX: stop on error (r != 0), not on success.  The original
         * "if (!r) break;" bailed out after the first *successful* check,
         * leaving the available and used rings unverified, and kept
         * looping past a real error, overwriting r. */
        j = 0;
        r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    /* i and j still index the failing ring/part when we broke out. */
    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}
@@ -860,15 +901,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
+4 −0
Original line number Diff line number Diff line
@@ -14,6 +14,10 @@ struct vhost_virtqueue {
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    void *ring;