Commit f73ca736 authored by Peter Maydell

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging



virtio, vhost, pc fixes for 2.4

The only notable thing here is the vhost-user multiqueue
revert. We'll work on making it stable in 2.5;
reverting now means we won't have to maintain
bug-for-bug compatibility forever.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Mon Jul 20 12:24:00 2015 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  virtio-net: remove virtio queues if the guest doesn't support multiqueue
  virtio-net: Flush incoming queues when DRIVER_OK is being set
  pci_add_capability: remove duplicate comments
  virtio-net: unbreak any layout
  Revert "vhost-user: add multi queue support"
  ich9: fix skipped vmstate_memhp_state subsection

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 71358470 f9d6dbf0
docs/specs/vhost-user.txt: +0 −5
@@ -127,11 +127,6 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
-Multi queue support
--------------------
-The protocol supports multiple queues by setting all index fields in the sent
-messages to a properly calculated value.
-
 Message types
 -------------
 
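For background on the removed section: virtio-net places each queue pair at two consecutive virtqueue indices, so the "properly calculated value" for an index field is derived from the queue-pair number. A minimal sketch of that convention (the helper name is illustrative, not part of the spec):

    /* Illustrative helper: even index = RX ring, odd index = TX ring
     * of queue pair 'pair'. */
    static inline unsigned vq_index(unsigned pair, int is_tx)
    {
        return pair * 2 + (is_tx ? 1 : 0);
    }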
hw/acpi/ich9.c: +0 −3
@@ -206,9 +206,6 @@ const VMStateDescription vmstate_ich9_pm = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_memhp_state,
-        NULL
-    },
-    .subsections = (const VMStateDescription*[]) {
         &vmstate_tco_io_state,
         NULL
     }
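The bug fixed here is a duplicated designated initializer: C permits initializing the same member twice, and the later initializer silently wins (GCC diagnoses it only with -Woverride-init), so the first .subsections array, and with it vmstate_memhp_state, was never registered. A standalone sketch of the pitfall, using made-up names:

    #include <stdio.h>

    struct desc {
        const char **subsections;
    };

    int main(void)
    {
        static const char *memhp[] = { "memhp", NULL };
        static const char *tco[] = { "tco", NULL };
        struct desc d = {
            .subsections = memhp,   /* silently discarded */
            .subsections = tco,     /* the last initializer wins */
        };
        printf("%s\n", d.subsections[0]);   /* prints "tco"; memhp is lost */
        return 0;
    }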
hw/net/vhost_net.c: +1 −2
@@ -160,7 +160,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)

     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
-    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type);
@@ -287,7 +286,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          &file);
+                                          NULL);
             assert(r >= 0);
         }
     }
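The second hunk restores the pre-multiqueue calling convention: in the kernel's vhost UAPI, VHOST_RESET_OWNER is declared with _IO(), i.e. it carries no payload, so the backend wrapper is handed NULL rather than a per-ring struct vhost_vring_file. A minimal sketch of the underlying ioctl shape for the kernel backend (the helper name is illustrative):

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* VHOST_RESET_OWNER takes no argument, hence no third ioctl parameter. */
    static int reset_owner(int vhost_fd)
    {
        return ioctl(vhost_fd, VHOST_RESET_OWNER);
    }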
hw/net/virtio-net.c: +109 −34
@@ -162,6 +162,8 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
+        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+        bool queue_started;
         q = &n->vqs[i];
 
         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
@@ -169,12 +171,18 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
         } else {
             queue_status = status;
         }
+        queue_started =
+            virtio_net_started(n, queue_status) && !n->vhost_started;
+
+        if (queue_started) {
+            qemu_flush_queued_packets(ncs);
+        }
 
         if (!q->tx_waiting) {
             continue;
         }
 
-        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+        if (queue_started) {
             if (q->tx_timer) {
                 timer_mod(q->tx_timer,
                           qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1142,7 +1150,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         ssize_t ret, len;
         unsigned int out_num = elem.out_num;
         struct iovec *out_sg = &elem.out_sg[0];
-        struct iovec sg[VIRTQUEUE_MAX_SIZE];
+        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+        struct virtio_net_hdr_mrg_rxbuf mhdr;
 
         if (out_num < 1) {
             error_report("virtio-net header not in first element");
@@ -1150,13 +1159,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         if (n->has_vnet_hdr) {
-            if (out_sg[0].iov_len < n->guest_hdr_len) {
+            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+                n->guest_hdr_len) {
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+            if (virtio_needs_swap(vdev)) {
+                virtio_net_hdr_swap(vdev, (void *) &mhdr);
+                sg2[0].iov_base = &mhdr;
+                sg2[0].iov_len = n->guest_hdr_len;
+                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                                   out_sg, out_num,
+                                   n->guest_hdr_len, -1);
+                if (out_num == VIRTQUEUE_MAX_SIZE) {
+                    goto drop;
+                }
+                out_num += 1;
+                out_sg = sg2;
+            }
         }
 
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1186,7 +1207,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         len += ret;
-
+drop:
         virtqueue_push(q->tx_vq, &elem, 0);
         virtio_notify(vdev, q->tx_vq);
 
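The change above stops assuming the virtio-net header sits contiguously in out_sg[0]: with VIRTIO_F_ANY_LAYOUT a guest may split the header across descriptors, so the header is now gathered with iov_to_buf() and, when byte swapping is needed, the swapped copy in mhdr is chained in front of the remaining payload through sg2. A self-contained sketch of the gather step (gather_header is an illustrative stand-in for QEMU's iov_to_buf):

    #include <string.h>
    #include <sys/uio.h>

    static size_t gather_header(const struct iovec *sg, unsigned num,
                                void *hdr, size_t hdr_len)
    {
        size_t done = 0;
        unsigned i;

        /* Copy the first hdr_len bytes, however the guest split them. */
        for (i = 0; i < num && done < hdr_len; i++) {
            size_t n = sg[i].iov_len;
            if (n > hdr_len - done) {
                n = hdr_len - done;
            }
            memcpy((char *)hdr + done, sg[i].iov_base, n);
            done += n;
        }
        return done;   /* the caller checks done == hdr_len, as the patch does */
    }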
@@ -1306,9 +1327,86 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_add_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              virtio_net_tx_timer,
+                                              &n->vqs[index]);
+    } else {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+    }
+
+    n->vqs[index].tx_waiting = 0;
+    n->vqs[index].n = n;
+}
+
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIONetQueue *q = &n->vqs[index];
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+    qemu_purge_queued_packets(nc);
+
+    virtio_del_queue(vdev, index * 2);
+    if (q->tx_timer) {
+        timer_del(q->tx_timer);
+        timer_free(q->tx_timer);
+    } else {
+        qemu_bh_delete(q->tx_bh);
+    }
+    virtio_del_queue(vdev, index * 2 + 1);
+}
+
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int old_num_queues = virtio_get_num_queues(vdev);
+    int new_num_queues = new_max_queues * 2 + 1;
+    int i;
+
+    assert(old_num_queues >= 3);
+    assert(old_num_queues % 2 == 1);
+
+    if (old_num_queues == new_num_queues) {
+        return;
+    }
+
+    /*
+     * We always need to remove and add ctrl vq if
+     * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+     */
+    virtio_del_queue(vdev, old_num_queues - 1);
+
+    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+        /* new_num_queues < old_num_queues */
+        virtio_net_del_queue(n, i / 2);
+    }
+
+    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+        /* new_num_queues > old_num_queues */
+        virtio_net_add_queue(n, i / 2);
+    }
+
+    /* add ctrl_vq last */
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
 {
     int max = multiqueue ? n->max_queues : 1;
 
     n->multiqueue = multiqueue;
+    virtio_net_change_num_queues(n, max);
 
     virtio_net_set_queues(n);
 }
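The arithmetic above relies on the fixed virtqueue layout: queue pair i occupies indices 2*i (RX) and 2*i + 1 (TX), and the control queue is always last, so a device with N pairs has 2*N + 1 virtqueues. Deleting ctrl_vq first and re-adding it last keeps it at the end across a resize. A worked trace, assuming a shrink from 4 queue pairs to 1:

    old_num_queues = 4 * 2 + 1 = 9    /* ctrl vq at index 8 */
    new_num_queues = 1 * 2 + 1 = 3
    virtio_del_queue(vdev, 8)         /* drop ctrl vq first */
    first loop:  i = 2, 4, 6 (< 8)    /* deletes pairs 1, 2 and 3 */
    second loop: i = 8 (< 2), never entered
    ctrl vq re-added, now at index 2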
@@ -1583,21 +1681,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     }
 
     for (i = 0; i < n->max_queues; i++) {
-        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
-        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
-            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              virtio_net_tx_timer,
-                                              &n->vqs[i]);
-        } else {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
-            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
-        }
-
-        n->vqs[i].tx_waiting = 0;
-        n->vqs[i].n = n;
+        virtio_net_add_queue(n, i);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
@@ -1651,7 +1735,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
-    int i;
+    int i, max_queues;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
@@ -1666,18 +1750,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    for (i = 0; i < n->max_queues; i++) {
-        VirtIONetQueue *q = &n->vqs[i];
-        NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
-        qemu_purge_queued_packets(nc);
-
-        if (q->tx_timer) {
-            timer_del(q->tx_timer);
-            timer_free(q->tx_timer);
-        } else if (q->tx_bh) {
-            qemu_bh_delete(q->tx_bh);
-        }
-    }
+    max_queues = n->multiqueue ? n->max_queues : 1;
+    for (i = 0; i < max_queues; i++) {
+        virtio_net_del_queue(n, i);
+    }
 
     timer_del(n->announce_timer);
hw/pci/pci.c: +2 −4
@@ -2101,12 +2101,10 @@ static void pci_del_option_rom(PCIDevice *pdev)
 }
 
 /*
- * if !offset
- * Reserve space and add capability to the linked list in pci config space
- *
  * if offset = 0,
  * Find and reserve space and add capability to the linked list
- * in pci config space */
+ * in pci config space
+ */
 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                        uint8_t offset, uint8_t size)
 {
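With the duplicate text gone, the surviving comment documents the offset == 0 convention: pci_add_capability() then searches config space for a free range itself. A hypothetical caller sketch (the capability ID and size are made up for illustration):

    /* Pass offset = 0 so pci_add_capability() finds free config space. */
    int off = pci_add_capability(pdev, PCI_CAP_ID_VNDR, 0, 0x10);
    if (off < 0) {
        /* no free range in config space: propagate the error */
    }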