Commit c35d17ca authored by Peter Maydell

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging



virtio, pc, pci: features, fixes, cleanups

virtio-pmem support.
libvhost user mq support.
A bunch of fixes all over the place.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Thu 04 Jul 2019 22:00:49 BST
# gpg:                using RSA key 281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (22 commits)
  docs: avoid vhost-user-net specifics in multiqueue section
  libvhost-user: implement VHOST_USER_PROTOCOL_F_MQ
  libvhost-user: support many virtqueues
  libvhost-user: add vmsg_set_reply_u64() helper
  pc: Move compat_apic_id_mode variable to PCMachineClass
  virtio: Don't change "started" flag on virtio_vmstate_change()
  virtio: Make sure we get correct state of device on handle_aio_output()
  virtio: Set "start_on_kick" on virtio_set_features()
  virtio: Set "start_on_kick" for legacy devices
  virtio: add "use-started" property
  virtio-pci: fix missing device properties
  pc: Support for virtio-pmem-pci
  numa: Handle virtio-pmem in NUMA stats
  hmp: Handle virtio-pmem when printing memory device infos
  virtio-pci: Proxy for virtio-pmem
  virtio-pmem: sync linux headers
  virtio-pci: Allow to specify additional interfaces for the base type
  virtio-pmem: add virtio device
  pcie: minor cleanups for slot control/status
  pcie: work around for racy guest init
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 57dfc2c4 3ef4dff2
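
The API-visible change running through the libvhost-user part of this series: vu_init() and vug_init() now take a max_queues argument and return bool instead of void, so callers pick a per-device queue limit and must check for failure. As a minimal sketch of the new GLib-side call pattern (backend_panic, backend_iface, backend_start and the queue count of 4 are hypothetical stand-ins, not code from this series):

#include <stdio.h>
#include <glib.h>
#include "libvhost-user-glib.h"

/* Hypothetical panic handler; the vu_panic_cb signature comes from
 * libvhost-user.h. */
static void backend_panic(VuDev *vu_dev, const char *msg)
{
    g_error("vhost-user panic: %s", msg);
}

/* Empty device callbacks, just enough for the sketch to compile;
 * a real backend fills in the VuDevIface hooks. */
static const VuDevIface backend_iface = { 0 };

/* csock is an already-connected vhost-user socket fd. */
static gboolean backend_start(VugDev *dev, int csock)
{
    /* New contract: pass the per-device queue limit and check the
     * result -- initialization can now fail (queue allocation). */
    if (!vug_init(dev, 4 /* max_queues */, csock,
                  backend_panic, &backend_iface)) {
        fprintf(stderr, "vug_init() failed\n");
        return FALSE;
    }
    return TRUE;
}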
+9 −3
@@ -131,18 +131,24 @@ static void vug_watch(VuDev *dev, int condition, void *data)
     }
 }
 
-void
-vug_init(VugDev *dev, int socket,
+bool
+vug_init(VugDev *dev, uint16_t max_queues, int socket,
          vu_panic_cb panic, const VuDevIface *iface)
 {
     g_assert(dev);
     g_assert(iface);
 
-    vu_init(&dev->parent, socket, panic, set_watch, remove_watch, iface);
+    if (!vu_init(&dev->parent, max_queues, socket, panic, set_watch,
+                 remove_watch, iface)) {
+        return false;
+    }
 
     dev->fdmap = g_hash_table_new_full(NULL, NULL, NULL,
                                        (GDestroyNotify) g_source_destroy);
 
     dev->src = vug_source_new(dev, socket, G_IO_IN, vug_watch, NULL);
+
+    return true;
 }
 
 void
+1 −1
@@ -25,7 +25,7 @@ typedef struct VugDev {
     GSource *src;
 } VugDev;
 
-void vug_init(VugDev *dev, int socket,
+bool vug_init(VugDev *dev, uint16_t max_queues, int socket,
               vu_panic_cb panic, const VuDevIface *iface);
 void vug_deinit(VugDev *dev);

+41 −25
@@ -216,6 +216,15 @@ vmsg_close_fds(VhostUserMsg *vmsg)
     }
 }
 
+/* Set reply payload.u64 and clear request flags and fd_num */
+static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
+{
+    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
+    vmsg->size = sizeof(vmsg->payload.u64);
+    vmsg->payload.u64 = val;
+    vmsg->fd_num = 0;
+}
+
 /* A test to see if we have userfault available */
 static bool
 have_userfault(void)
@@ -484,9 +493,9 @@ vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 static void
 vu_set_enable_all_rings(VuDev *dev, bool enabled)
 {
-    int i;
+    uint16_t i;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         dev->vq[i].enable = enabled;
     }
 }
@@ -907,7 +916,7 @@ vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 {
     int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Invalid queue index: %u", index);
         return false;
@@ -1151,7 +1160,8 @@ vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
 static bool
 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
-    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
+    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
+                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                         1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                         1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                         1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
@@ -1168,10 +1178,7 @@ vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
         features |= dev->iface->get_protocol_features(dev);
     }
 
-    vmsg->payload.u64 = features;
-    vmsg->size = sizeof(vmsg->payload.u64);
-    vmsg->fd_num = 0;
-
+    vmsg_set_reply_u64(vmsg, features);
     return true;
 }
 
@@ -1194,8 +1201,8 @@ vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 static bool
 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
-    DPRINT("Function %s() not implemented yet.\n", __func__);
-    return false;
+    vmsg_set_reply_u64(vmsg, dev->max_queues);
+    return true;
 }
 
 static bool
@@ -1207,7 +1214,7 @@ vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
     DPRINT("State.index: %d\n", index);
     DPRINT("State.enable:   %d\n", enable);
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vu_panic(dev, "Invalid vring_enable index: %u", index);
         return false;
     }
@@ -1307,17 +1314,14 @@ out:
 static bool
 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
 {
-    vmsg->payload.u64 = -1;
-    vmsg->size = sizeof(vmsg->payload.u64);
-
     if (dev->nregions) {
         vu_panic(dev, "Regions already registered at postcopy-listen");
+        vmsg_set_reply_u64(vmsg, -1);
         return true;
     }
     dev->postcopy_listening = true;
 
-    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
-    vmsg->payload.u64 = 0; /* Success */
+    vmsg_set_reply_u64(vmsg, 0);
     return true;
 }
 
@@ -1332,10 +1336,7 @@ vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
         DPRINT("%s: Done close\n", __func__);
     }
 
-    vmsg->fd_num = 0;
-    vmsg->payload.u64 = 0;
-    vmsg->size = sizeof(vmsg->payload.u64);
-    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
+    vmsg_set_reply_u64(vmsg, 0);
     DPRINT("%s: exit\n", __func__);
     return true;
 }
@@ -1582,7 +1583,7 @@ vu_deinit(VuDev *dev)
     }
     dev->nregions = 0;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         VuVirtq *vq = &dev->vq[i];
 
         if (vq->call_fd != -1) {
@@ -1627,18 +1628,23 @@ vu_deinit(VuDev *dev)
     if (dev->sock != -1) {
         close(dev->sock);
     }
+
+    free(dev->vq);
+    dev->vq = NULL;
 }
 
-void
+bool
 vu_init(VuDev *dev,
+        uint16_t max_queues,
         int socket,
         vu_panic_cb panic,
         vu_set_watch_cb set_watch,
         vu_remove_watch_cb remove_watch,
         const VuDevIface *iface)
 {
-    int i;
+    uint16_t i;
 
+    assert(max_queues > 0);
     assert(socket >= 0);
     assert(set_watch);
     assert(remove_watch);
@@ -1654,18 +1660,28 @@ vu_init(VuDev *dev,
     dev->iface = iface;
     dev->log_call_fd = -1;
     dev->slave_fd = -1;
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    dev->max_queues = max_queues;
+
+    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
+    if (!dev->vq) {
+        DPRINT("%s: failed to malloc virtqueues\n", __func__);
+        return false;
+    }
+
+    for (i = 0; i < max_queues; i++) {
         dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }
+
+    return true;
 }
 
 VuVirtq *
 vu_get_queue(VuDev *dev, int qidx)
 {
-    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
+    assert(qidx < dev->max_queues);
     return &dev->vq[qidx];
 }

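With the fixed VHOST_MAX_NR_VIRTQUEUE ceiling gone, every index check above consults dev->max_queues, the queue array is malloc()ed in vu_init() and freed in vu_deinit(), and VHOST_USER_GET_QUEUE_NUM plus the newly advertised VHOST_USER_PROTOCOL_F_MQ let the master discover that limit. A hedged sketch of what this means for backend code (process_queue and attach_queue_handlers are hypothetical names, not from this series):

#include "libvhost-user.h"

/* Hypothetical request handler for one virtqueue; the signature is the
 * vu_queue_handler_cb type from libvhost-user.h. */
static void process_queue(VuDev *dev, int qidx)
{
    /* pop and service requests on queue qidx */
}

/* Per-queue setup now loops to the max_queues value the backend itself
 * passed to vu_init(), not to a compile-time constant. */
static void attach_queue_handlers(VuDev *dev, uint16_t max_queues)
{
    for (uint16_t i = 0; i < max_queues; i++) {
        vu_set_queue_handler(dev, vu_get_queue(dev, i), process_queue);
    }
}
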
+7 −3
@@ -25,7 +25,6 @@
 #define VHOST_USER_F_PROTOCOL_FEATURES 30
 #define VHOST_LOG_PAGE 4096
 
-#define VHOST_MAX_NR_VIRTQUEUE 8
 #define VIRTQUEUE_MAX_SIZE 1024
 
 #define VHOST_MEMORY_MAX_NREGIONS 8
@@ -353,7 +352,7 @@ struct VuDev {
     int sock;
     uint32_t nregions;
     VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
-    VuVirtq vq[VHOST_MAX_NR_VIRTQUEUE];
+    VuVirtq *vq;
     VuDevInflightInfo inflight_info;
     int log_call_fd;
     int slave_fd;
@@ -362,6 +361,7 @@ struct VuDev {
     uint64_t features;
     uint64_t protocol_features;
     bool broken;
+    uint16_t max_queues;
 
     /* @set_watch: add or update the given fd to the watch set,
      * call cb when condition is met */
@@ -391,6 +391,7 @@ typedef struct VuVirtqElement {
 /**
  * vu_init:
  * @dev: a VuDev context
+ * @max_queues: maximum number of virtqueues
  * @socket: the socket connected to vhost-user master
  * @panic: a panic callback
  * @set_watch: a set_watch callback
@@ -398,8 +399,11 @@ typedef struct VuVirtqElement {
  * @iface: a VuDevIface structure with vhost-user device callbacks
  *
  * Intializes a VuDev vhost-user context.
+ *
+ * Returns: true on success, false on failure.
  **/
-void vu_init(VuDev *dev,
+bool vu_init(VuDev *dev,
+             uint16_t max_queues,
              int socket,
              vu_panic_cb panic,
              vu_set_watch_cb set_watch,
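
The updated doc comment spells out the failure mode, which matters most for raw (non-GLib) users of libvhost-user that bring their own event loop. A sketch under that assumption; the loop_* stubs are hypothetical stand-ins for real event-loop integration (the GLib version lives in libvhost-user-glib.c):

#include <stdbool.h>
#include <stdio.h>
#include "libvhost-user.h"

/* Hypothetical event-loop glue; the callback signatures are the
 * vu_set_watch_cb/vu_remove_watch_cb types from libvhost-user.h. */
static void loop_set_watch(VuDev *dev, int fd, int condition,
                           vu_watch_cb cb, void *data)
{
    /* register fd with the backend's poll loop */
}

static void loop_remove_watch(VuDev *dev, int fd)
{
    /* unregister fd */
}

static void loop_panic(VuDev *dev, const char *err)
{
    fprintf(stderr, "vhost-user panic: %s\n", err);
}

static const VuDevIface loop_iface = { 0 };

static bool device_setup(VuDev *dev, int socket_fd)
{
    /* max_queues precedes the socket; a false return means the device
     * must not be used (e.g. the virtqueue array allocation failed). */
    return vu_init(dev, 8, socket_fd, loop_panic,
                   loop_set_watch, loop_remove_watch, &loop_iface);
}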
+9 −7
@@ -25,6 +25,10 @@
 #include <sys/ioctl.h>
 #endif
 
+enum {
+    VHOST_USER_BLK_MAX_QUEUES = 8,
+};
+
 struct virtio_blk_inhdr {
     unsigned char status;
 };
@@ -334,12 +338,6 @@ static void vub_process_vq(VuDev *vu_dev, int idx)
     VuVirtq *vq;
     int ret;
 
-    if ((idx < 0) || (idx >= VHOST_MAX_NR_VIRTQUEUE)) {
-        fprintf(stderr, "VQ Index out of range: %d\n", idx);
-        vub_panic_cb(vu_dev, NULL);
-        return;
-    }
-
     gdev = container_of(vu_dev, VugDev, parent);
     vdev_blk = container_of(gdev, VubDev, parent);
     assert(vdev_blk);
@@ -631,7 +629,11 @@ int main(int argc, char **argv)
         vdev_blk->enable_ro = true;
    }
 
-    vug_init(&vdev_blk->parent, csock, vub_panic_cb, &vub_iface);
+    if (!vug_init(&vdev_blk->parent, VHOST_USER_BLK_MAX_QUEUES, csock,
+                  vub_panic_cb, &vub_iface)) {
+        fprintf(stderr, "Failed to initialized libvhost-user-glib\n");
+        goto err;
+    }
 
     g_main_loop_run(vdev_blk->loop);

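Two things fall out of the vhost-user-blk change: the device now pins its own limit at 8 queues via VHOST_USER_BLK_MAX_QUEUES, matching the old global VHOST_MAX_NR_VIRTQUEUE default, and the per-request index range check in vub_process_vq() can go away because libvhost-user itself now validates queue indexes against dev->max_queues.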