Commit ab67678a authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/aperard/tags/pull-xen-20190624' into staging



Xen queue

* Fix build
* xen-block: support feature-large-sector-size
* xen-block: Support IOThread polling for PV shared rings
* Avoid usage of a VLA
* Cleanup Xen headers usage

# gpg: Signature made Mon 24 Jun 2019 16:30:32 BST
# gpg:                using RSA key F80C006308E22CFD8A92E7980CF5572FD7FB55AF
# gpg:                issuer "anthony.perard@citrix.com"
# gpg: Good signature from "Anthony PERARD <anthony.perard@gmail.com>" [marginal]
# gpg:                 aka "Anthony PERARD <anthony.perard@citrix.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 5379 2F71 024C 600F 778A  7161 D8D5 7199 DF83 42C8
#      Subkey fingerprint: F80C 0063 08E2 2CFD 8A92  E798 0CF5 572F D7FB 55AF

* remotes/aperard/tags/pull-xen-20190624:
  xen: Import other xen/io/*.h
  Revert xen/io/ring.h of "Clean up a few header guard symbols"
  xen: Drop includes of xen/hvm/params.h
  xen: Avoid VLA
  xen-bus / xen-block: add support for event channel polling
  xen-bus: allow AioContext to be specified for each event channel
  xen-bus: use a separate fd for each event channel
  xen-block: support feature-large-sector-size

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 7fec76a0 a3434a2d
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -13,8 +13,8 @@
#ifndef HW_9PFS_XEN_9PFS_H
#define HW_9PFS_XEN_9PFS_H

#include <xen/io/protocols.h>
#include "hw/xen/io/ring.h"
#include "hw/xen/interface/io/protocols.h"
#include "hw/xen/interface/io/ring.h"

/*
 * Do not merge into xen-9p-backend.c: clang doesn't allow unused static
+24 −20
Original line number Diff line number Diff line
@@ -58,6 +58,7 @@ struct XenBlockDataPlane {
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
@@ -167,7 +168,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
        goto err;
    }

    request->start = request->req.sector_number * XEN_BLKIF_SECTOR_SIZE;
    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
@@ -177,14 +178,14 @@ static int xen_block_parse_request(XenBlockRequest *request)
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * XEN_BLKIF_SECTOR_SIZE >=
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * XEN_BLKIF_SECTOR_SIZE;
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
@@ -218,17 +219,17 @@ static int xen_block_copy_request(XenBlockRequest *request)
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                XEN_BLKIF_SECTOR_SIZE;
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                XEN_BLKIF_SECTOR_SIZE;
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      XEN_BLKIF_SECTOR_SIZE;
                      dataplane->sector_size;
        virt += segs[i].len;
    }

@@ -317,7 +318,9 @@ static void xen_block_complete_aio(void *opaque, int ret)
    }
    xen_block_release_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
@@ -336,12 +339,12 @@ static bool xen_block_split_discard(XenBlockRequest *request,

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / XEN_BLKIF_SECTOR_SIZE) {
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * XEN_BLKIF_SECTOR_SIZE;
    byte_remaining = sec_count * XEN_BLKIF_SECTOR_SIZE;
    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
@@ -514,12 +517,13 @@ static int xen_block_get_request(XenBlockDataPlane *dataplane,
 */
#define IO_PLUG_THRESHOLD 1

static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

@@ -551,6 +555,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
@@ -602,10 +607,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
        blk_io_unplug(dataplane->blk);
    }

    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
    return done_something;
}

static void xen_block_dataplane_bh(void *opaque)
@@ -617,21 +619,23 @@ static void xen_block_dataplane_bh(void *opaque)
    aio_context_release(dataplane->ctx);
}

static void xen_block_dataplane_event(void *opaque)
static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    qemu_bh_schedule(dataplane->bh);
    return xen_block_handle_requests(dataplane);
}

XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockConf *conf,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = conf->blk;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);
@@ -803,7 +807,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
        xen_device_bind_event_channel(xendev, dataplane->ctx, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      &local_err);
    if (local_err) {
+2 −1
Original line number Diff line number Diff line
@@ -15,7 +15,8 @@
typedef struct XenBlockDataPlane XenBlockDataPlane;

XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockConf *conf,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread);
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane);
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
+24 −14
Original line number Diff line number Diff line
@@ -52,11 +52,25 @@ static void xen_block_connect(XenDevice *xendev, Error **errp)
    XenBlockDevice *blockdev = XEN_BLOCK_DEVICE(xendev);
    const char *type = object_get_typename(OBJECT(blockdev));
    XenBlockVdev *vdev = &blockdev->props.vdev;
    BlockConf *conf = &blockdev->props.conf;
    unsigned int feature_large_sector_size;
    unsigned int order, nr_ring_ref, *ring_ref, event_channel, protocol;
    char *str;

    trace_xen_block_connect(type, vdev->disk, vdev->partition);

    if (xen_device_frontend_scanf(xendev, "feature-large-sector-size", "%u",
                                  &feature_large_sector_size) != 1) {
        feature_large_sector_size = 0;
    }

    if (feature_large_sector_size != 1 &&
        conf->logical_block_size != XEN_BLKIF_SECTOR_SIZE) {
        error_setg(errp, "logical_block_size != %u not supported by frontend",
                   XEN_BLKIF_SECTOR_SIZE);
        return;
    }

    if (xen_device_frontend_scanf(xendev, "ring-page-order", "%u",
                                  &order) != 1) {
        nr_ring_ref = 1;
@@ -150,7 +164,7 @@ static void xen_block_set_size(XenBlockDevice *blockdev)
    const char *type = object_get_typename(OBJECT(blockdev));
    XenBlockVdev *vdev = &blockdev->props.vdev;
    BlockConf *conf = &blockdev->props.conf;
    int64_t sectors = blk_getlength(conf->blk) / XEN_BLKIF_SECTOR_SIZE;
    int64_t sectors = blk_getlength(conf->blk) / conf->logical_block_size;
    XenDevice *xendev = XEN_DEVICE(blockdev);

    trace_xen_block_size(type, vdev->disk, vdev->partition, sectors);
@@ -185,6 +199,7 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
    const char *type = object_get_typename(OBJECT(blockdev));
    XenBlockVdev *vdev = &blockdev->props.vdev;
    BlockConf *conf = &blockdev->props.conf;
    BlockBackend *blk = conf->blk;
    Error *local_err = NULL;

    if (vdev->type == XEN_BLOCK_VDEV_TYPE_INVALID) {
@@ -206,8 +221,8 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
     * The blkif protocol does not deal with removable media, so it must
     * always be present, even for CDRom devices.
     */
    assert(conf->blk);
    if (!blk_is_inserted(conf->blk)) {
    assert(blk);
    if (!blk_is_inserted(blk)) {
        error_setg(errp, "device needs media, but drive is empty");
        return;
    }
@@ -224,26 +239,20 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)

    blkconf_blocksizes(conf);

    if (conf->logical_block_size != XEN_BLKIF_SECTOR_SIZE) {
        error_setg(errp, "logical_block_size != %u not supported",
                   XEN_BLKIF_SECTOR_SIZE);
        return;
    }

    if (conf->logical_block_size > conf->physical_block_size) {
        error_setg(
            errp, "logical_block_size > physical_block_size not supported");
        return;
    }

    blk_set_dev_ops(conf->blk, &xen_block_dev_ops, blockdev);
    blk_set_guest_block_size(conf->blk, conf->logical_block_size);
    blk_set_dev_ops(blk, &xen_block_dev_ops, blockdev);
    blk_set_guest_block_size(blk, conf->logical_block_size);

    if (conf->discard_granularity == -1) {
        conf->discard_granularity = conf->physical_block_size;
    }

    if (blk_get_flags(conf->blk) & BDRV_O_UNMAP) {
    if (blk_get_flags(blk) & BDRV_O_UNMAP) {
        xen_device_backend_printf(xendev, "feature-discard", "%u", 1);
        xen_device_backend_printf(xendev, "discard-granularity", "%u",
                                  conf->discard_granularity);
@@ -260,12 +269,13 @@ static void xen_block_realize(XenDevice *xendev, Error **errp)
                               blockdev->device_type);

    xen_device_backend_printf(xendev, "sector-size", "%u",
                              XEN_BLKIF_SECTOR_SIZE);
                              conf->logical_block_size);

    xen_block_set_size(blockdev);

    blockdev->dataplane =
        xen_block_dataplane_create(xendev, conf, blockdev->props.iothread);
        xen_block_dataplane_create(xendev, blk, conf->logical_block_size,
                                   blockdev->props.iothread);
}

static void xen_block_frontend_changed(XenDevice *xendev,
+2 −3
Original line number Diff line number Diff line
#ifndef XEN_BLKIF_H
#define XEN_BLKIF_H

#include "hw/xen/io/ring.h"
#include <xen/io/blkif.h>
#include <xen/io/protocols.h>
#include "hw/xen/interface/io/blkif.h"
#include "hw/xen/interface/io/protocols.h"

/*
 * Not a real protocol.  Used to generate ring structs which contain
Loading