Commit 5dfc05cb authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging



Block pull request

# gpg: Signature made Fri 06 Jun 2014 17:08:50 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request: (42 commits)
  qapi: Extract qapi/block.json definitions
  qapi: Extract qapi/block-core.json definitions
  qapi: create two block related json modules
  qapi: Extract qapi/common.json definitions
  sheepdog: reload only header in a case of live snapshot
  sheepdog: fix vdi object update after live snapshot
  rbd: Fix leaks in rbd_start_aio() error path
  qemu-img: Document check exit codes
  block: fix wrong order in live block migration setup
  blockdev: acquire AioContext in block_set_io_throttle
  throttle: add detach/attach test case
  throttle: add throttle_detach/attach_aio_context()
  dataplane: Support VIRTIO_BLK_T_SCSI_CMD
  virtio-blk: Factor out virtio_blk_handle_scsi_req from virtio_blk_handle_scsi
  virtio-blk: Allow config-wce in dataplane
  block: Move declaration of bdrv_get_aio_context to block.h
  raw-posix: drop raw_get_aio_fd() since it is no longer used
  dataplane: implement async flush
  dataplane: delete IOQueue since it is no longer used
  dataplane: use the QEMU block layer for I/O
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 959e4147 2e95fa17
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -117,15 +117,21 @@ void qemu_bh_schedule_idle(QEMUBH *bh)

/* Schedule a bottom half to run in its AioContext.
 *
 * Idempotent while already scheduled.  The diff residue in the original
 * interleaved the pre- and post-patch lines (duplicate comment with an
 * unterminated opening, both smp_wmb() and smp_mb(), and two aio_notify()
 * calls); this is the reconstructed post-patch version, which loads bh->ctx
 * into a local before publishing bh->scheduled so the callback cannot
 * observe a torn context pointer.
 */
void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    if (bh->scheduled)
        return;
    ctx = bh->ctx;
    bh->idle = 0;
    /* Make sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    smp_mb();
    bh->scheduled = 1;
    aio_notify(ctx);
}


+1 −2
Original line number Diff line number Diff line
@@ -629,6 +629,7 @@ static int block_save_setup(QEMUFile *f, void *opaque)
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start track dirty blocks */
    ret = set_dirty_tracking();
@@ -638,8 +639,6 @@ static int block_save_setup(QEMUFile *f, void *opaque)
        return ret;
    }

    init_blk_migration(f);

    qemu_mutex_unlock_iothread();

    ret = flush_blks(f);
+114 −26
Original line number Diff line number Diff line
@@ -179,6 +179,7 @@ void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
@@ -363,6 +364,7 @@ BlockDriverState *bdrv_new(const char *device_name, Error **errp)
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
@@ -1856,7 +1858,11 @@ void bdrv_close_all(void)
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}

@@ -1881,17 +1887,6 @@ static bool bdrv_requests_pending(BlockDriverState *bs)
    return false;
}

/* Return true while any registered BlockDriverState still has I/O requests
 * in flight; false once every device is quiescent.
 */
static bool bdrv_requests_pending_all(void)
{
    BlockDriverState *it;

    QTAILQ_FOREACH(it, &bdrv_states, device_list) {
        if (!bdrv_requests_pending(it)) {
            continue;
        }
        return true;
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
@@ -1911,12 +1906,20 @@ void bdrv_drain_all(void)
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_start_throttled_reqs(bs);
        }
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

        busy = bdrv_requests_pending_all();
        busy |= aio_poll(qemu_get_aio_context(), busy);
            busy |= bs_busy;
        }
    }
}

@@ -2352,12 +2355,17 @@ int bdrv_commit_all(void)
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        if (bs->drv && bs->backing_hd) {
            int ret = bdrv_commit(bs);
            if (ret < 0) {
                aio_context_release(aio_context);
                return ret;
            }
        }
        aio_context_release(aio_context);
    }
    return 0;
}
@@ -2775,10 +2783,12 @@ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
            aio_poll(aio_context, true);
        }
    }
    return rwco.ret;
@@ -3831,10 +3841,15 @@ int bdrv_flush_all(void)
    int result = 0;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        int ret = bdrv_flush(bs);
        AioContext *aio_context = bdrv_get_aio_context(bs);
        int ret;

        aio_context_acquire(aio_context);
        ret = bdrv_flush(bs);
        if (ret < 0 && !result) {
            result = ret;
        }
        aio_context_release(aio_context);
    }

    return result;
@@ -4025,10 +4040,12 @@ int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
        /* Fast-path if already in coroutine context */
        bdrv_get_block_status_co_entry(&data);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_get_block_status_co_entry);
        qemu_coroutine_enter(co, &data);
        while (!data.done) {
            qemu_aio_wait();
            aio_poll(aio_context, true);
        }
    }
    return data.ret;
@@ -4621,7 +4638,7 @@ static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
    acb->is_write = is_write;
    acb->qiov = qiov;
    acb->bounce = qemu_blockalign(bs, qiov->size);
    acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);

    if (is_write) {
        qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
@@ -4660,13 +4677,14 @@ typedef struct BlockDriverAIOCBCoroutine {

/* Cancel an in-flight coroutine-based AIO request.
 *
 * Blocks, polling the BlockDriverState's own AioContext, until the request
 * has completed and signalled through *acb->done.  The original diff residue
 * contained both the removed qemu_aio_wait() call and the added
 * aio_poll(aio_context, true); this keeps only the post-patch poll loop so
 * the wait happens in the correct AioContext rather than the global one.
 */
static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
{
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    BlockDriverAIOCBCoroutine *acb =
        container_of(blockacb, BlockDriverAIOCBCoroutine, common);
    bool done = false;

    acb->done = &done;
    while (!done) {
        aio_poll(aio_context, true);
    }
}

@@ -4703,7 +4721,7 @@ static void coroutine_fn bdrv_co_do_rw(void *opaque)
            acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
    }

    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

@@ -4739,7 +4757,7 @@ static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_flush(bs);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

@@ -4766,7 +4784,7 @@ static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
    BlockDriverState *bs = acb->common.bs;

    acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
    acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
    qemu_bh_schedule(acb->bh);
}

@@ -4977,7 +4995,11 @@ void bdrv_invalidate_cache_all(Error **errp)
    Error *local_err = NULL;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_invalidate_cache(bs, &local_err);
        aio_context_release(aio_context);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
@@ -4990,7 +5012,11 @@ void bdrv_clear_incoming_migration_all(void)
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
        aio_context_release(aio_context);
    }
}

@@ -5006,10 +5032,12 @@ int bdrv_flush(BlockDriverState *bs)
        /* Fast-path if already in coroutine context */
        bdrv_flush_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_flush_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
            aio_poll(aio_context, true);
        }
    }

@@ -5119,10 +5147,12 @@ int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
        /* Fast-path if already in coroutine context */
        bdrv_discard_co_entry(&rwco);
    } else {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        co = qemu_coroutine_create(bdrv_discard_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();
            aio_poll(aio_context, true);
        }
    }

@@ -5633,8 +5663,66 @@ out:

/* Return the AioContext this BlockDriverState is currently attached to.
 *
 * The original diff residue kept both the old body ("always the main loop"
 * comment plus return qemu_get_aio_context()) and the new one, leaving the
 * second return unreachable.  Post-patch, bs->aio_context is initialised in
 * bdrv_new() and maintained by bdrv_attach_aio_context(), so it is the
 * single source of truth.
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
    return bs->aio_context;
}

/* Detach @bs (and, recursively, bs->file and bs->backing_hd) from its
 * current AioContext.
 *
 * On return bs->aio_context is NULL; no I/O may be submitted until
 * bdrv_attach_aio_context() installs a new context.  No-op when the state
 * has no driver.
 */
void bdrv_detach_aio_context(BlockDriverState *bs)
{
    if (!bs->drv) {
        return;
    }

    /* Tear down in the reverse order of bdrv_attach_aio_context():
     * throttling first, then the driver hook, then the children.
     */
    if (bs->io_limits_enabled) {
        throttle_detach_aio_context(&bs->throttle_state);
    }
    if (bs->drv->bdrv_detach_aio_context) {
        bs->drv->bdrv_detach_aio_context(bs);
    }
    if (bs->file) {
        bdrv_detach_aio_context(bs->file);
    }
    if (bs->backing_hd) {
        bdrv_detach_aio_context(bs->backing_hd);
    }

    bs->aio_context = NULL;
}

/* Attach @bs (and, recursively, bs->backing_hd and bs->file) to
 * @new_context.
 *
 * bs->aio_context is set before any hooks run so that code invoked from the
 * driver callback or throttling setup already sees the new context via
 * bdrv_get_aio_context().  Children are attached before the driver hook and
 * throttling, mirroring (in reverse) the order used by
 * bdrv_detach_aio_context().  No-op when the state has no driver.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context)
{
    if (!bs->drv) {
        return;
    }

    bs->aio_context = new_context;

    if (bs->backing_hd) {
        bdrv_attach_aio_context(bs->backing_hd, new_context);
    }
    if (bs->file) {
        bdrv_attach_aio_context(bs->file, new_context);
    }
    if (bs->drv->bdrv_attach_aio_context) {
        bs->drv->bdrv_attach_aio_context(bs, new_context);
    }
    if (bs->io_limits_enabled) {
        throttle_attach_aio_context(&bs->throttle_state, new_context);
    }
}

/* Move @bs to @new_context.
 *
 * Must be called with no requests in flight, which is enforced by draining
 * everything first.  Detach happens in the old context; attach is wrapped
 * in acquire/release of the new context because that context may be driven
 * by a different thread.
 */
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
{
    bdrv_drain_all(); /* ensure there are no in-flight requests */

    bdrv_detach_aio_context(bs);

    /* This function executes in the old AioContext so acquire the new one in
     * case it runs in a different thread.
     */
    aio_context_acquire(new_context);
    bdrv_attach_aio_context(bs, new_context);
    aio_context_release(new_context);
}

void bdrv_add_before_write_notifier(BlockDriverState *bs,
+1 −1
Original line number Diff line number Diff line
@@ -471,7 +471,7 @@ static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
    acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
    acb->ret = -error;

    bh = qemu_bh_new(error_callback_bh, acb);
    bh = aio_bh_new(bdrv_get_aio_context(bs), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

+34 −13
Original line number Diff line number Diff line
@@ -39,12 +39,13 @@ struct BlkverifyAIOCB {
/* Cancel a blkverify request by blocking until it completes.
 *
 * The original diff residue contained both the removed qemu_aio_wait() and
 * the added aio_poll(aio_context, true); this keeps only the post-patch
 * loop, which polls the request's own AioContext.
 */
static void blkverify_aio_cancel(BlockDriverAIOCB *blockacb)
{
    BlkverifyAIOCB *acb = (BlkverifyAIOCB *)blockacb;
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    bool finished = false;

    /* Wait until request completes, invokes its callback, and frees itself */
    acb->finished = &finished;
    while (!finished) {
        aio_poll(aio_context, true);
    }
}

@@ -228,7 +229,8 @@ static void blkverify_aio_cb(void *opaque, int ret)
            acb->verify(acb);
        }

        acb->bh = qemu_bh_new(blkverify_aio_bh, acb);
        acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                             blkverify_aio_bh, acb);
        qemu_bh_schedule(acb->bh);
        break;
    }
@@ -302,6 +304,22 @@ static bool blkverify_recurse_is_first_non_filter(BlockDriverState *bs,
    return bdrv_recurse_is_first_non_filter(s->test_file, candidate);
}

/* Propagate AioContext changes to ->test_file */
/* Forward the AioContext detach to the wrapped test image. */
static void blkverify_detach_aio_context(BlockDriverState *bs)
{
    BDRVBlkverifyState *state = bs->opaque;

    bdrv_detach_aio_context(state->test_file);
}

/* Forward the AioContext attach to the wrapped test image. */
static void blkverify_attach_aio_context(BlockDriverState *bs,
                                         AioContext *new_context)
{
    BDRVBlkverifyState *state = bs->opaque;

    bdrv_attach_aio_context(state->test_file, new_context);
}

static BlockDriver bdrv_blkverify = {
    .format_name                      = "blkverify",
    .protocol_name                    = "blkverify",
@@ -316,6 +334,9 @@ static BlockDriver bdrv_blkverify = {
    .bdrv_aio_writev                  = blkverify_aio_writev,
    .bdrv_aio_flush                   = blkverify_aio_flush,

    .bdrv_attach_aio_context          = blkverify_attach_aio_context,
    .bdrv_detach_aio_context          = blkverify_detach_aio_context,

    .is_filter                        = true,
    .bdrv_recurse_is_first_non_filter = blkverify_recurse_is_first_non_filter,
};
Loading