Commit 5273a45e authored by Peter Maydell

Merge remote-tracking branch 'remotes/famz/tags/for-upstream' into staging



# gpg: Signature made Fri 28 Oct 2016 15:47:39 BST
# gpg:                using RSA key 0xCA35624C6A9171C6
# gpg: Good signature from "Fam Zheng <famz@redhat.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 5003 7CB7 9706 0F76 F021  AD56 CA35 624C 6A91 71C6

* remotes/famz/tags/for-upstream:
  aio: convert from RFifoLock to QemuRecMutex
  qemu-thread: introduce QemuRecMutex
  iothread: release AioContext around aio_poll
  block: only call aio_poll on the current thread's AioContext
  qemu-img: call aio_context_acquire/release around block job
  qemu-io: acquire AioContext
  block: prepare bdrv_reopen_multiple to release AioContext
  replication: pass BlockDriverState to reopen_backing_file
  iothread: detach all block devices before stopping them
  aio: introduce qemu_get_current_aio_context
  sheepdog: use BDRV_POLL_WHILE
  nfs: use BDRV_POLL_WHILE
  nfs: move nfs_set_events out of the while loops
  block: introduce BDRV_POLL_WHILE
  qed: Implement .bdrv_drain
  block: change drain to look only at one child at a time
  block: add BDS field to count in-flight requests
  mirror: use bdrv_drained_begin/bdrv_drained_end
  blockjob: introduce .drain callback for jobs
  replication: interrupt failover if the main device is closed

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 2dfe5113 3fe71223
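
For orientation (an editor's sketch, not part of the commit): after this series the AioContext lock is a recursive mutex taken via aio_context_acquire()/aio_context_release(), and synchronous code waits with the new BDRV_POLL_WHILE() macro instead of looping on aio_poll() by hand, as blk_prw() does in the block-backend.c hunk below. The helper name and its "done" flag here are hypothetical:

/* Sketch only, based on the hunks below: how a synchronous caller waits
 * for block I/O after this pull.  wait_for_done() and "done" are made-up
 * names; the real users are blk_prw() and the qemu-img/qemu-io patches. */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block.h"

static void wait_for_done(BlockDriverState *bs, bool *done)
{
    AioContext *ctx = bdrv_get_aio_context(bs);

    aio_context_acquire(ctx);        /* QemuRecMutex underneath, no RFifoLock */
    BDRV_POLL_WHILE(bs, !*done);     /* polls ctx; drops and retakes it while
                                      * waiting if called from another thread */
    aio_context_release(ctx);
}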
async.c  +7 −22
@@ -61,6 +61,7 @@ void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
     smp_wmb();
     ctx->first_bh = bh;
     qemu_mutex_unlock(&ctx->bh_lock);
+    aio_notify(ctx);
 }
 
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
@@ -106,8 +107,8 @@ int aio_bh_poll(AioContext *ctx)
          * aio_notify again if necessary.
          */
         if (atomic_xchg(&bh->scheduled, 0)) {
-            /* Idle BHs and the notify BH don't count as progress */
-            if (!bh->idle && bh != ctx->notify_dummy_bh) {
+            /* Idle BHs don't count as progress */
+            if (!bh->idle) {
                 ret = 1;
             }
             bh->idle = 0;
@@ -259,7 +260,6 @@ aio_ctx_finalize(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;
 
-    qemu_bh_delete(ctx->notify_dummy_bh);
     thread_pool_free(ctx->thread_pool);
 
 #ifdef CONFIG_LINUX_AIO
@@ -284,7 +284,7 @@ aio_ctx_finalize(GSource *source)
 
     aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    rfifolock_destroy(&ctx->lock);
+    qemu_rec_mutex_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
@@ -345,19 +345,6 @@ static void aio_timerlist_notify(void *opaque)
     aio_notify(opaque);
 }
 
-static void aio_rfifolock_cb(void *opaque)
-{
-    AioContext *ctx = opaque;
-
-    /* Kick owner thread in case they are blocked in aio_poll() */
-    qemu_bh_schedule(ctx->notify_dummy_bh);
-}
-
-static void notify_dummy_bh(void *opaque)
-{
-    /* Do nothing, we were invoked just to force the event loop to iterate */
-}
-
 static void event_notifier_dummy_cb(EventNotifier *e)
 {
 }
@@ -385,11 +372,9 @@ AioContext *aio_context_new(Error **errp)
 #endif
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+    qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
-    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
-
     return ctx;
 fail:
     g_source_destroy(&ctx->source);
@@ -408,10 +393,10 @@ void aio_context_unref(AioContext *ctx)
 
 void aio_context_acquire(AioContext *ctx)
 {
-    rfifolock_lock(&ctx->lock);
+    qemu_rec_mutex_lock(&ctx->lock);
 }
 
 void aio_context_release(AioContext *ctx)
 {
-    rfifolock_unlock(&ctx->lock);
+    qemu_rec_mutex_unlock(&ctx->lock);
 }
block.c  +4 −2
@@ -2082,7 +2082,7 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
  * to all devices.
  *
  */
-int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
+int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp)
 {
     int ret = -1;
     BlockReopenQueueEntry *bs_entry, *next;
@@ -2090,7 +2090,9 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 
     assert(bs_queue != NULL);
 
+    aio_context_release(ctx);
     bdrv_drain_all();
+    aio_context_acquire(ctx);
 
     QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
         if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
@@ -2131,7 +2133,7 @@ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
     Error *local_err = NULL;
     BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
 
-    ret = bdrv_reopen_multiple(queue, &local_err);
+    ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
     }
block/backup.c  +17 −0
@@ -300,6 +300,21 @@ void backup_cow_request_end(CowRequest *req)
     cow_request_end(req);
 }
 
+static void backup_drain(BlockJob *job)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    /* Need to keep a reference in case blk_drain triggers execution
+     * of backup_complete...
+     */
+    if (s->target) {
+        BlockBackend *target = s->target;
+        blk_ref(target);
+        blk_drain(target);
+        blk_unref(target);
+    }
+}
+
 static const BlockJobDriver backup_job_driver = {
     .instance_size          = sizeof(BackupBlockJob),
     .job_type               = BLOCK_JOB_TYPE_BACKUP,
@@ -307,6 +322,7 @@ static const BlockJobDriver backup_job_driver = {
     .commit                 = backup_commit,
     .abort                  = backup_abort,
     .attached_aio_context   = backup_attached_aio_context,
+    .drain                  = backup_drain,
 };
 
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -331,6 +347,7 @@ static void backup_complete(BlockJob *job, void *opaque)
     BackupCompleteData *data = opaque;
 
     blk_unref(s->target);
+    s->target = NULL;
 
     block_job_completed(job, data->ret);
     g_free(data);
block/block-backend.c  +20 −10
@@ -799,20 +799,25 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                BdrvRequestFlags flags)
 {
     int ret;
+    BlockDriverState *bs = blk_bs(blk);
 
-    trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);
+    trace_blk_co_preadv(blk, bs, offset, bytes, flags);
 
     ret = blk_check_byte_request(blk, offset, bytes);
     if (ret < 0) {
         return ret;
     }
 
+    bdrv_inc_in_flight(bs);
+
     /* throttling disk I/O */
     if (blk->public.throttle_state) {
         throttle_group_co_io_limits_intercept(blk, bytes, false);
     }
 
-    return bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
+    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
+    bdrv_dec_in_flight(bs);
+    return ret;
 }
 
 int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
@@ -820,14 +825,17 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                 BdrvRequestFlags flags)
 {
     int ret;
+    BlockDriverState *bs = blk_bs(blk);
 
-    trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);
+    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
 
     ret = blk_check_byte_request(blk, offset, bytes);
     if (ret < 0) {
         return ret;
     }
 
+    bdrv_inc_in_flight(bs);
+
     /* throttling disk I/O */
     if (blk->public.throttle_state) {
         throttle_group_co_io_limits_intercept(blk, bytes, true);
@@ -837,7 +845,9 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
         flags |= BDRV_REQ_FUA;
     }
 
-    return bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
+    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
+    bdrv_dec_in_flight(bs);
+    return ret;
 }
 
 typedef struct BlkRwCo {
@@ -868,7 +878,6 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
                    int64_t bytes, CoroutineEntry co_entry,
                    BdrvRequestFlags flags)
 {
-    AioContext *aio_context;
     QEMUIOVector qiov;
     struct iovec iov;
     Coroutine *co;
@@ -890,11 +899,7 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
 
     co = qemu_coroutine_create(co_entry, &rwco);
     qemu_coroutine_enter(co);
-
-    aio_context = blk_get_aio_context(blk);
-    while (rwco.ret == NOT_DONE) {
-        aio_poll(aio_context, true);
-    }
+    BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
 
     return rwco.ret;
 }
@@ -930,6 +935,8 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
 static void error_callback_bh(void *opaque)
 {
     struct BlockBackendAIOCB *acb = opaque;
+
+    bdrv_dec_in_flight(acb->common.bs);
     acb->common.cb(acb->common.opaque, acb->ret);
     qemu_aio_unref(acb);
 }
@@ -940,6 +947,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
 {
     struct BlockBackendAIOCB *acb;
 
+    bdrv_inc_in_flight(blk_bs(blk));
     acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
     acb->blk = blk;
     acb->ret = ret;
@@ -962,6 +970,7 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
 static void blk_aio_complete(BlkAioEmAIOCB *acb)
 {
     if (acb->has_returned) {
+        bdrv_dec_in_flight(acb->common.bs);
         acb->common.cb(acb->common.opaque, acb->rwco.ret);
         qemu_aio_unref(acb);
     }
@@ -983,6 +992,7 @@ static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
     BlkAioEmAIOCB *acb;
     Coroutine *co;
 
+    bdrv_inc_in_flight(blk_bs(blk));
     acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
     acb->rwco = (BlkRwCo) {
         .blk    = blk,
block/commit.c  +1 −1
@@ -251,7 +251,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
                                          orig_overlay_flags | BDRV_O_RDWR);
     }
     if (reopen_queue) {
-        bdrv_reopen_multiple(reopen_queue, &local_err);
+        bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
         if (local_err != NULL) {
             error_propagate(errp, local_err);
             block_job_unref(&s->common);