Commit 02b351d8 authored by Peter Maydell's avatar Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging



# gpg: Signature made Mon 16 Jan 2017 13:38:52 GMT
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  async: optimize aio_bh_poll
  aio: document locking
  aio-win32: remove walking_handlers, protecting AioHandler list with list_lock
  aio-posix: remove walking_handlers, protecting AioHandler list with list_lock
  aio: tweak walking in dispatch phase
  aio-posix: split aio_dispatch_handlers out of aio_dispatch
  qemu-thread: optimize QemuLockCnt with futexes on Linux
  aio: make ctx->list_lock a QemuLockCnt, subsuming ctx->walking_bh
  qemu-thread: introduce QemuLockCnt
  aio: rename bh_lock to list_lock
  block: get rid of bdrv_io_unplugged_begin/end

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents a8c611e1 7d506c90
Loading
Loading
Loading
Loading
+67 −49
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
@@ -66,7 +66,7 @@ static bool aio_epoll_try_enable(AioContext *ctx)
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
@@ -212,24 +212,27 @@ void aio_set_fd_handler(AioContext *ctx,
    bool is_new = false;
    bool deleted = false;

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (ctx->walking_handlers) {
        if (qemu_lockcnt_count(&ctx->list_lock)) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up after
             * releasing the walking_handlers lock.
             * deleted because deleted nodes are only cleaned up while
             * no one is walking the handlers list.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
@@ -243,7 +246,7 @@ void aio_set_fd_handler(AioContext *ctx,
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
@@ -265,6 +268,7 @@ void aio_set_fd_handler(AioContext *ctx,
    }

    aio_epoll_update(ctx, node, is_new);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
@@ -316,8 +320,8 @@ static void poll_set_started(AioContext *ctx, bool started)

    ctx->poll_started = started;

    ctx->walking_handlers++;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        IOHandler *fn;

        if (node->deleted) {
@@ -334,7 +338,7 @@ static void poll_set_started(AioContext *ctx, bool started)
            fn(node->opaque);
        }
    }
    ctx->walking_handlers--;
    qemu_lockcnt_dec(&ctx->list_lock);
}


@@ -349,54 +353,47 @@ bool aio_prepare(AioContext *ctx)
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            return true;
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            return true;
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return false;
    return result;
}

/*
 * Note that dispatch_fds == false has the side-effect of postponing the
 * freeing of deleted handlers.
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node = NULL;
    AioHandler *node, *tmp;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    if (dispatch_fds) {
        node = QLIST_FIRST(&ctx->aio_handlers);
    }
    while (node) {
        AioHandler *tmp;
        int revents;
    qemu_lockcnt_inc(&ctx->list_lock);

        ctx->walking_handlers++;
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;
@@ -420,15 +417,36 @@ bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;
        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
    qemu_lockcnt_dec(&ctx->list_lock);
    return progress;
}

/*
 * Note that dispatch_fds == false has the side-effect of postponing the
 * freeing of deleted handlers.
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    bool progress;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    progress = aio_bh_poll(ctx);

    if (dispatch_fds) {
        progress |= aio_dispatch_handlers(ctx);
    }

    /* Run our timers */
@@ -488,7 +506,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
    bool progress = false;
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_poll &&
                node->io_poll(node->opaque)) {
            progress = true;
@@ -509,7 +527,7 @@ static bool run_poll_handlers_once(AioContext *ctx)
 * Note that ctx->notify_me must be non-zero so this function can detect
 * aio_notify().
 *
 * Note that the caller must have incremented ctx->walking_handlers.
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
@@ -519,7 +537,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
    int64_t end_time;

    assert(ctx->notify_me);
    assert(ctx->walking_handlers > 0);
    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
    assert(ctx->poll_disable_cnt == 0);

    trace_run_poll_handlers_begin(ctx, max_ns);
@@ -541,7 +559,7 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
 *
 * ctx->notify_me must be non-zero so this function can detect aio_notify().
 *
 * Note that the caller must have incremented ctx->walking_handlers.
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
@@ -592,7 +610,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;
    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -606,7 +624,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
        /* fill pollfds */

        if (!aio_epoll_enabled(ctx)) {
            QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
                if (!node->deleted && node->pfd.events
                    && aio_node_check(ctx, node->is_external)) {
                    add_pollfd(node);
@@ -691,7 +709,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
    }

    npfd = 0;
    ctx->walking_handlers--;
    qemu_lockcnt_dec(&ctx->list_lock);

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx, ret > 0)) {
+51 −32
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

struct AioHandler {
    EventNotifier *e;
@@ -45,6 +46,7 @@ void aio_set_fd_handler(AioContext *ctx,
    /* fd is a SOCKET in our case */
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
@@ -54,14 +56,14 @@ void aio_set_fd_handler(AioContext *ctx,
    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
            /* If aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
@@ -74,7 +76,7 @@ void aio_set_fd_handler(AioContext *ctx,
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        }

        node->pfd.events = 0;
@@ -99,6 +101,7 @@ void aio_set_fd_handler(AioContext *ctx,
                       FD_CONNECT | FD_WRITE | FD_OOB);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

@@ -117,6 +120,7 @@ void aio_set_event_notifier(AioContext *ctx,
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
@@ -128,14 +132,14 @@ void aio_set_event_notifier(AioContext *ctx,
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
            /* aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
@@ -149,7 +153,7 @@ void aio_set_event_notifier(AioContext *ctx,
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
@@ -157,6 +161,7 @@ void aio_set_event_notifier(AioContext *ctx,
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

@@ -175,10 +180,16 @@ bool aio_prepare(AioContext *ctx)
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
@@ -188,7 +199,7 @@ bool aio_prepare(AioContext *ctx)
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
@@ -202,45 +213,55 @@ bool aio_prepare(AioContext *ctx)
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            return true;
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            return true;
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            return true;
            result = true;
            break;
        }
    }

    return false;
    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    qemu_lockcnt_inc(&ctx->list_lock);

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        ctx->walking_handlers++;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
@@ -275,17 +296,16 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return progress;
}

@@ -323,20 +343,19 @@ bool aio_poll(AioContext *ctx, bool blocking)
        atomic_add(&ctx->notify_me, 2);
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    ctx->walking_handlers++;

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    qemu_lockcnt_dec(&ctx->list_lock);
    first = true;

    /* ctx->notifier is always registered.  */
+25 −20
Original line number Diff line number Diff line
@@ -53,14 +53,14 @@ void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    bh->scheduled = 1;
    bh->deleted = 1;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

@@ -73,12 +73,12 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    qemu_lockcnt_lock(&ctx->list_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    qemu_lockcnt_unlock(&ctx->list_lock);
    return bh;
}

@@ -92,14 +92,13 @@ int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;
    bool deleted = false;

    ctx->walking_bh++;
    qemu_lockcnt_inc(&ctx->list_lock);

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
    for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
        next = atomic_rcu_read(&bh->next);
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
@@ -114,13 +113,18 @@ int aio_bh_poll(AioContext *ctx)
            bh->idle = 0;
            aio_bh_call(bh);
        }
        if (bh->deleted) {
            deleted = true;
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
    if (!deleted) {
        qemu_lockcnt_dec(&ctx->list_lock);
        return ret;
    }

    if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
@@ -131,9 +135,8 @@ int aio_bh_poll(AioContext *ctx)
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
        qemu_lockcnt_unlock(&ctx->list_lock);
    }

    return ret;
}

@@ -187,7 +190,8 @@ aio_compute_timeout(AioContext *ctx)
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
    for (bh = atomic_rcu_read(&ctx->first_bh); bh;
         bh = atomic_rcu_read(&bh->next)) {
        if (bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
@@ -270,7 +274,8 @@ aio_ctx_finalize(GSource *source)
    }
#endif

    qemu_mutex_lock(&ctx->bh_lock);
    qemu_lockcnt_lock(&ctx->list_lock);
    assert(!qemu_lockcnt_count(&ctx->list_lock));
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

@@ -280,12 +285,12 @@ aio_ctx_finalize(GSource *source)
        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);
    qemu_lockcnt_unlock(&ctx->list_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

@@ -372,6 +377,7 @@ AioContext *aio_context_new(Error **errp)
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
@@ -381,7 +387,6 @@ AioContext *aio_context_new(Error **errp)
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

+2 −39
Original line number Diff line number Diff line
@@ -228,9 +228,7 @@ void bdrv_drained_begin(BlockDriverState *bs)
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
@@ -302,7 +300,6 @@ void bdrv_drain_all_begin(void)

        aio_context_acquire(aio_context);
        bdrv_parent_drained_begin(bs);
        bdrv_io_unplugged_begin(bs);
        aio_disable_external(aio_context);
        aio_context_release(aio_context);

@@ -347,7 +344,6 @@ void bdrv_drain_all_end(void)

        aio_context_acquire(aio_context);
        aio_enable_external(aio_context);
        bdrv_io_unplugged_end(bs);
        bdrv_parent_drained_end(bs);
        aio_context_release(aio_context);
    }
@@ -2650,7 +2646,7 @@ void bdrv_io_plug(BlockDriverState *bs)
        bdrv_io_plug(child->bs);
    }

    if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
    if (bs->io_plugged++ == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
@@ -2663,7 +2659,7 @@ void bdrv_io_unplug(BlockDriverState *bs)
    BdrvChild *child;

    assert(bs->io_plugged);
    if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
    if (--bs->io_plugged == 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
@@ -2674,36 +2670,3 @@ void bdrv_io_unplug(BlockDriverState *bs)
        bdrv_io_unplug(child->bs);
    }
}

void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_unplug) {
            drv->bdrv_io_unplug(bs);
        }
    }

    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_begin(child->bs);
    }
}

void bdrv_io_unplugged_end(BlockDriverState *bs)
{
    BdrvChild *child;

    assert(bs->io_plug_disabled);
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_io_unplugged_end(child->bs);
    }

    if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
        BlockDriver *drv = bs->drv;
        if (drv && drv->bdrv_io_plug) {
            drv->bdrv_io_plug(bs);
        }
    }
}

docs/lockcnt.txt

0 → 100644
+277 −0

File added.

Preview size limit exceeded, changes collapsed.

Loading