Commit a9392bc9 authored by Peter Maydell

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging



Block patches

# gpg: Signature made Tue Apr 28 15:35:05 2015 BST using RSA key ID C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"

* remotes/kevin/tags/for-upstream: (76 commits)
  block: move I/O request processing to block/io.c
  block: extract bdrv_setup_io_funcs()
  block: add bdrv_set_dirty()/bdrv_reset_dirty() to block_int.h
  block: replace bdrv_states iteration with bdrv_next()
  vmdk: Widen before shifting 32 bit header field
  block/dmg: make it modular
  block/mirror: Always call block_job_sleep_ns()
  iotests: add incremental backup granularity tests
  iotests: add incremental backup failure recovery test
  iotests: add simple incremental backup case
  iotests: add QMP event waiting queue
  iotests: add invalid input incremental backup tests
  hbitmap: truncate tests
  block: Resize bitmaps on bdrv_truncate
  block: Ensure consistent bitmap function prototypes
  block: add BdrvDirtyBitmap documentation
  qmp: Add dirty bitmap status field in query-block
  qmp: add block-dirty-bitmap-clear
  qmp: Add support of "dirty-bitmap" sync mode for drive-backup
  block: Add bitmap successors
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 84cbd63f 61007b31
MAINTAINERS: +12 −0
@@ -1182,7 +1182,19 @@ S: Supported
 F: block/gluster.c
 T: git git://github.com/codyprime/qemu-kvm-jtc.git block
 
+Null Block Driver
+M: Fam Zheng <famz@redhat.com>
+L: qemu-block@nongnu.org
+S: Supported
+F: block/null.c
+
+Bootdevice
+M: Gonglei <arei.gonglei@huawei.com>
+S: Maintained
+F: bootdevice.c
+
 Quorum
 M: Alberto Garcia <berto@igalia.com>
 S: Supported
 F: block/quorum.c
+L: qemu-block@nongnu.org
aio-posix.c: +66 −21
@@ -24,7 +24,6 @@ struct AioHandler
     IOHandler *io_read;
     IOHandler *io_write;
     int deleted;
-    int pollfds_idx;
     void *opaque;
     QLIST_ENTRY(AioHandler) node;
 };
@@ -83,7 +82,6 @@ void aio_set_fd_handler(AioContext *ctx,
         node->io_read = io_read;
         node->io_write = io_write;
         node->opaque = opaque;
-        node->pollfds_idx = -1;
 
         node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
         node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
@@ -186,13 +184,61 @@ bool aio_dispatch(AioContext *ctx)
     return progress;
 }
 
+/* These thread-local variables are used only in a small part of aio_poll
+ * around the call to the poll() system call.  In particular they are not
+ * used while aio_poll is performing callbacks, which makes it much easier
+ * to think about reentrancy!
+ *
+ * Stack-allocated arrays would be perfect but they have size limitations;
+ * heap allocation is expensive enough that we want to reuse arrays across
+ * calls to aio_poll().  And because poll() has to be called without holding
+ * any lock, the arrays cannot be stored in AioContext.  Thread-local data
+ * has none of the disadvantages of these three options.
+ */
+static __thread GPollFD *pollfds;
+static __thread AioHandler **nodes;
+static __thread unsigned npfd, nalloc;
+static __thread Notifier pollfds_cleanup_notifier;
+
+static void pollfds_cleanup(Notifier *n, void *unused)
+{
+    g_assert(npfd == 0);
+    g_free(pollfds);
+    g_free(nodes);
+    nalloc = 0;
+}
+
+static void add_pollfd(AioHandler *node)
+{
+    if (npfd == nalloc) {
+        if (nalloc == 0) {
+            pollfds_cleanup_notifier.notify = pollfds_cleanup;
+            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
+            nalloc = 8;
+        } else {
+            g_assert(nalloc <= INT_MAX);
+            nalloc *= 2;
+        }
+        pollfds = g_renew(GPollFD, pollfds, nalloc);
+        nodes = g_renew(AioHandler *, nodes, nalloc);
+    }
+    nodes[npfd] = node;
+    pollfds[npfd] = (GPollFD) {
+        .fd = node->pfd.fd,
+        .events = node->pfd.events,
+    };
+    npfd++;
+}
+
 bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     bool was_dispatching;
-    int ret;
+    int i, ret;
     bool progress;
+    int64_t timeout;
 
+    aio_context_acquire(ctx);
     was_dispatching = ctx->dispatching;
     progress = false;
 
@@ -210,39 +256,36 @@ bool aio_poll(AioContext *ctx, bool blocking)

     ctx->walking_handlers++;
 
-    g_array_set_size(ctx->pollfds, 0);
+    assert(npfd == 0);
 
     /* fill pollfds */
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        node->pollfds_idx = -1;
         if (!node->deleted && node->pfd.events) {
-            GPollFD pfd = {
-                .fd = node->pfd.fd,
-                .events = node->pfd.events,
-            };
-            node->pollfds_idx = ctx->pollfds->len;
-            g_array_append_val(ctx->pollfds, pfd);
+            add_pollfd(node);
         }
     }
 
-    ctx->walking_handlers--;
+    timeout = blocking ? aio_compute_timeout(ctx) : 0;
 
     /* wait until next event */
-    ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
-                         ctx->pollfds->len,
-                         blocking ? aio_compute_timeout(ctx) : 0);
+    if (timeout) {
+        aio_context_release(ctx);
+    }
+    ret = qemu_poll_ns((GPollFD *)pollfds, npfd, timeout);
+    if (timeout) {
+        aio_context_acquire(ctx);
+    }
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
-        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-            if (node->pollfds_idx != -1) {
-                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
-                                              node->pollfds_idx);
-                node->pfd.revents = pfd->revents;
-            }
+        for (i = 0; i < npfd; i++) {
+            nodes[i]->pfd.revents = pollfds[i].revents;
         }
     }
 
+    npfd = 0;
     ctx->walking_handlers--;
+
     /* Run dispatch even if there were no readable fds to run timers */
     aio_set_dispatching(ctx, true);
     if (aio_dispatch(ctx)) {
@@ -250,5 +293,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
+
     return progress;
 }
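
The new comment block in aio-posix.c explains why the poll arrays moved to thread-local storage: they are only live around the poll() call itself, heap reuse avoids stack-size limits and per-call allocation cost, and poll() must run without the lock, so AioContext cannot hold them. The grow-on-demand pattern in add_pollfd() is easy to see in isolation; the following standalone sketch (assuming only glib; tls_buf/tls_push are hypothetical names, and the real code's per-thread atexit cleanup notifier is omitted) mirrors it with a plain int array:

/* Minimal sketch of add_pollfd()'s grow-on-demand, thread-local array.
 * The real code keeps parallel GPollFD and AioHandler* arrays and frees
 * them from a qemu_thread_atexit notifier when the thread exits. */
#include <glib.h>
#include <stdio.h>

static __thread int *tls_buf;              /* reused across calls, per thread */
static __thread unsigned tls_len, tls_cap;

static void tls_push(int value)
{
    if (tls_len == tls_cap) {
        /* Start at 8 entries and double from there, as add_pollfd() does:
         * growth stays amortized O(1) and the buffer survives iterations. */
        tls_cap = tls_cap ? tls_cap * 2 : 8;
        tls_buf = g_renew(int, tls_buf, tls_cap);
    }
    tls_buf[tls_len++] = value;
}

int main(void)
{
    for (int i = 0; i < 20; i++) {
        tls_push(i);
    }
    printf("len=%u cap=%u\n", tls_len, tls_cap);   /* prints len=20 cap=32 */
    g_free(tls_buf);
    return 0;
}

Because each thread owns its arrays outright, no lock is needed while filling them or while poll() sleeps, which is exactly the property the comment above calls out.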
aio-win32.c: +8 −0
@@ -283,6 +283,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    aio_context_acquire(ctx);
     have_select_revents = aio_prepare(ctx);
     if (have_select_revents) {
         blocking = false;
@@ -323,7 +324,13 @@ bool aio_poll(AioContext *ctx, bool blocking)

         timeout = blocking
             ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
+        if (timeout) {
+            aio_context_release(ctx);
+        }
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
+        if (timeout) {
+            aio_context_acquire(ctx);
+        }
         aio_set_dispatching(ctx, true);
 
         if (first && aio_bh_poll(ctx)) {
@@ -349,5 +356,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
     progress |= timerlistgroup_run_timers(&ctx->tlg);
 
     aio_set_dispatching(ctx, was_dispatching);
+    aio_context_release(ctx);
     return progress;
 }
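
Both event loops now follow the same locking discipline: build the fd/handle arrays and dispatch callbacks while holding the AioContext, but release it whenever the wait can actually block (timeout != 0), presumably to spare the lock round-trip on the non-blocking path. A minimal pthread sketch of that discipline, where ctx_lock and poll_step() are hypothetical stand-ins for the AioContext lock and aio_poll(), and poll(NULL, 0, ...) stands in for qemu_poll_ns()/WaitForMultipleObjects():

/* Sketch of the release-around-blocking-wait pattern adopted above. */
#include <poll.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

static int poll_step(int timeout_ms)
{
    int ret;

    pthread_mutex_lock(&ctx_lock);
    /* ... fill the fd/handle arrays under the lock ... */
    if (timeout_ms) {
        pthread_mutex_unlock(&ctx_lock);   /* never sleep holding the lock */
    }
    ret = poll(NULL, 0, timeout_ms);       /* the potentially blocking wait */
    if (timeout_ms) {
        pthread_mutex_lock(&ctx_lock);
    }
    /* ... dispatch ready handlers under the lock ... */
    pthread_mutex_unlock(&ctx_lock);
    return ret;
}

int main(void)
{
    printf("poll_step -> %d\n", poll_step(10));
    return 0;
}

Dropping the lock only across the wait keeps every handler callback serialized under the context while still letting other threads use the context during the sleep.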
async.c: +1 −9
@@ -230,7 +230,6 @@ aio_ctx_finalize(GSource *source)
     event_notifier_cleanup(&ctx->notifier);
     rfifolock_destroy(&ctx->lock);
     qemu_mutex_destroy(&ctx->bh_lock);
-    g_array_free(ctx->pollfds, TRUE);
     timerlistgroup_deinit(&ctx->tlg);
 }
 
@@ -281,12 +280,6 @@ static void aio_timerlist_notify(void *opaque)
     aio_notify(opaque);
 }
 
-static void aio_rfifolock_cb(void *opaque)
-{
-    /* Kick owner thread in case they are blocked in aio_poll() */
-    aio_notify(opaque);
-}
-
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
@@ -302,10 +295,9 @@ AioContext *aio_context_new(Error **errp)
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
                            event_notifier_test_and_clear);
-    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
-    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+    rfifolock_init(&ctx->lock, NULL, NULL);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
 
     return ctx;
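
With aio_poll() now releasing the AioContext around its blocking wait, a thread contending for the context no longer needs to kick the owner out of poll(); it can simply block on the rfifolock until the owner drops it, which is why aio_rfifolock_cb() and its aio_notify() call go away. A sketch of a non-owner caller after this change; it compiles only inside the QEMU tree, and drive()/done are invented for illustration:

#include <stdbool.h>
#include "block/aio.h"

/* Hypothetical driver loop; aio_context_acquire/release and aio_poll are
 * the real APIs touched by this series.  aio_poll() itself drops the lock
 * while blocked, so other threads can also slip in mid-iteration. */
static void drive(AioContext *ctx, volatile bool *done)
{
    aio_context_acquire(ctx);
    while (!*done) {
        aio_poll(ctx, true);
    }
    aio_context_release(ctx);
}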
+314 −2540
(diff collapsed: preview size limit exceeded)