Commit 9176e8fb authored by Anthony Liguori

Merge remote-tracking branch 'stefanha/block-next' into staging



# By Stefan Hajnoczi
# Via Stefan Hajnoczi
* stefanha/block-next:
  aio: drop io_flush argument
  tests: drop event_active_cb()
  thread-pool: drop thread_pool_active()
  dataplane/virtio-blk: drop flush_true() and flush_io()
  block/ssh: drop return_true()
  block/sheepdog: drop have_co_req() and aio_flush_request()
  block/rbd: drop qemu_rbd_aio_flush_cb()
  block/nbd: drop nbd_have_request()
  block/linux-aio: drop qemu_laio_completion_cb()
  block/iscsi: drop iscsi_process_flush()
  block/gluster: drop qemu_gluster_aio_flush_cb()
  block/curl: drop curl_aio_flush()
  aio: stop using .io_flush()
  tests: adjust test-thread-pool to new aio_poll() semantics
  tests: adjust test-aio to new aio_poll() semantics
  dataplane/virtio-blk: check exit conditions before aio_poll()
  block: stop relying on io_flush() in bdrv_drain_all()
  block: ensure bdrv_drain_all() works during bdrv_delete()

Message-id: 1376921877-9576-1-git-send-email-stefanha@redhat.com
Signed-off-by: Anthony Liguori <anthony@codemonkey.ws>
parents 72420ce9 f2e5dca4
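
The whole series boils down to one API change: handlers no longer register an io_flush callback for aio_poll() to query for pending requests. A rough before/after sketch of a caller (signatures taken from the diffs below; the fd, callbacks, and opaque pointer are placeholders):

    /* before: a flush callback reported whether I/O was pending */
    aio_set_fd_handler(ctx, fd, my_read_cb, my_write_cb, my_flush_cb, opaque);
    aio_set_event_notifier(ctx, &notifier, my_notify_cb, my_flush_cb);

    /* after: the io_flush argument is gone; aio_poll() tracks progress itself */
    aio_set_fd_handler(ctx, fd, my_read_cb, my_write_cb, opaque);
    aio_set_event_notifier(ctx, &notifier, my_notify_cb);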
aio-posix.c  +11 −25
@@ -23,7 +23,6 @@ struct AioHandler
     GPollFD pfd;
     IOHandler *io_read;
     IOHandler *io_write;
-    AioFlushHandler *io_flush;
     int deleted;
     int pollfds_idx;
     void *opaque;
@@ -47,7 +46,6 @@ void aio_set_fd_handler(AioContext *ctx,
                         int fd,
                         IOHandler *io_read,
                         IOHandler *io_write,
-                        AioFlushHandler *io_flush,
                         void *opaque)
 {
     AioHandler *node;
@@ -84,7 +82,6 @@ void aio_set_fd_handler(AioContext *ctx,
         /* Update handler with latest information */
         node->io_read = io_read;
         node->io_write = io_write;
-        node->io_flush = io_flush;
         node->opaque = opaque;
         node->pollfds_idx = -1;

@@ -97,12 +94,10 @@ void aio_set_fd_handler(AioContext *ctx,

 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *notifier,
-                            EventNotifierHandler *io_read,
-                            AioFlushEventNotifierHandler *io_flush)
+                            EventNotifierHandler *io_read)
 {
     aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
-                       (IOHandler *)io_read, NULL,
-                       (AioFlushHandler *)io_flush, notifier);
+                       (IOHandler *)io_read, NULL, notifier);
 }

 bool aio_pending(AioContext *ctx)
@@ -147,8 +142,12 @@ static bool aio_dispatch(AioContext *ctx)
         if (!node->deleted &&
             (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
             node->io_read) {
             node->io_read(node->opaque);
-            progress = true;
+
+            /* aio_notify() does not count as progress */
+            if (node->opaque != &ctx->notifier) {
+                progress = true;
+            }
         }
         if (!node->deleted &&
             (revents & (G_IO_OUT | G_IO_ERR)) &&
             node->io_write) {
@@ -173,7 +172,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     int ret;
-    bool busy, progress;
+    bool progress;

     progress = false;

@@ -200,20 +199,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     g_array_set_size(ctx->pollfds, 0);

     /* fill pollfds */
-    busy = false;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
         node->pollfds_idx = -1;
-
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->opaque) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->pfd.events) {
             GPollFD pfd = {
                 .fd = node->pfd.fd,
@@ -226,8 +213,8 @@ bool aio_poll(AioContext *ctx, bool blocking)

     ctx->walking_handlers--;

-    /* No AIO operations?  Get us out of here */
-    if (!busy) {
+    /* early return if we only have the aio_notify() fd */
+    if (ctx->pollfds->len == 1) {
         return progress;
     }

@@ -250,6 +237,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
         }
     }

-    assert(progress || busy);
-    return true;
+    return progress;
 }
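
With the io_flush checks gone, aio_poll() returns true only when a handler actually made progress, instead of asserting that requests were pending. A caller that wants to drain ready events can loop on the return value; this is an illustrative sketch, not code from the series:

    /* run ready handlers until a non-blocking pass makes no progress */
    while (aio_poll(ctx, false)) {
        /* a handler ran; poll again in case it queued more work */
    }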
aio-win32.c  +15 −22
@@ -23,7 +23,6 @@
 struct AioHandler {
     EventNotifier *e;
     EventNotifierHandler *io_notify;
-    AioFlushEventNotifierHandler *io_flush;
     GPollFD pfd;
     int deleted;
     QLIST_ENTRY(AioHandler) node;
@@ -31,8 +30,7 @@ struct AioHandler {

 void aio_set_event_notifier(AioContext *ctx,
                             EventNotifier *e,
-                            EventNotifierHandler *io_notify,
-                            AioFlushEventNotifierHandler *io_flush)
+                            EventNotifierHandler *io_notify)
 {
     AioHandler *node;

@@ -73,7 +71,6 @@ void aio_set_event_notifier(AioContext *ctx,
         }
         /* Update handler with latest information */
         node->io_notify = io_notify;
-        node->io_flush = io_flush;
     }

     aio_notify(ctx);
@@ -96,7 +93,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
 {
     AioHandler *node;
     HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    bool busy, progress;
+    bool progress;
     int count;

     progress = false;
@@ -126,8 +123,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
         if (node->pfd.revents && node->io_notify) {
             node->pfd.revents = 0;
             node->io_notify(node->e);
-            progress = true;
+
+            /* aio_notify() does not count as progress */
+            if (node->e != &ctx->notifier) {
+                progress = true;
+            }
         }

         tmp = node;
         node = QLIST_NEXT(node, node);
@@ -147,19 +148,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers++;

     /* fill fd sets */
-    busy = false;
     count = 0;
     QLIST_FOREACH(node, &ctx->aio_handlers, node) {
-        /* If there aren't pending AIO operations, don't invoke callbacks.
-         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
-         * wait indefinitely.
-         */
-        if (!node->deleted && node->io_flush) {
-            if (node->io_flush(node->e) == 0) {
-                continue;
-            }
-            busy = true;
-        }
         if (!node->deleted && node->io_notify) {
             events[count++] = event_notifier_get_handle(node->e);
         }
@@ -167,8 +157,8 @@ bool aio_poll(AioContext *ctx, bool blocking)

     ctx->walking_handlers--;

-    /* No AIO operations?  Get us out of here */
-    if (!busy) {
+    /* early return if we only have the aio_notify() fd */
+    if (count == 1) {
         return progress;
     }

@@ -196,8 +186,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
                 event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                 node->io_notify) {
                 node->io_notify(node->e);
-                progress = true;
+
+                /* aio_notify() does not count as progress */
+                if (node->e != &ctx->notifier) {
+                    progress = true;
+                }
             }

             tmp = node;
             node = QLIST_NEXT(node, node);
@@ -214,6 +208,5 @@ bool aio_poll(AioContext *ctx, bool blocking)
         events[ret - WAIT_OBJECT_0] = events[--count];
     }

-    assert(progress || busy);
-    return true;
+    return progress;
 }
async.c  +2 −2
@@ -201,7 +201,7 @@ aio_ctx_finalize(GSource *source)
     AioContext *ctx = (AioContext *) source;

     thread_pool_free(ctx->thread_pool);
-    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
@@ -243,7 +243,7 @@ AioContext *aio_context_new(void)
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear, NULL);
+                           event_notifier_test_and_clear);

     return ctx;
 }
block.c  +37 −12
@@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque)

 void bdrv_io_limits_enable(BlockDriverState *bs)
 {
-    qemu_co_queue_init(&bs->throttled_reqs);
     bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
     bs->io_limits_enabled = true;
 }
@@ -306,6 +305,7 @@ BlockDriverState *bdrv_new(const char *device_name)
     bdrv_iostatus_disable(bs);
     notifier_list_init(&bs->close_notifiers);
     notifier_with_return_list_init(&bs->before_write_notifiers);
+    qemu_co_queue_init(&bs->throttled_reqs);

     return bs;
 }
@@ -1428,6 +1428,35 @@ void bdrv_close_all(void)
     }
 }

+/* Check if any requests are in-flight (including throttled requests) */
+static bool bdrv_requests_pending(BlockDriverState *bs)
+{
+    if (!QLIST_EMPTY(&bs->tracked_requests)) {
+        return true;
+    }
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        return true;
+    }
+    if (bs->file && bdrv_requests_pending(bs->file)) {
+        return true;
+    }
+    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
+        return true;
+    }
+    return false;
+}
+
+static bool bdrv_requests_pending_all(void)
+{
+    BlockDriverState *bs;
+    QTAILQ_FOREACH(bs, &bdrv_states, list) {
+        if (bdrv_requests_pending(bs)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 /*
  * Wait for pending requests to complete across all BlockDriverStates
  *
@@ -1442,12 +1471,11 @@ void bdrv_close_all(void)
  */
 void bdrv_drain_all(void)
 {
+    /* Always run first iteration so any pending completion BHs run */
+    bool busy = true;
     BlockDriverState *bs;
-    bool busy;
-
-    do {
-        busy = qemu_aio_wait();

+    while (busy) {
         /* FIXME: We do not have timer support here, so this is effectively
          * a busy wait.
          */
@@ -1456,12 +1484,9 @@ void bdrv_drain_all(void)
                 busy = true;
             }
         }
-    } while (busy);

-    /* If requests are still pending there is a bug somewhere */
-    QTAILQ_FOREACH(bs, &bdrv_states, list) {
-        assert(QLIST_EMPTY(&bs->tracked_requests));
-        assert(qemu_co_queue_empty(&bs->throttled_reqs));
+        busy = bdrv_requests_pending_all();
+        busy |= aio_poll(qemu_get_aio_context(), busy);
     }
 }

@@ -1606,11 +1631,11 @@ void bdrv_delete(BlockDriverState *bs)
     assert(!bs->job);
     assert(!bs->in_use);

-    bdrv_close(bs);
-
     /* remove from list, if necessary */
     bdrv_make_anon(bs);

+    bdrv_close(bs);
+
     g_free(bs);
 }

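Since aio_poll() can no longer ask io_flush() whether requests are in flight, bdrv_drain_all() now queries the block layer directly through bdrv_requests_pending_all(). Distilled from the diff above, the loop's shape is roughly (throttled-request restart elided):

    bool busy = true;
    while (busy) {
        /* ... restart throttled requests so they can complete ... */
        busy = bdrv_requests_pending_all();             /* tracked or throttled reqs left? */
        busy |= aio_poll(qemu_get_aio_context(), busy); /* block only while still busy */
    }
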
block/curl.c  +4 −21
@@ -86,7 +86,6 @@ typedef struct BDRVCURLState {

 static void curl_clean_state(CURLState *s);
 static void curl_multi_do(void *arg);
-static int curl_aio_flush(void *opaque);

 static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
                         void *s, void *sp)
@@ -94,17 +93,16 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
     DPRINTF("CURL (AIO): Sock action %d on fd %d\n", action, fd);
     switch (action) {
         case CURL_POLL_IN:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, curl_aio_flush, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, NULL, s);
             break;
         case CURL_POLL_OUT:
-            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, curl_aio_flush, s);
+            qemu_aio_set_fd_handler(fd, NULL, curl_multi_do, s);
             break;
         case CURL_POLL_INOUT:
-            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do,
-                                    curl_aio_flush, s);
+            qemu_aio_set_fd_handler(fd, curl_multi_do, curl_multi_do, s);
             break;
         case CURL_POLL_REMOVE:
-            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL, NULL);
+            qemu_aio_set_fd_handler(fd, NULL, NULL, NULL);
             break;
     }

@@ -495,21 +493,6 @@ out_noclean:
     return -EINVAL;
 }

-static int curl_aio_flush(void *opaque)
-{
-    BDRVCURLState *s = opaque;
-    int i, j;
-
-    for (i=0; i < CURL_NUM_STATES; i++) {
-        for(j=0; j < CURL_NUM_ACB; j++) {
-            if (s->states[i].acb[j]) {
-                return 1;
-            }
-        }
-    }
-    return 0;
-}
-
 static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
 {
     // Do we have to implement canceling? Seems to work without...