Commit f5b4c310 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging



Pull request

# gpg: Signature made Fri 08 Mar 2019 16:53:34 GMT
# gpg:                using RSA key 9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  iothread: document about why we need explicit aio_poll()
  iothread: push gcontext earlier in the thread_fn
  iothread: create main loop unconditionally
  iothread: create the gcontext unconditionally
  iothread: replace init_done_cond with a semaphore
  hw/block/virtio-blk: Clean req->dev repetitions
  MAINTAINERS: add missing support status fields

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 4c761374 6ca20620
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -338,6 +338,7 @@ F: include/hw/tricore/

Multiarch Linux User Tests
M: Alex Bennée <alex.bennee@linaro.org>
+S: Maintained
F: tests/tcg/multiarch/

Guest CPU Cores (KVM):
@@ -2094,6 +2095,7 @@ F: qemu.sasl
Coroutines
M: Stefan Hajnoczi <stefanha@redhat.com>
M: Kevin Wolf <kwolf@redhat.com>
+S: Maintained
F: util/*coroutine*
F: include/qemu/coroutine*
F: tests/test-coroutine.c
@@ -2540,6 +2542,7 @@ F: .gitlab-ci.yml
Guest Test Compilation Support
M: Alex Bennée <alex.bennee@linaro.org>
R: Philippe Mathieu-Daudé <f4bug@amsat.org>
+S: Maintained
F: tests/tcg/Makefile
F: tests/tcg/Makefile.include
L: qemu-devel@nongnu.org
+9 −7
Original line number Diff line number Diff line
@@ -127,7 +127,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
        }

        if (ret) {
-            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
+            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
@@ -143,7 +143,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
+        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
@@ -260,9 +260,9 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
-    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
-    VirtQueueElement *elem = &req->elem;
+    VirtIOBlock *blk = req->dev;
+    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
+    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
@@ -492,16 +492,18 @@ static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
-    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
+    VirtIOBlock *s = req->dev;
+
+    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
-        virtio_blk_submit_multireq(req->dev->blk, mrb);
+        virtio_blk_submit_multireq(s->blk, mrb);
    }
-    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
+    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
+2 −3
Original line number Diff line number Diff line
@@ -24,11 +24,10 @@ typedef struct {

    QemuThread thread;
    AioContext *ctx;
+    bool run_gcontext;          /* whether we should run gcontext */
    GMainContext *worker_context;
    GMainLoop *main_loop;
-    GOnce once;
-    QemuMutex init_done_lock;
-    QemuCond init_done_cond;    /* is thread initialization done? */
+    QemuSemaphore init_done_sem; /* is thread init done? */
    bool stopping;              /* has iothread_stop() been called? */
    bool running;               /* should iothread_run() continue? */
    int thread_id;
+43 −47
Original line number Diff line number Diff line
@@ -53,36 +53,37 @@ static void *iothread_run(void *opaque)
    IOThread *iothread = opaque;

    rcu_register_thread();

+    /*
+     * g_main_context_push_thread_default() must be called before anything
+     * in this new thread uses glib.
+     */
+    g_main_context_push_thread_default(iothread->worker_context);
    my_iothread = iothread;
-    qemu_mutex_lock(&iothread->init_done_lock);
    iothread->thread_id = qemu_get_thread_id();
-    qemu_cond_signal(&iothread->init_done_cond);
-    qemu_mutex_unlock(&iothread->init_done_lock);
+    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
+        /*
+         * Note: from functional-wise the g_main_loop_run() below can
+         * already cover the aio_poll() events, but we can't run the
+         * main loop unconditionally because explicit aio_poll() here
+         * is faster than g_main_loop_run() when we do not need the
+         * gcontext at all (e.g., pure block layer iothreads).  In
+         * other words, when we want to run the gcontext with the
+         * iothread we need to pay some performance for functionality.
+         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was
         * changed in previous aio_poll()
         */
-        if (iothread->running && atomic_read(&iothread->worker_context)) {
-            GMainLoop *loop;
-
-            g_main_context_push_thread_default(iothread->worker_context);
-            iothread->main_loop =
-                g_main_loop_new(iothread->worker_context, TRUE);
-            loop = iothread->main_loop;
-
+        if (iothread->running && atomic_read(&iothread->run_gcontext)) {
             g_main_loop_run(iothread->main_loop);
-            iothread->main_loop = NULL;
-            g_main_loop_unref(loop);
-
-            g_main_context_pop_thread_default(iothread->worker_context);
         }
    }

+    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}
@@ -115,6 +116,9 @@ static void iothread_instance_init(Object *obj)

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    iothread->thread_id = -1;
+    qemu_sem_init(&iothread->init_done_sem, 0);
+    /* By default, we don't run gcontext */
+    atomic_set(&iothread->run_gcontext, 0);
}

static void iothread_instance_finalize(Object *obj)
@@ -123,10 +127,6 @@ static void iothread_instance_finalize(Object *obj)

    iothread_stop(iothread);

-    if (iothread->thread_id != -1) {
-        qemu_cond_destroy(&iothread->init_done_cond);
-        qemu_mutex_destroy(&iothread->init_done_lock);
-    }
    /*
     * Before glib2 2.33.10, there is a glib2 bug that GSource context
     * pointer may not be cleared even if the context has already been
@@ -144,7 +144,21 @@ static void iothread_instance_finalize(Object *obj)
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
+        g_main_loop_unref(iothread->main_loop);
+        iothread->main_loop = NULL;
    }
+    qemu_sem_destroy(&iothread->init_done_sem);
}

+static void iothread_init_gcontext(IOThread *iothread)
+{
+    GSource *source;
+
+    iothread->worker_context = g_main_context_new();
+    source = aio_get_g_source(iothread_get_aio_context(iothread));
+    g_source_attach(source, iothread->worker_context);
+    g_source_unref(source);
+    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
+}

static void iothread_complete(UserCreatable *obj, Error **errp)
@@ -161,6 +175,12 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
        return;
    }

+    /*
+     * Init one GMainContext for the iothread unconditionally, even if
+     * it's not used
+     */
+    iothread_init_gcontext(iothread);

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
@@ -173,10 +193,6 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
        return;
    }

-    qemu_mutex_init(&iothread->init_done_lock);
-    qemu_cond_init(&iothread->init_done_cond);
-    iothread->once = (GOnce) G_ONCE_INIT;

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
@@ -188,12 +204,9 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
    g_free(name);

    /* Wait for initialization to complete */
-    qemu_mutex_lock(&iothread->init_done_lock);
     while (iothread->thread_id == -1) {
-        qemu_cond_wait(&iothread->init_done_cond,
-                       &iothread->init_done_lock);
+        qemu_sem_wait(&iothread->init_done_sem);
     }
-    qemu_mutex_unlock(&iothread->init_done_lock);
}

typedef struct {
@@ -342,27 +355,10 @@ IOThreadInfoList *qmp_query_iothreads(Error **errp)
    return head;
}

-static gpointer iothread_g_main_context_init(gpointer opaque)
-{
-    AioContext *ctx;
-    IOThread *iothread = opaque;
-    GSource *source;
-
-    iothread->worker_context = g_main_context_new();
-
-    ctx = iothread_get_aio_context(iothread);
-    source = aio_get_g_source(ctx);
-    g_source_attach(source, iothread->worker_context);
-    g_source_unref(source);
-
-    aio_notify(iothread->ctx);
-    return NULL;
-}

GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
-    g_once(&iothread->once, iothread_g_main_context_init, iothread);
-
+    atomic_set(&iothread->run_gcontext, 1);
+    aio_notify(iothread->ctx);
    return iothread->worker_context;
}