Commit 3b170dc8 authored by Paolo Bonzini, committed by Fam Zheng
Browse files

throttle-groups: do not use qemu_co_enter_next



Prepare for removing this function; always restart throttled requests
from coroutine context.  This will matter when restarting throttled
requests will have to acquire a CoMutex.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170605123908.18777-9-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
parent 7258ed93
Loading
Loading
Loading
Loading
+37 −5
Original line number Diff line number Diff line
@@ -260,6 +260,20 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
    return must_wait;
}

/* Wake up the first request waiting on @blk's throttled queue, if any.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 *
 * Returns: true if a pending request was restarted, false if the
 * queue was empty.
 */
static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
                                                         bool is_write)
{
    return qemu_co_queue_next(&blk_get_public(blk)->throttled_reqs[is_write]);
}

/* Look for the next pending I/O request and schedule it.
 *
 * This assumes that tg->lock is held.
@@ -287,7 +301,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
    if (!must_wait) {
        /* Give preference to requests from the current blk */
        if (qemu_in_coroutine() &&
            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
            throttle_group_co_restart_queue(blk, is_write)) {
            token = blk;
        } else {
            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,15 +354,21 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
    qemu_mutex_unlock(&tg->lock);
}

static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
typedef struct {
    BlockBackend *blk;
    bool is_write;
} RestartData;

static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
{
    RestartData *data = opaque;
    BlockBackend *blk = data->blk;
    bool is_write = data->is_write;
    BlockBackendPublic *blkp = blk_get_public(blk);
    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
    bool empty_queue;

    aio_context_acquire(blk_get_aio_context(blk));
    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
    aio_context_release(blk_get_aio_context(blk));
    empty_queue = !throttle_group_co_restart_queue(blk, is_write);

    /* If the request queue was empty then we have to take care of
     * scheduling the next one */
@@ -359,6 +379,18 @@ static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
    }
}

/* Restart throttled requests for @blk from coroutine context: create a
 * coroutine running throttle_group_restart_queue_entry() and enter it in
 * the BlockBackend's AioContext.
 *
 * @blk:       the current BlockBackend
 * @is_write:  the type of operation (read/write)
 */
static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
{
    Coroutine *co;
    /* NOTE(review): rd lives on this stack frame but is passed to a
     * coroutine entered via aio_co_enter().  This is only safe if the
     * coroutine copies/consumes the fields before this function returns;
     * if aio_co_enter() can defer execution (e.g. the coroutine is
     * scheduled into another AioContext), rd would dangle — confirm, or
     * heap-allocate RestartData and free it in the entry function. */
    RestartData rd = {
        .blk = blk,
        .is_write = is_write
    };

    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
    aio_co_enter(blk_get_aio_context(blk), co);
}

void throttle_group_restart_blk(BlockBackend *blk)
{
    BlockBackendPublic *blkp = blk_get_public(blk);