Commit 7376eda7 authored by Stefan Hajnoczi's avatar Stefan Hajnoczi
Browse files

block: make BDRV_POLL_WHILE() re-entrancy safe



Nested BDRV_POLL_WHILE() calls can occur.  Currently
assert(!wait_->need_kick) fails in AIO_WAIT_WHILE() when this happens.

This patch converts the bool wait_->need_kick flag to an unsigned
wait_->num_waiters counter.

Nesting works correctly because outer AIO_WAIT_WHILE() callers evaluate
the condition again after the inner caller completes (invoking the inner
caller counts as aio_poll() progress).

Reported-by: "fuweiwei (C)" <fuweiwei2@huawei.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180307124619.6218-1-stefanha@redhat.com
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parent e4ae62b8
Loading
Loading
Loading
Loading
+30 −31
Original line number Diff line number Diff line
@@ -50,8 +50,8 @@
 *   }
 */
typedef struct {
    /*
     * Number of AIO_WAIT_WHILE() callers currently blocked waiting for
     * their condition to become false.  Accessed with atomic ops so that
     * aio_wait_kick() can read it from another thread; a nonzero count
     * means a dummy BH must be scheduled to wake the waiter(s).
     * An unsigned counter (rather than a bool flag) makes nested
     * AIO_WAIT_WHILE() calls safe.
     */
    unsigned num_waiters;
} AioWait;

/**
@@ -84,9 +84,8 @@ typedef struct {
    } else {                                                       \
        assert(qemu_get_current_aio_context() ==                   \
               qemu_get_aio_context());                            \
        assert(!wait_->need_kick);                          \
        /* Set wait_->need_kick before evaluating cond.  */ \
        atomic_mb_set(&wait_->need_kick, true);             \
        /* Increment wait_->num_waiters before evaluating cond. */ \
        atomic_inc(&wait_->num_waiters);                           \
        while (busy_) {                                            \
            if ((cond)) {                                          \
                waited_ = busy_ = true;                            \
@@ -98,7 +97,7 @@ typedef struct {
                waited_ |= busy_;                                  \
            }                                                      \
        }                                                          \
        atomic_set(&wait_->need_kick, false);               \
        atomic_dec(&wait_->num_waiters);                           \
    }                                                              \
    waited_; })

+1 −1
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@ static void dummy_bh_cb(void *opaque)
/*
 * Wake up any AIO_WAIT_WHILE() callers blocked in the main loop.
 *
 * Schedules a dummy bottom half in the main AioContext so that a
 * blocked aio_poll() returns and re-evaluates its condition.  Only
 * does so when at least one waiter is registered, to avoid needless
 * BH traffic on the fast path.
 */
void aio_wait_kick(AioWait *wait)
{
    /* The barrier (or an atomic op) is in the caller.  */
    if (atomic_read(&wait->num_waiters)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}