Commit 667221c1 authored by Paolo Bonzini's avatar Paolo Bonzini Committed by Fam Zheng
Browse files

coroutine-lock: add qemu_co_rwlock_downgrade and qemu_co_rwlock_upgrade



These functions are more efficient in the presence of contention.
qemu_co_rwlock_downgrade also guarantees not to block, which may
be useful in some algorithms too.

Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170629132749.997-3-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
parent a8c57408
Loading
Loading
Loading
Loading
+18 −0
Original line number Diff line number Diff line
@@ -228,6 +228,24 @@ void qemu_co_rwlock_init(CoRwlock *lock);
 */
void qemu_co_rwlock_rdlock(CoRwlock *lock);

/**
 * Write Locks the CoRwlock from a reader.  The caller must already hold the
 * lock as a reader.  This is a bit more efficient than
 * @qemu_co_rwlock_unlock followed by a separate @qemu_co_rwlock_wrlock.
 * However, if the lock cannot be upgraded immediately, control is transferred
 * to the caller of the current coroutine.  Also, @qemu_co_rwlock_upgrade
 * only overrides CoRwlock fairness if there are no concurrent readers, so
 * another writer might run while @qemu_co_rwlock_upgrade blocks.
 */
void qemu_co_rwlock_upgrade(CoRwlock *lock);

/**
 * Downgrades a write-side critical section to a reader.  The caller must
 * already hold the lock as a writer.  Downgrading with
 * @qemu_co_rwlock_downgrade never blocks, unlike @qemu_co_rwlock_unlock
 * followed by @qemu_co_rwlock_rdlock.  This makes it more efficient, but
 * may also sometimes be necessary for correctness.
 */
void qemu_co_rwlock_downgrade(CoRwlock *lock);

/**
 * Write Locks the mutex. If the lock cannot be taken immediately because
 * of a parallel reader, control is transferred to the caller of the current
+35 −0
Original line number Diff line number Diff line
@@ -402,6 +402,21 @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
    qemu_co_mutex_unlock(&lock->mutex);
}

void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    /* The caller entered its write-side critical section through
     * qemu_co_rwlock_wrlock or qemu_co_rwlock_upgrade, so lock->mutex
     * is already held on entry.
     */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* From here on, the read-side critical section proceeds without the
     * mutex; record the read lock in the coroutine's bookkeeping.
     */
    Coroutine *co = qemu_coroutine_self();
    co->locks_held++;
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
@@ -416,3 +431,23 @@ void qemu_co_rwlock_wrlock(CoRwlock *lock)
     * There is no need to update self->locks_held.
     */
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *co = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);

    /* Drop our read-side reference and announce ourselves as a pending
     * writer, then sleep until all other readers have gone away.
     */
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader > 0) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* As in qemu_co_rwlock_wrlock, the rest of the write-side critical
     * section runs with lock->mutex held.  locks_held already counts this
     * lock once from the read side, so decrement instead of counting it
     * twice.
     */
    co->locks_held--;
}