Commit 77a8b846 authored by Paolo Bonzini
Browse files

rcu: make memory barriers more explicit



Prepare for introducing smp_mb_placeholder() and smp_mb_global().
The new smp_mb() in synchronize_rcu() is not strictly necessary, since
the first atomic_mb_set for rcu_gp_ctr provides the required ordering.
However, synchronize_rcu is not performance critical, and it *will* be
necessary to introduce a smp_mb_global before calling wait_for_readers().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 729c0ddd
Loading
Loading
Loading
Loading
+13 −2
Original line number Diff line number Diff line
@@ -79,7 +79,10 @@ static inline void rcu_read_lock(void)
    }

    ctr = atomic_read(&rcu_gp_ctr);
    atomic_xchg(&p_rcu_reader->ctr, ctr);
    atomic_set(&p_rcu_reader->ctr, ctr);

    /* Write p_rcu_reader->ctr before reading RCU-protected pointers.  */
    smp_mb();
}

static inline void rcu_read_unlock(void)
@@ -91,7 +94,15 @@ static inline void rcu_read_unlock(void)
        return;
    }

    atomic_xchg(&p_rcu_reader->ctr, 0);
    /* Ensure that the critical section is seen to precede the
     * store to p_rcu_reader->ctr.  Together with the following
     * smp_mb(), this ensures writes to p_rcu_reader->ctr
     * are sequentially consistent.
     */
    atomic_store_release(&p_rcu_reader->ctr, 0);

    /* Write p_rcu_reader->ctr before reading p_rcu_reader->waiting.  */
    smp_mb();
    if (unlikely(atomic_read(&p_rcu_reader->waiting))) {
        atomic_set(&p_rcu_reader->waiting, false);
        qemu_event_set(&rcu_gp_event);
+9 −3
Original line number Diff line number Diff line
@@ -92,8 +92,9 @@ static void wait_for_readers(void)
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the
         * loads of index->ctr.
        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb();

@@ -142,8 +143,13 @@ static void wait_for_readers(void)
void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb() in rcu_read_lock().
     */
    smp_mb();

    qemu_mutex_lock(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.