Commit b7542f7f authored by Emilio G. Cota, committed by Richard Henderson
Browse files

cputlb: remove tb_lock from tlb_flush functions



The acquisition of tb_lock was added when the async tlb_flush
was introduced in e3b9ca81 ("cputlb: introduce tlb_flush_* async work.")

tb_lock was there to allow us to do memset() on the tb_jmp_cache's.
However, since f3ced3c5 ("tcg: consistently access cpu->tb_jmp_cache
atomically") all accesses to tb_jmp_cache are atomic, so tb_lock
is not needed here. Get rid of it.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 194125e3
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -125,8 +125,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);
@@ -135,8 +133,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

@@ -180,8 +176,6 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -197,8 +191,6 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

    tb_unlock();
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)