Commit fd479c60 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20170603' into staging



Queued TCG patches

# gpg: Signature made Sat 03 Jun 2017 20:03:53 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC  16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20170603:
  tcg: consistently access cpu->tb_jmp_cache atomically
  gen-icount: use tcg_ctx.tcg_env instead of cpu_env
  gen-icount: add missing inline to gen_tb_end

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 82d76dc7 f3ced3c5
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -118,7 +118,7 @@ static void tlb_flush_nocheck(CPUState *cpu)

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
@@ -183,7 +183,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
        }
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

+12 −14
Original line number Diff line number Diff line
@@ -928,11 +928,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
        cpu_tb_jmp_cache_clear(cpu);
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
@@ -1813,19 +1809,21 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
    cpu_loop_exit_noexc(cpu);
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i;
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&cpu->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
+7 −5
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@ static inline void gen_tb_start(TranslationBlock *tb)
        count = tcg_temp_new_i32();
    }

    tcg_gen_ld_i32(count, cpu_env,
    tcg_gen_ld_i32(count, tcg_ctx.tcg_env,
                   -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));

    if (tb->cflags & CF_USE_ICOUNT) {
@@ -37,14 +37,14 @@ static inline void gen_tb_start(TranslationBlock *tb)
    tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);

    if (tb->cflags & CF_USE_ICOUNT) {
        tcg_gen_st16_i32(count, cpu_env,
        tcg_gen_st16_i32(count, tcg_ctx.tcg_env,
                         -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
    }

    tcg_temp_free_i32(count);
}

static void gen_tb_end(TranslationBlock *tb, int num_insns)
static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
{
    if (tb->cflags & CF_USE_ICOUNT) {
        /* Update the num_insn immediate parameter now that we know
@@ -62,14 +62,16 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
static inline void gen_io_start(void)
{
    TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_temp_free_i32(tmp);
}

static inline void gen_io_end(void)
{
    TCGv_i32 tmp = tcg_const_i32(0);
    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_temp_free_i32(tmp);
}

+10 −1
Original line number Diff line number Diff line
@@ -346,7 +346,7 @@ struct CPUState {

    void *env_ptr; /* CPUArchState */

    /* Writes protected by tb_lock, reads not thread-safe  */
    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
@@ -422,6 +422,15 @@ extern struct CPUTailQ cpus;

extern __thread CPUState *current_cpu;

/* Clear every entry of @cpu's translation-block jump cache.
 *
 * Each slot is written with atomic_set() because tb_jmp_cache is
 * accessed in parallel and all accesses must be atomic (see the
 * field's comment in struct CPUState updated by this same series).
 * Centralizes the clearing loop previously open-coded at each
 * call site (tlb flush, tb flush, CPU reset).
 */
static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
{
    unsigned int i;

    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        atomic_set(&cpu->tb_jmp_cache[i], NULL);
    }
}

/**
 * qemu_tcg_mttcg_enabled:
 * Check whether we are running MultiThread TCG or not.
+1 −4
Original line number Diff line number Diff line
@@ -274,7 +274,6 @@ void cpu_reset(CPUState *cpu)
static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
@@ -292,9 +291,7 @@ static void cpu_common_reset(CPUState *cpu)
    cpu->crash_occurred = false;

    if (tcg_enabled()) {
        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
        cpu_tb_jmp_cache_clear(cpu);

#ifdef CONFIG_SOFTMMU
        tlb_flush(cpu, 0);