Commit 2399d4e7, authored by Emilio G. Cota, committed by Richard Henderson
Browse files

target/arm: check CF_PARALLEL instead of parallel_cpus



This decouples the resulting translated code from the current state
of the system.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent c5a49c63
Loading
Loading
Loading
Loading
+32 −6
Original line number Diff line number Diff line
@@ -430,8 +430,9 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
}

/* Returns 0 on success; 1 otherwise.  */
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
                                       uint64_t new_lo, uint64_t new_hi,
                                       bool parallel)
{
    uintptr_t ra = GETPC();
    Int128 oldv, cmpv, newv;
@@ -440,7 +441,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);

    if (parallel_cpus) {
    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -484,8 +485,21 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
    return !success;
}

uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
/* Little-endian paired cmpxchg, non-parallel (single-threaded TCG) case.
 * The translator emits a call to this variant when CF_PARALLEL is clear
 * for the current TB; the common implementation does the actual work.
 */
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false);
}

/* Little-endian paired cmpxchg for the parallel (MTTCG) case: the
 * translator selects this helper when the TB was compiled with
 * CF_PARALLEL set.
 */
uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    /* Forward to the common implementation with parallel == true.  */
    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true);
}

static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
                                       uint64_t new_lo, uint64_t new_hi,
                                       bool parallel)
{
    uintptr_t ra = GETPC();
    Int128 oldv, cmpv, newv;
@@ -494,7 +508,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);

    if (parallel_cpus) {
    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
@@ -537,3 +551,15 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,

    return !success;
}

/* Big-endian paired cmpxchg for the non-parallel (single-threaded TCG)
 * case; used by TBs compiled without CF_PARALLEL.
 */
uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    /* Forward to the common implementation with parallel == false.  */
    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false);
}

/* Big-endian paired cmpxchg, parallel (MTTCG) case: selected by the
 * translator when CF_PARALLEL is set for the current TB.
 * (Continuation line re-aligned with the opening parenthesis, matching
 * the sibling _le_parallel wrapper and QEMU style.)
 */
uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true);
}
+4 −0
Original line number Diff line number Diff line
@@ -43,4 +43,8 @@ DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
/* 64-bit CRC helpers.  */
DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
/* Paired cmpxchg: separate _parallel variants exist so the translator
 * can pick the code path from the TB's CF_PARALLEL flag at translation
 * time rather than reading global state at run time.
 */
DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_le_parallel, TCG_CALL_NO_WG,
                   i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(paired_cmpxchg64_be_parallel, TCG_CALL_NO_WG,
                   i64, env, i64, i64, i64)
+0 −7
Original line number Diff line number Diff line
@@ -502,13 +502,6 @@ void HELPER(yield)(CPUARMState *env)
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* When running in MTTCG we don't generate jumps to the yield and
     * WFE helpers as it won't affect the scheduling of other vCPUs.
     * If we wanted to more completely model WFE/SEV so we don't busy
     * spin unnecessarily we would need to do something more involved.
     */
    g_assert(!parallel_cpus);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
+25 −6
Original line number Diff line number Diff line
@@ -1336,13 +1336,18 @@ static void handle_hint(DisasContext *s, uint32_t insn,
    case 3: /* WFI */
        s->base.is_jmp = DISAS_WFI;
        return;
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* YIELD */
        if (!parallel_cpus) {
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_YIELD;
        }
        return;
    case 2: /* WFE */
        if (!parallel_cpus) {
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            s->base.is_jmp = DISAS_WFE;
        }
        return;
@@ -1931,12 +1936,26 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                       MO_64 | MO_ALIGN | s->be_data);
            tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
        } else if (s->be_data == MO_LE) {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        } else {
            if (tb_cflags(s->base.tb) & CF_PARALLEL) {
                gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env,
                                                        cpu_exclusive_addr,
                                                        cpu_reg(s, rt),
                                                        cpu_reg(s, rt2));
            } else {
                gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr,
                                               cpu_reg(s, rt), cpu_reg(s, rt2));
            }
        }
    } else {
        tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val,
                                   cpu_reg(s, rt), get_mem_index(s),
+7 −2
Original line number Diff line number Diff line
@@ -4546,8 +4546,13 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!parallel_cpus) {
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
@@ -4557,7 +4562,7 @@ static void gen_nop_hint(DisasContext *s, int val)
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!parallel_cpus) {
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }