Commit 94bf2658 authored by Richard Henderson, committed by David Gibson

target/ppc: Use atomic load for LQ and LQARX



Section 1.4 of the Power ISA v3.0B states that both of these
instructions are single-copy atomic.  As we cannot (yet) issue
128-bit loads within TCG, use the generic helpers provided.

Since TCG cannot (yet) return a 128-bit value, add a slot within
CPUPPCState for returning the high half of a 128-bit return value.
This solution is preferred to the helper assigning to architectural
registers directly, as it avoids clobbering all TCG live values.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent 0f3110fa
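
For readers unfamiliar with the convention the message describes: a TCG helper can hand back at most one 64-bit value, so the patch returns the low half of the quadword normally and parks the high half in the new CPUPPCState.retxh slot, which the translator then reads back with an ordinary load. A minimal standalone sketch of the pattern (illustrative names only, not QEMU code; assumes a 64-bit GCC/Clang host for unsigned __int128):

#include <stdint.h>

/* Stand-in for CPUPPCState: only the side slot matters here.  */
typedef struct { uint64_t retxh; } Env;

/* Return the low 64 bits; stash the high 64 bits in the env slot,
   mirroring what helper_lq_*_parallel does with env->retxh.  */
static uint64_t load16(Env *env, const unsigned __int128 *p)
{
    unsigned __int128 v = *p;          /* stands in for the atomic load */
    env->retxh = (uint64_t)(v >> 64);  /* high half via the side slot */
    return (uint64_t)v;                /* low half via the return value */
}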
target/ppc/cpu.h  +3 −0
@@ -1015,6 +1015,9 @@ struct CPUPPCState {
     /* Next instruction pointer */
     target_ulong nip;
 
+    /* High part of 128-bit helper return.  */
+    uint64_t retxh;
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */

target/ppc/helper.h  +5 −0
@@ -799,3 +799,8 @@ DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
 
 DEF_HELPER_1(tbegin, void, env)
 DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
+
+#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32)
+#endif
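
A note on the declarations above (a hedged reading, based on how QEMU's DEF_HELPER macros generally expand: i64 -> uint64_t, env -> CPUPPCState *, tl -> target_ulong, i32 -> uint32_t). The two lines declare the prototypes that mem_helper.c defines below, and TCG_CALL_NO_WG tells the code generator the helpers do not write TCG globals, which is what lets surrounding TCG values stay live across the call, per the commit message:

/* Approximate expansion of the DEF_HELPER_FLAGS_3 lines above.  */
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx);
uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx);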
target/ppc/mem_helper.c  +19 −1
@@ -21,9 +21,9 @@
 #include "exec/exec-all.h"
 #include "qemu/host-utils.h"
 #include "exec/helper-proto.h"
-
 #include "helper_regs.h"
 #include "exec/cpu_ldst.h"
+#include "tcg.h"
 #include "internal.h"
 
 //#define DEBUG_OP
@@ -215,6 +215,24 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
     return i;
 }
 
+#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128)
+uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
+                               uint32_t opidx)
+{
+    Int128 ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
+    env->retxh = int128_gethi(ret);
+    return int128_getlo(ret);
+}
+
+uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
+                               uint32_t opidx)
+{
+    Int128 ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
+    env->retxh = int128_gethi(ret);
+    return int128_getlo(ret);
+}
+#endif
+
 /*****************************************************************************/
 /* Altivec extension helpers */
 #if defined(HOST_WORDS_BIGENDIAN)
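
For illustration, a C-level caller could recombine the split return like this (a sketch assuming the accessors from QEMU's include/qemu/int128.h; lq_whole is a hypothetical name, not part of the patch):

/* Rebuild the full 128-bit value: low half from the helper's return,
   high half from the env->retxh side slot it filled in.  */
static Int128 lq_whole(CPUPPCState *env, target_ulong addr, uint32_t opidx)
{
    uint64_t lo = helper_lq_le_parallel(env, addr, opidx);
    return int128_make128(lo, env->retxh);  /* (lo, hi) argument order */
}

In the generated code itself there is no such C caller: the translator fetches the high half with tcg_gen_ld_i64 from offsetof(CPUPPCState, retxh), as the translate.c hunks below show.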
target/ppc/translate.c  +67 −24
@@ -2607,7 +2607,7 @@ static void gen_ld(DisasContext *ctx)
 static void gen_lq(DisasContext *ctx)
 {
     int ra, rd;
-    TCGv EA;
+    TCGv EA, hi, lo;
 
     /* lq is a legal user mode instruction starting in ISA 2.07 */
     bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
@@ -2633,16 +2633,35 @@ static void gen_lq(DisasContext *ctx)
     EA = tcg_temp_new();
     gen_addr_imm_index(ctx, EA, 0x0F);
 
-    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does
-       necessary 64-bit byteswap already. */
-    if (unlikely(ctx->le_mode)) {
-        gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
-        gen_addr_add(ctx, EA, EA, 8);
-        gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
-    } else {
-        gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
-        gen_addr_add(ctx, EA, EA, 8);
-        gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
+    /* Note that the low part is always in RD+1, even in LE mode.  */
+    lo = cpu_gpr[rd + 1];
+    hi = cpu_gpr[rd];
+
+    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+#ifdef CONFIG_ATOMIC128
+        TCGv_i32 oi = tcg_temp_new_i32();
+        if (ctx->le_mode) {
+            tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+            gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+        } else {
+            tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+            gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+        }
+        tcg_temp_free_i32(oi);
+        tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
+#else
+        /* Restart with exclusive lock.  */
+        gen_helper_exit_atomic(cpu_env);
+        ctx->base.is_jmp = DISAS_NORETURN;
+#endif
+    } else if (ctx->le_mode) {
+        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ);
+        gen_addr_add(ctx, EA, EA, 8);
+        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
+    } else {
+        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ);
+        gen_addr_add(ctx, EA, EA, 8);
+        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
     }
     tcg_temp_free(EA);
 }
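
The oi immediate built with make_memop_idx() packs the memory-operation flags and the MMU index into a single value for the out-of-line helper; in gen_lqarx below, OR-ing MO_ALIGN_16 into the memop is what replaces the old gen_check_align() call, pushing the 16-byte alignment check into the memory operation itself. For reference, the TCG of this era defines the packing in tcg/tcg.h roughly as:

/* Pack a TCGMemOp and an mmu index into one immediate; the helper
   side unpacks them again with get_memop()/get_mmuidx().  */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}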
@@ -3236,9 +3255,8 @@ STCX(stdcx_, DEF_MEMOP(MO_Q))
 /* lqarx */
 static void gen_lqarx(DisasContext *ctx)
 {
-    TCGv EA;
     int rd = rD(ctx->opcode);
-    TCGv gpr1, gpr2;
+    TCGv EA, hi, lo;
 
     if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                  (rd == rB(ctx->opcode)))) {
@@ -3247,24 +3265,49 @@ static void gen_lqarx(DisasContext *ctx)
     }
 
     gen_set_access_type(ctx, ACCESS_RES);
-    EA = tcg_temp_local_new();
+    EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    gen_check_align(ctx, EA, 15);
-    if (unlikely(ctx->le_mode)) {
-        gpr1 = cpu_gpr[rd+1];
-        gpr2 = cpu_gpr[rd];
-    } else {
-        gpr1 = cpu_gpr[rd];
-        gpr2 = cpu_gpr[rd+1];
-    }
-    tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
-    tcg_gen_mov_tl(cpu_reserve, EA);
-    gen_addr_add(ctx, EA, EA, 8);
-    tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
-
-    tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
-    tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
-    tcg_temp_free(EA);
+
+    /* Note that the low part is always in RD+1, even in LE mode.  */
+    lo = cpu_gpr[rd + 1];
+    hi = cpu_gpr[rd];
+
+    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
+#ifdef CONFIG_ATOMIC128
+        TCGv_i32 oi = tcg_temp_new_i32();
+        if (ctx->le_mode) {
+            tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16,
+                                                ctx->mem_idx));
+            gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
+        } else {
+            tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16,
+                                                ctx->mem_idx));
+            gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
+        }
+        tcg_temp_free_i32(oi);
+        tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
+#else
+        /* Restart with exclusive lock.  */
+        gen_helper_exit_atomic(cpu_env);
+        ctx->base.is_jmp = DISAS_NORETURN;
+        tcg_temp_free(EA);
+        return;
+#endif
+    } else if (ctx->le_mode) {
+        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
+        tcg_gen_mov_tl(cpu_reserve, EA);
+        gen_addr_add(ctx, EA, EA, 8);
+        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
+    } else {
+        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
+        tcg_gen_mov_tl(cpu_reserve, EA);
+        gen_addr_add(ctx, EA, EA, 8);
+        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
+    }
+    tcg_temp_free(EA);
+
+    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
+    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
 }
 
 /* stqcx. */