Commit cd2b9b86 authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20140512' into staging



tcg updates for 20140512

# gpg: Signature made Tue 13 May 2014 00:19:56 BST using RSA key ID 4DD0279B
# gpg: Can't check signature: public key not found

* remotes/rth/tags/pull-tcg-20140512: (26 commits)
  tcg: Remove unreachable code in tcg_out_op and op_defs
  tcg: Use tcg_target_available_regs in tcg_reg_alloc_mov
  tcg: Make call address a constant parameter
  tci: Create tcg_out_call
  tcg-mips: Split out tcg_out_call
  tcg-sparc: Create tcg_out_call
  tcg-ppc64: Rename tcg_out_calli to tcg_out_call
  tcg-ppc: Split out tcg_out_call
  tcg-s390: Rename tgen_calli to tcg_out_call
  tcg-i386: Rename tcg_out_calli to tcg_out_call
  tcg: Require TCG_TARGET_INSN_UNIT_SIZE
  tci: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-mips: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ia64: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-s390: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-aarch64: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-arm: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-sparc: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ppc: Define TCG_TARGET_INSN_UNIT_SIZE
  tcg-ppc64: Define TCG_TARGET_INSN_UNIT_SIZE
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents fccae322 96d0ee7f
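The unifying change in this series is a new tcg_insn_unit type for generated-code pointers: each backend declares its minimum instruction width through TCG_TARGET_INSN_UNIT_SIZE, and code pointers become tcg_insn_unit * instead of uint8_t * or raw intptr_t. The sketch below paraphrases the mechanism the series adds to tcg/tcg.h (abridged, not a verbatim quote of the commit):

#if TCG_TARGET_INSN_UNIT_SIZE == 1
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
#error "unsupported TCG_TARGET_INSN_UNIT_SIZE"
#endif

/* Byte- and insn-count helpers used throughout the diffs below. */
static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return (char *)a - (char *)b;
}

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    return tcg_ptr_byte_diff(target, s->code_ptr);
}

static inline size_t tcg_current_code_size(TCGContext *s)
{
    return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
}

Because subtraction of two tcg_insn_unit pointers counts whole instructions, the explicit "/ 4" scaling disappears from the aarch64 and arm relocation code in the diffs that follow.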
include/exec/exec-all.h  +2 −2
@@ -145,7 +145,7 @@ struct TranslationBlock {
 #define CF_COUNT_MASK  0x7fff
 #define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
 
-    uint8_t *tc_ptr;    /* pointer to the translated code */
+    void *tc_ptr;    /* pointer to the translated code */
     /* next matching tb for physical address. */
     struct TranslationBlock *phys_hash_next;
     /* first and second physical page containing code. The lower bit
@@ -229,7 +229,7 @@ void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
 static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
 {
     /* patch the branch destination */
-    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+    stl_p((void*)jmp_addr, addr - (jmp_addr + 4));
     /* no need to flush icache explicitly */
 }
 #elif defined(__aarch64__)
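A note on the hunk above: the old code patched the jump displacement with a raw store through a cast pointer, which is an unaligned uint32_t write on strict-alignment hosts; stl_p has memcpy semantics and is alignment-safe. A minimal stand-in with the same contract (a sketch, not QEMU's implementation, which lives in the bswap/cpu-all headers and also handles target endianness):

#include <stdint.h>
#include <string.h>

/* stl_p-style helper: byte-wise 32-bit store, legal at any alignment. */
static inline void stl_p_sketch(void *ptr, uint32_t v)
{
    memcpy(ptr, &v, sizeof(v));
}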
tcg/aarch64/tcg-target.c  +54 −87
@@ -63,40 +63,34 @@ static const int tcg_target_call_oarg_regs[1] = {
 # endif
 #endif
 
-static inline void reloc_pc26(void *code_ptr, intptr_t target)
+static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
-    intptr_t offset = (target - (intptr_t)code_ptr) / 4;
+    ptrdiff_t offset = target - code_ptr;
+    assert(offset == sextract64(offset, 0, 26));
     /* read instruction, mask away previous PC_REL26 parameter contents,
        set the proper offset, then write back the instruction. */
-    uint32_t insn = *(uint32_t *)code_ptr;
-    insn = deposit32(insn, 0, 26, offset);
-    *(uint32_t *)code_ptr = insn;
+    *code_ptr = deposit32(*code_ptr, 0, 26, offset);
 }
 
-static inline void reloc_pc19(void *code_ptr, intptr_t target)
+static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
-    intptr_t offset = (target - (intptr_t)code_ptr) / 4;
-    /* read instruction, mask away previous PC_REL19 parameter contents,
-       set the proper offset, then write back the instruction. */
-    uint32_t insn = *(uint32_t *)code_ptr;
-    insn = deposit32(insn, 5, 19, offset);
-    *(uint32_t *)code_ptr = insn;
+    ptrdiff_t offset = target - code_ptr;
+    assert(offset == sextract64(offset, 0, 19));
+    *code_ptr = deposit32(*code_ptr, 5, 19, offset);
 }
 
-static inline void patch_reloc(uint8_t *code_ptr, int type,
+static inline void patch_reloc(tcg_insn_unit *code_ptr, int type,
                                intptr_t value, intptr_t addend)
 {
-    value += addend;
-
+    assert(addend == 0);
     switch (type) {
     case R_AARCH64_JUMP26:
     case R_AARCH64_CALL26:
-        reloc_pc26(code_ptr, value);
+        reloc_pc26(code_ptr, (tcg_insn_unit *)value);
         break;
     case R_AARCH64_CONDBR19:
-        reloc_pc19(code_ptr, value);
+        reloc_pc19(code_ptr, (tcg_insn_unit *)value);
         break;
-
     default:
         tcg_abort();
     }
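The rewritten relocations collapse the read/mask/write sequence into a single deposit32 and turn the open-coded range checks into asserts: a value fits a signed N-bit field exactly when sextract64(offset, 0, N) round-trips it. A self-contained demo of the pattern; deposit32 and sextract64 are modelled on QEMU's include/qemu/bitops.h, and the buffer contents are invented for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t tcg_insn_unit;     /* TCG_TARGET_INSN_UNIT_SIZE == 4 */

/* Modelled on QEMU's bitops: insert a field into an insn word. */
static uint32_t deposit32(uint32_t value, int start, int length,
                          uint32_t fieldval)
{
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

/* Sign-extract: shift the field to the top, arithmetic-shift back. */
static int64_t sextract64(uint64_t value, int start, int length)
{
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

/* Shape of the new reloc_pc26: tcg_insn_unit pointer subtraction
   already counts 4-byte instructions, so no "/ 4" is needed. */
static void reloc_pc26_demo(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    ptrdiff_t offset = target - code_ptr;
    assert(offset == sextract64(offset, 0, 26));   /* +/-128MB range */
    *code_ptr = deposit32(*code_ptr, 0, 26, offset);
}

int main(void)
{
    tcg_insn_unit buf[16] = { [4] = 0x14000000 };  /* A64 "B" opcode */
    reloc_pc26_demo(&buf[4], &buf[9]);             /* forward 5 insns */
    assert((buf[4] & 0x03ffffff) == 5);
    return 0;
}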
@@ -794,15 +788,10 @@ static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
     }
 }
 
-static inline void tcg_out_goto(TCGContext *s, intptr_t target)
+static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
 {
-    intptr_t offset = (target - (intptr_t)s->code_ptr) / 4;
-
-    if (offset < -0x02000000 || offset >= 0x02000000) {
-        /* out of 26bit range */
-        tcg_abort();
-    }
-
+    ptrdiff_t offset = target - s->code_ptr;
+    assert(offset == sextract64(offset, 0, 26));
     tcg_out_insn(s, 3206, B, offset);
 }
 
@@ -828,29 +817,23 @@ static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
     tcg_out_insn(s, 3207, BLR, reg);
 }
 
-static inline void tcg_out_call(TCGContext *s, intptr_t target)
+static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
 {
-    intptr_t offset = (target - (intptr_t)s->code_ptr) / 4;
-
-    if (offset < -0x02000000 || offset >= 0x02000000) { /* out of 26bit rng */
-        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, target);
-        tcg_out_callr(s, TCG_REG_TMP);
-    } else {
+    ptrdiff_t offset = target - s->code_ptr;
+    if (offset == sextract64(offset, 0, 26)) {
         tcg_out_insn(s, 3206, BL, offset);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
+        tcg_out_callr(s, TCG_REG_TMP);
     }
 }
 
 void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
 {
-    intptr_t target = addr;
-    intptr_t offset = (target - (intptr_t)jmp_addr) / 4;
+    tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
+    tcg_insn_unit *target = (tcg_insn_unit *)addr;
 
-    if (offset < -0x02000000 || offset >= 0x02000000) {
-        /* out of 26bit range */
-        tcg_abort();
-    }
-
-    patch_reloc((uint8_t *)jmp_addr, R_AARCH64_JUMP26, target, 0);
+    reloc_pc26(code_ptr, target);
     flush_icache_range(jmp_addr, jmp_addr + 4);
 }
 
@@ -862,7 +845,7 @@ static inline void tcg_out_goto_label(TCGContext *s, int label_index)
         tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0);
         tcg_out_goto_noaddr(s);
     } else {
-        tcg_out_goto(s, l->u.value);
+        tcg_out_goto(s, l->u.value_ptr);
     }
 }
 
@@ -884,9 +867,8 @@ static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a,
         tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0);
         offset = tcg_in32(s) >> 5;
     } else {
-        offset = l->u.value - (uintptr_t)s->code_ptr;
-        offset >>= 2;
-        assert(offset >= -0x40000 && offset < 0x40000);
+        offset = l->u.value_ptr - s->code_ptr;
+        assert(offset == sextract64(offset, 0, 19));
     }
 
     if (need_cmp) {
@@ -982,7 +964,7 @@ static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
     [MO_LEUW] = helper_le_lduw_mmu,
     [MO_LEUL] = helper_le_ldul_mmu,
@@ -995,7 +977,7 @@ static const void * const qemu_ld_helpers[16] = {
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[16] = {
     [MO_UB]   = helper_ret_stb_mmu,
     [MO_LEUW] = helper_le_stw_mmu,
     [MO_LEUL] = helper_le_stl_mmu,
@@ -1005,11 +987,11 @@ static const void * const qemu_st_helpers[16] = {
     [MO_BEQ]  = helper_be_stq_mmu,
 };
 
-static inline void tcg_out_adr(TCGContext *s, TCGReg rd, uintptr_t addr)
+static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
 {
-    addr -= (uintptr_t)s->code_ptr;
-    assert(addr == sextract64(addr, 0, 21));
-    tcg_out_insn(s, 3406, ADR, rd, addr);
+    ptrdiff_t offset = tcg_pcrel_diff(s, target);
+    assert(offset == sextract64(offset, 0, 21));
+    tcg_out_insn(s, 3406, ADR, rd, offset);
 }
 
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1017,20 +999,20 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     TCGMemOp opc = lb->opc;
     TCGMemOp size = opc & MO_SIZE;
 
-    reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
+    reloc_pc19(lb->label_ptr[0], s->code_ptr);
 
     tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
-    tcg_out_adr(s, TCG_REG_X3, (intptr_t)lb->raddr);
-    tcg_out_call(s, (intptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);
+    tcg_out_adr(s, TCG_REG_X3, lb->raddr);
+    tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
     if (opc & MO_SIGN) {
         tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0);
     } else {
         tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
     }
 
-    tcg_out_goto(s, (intptr_t)lb->raddr);
+    tcg_out_goto(s, lb->raddr);
 }
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1038,21 +1020,21 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     TCGMemOp opc = lb->opc;
     TCGMemOp size = opc & MO_SIZE;
 
-    reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
+    reloc_pc19(lb->label_ptr[0], s->code_ptr);
 
     tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
     tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
-    tcg_out_adr(s, TCG_REG_X4, (intptr_t)lb->raddr);
-    tcg_out_call(s, (intptr_t)qemu_st_helpers[opc]);
-    tcg_out_goto(s, (intptr_t)lb->raddr);
+    tcg_out_adr(s, TCG_REG_X4, lb->raddr);
+    tcg_out_call(s, qemu_st_helpers[opc]);
+    tcg_out_goto(s, lb->raddr);
 }
 
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg data_reg, TCGReg addr_reg,
-                                int mem_index,
-                                uint8_t *raddr, uint8_t *label_ptr)
+                                int mem_index, tcg_insn_unit *raddr,
+                                tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
@@ -1070,7 +1052,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
    the slow path. Generated code returns the host addend in X1,
    clobbers X0,X2,X3,TMP. */
 static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
-                             uint8_t **label_ptr, int mem_index, bool is_read)
+                             tcg_insn_unit **label_ptr, int mem_index,
+                             bool is_read)
 {
     TCGReg base = TCG_AREG0;
     int tlb_offset = is_read ?
@@ -1218,7 +1201,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 {
 #ifdef CONFIG_SOFTMMU
     TCGMemOp s_bits = memop & MO_SIZE;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
@@ -1235,7 +1218,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 {
 #ifdef CONFIG_SOFTMMU
     TCGMemOp s_bits = memop & MO_SIZE;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
@@ -1247,7 +1230,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 #endif /* CONFIG_SOFTMMU */
 }
 
-static uint8_t *tb_ret_addr;
+static tcg_insn_unit *tb_ret_addr;
 
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                        const TCGArg args[TCG_MAX_OP_ARGS],
@@ -1270,7 +1253,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     switch (opc) {
     case INDEX_op_exit_tb:
         tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
-        tcg_out_goto(s, (intptr_t)tb_ret_addr);
+        tcg_out_goto(s, tb_ret_addr);
         break;
 
     case INDEX_op_goto_tb:
@@ -1278,19 +1261,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 #error "USE_DIRECT_JUMP required for aarch64"
 #endif
         assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
-        s->tb_jmp_offset[a0] = s->code_ptr - s->code_buf;
+        s->tb_jmp_offset[a0] = tcg_current_code_size(s);
         /* actual branch destination will be patched by
            aarch64_tb_set_jmp_target later, beware retranslation. */
         tcg_out_goto_noaddr(s);
-        s->tb_next_offset[a0] = s->code_ptr - s->code_buf;
-        break;
-
-    case INDEX_op_call:
-        if (const_args[0]) {
-            tcg_out_call(s, a0);
-        } else {
-            tcg_out_callr(s, a0);
-        }
+        s->tb_next_offset[a0] = tcg_current_code_size(s);
         break;
 
     case INDEX_op_br:
@@ -1613,13 +1588,12 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
         break;
 
+    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
     case INDEX_op_mov_i64:
-    case INDEX_op_mov_i32:
+    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
     case INDEX_op_movi_i64:
-    case INDEX_op_movi_i32:
-        /* Always implemented with tcg_out_mov/i, never with tcg_out_op.  */
+    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
-        /* Opcode not implemented.  */
         tcg_abort();
     }
 
@@ -1629,15 +1603,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 static const TCGTargetOpDef aarch64_op_defs[] = {
     { INDEX_op_exit_tb, { } },
     { INDEX_op_goto_tb, { } },
-    { INDEX_op_call, { "ri" } },
     { INDEX_op_br, { } },
 
-    { INDEX_op_mov_i32, { "r", "r" } },
-    { INDEX_op_mov_i64, { "r", "r" } },
-
-    { INDEX_op_movi_i32, { "r" } },
-    { INDEX_op_movi_i64, { "r" } },
-
     { INDEX_op_ld8u_i32, { "r", "r" } },
     { INDEX_op_ld8s_i32, { "r", "r" } },
     { INDEX_op_ld16u_i32, { "r", "r" } },
tcg/aarch64/tcg-target.h  +1 −0
@@ -13,6 +13,7 @@
 #ifndef TCG_TARGET_AARCH64
 #define TCG_TARGET_AARCH64 1
 
+#define TCG_TARGET_INSN_UNIT_SIZE  4
 #undef TCG_TARGET_STACK_GROWSUP
 
 typedef enum {
tcg/arm/tcg-target.c  +53 −121
@@ -115,36 +115,18 @@ static const int tcg_target_call_oarg_regs[2] = {
 
 #define TCG_REG_TMP  TCG_REG_R12
 
-static inline void reloc_abs32(void *code_ptr, intptr_t target)
+static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
-    *(uint32_t *) code_ptr = target;
+    ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
+    *code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
 }
 
-static inline void reloc_pc24(void *code_ptr, intptr_t target)
-{
-    uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);
-
-    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
-                             | (offset & 0xffffff);
-}
-
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                         intptr_t value, intptr_t addend)
 {
-    switch (type) {
-    case R_ARM_ABS32:
-        reloc_abs32(code_ptr, value);
-        break;
-
-    case R_ARM_CALL:
-    case R_ARM_JUMP24:
-    default:
-        tcg_abort();
-
-    case R_ARM_PC24:
-        reloc_pc24(code_ptr, value);
-        break;
-    }
+    assert(type == R_ARM_PC24);
+    assert(addend == 0);
+    reloc_pc24(code_ptr, (tcg_insn_unit *)value);
 }
 
 #define TCG_CT_CONST_ARM  0x100
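The "- 8" in the new reloc_pc24 is the A32 pipeline bias: when a branch executes, PC reads as the instruction's own address plus 8, so the 24-bit field stores (target - insn - 8) / 4. A round-trip sketch under that assumption (the helper names here are invented; field layout per the ARM architecture manual; the decode cast assumes the usual two's-complement conversion):

#include <assert.h>
#include <stdint.h>

/* Encode: drop a word offset, biased by PC+8, into the low 24 bits. */
static uint32_t encode_b_offset(uint32_t insn, intptr_t from, intptr_t to)
{
    intptr_t offset = (to - (from + 8)) >> 2;
    return (insn & ~0xffffffu) | (offset & 0xffffff);
}

/* Decode: sign-extend the 24-bit field and undo the bias. */
static intptr_t decode_b_target(uint32_t insn, intptr_t from)
{
    int32_t imm24 = (int32_t)(insn << 8) >> 8;
    return from + 8 + imm24 * 4;
}

int main(void)
{
    intptr_t from = 0x10000, to = 0x10040;
    uint32_t insn = encode_b_offset(0xea000000 /* B */, from, to);
    assert(decode_b_target(insn, from) == to);
    return 0;
}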
@@ -379,20 +361,18 @@ static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
 
 static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
+    /* We pay attention here to not modify the branch target by masking
        the corresponding bytes.  This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0a);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0a));
 }
 
 static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
 {
-    /* We pay attention here to not modify the branch target by skipping
+    /* We pay attention here to not modify the branch target by masking
        the corresponding bytes.  This ensure that caches and memory are
        kept coherent during retranslation. */
-    s->code_ptr += 3;
-    tcg_out8(s, (cond << 4) | 0x0b);
+    tcg_out32(s, deposit32(*s->code_ptr, 24, 8, (cond << 4) | 0x0b));
 }
 
 static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
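tcg_out_b_noaddr used to advance code_ptr three bytes and emit only the condition/opcode byte; the new form reads the stale word already sitting in the buffer and deposits the cond/opcode field over its top eight bits, so the low 24 bits — a branch target patched on a previous translation pass — survive retranslation, keeping memory and icache views consistent. A small demo of that invariant (the stale word is invented; deposit32 modelled on QEMU's bitops.h):

#include <assert.h>
#include <stdint.h>

static uint32_t deposit32(uint32_t value, int start, int length,
                          uint32_t fieldval)
{
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Buffer already holds a B<cond> whose imm24 was patched earlier. */
    uint32_t stale = 0x1a00beef;
    int cond = 0xe;                        /* COND_AL */
    uint32_t insn = deposit32(stale, 24, 8, (cond << 4) | 0x0a);
    assert((insn & 0xffffff) == 0x00beef); /* old target preserved */
    assert((insn >> 24) == 0xea);          /* new cond/opcode byte */
    return 0;
}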
@@ -1010,20 +990,21 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
  * with the code buffer limited to 16MB we wouldn't need the long case.
  * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
  */
-static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
+static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
 {
-    int32_t disp = addr - (tcg_target_long) s->code_ptr;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
+    if ((addri & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
         tcg_out_b(s, cond, disp);
         return;
     }
 
-    tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
+    tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
     if (use_armv5t_instructions) {
         tcg_out_bx(s, cond, TCG_REG_TMP);
     } else {
-        if (addr & 1) {
+        if (addri & 1) {
             tcg_abort();
         }
         tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
@@ -1032,39 +1013,28 @@ static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
 
 /* The call case is mostly used for helpers - so it's not unreasonable
  * for them to be beyond branch range */
-static inline void tcg_out_call(TCGContext *s, uint32_t addr)
+static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
 {
-    int32_t val;
+    intptr_t addri = (intptr_t)addr;
+    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
 
-    val = addr - (tcg_target_long) s->code_ptr;
-    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
-        if (addr & 1) {
+    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
+        if (addri & 1) {
             /* Use BLX if the target is in Thumb mode */
             if (!use_armv5t_instructions) {
                 tcg_abort();
             }
-            tcg_out_blx_imm(s, val);
+            tcg_out_blx_imm(s, disp);
         } else {
-            tcg_out_bl(s, COND_AL, val);
+            tcg_out_bl(s, COND_AL, disp);
         }
     } else if (use_armv7_instructions) {
-        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
+        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
         tcg_out_blx(s, COND_AL, TCG_REG_TMP);
     } else {
         tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
         tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-        tcg_out32(s, addr);
-    }
-}
-
-static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
-{
-    if (use_armv5t_instructions) {
-        tcg_out_blx(s, cond, arg);
-    } else {
-        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
-                        TCG_REG_PC, SHIFT_IMM_LSL(0));
-        tcg_out_bx(s, cond, arg);
+        tcg_out32(s, addri);
     }
 }
 
@@ -1073,9 +1043,9 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
     TCGLabel *l = &s->labels[label_index];
 
     if (l->has_value) {
-        tcg_out_goto(s, cond, l->u.value);
+        tcg_out_goto(s, cond, l->u.value_ptr);
     } else {
-        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
+        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 0);
         tcg_out_b_noaddr(s, cond);
     }
 }
@@ -1084,7 +1054,7 @@ static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
     [MO_SB]   = helper_ret_ldsb_mmu,
 
@@ -1104,7 +1074,7 @@ static const void * const qemu_ld_helpers[16] = {
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static const void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[16] = {
     [MO_UB]   = helper_ret_stb_mmu,
     [MO_LEUW] = helper_le_stw_mmu,
     [MO_LEUL] = helper_le_stl_mmu,
@@ -1256,7 +1226,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                 TCGReg addrhi, int mem_index,
-                                uint8_t *raddr, uint8_t *label_ptr)
+                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
@@ -1275,9 +1245,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
-    uintptr_t func;
+    void *func;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
     if (TARGET_LONG_BITS == 64) {
@@ -1292,9 +1262,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
        icache usage.  For pre-armv6, use the signed helpers since we do
        not have a single insn sign-extend.  */
     if (use_armv6_instructions) {
-        func = (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN];
+        func = qemu_ld_helpers[opc & ~MO_SIGN];
     } else {
-        func = (uintptr_t)qemu_ld_helpers[opc];
+        func = qemu_ld_helpers[opc];
         if (opc & MO_SIGN) {
             opc = MO_UL;
         }
@@ -1328,7 +1298,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         break;
     }
 
-    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
+    tcg_out_goto(s, COND_AL, lb->raddr);
 }
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
@@ -1336,7 +1306,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     TCGReg argreg, datalo, datahi;
     TCGMemOp opc = lb->opc;
 
-    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
+    reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
     argreg = TCG_REG_R0;
     argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
@@ -1368,7 +1338,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* Tail-call to the helper, which will return to the fast path.  */
-    tcg_out_goto(s, COND_AL, (uintptr_t)qemu_st_helpers[opc]);
+    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc]);
 }
 #endif /* SOFTMMU */
 
@@ -1499,7 +1469,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1628,7 +1598,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #ifdef CONFIG_SOFTMMU
     int mem_index;
     TCGReg addend;
-    uint8_t *label_ptr;
+    tcg_insn_unit *label_ptr;
 #endif
 
     datalo = *args++;
@@ -1660,7 +1630,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 #endif
 }
 
-static uint8_t *tb_ret_addr;
+static tcg_insn_unit *tb_ret_addr;
 
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 const TCGArg *args, const int *const_args)
@@ -1670,51 +1640,21 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     switch (opc) {
     case INDEX_op_exit_tb:
-        if (use_armv7_instructions || check_fit_imm(args[0])) {
         tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-        } else {
-            uint8_t *ld_ptr = s->code_ptr;
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
-            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
-            tcg_out32(s, args[0]);
-        }
+        tcg_out_goto(s, COND_AL, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
         if (s->tb_jmp_offset) {
             /* Direct jump method */
-#if defined(USE_DIRECT_JUMP)
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
+            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
             tcg_out_b_noaddr(s, COND_AL);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
-            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
-            tcg_out32(s, 0);
-#endif
         } else {
             /* Indirect jump method */
-#if 1
-            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
-            if (c > 0xfff || c < -0xfff) {
-                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
-                                (tcg_target_long) (s->tb_next + args[0]));
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            } else
-                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
-#else
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
-            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
-            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
-#endif
+            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+            tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
+            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
         }
-        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
-        break;
-    case INDEX_op_call:
-        if (const_args[0])
-            tcg_out_call(s, args[0]);
-        else
-            tcg_out_callr(s, COND_AL, args[0]);
+        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_goto_label(s, COND_AL, args[0]);
@@ -1745,13 +1685,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
         break;
 
-    case INDEX_op_mov_i32:
-        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
-                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
-        break;
-    case INDEX_op_movi_i32:
-        tcg_out_movi32(s, COND_AL, args[0], args[1]);
-        break;
     case INDEX_op_movcond_i32:
         /* Constraints mean that v2 is always in the same register as dest,
          * so we only need to do "if condition passed, move v1 to dest".
@@ -1967,6 +1900,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
         break;
 
+    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
+    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
+    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
     default:
         tcg_abort();
     }
@@ -1975,12 +1911,8 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 static const TCGTargetOpDef arm_op_defs[] = {
     { INDEX_op_exit_tb, { } },
     { INDEX_op_goto_tb, { } },
-    { INDEX_op_call, { "ri" } },
     { INDEX_op_br, { } },
 
-    { INDEX_op_mov_i32, { "r", "r" } },
-    { INDEX_op_movi_i32, { "r" } },
-
     { INDEX_op_ld8u_i32, { "r", "r" } },
     { INDEX_op_ld8s_i32, { "r", "r" } },
     { INDEX_op_ld16u_i32, { "r", "r" } },
tcg/arm/tcg-target.h  +1 −0
@@ -26,6 +26,7 @@
 #define TCG_TARGET_ARM 1
 
 #undef TCG_TARGET_STACK_GROWSUP
+#define TCG_TARGET_INSN_UNIT_SIZE 4
 
 typedef enum {
     TCG_REG_R0 = 0,