Loading target/arm/translate-vfp.inc.c +73 −0 Original line number Diff line number Diff line Loading @@ -853,3 +853,76 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a) return true; } static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; TCGv_i32 addr; if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { offset = -offset; } if (s->thumb && a->rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); if (a->l) { gen_vfp_ld(s, false, addr); gen_mov_vreg_F0(false, a->vd); } else { gen_mov_F0_vreg(false, a->vd); gen_vfp_st(s, false, addr); } tcg_temp_free_i32(addr); return true; } static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; TCGv_i32 addr; /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { offset = -offset; } if (s->thumb && a->rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); if (a->l) { gen_vfp_ld(s, true, addr); gen_mov_vreg_F0(true, a->vd); } else { gen_mov_F0_vreg(true, a->vd); gen_vfp_st(s, true, addr); } tcg_temp_free_i32(addr); return true; } target/arm/translate.c +2 −20 Original line number Diff line number Diff line Loading @@ -3713,26 +3713,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) else rd = VFP_SREG_D(insn); if ((insn & 0x01200000) == 0x01000000) { /* Single load/store */ offset = (insn & 0xff) << 2; if ((insn & (1 << 23)) == 0) offset = -offset; if (s->thumb && rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, rn); 
} tcg_gen_addi_i32(addr, addr, offset); if (insn & (1 << 20)) { gen_vfp_ld(s, dp, addr); gen_mov_vreg_F0(dp, rd); } else { gen_mov_F0_vreg(dp, rd); gen_vfp_st(s, dp, addr); } tcg_temp_free_i32(addr); /* Already handled by decodetree */ return 1; } else { /* load/store multiple */ int w = insn & (1 << 21); Loading target/arm/vfp.decode +7 −0 Original line number Diff line number Diff line Loading @@ -71,3 +71,10 @@ VMOV_64_sp ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \ vm=%vm_sp VMOV_64_dp ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \ vm=%vm_dp # Note that the half-precision variants of VLDR and VSTR are # not part of this decodetree at all because they have bits [9:8] == 0b01 VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \ vd=%vd_sp VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \ vd=%vd_dp Loading
target/arm/translate-vfp.inc.c +73 −0 Original line number Diff line number Diff line Loading @@ -853,3 +853,76 @@ static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a) return true; } static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; TCGv_i32 addr; if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { offset = -offset; } if (s->thumb && a->rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); if (a->l) { gen_vfp_ld(s, false, addr); gen_mov_vreg_F0(false, a->vd); } else { gen_mov_F0_vreg(false, a->vd); gen_vfp_st(s, false, addr); } tcg_temp_free_i32(addr); return true; } static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a) { uint32_t offset; TCGv_i32 addr; /* UNDEF accesses to D16-D31 if they don't exist */ if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { return false; } if (!vfp_access_check(s)) { return true; } offset = a->imm << 2; if (!a->u) { offset = -offset; } if (s->thumb && a->rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, a->rn); } tcg_gen_addi_i32(addr, addr, offset); if (a->l) { gen_vfp_ld(s, true, addr); gen_mov_vreg_F0(true, a->vd); } else { gen_mov_F0_vreg(true, a->vd); gen_vfp_st(s, true, addr); } tcg_temp_free_i32(addr); return true; }
/*
 * target/arm/translate.c (+2 −20), hunk @@ -3713,26 +3713,8 @@ inside
 * disas_vfp_insn (the enclosing function is not fully visible here, so
 * its text is kept verbatim below).  The hand-written legacy decoder's
 * single load/store path is deleted; the branch now simply returns 1
 * with the comment "Already handled by decodetree", deferring VLDR/VSTR
 * to the decodetree handlers.  The load/store-multiple path is
 * unchanged.
 */
target/arm/translate.c +2 −20 Original line number Diff line number Diff line Loading @@ -3713,26 +3713,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) else rd = VFP_SREG_D(insn); if ((insn & 0x01200000) == 0x01000000) { /* Single load/store */ offset = (insn & 0xff) << 2; if ((insn & (1 << 23)) == 0) offset = -offset; if (s->thumb && rn == 15) { /* This is actually UNPREDICTABLE */ addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, s->pc & ~2); } else { addr = load_reg(s, rn); } tcg_gen_addi_i32(addr, addr, offset); if (insn & (1 << 20)) { gen_vfp_ld(s, dp, addr); gen_mov_vreg_F0(dp, rd); } else { gen_mov_F0_vreg(dp, rd); gen_vfp_st(s, dp, addr); } tcg_temp_free_i32(addr); /* Already handled by decodetree */ return 1; } else { /* load/store multiple */ int w = insn & (1 << 21); Loading
target/arm/vfp.decode +7 −0 @@ -71,3 +71,10 @@

VMOV_64_sp   ---- 1100 010 op:1 rt2:4 rt:4 1010 00.1 .... \
             vm=%vm_sp
VMOV_64_dp   ---- 1100 010 op:1 rt2:4 rt:4 1011 00.1 .... \
             vm=%vm_dp

# Note that the half-precision variants of VLDR and VSTR are
# not part of this decodetree at all because they have bits [9:8] == 0b01
VLDR_VSTR_sp ---- 1101 u:1 .0 l:1 rn:4 .... 1010 imm:8 \
             vd=%vd_sp
VLDR_VSTR_dp ---- 1101 u:1 .0 l:1 rn:4 .... 1011 imm:8 \
             vd=%vd_dp