Commit 3a7a2b4e authored by Richard Henderson, committed by Peter Maydell
Browse files

target/arm: Use tcg_gen_gvec_bitsel



This replaces 3 target-specific implementations for BIT, BIF, and BSL.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20190518191934.21887-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parent 785a602e
Loading
Loading
Loading
Loading
+12 −3
Original line number Diff line number Diff line
@@ -704,6 +704,15 @@ static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
}

/* Expand a 4-operand AdvSIMD vector operation using an expander function.  */
static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
                         int rx, GVecGen4Fn *gvec_fn, int vece)
{
    /* Resolve each register index to its offset in the CPU state. */
    uint32_t dofs = vec_full_reg_offset(s, rd);
    uint32_t nofs = vec_full_reg_offset(s, rn);
    uint32_t mofs = vec_full_reg_offset(s, rm);
    uint32_t xofs = vec_full_reg_offset(s, rx);
    /* Operation size: 16 bytes for Q-form, 8 bytes otherwise. */
    uint32_t opr_sz = is_q ? 16 : 8;

    gvec_fn(vece, dofs, nofs, mofs, xofs, opr_sz, vec_full_reg_size(s));
}

/* Expand a 2-operand + immediate AdvSIMD vector operation using
 * an op descriptor.
 */
@@ -10918,13 +10927,13 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
        return;

    case 5: /* BSL bitwise select */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
        return;
    case 6: /* BIT, bitwise insert if true */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
        return;
    case 7: /* BIF, bitwise insert if false */
        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
        return;

    default:
+2 −0
Original line number Diff line number Diff line
@@ -122,5 +122,7 @@ typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

#endif /* TARGET_ARM_TRANSLATE_A64_H */
+6 −72
Original line number Diff line number Diff line
@@ -5755,72 +5755,6 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
    return 1;
}

/*
 * Expanders for VBitOps_VBIF, VBIT, VBSL.
 */
/*
 * BSL: rd = (rn & rd) | (rm & ~rd) -- rd is the select mask on entry.
 * Computed as rd = rm ^ ((rn ^ rm) & rd); rn is clobbered as a temp.
 */
static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rm);    /* rn = rn ^ rm */
    tcg_gen_and_i64(rn, rn, rd);    /* rn = (rn ^ rm) & rd */
    tcg_gen_xor_i64(rd, rm, rn);    /* rd = rm ^ ((rn ^ rm) & rd) */
}

/*
 * BIT: rd = (rn & rm) | (rd & ~rm) -- insert rn bits where rm is set.
 * Computed as rd = rd ^ ((rn ^ rd) & rm); rn is clobbered as a temp.
 */
static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);    /* rn = rn ^ rd */
    tcg_gen_and_i64(rn, rn, rm);    /* rn = (rn ^ rd) & rm */
    tcg_gen_xor_i64(rd, rd, rn);    /* rd = rd ^ ((rn ^ rd) & rm) */
}

/*
 * BIF: rd = (rn & ~rm) | (rd & rm) -- insert rn bits where rm is clear.
 * Computed as rd = rd ^ ((rn ^ rd) & ~rm); rn is clobbered as a temp.
 */
static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
{
    tcg_gen_xor_i64(rn, rn, rd);    /* rn = rn ^ rd */
    tcg_gen_andc_i64(rn, rn, rm);   /* rn = (rn ^ rd) & ~rm */
    tcg_gen_xor_i64(rd, rd, rn);    /* rd = rd ^ ((rn ^ rd) & ~rm) */
}

/* Vector form of gen_bsl_i64: rd = (rn & rd) | (rm & ~rd); rn clobbered. */
static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rm);  /* rn = rn ^ rm */
    tcg_gen_and_vec(vece, rn, rn, rd);  /* rn = (rn ^ rm) & rd */
    tcg_gen_xor_vec(vece, rd, rm, rn);  /* rd = rm ^ ((rn ^ rm) & rd) */
}

/* Vector form of gen_bit_i64: rd = (rn & rm) | (rd & ~rm); rn clobbered. */
static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);  /* rn = rn ^ rd */
    tcg_gen_and_vec(vece, rn, rn, rm);  /* rn = (rn ^ rd) & rm */
    tcg_gen_xor_vec(vece, rd, rd, rn);  /* rd = rd ^ ((rn ^ rd) & rm) */
}

/* Vector form of gen_bif_i64: rd = (rn & ~rm) | (rd & rm); rn clobbered. */
static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
{
    tcg_gen_xor_vec(vece, rn, rn, rd);  /* rn = rn ^ rd */
    tcg_gen_andc_vec(vece, rn, rn, rm); /* rn = (rn ^ rd) & ~rm */
    tcg_gen_xor_vec(vece, rd, rd, rn);  /* rd = rd ^ ((rn ^ rd) & ~rm) */
}

/* BSL expander descriptor; load_dest because rd is also a source (the mask). */
const GVecGen3 bsl_op = {
    .fni8 = gen_bsl_i64,
    .fniv = gen_bsl_vec,
    .load_dest = true,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};

/* BIT expander descriptor; load_dest because rd is also a source. */
const GVecGen3 bit_op = {
    .fni8 = gen_bit_i64,
    .fniv = gen_bit_vec,
    .load_dest = true,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};

/* BIF expander descriptor; load_dest because rd is also a source. */
const GVecGen3 bif_op = {
    .fni8 = gen_bif_i64,
    .fniv = gen_bif_vec,
    .load_dest = true,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
};

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
@@ -6570,16 +6504,16 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                                 vec_size, vec_size);
                break;
            case 5: /* VBSL */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bsl_op);
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
                                    vec_size, vec_size);
                break;
            case 6: /* VBIT */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bit_op);
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
                                    vec_size, vec_size);
                break;
            case 7: /* VBIF */
                tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
                               vec_size, vec_size, &bif_op);
                tcg_gen_gvec_bitsel(MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
                                    vec_size, vec_size);
                break;
            }
            return 0;
+0 −3
Original line number Diff line number Diff line
@@ -238,9 +238,6 @@ static inline void gen_ss_advance(DisasContext *s)
}

/* Vector operations shared between ARM and AArch64.  */
extern const GVecGen3 bsl_op;
extern const GVecGen3 bit_op;
extern const GVecGen3 bif_op;
extern const GVecGen3 mla_op[4];
extern const GVecGen3 mls_op[4];
extern const GVecGen3 cmtst_op[4];