accel/tcg/tcg-runtime-gvec.c  +36 −0

@@ -467,3 +467,39 @@ void HELPER(gvec_sar64i)(void *d, void *a, uint32_t desc)

/* If vectors are enabled, the compiler fills in -1 for true.
   Otherwise, we must take care of this by hand.  */
#ifdef CONFIG_VECTOR16
# define DO_CMP0(X)  X
#else
# define DO_CMP0(X)  -(X)
#endif

#define DO_CMP1(NAME, TYPE, OP)                                            \
void HELPER(NAME)(void *d, void *a, void *b, uint32_t desc)                \
{                                                                          \
    intptr_t oprsz = simd_oprsz(desc);                                     \
    intptr_t i;                                                            \
    for (i = 0; i < oprsz; i += sizeof(vec64)) {                           \
        *(TYPE *)(d + i) = DO_CMP0(*(TYPE *)(a + i) OP *(TYPE *)(b + i));  \
    }                                                                      \
    clear_high(d, oprsz, desc);                                            \
}

#define DO_CMP2(SZ)                         \
    DO_CMP1(gvec_eq##SZ, vec##SZ, ==)       \
    DO_CMP1(gvec_ne##SZ, vec##SZ, !=)       \
    DO_CMP1(gvec_lt##SZ, svec##SZ, <)       \
    DO_CMP1(gvec_le##SZ, svec##SZ, <=)      \
    DO_CMP1(gvec_ltu##SZ, vec##SZ, <)       \
    DO_CMP1(gvec_leu##SZ, vec##SZ, <=)

DO_CMP2(8)
DO_CMP2(16)
DO_CMP2(32)
DO_CMP2(64)

#undef DO_CMP0
#undef DO_CMP1
#undef DO_CMP2

accel/tcg/tcg-runtime.h  +30 −0

@@ -178,3 +178,33 @@ DEF_HELPER_FLAGS_3(gvec_sar8i, TCG_CALL_NO_RWG, void, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_eq8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_eq64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_ne8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ne64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_lt8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_lt64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_le8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_le64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_ltu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ltu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

DEF_HELPER_FLAGS_4(gvec_leu8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

tcg/README  +4 −0

@@ -581,6 +581,10 @@ E.g. VECL=1 -> 64 << 1 -> v128, and VECE=2 -> 1 << 2 -> i32.

* cmp_vec  v0, v1, v2, cond

  Compare vectors by element, storing -1 for true and 0 for false.
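Both the cmp_vec documentation above and the DO_CMP0 fallback in tcg-runtime-gvec.c rest on the same identity: a C relational operator yields 0 or 1, and negating that yields the all-zeros/all-ones element mask that hardware vector compares produce natively. A minimal standalone sketch (plain C, not part of the patch) of why the 0/-1 convention is convenient:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t a = 3, b = 7;

    /* -(0 or 1) gives 0x00000000 or 0xffffffff, matching what a
       native vector compare writes per element.  */
    uint32_t mask = -(uint32_t)(a < b);

    /* An all-ones mask enables a branchless per-element select.  */
    uint32_t min = (mask & a) | (~mask & b);

    printf("mask=0x%08x min=%u\n", mask, min);   /* 0xffffffff, 3 */
    return 0;
}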
tcg/tcg-op-gvec.c  +151 −0

@@ -1583,3 +1583,154 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,

/* Expand OPSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i32(cond, t0, t0, t1);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i64(cond, t0, t0, t1);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}
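Before reading tcg_gen_gvec_cmp below, it may help to work through the size splitting it performs; its own comment gives oprsz == 80 as the motivating case. A standalone sketch of just the arithmetic, with align_down standing in for QEMU's QEMU_ALIGN_DOWN macro:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU_ALIGN_DOWN(n, m).  */
static uint32_t align_down(uint32_t n, uint32_t m)
{
    return n / m * m;
}

int main(void)
{
    uint32_t oprsz = 80;                    /* SVE-style non-power-of-2 size */
    uint32_t some = align_down(oprsz, 32);  /* 64 bytes: two v256 operations */
    uint32_t rest = oprsz - some;           /* 16 bytes: one v128 operation  */

    printf("2 x 32 covers %u bytes, 1 x 16 covers %u bytes\n", some, rest);
    return 0;
}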
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /* Recall that ARM SVE allows vector sizes that are not a power of 2.
       Expand with successively smaller host vector sizes.  The intent is
       that e.g. oprsz == 80 would be expanded with 2x32 + 1x16.  */

    if (TCG_TARGET_HAS_v256 && check_size_impl(oprsz, 32)
        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V256, vece)) {
        uint32_t some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            goto done;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
    }

    if (TCG_TARGET_HAS_v128 && check_size_impl(oprsz, 16)
        && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V128, vece)) {
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
    } else if (TCG_TARGET_HAS_v64 && check_size_impl(oprsz, 8)
               && (TCG_TARGET_REG_BITS == 32 || vece != MO_64)
               && tcg_can_emit_vec_op(INDEX_op_cmp_vec, TCG_TYPE_V64, vece)) {
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
    } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
    } else {
        gen_helper_gvec_3 * const *fn = fns[cond];

        if (fn == NULL) {
            uint32_t tmp;
            tmp = aofs, aofs = bofs, bofs = tmp;
            cond = tcg_swap_cond(cond);
            fn = fns[cond];
            assert(fn != NULL);
        }
        tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
        return;
    }

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

tcg/tcg-op-gvec.h  +4 −0

@@ -207,6 +207,10 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,

void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz);
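For orientation, here is a hypothetical call site for the new entry point; the offsets and the 16-byte register size are illustrative, not taken from any real front end:

/* Compare two 16-byte vector registers as four unsigned 32-bit elements,
   writing a 0/-1 mask per element at dofs.  All offsets are env-relative,
   as elsewhere in the gvec API; the values passed in here are made up.  */
static void gen_example_vcmp(uint32_t dofs, uint32_t aofs, uint32_t bofs)
{
    tcg_gen_gvec_cmp(TCG_COND_LTU, MO_32, dofs, aofs, bofs, 16, 16);
}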
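One detail worth noting in tcg_gen_gvec_cmp's out-of-line fallback: the fns[] table only carries EQ/NE/LT/LE/LTU/LEU, so the remaining conditions are reached by exchanging aofs and bofs and letting tcg_swap_cond remap the condition. The identity relied on, as a plain-C sketch (illustrative only):

/* A greater-than mask is a less-than mask with the operands swapped,
   so no separate GT/GE/GTU/GEU helpers are needed.  */
static inline int32_t gt_mask(int32_t a, int32_t b)
{
    return -(b < a);    /* identical to -(a > b) */
}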