Commit 0ef83bf2 authored by Mark Cave-Ayland, committed by David Gibson
Browse files

target/ppc: remove ROTRu32 and ROTRu64 macros from int_helper.c



Richard points out that these macros suffer from a -fsanitize=shift bug: they
improperly handle n == 0, turning it into a shift by 32/64 bits respectively.
Replace them with QEMU's existing ror32() and ror64() functions instead.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent 634c5835
Loading
Loading
Loading
Loading
+20 −28
Original line number Diff line number Diff line
@@ -3306,8 +3306,6 @@ void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    *r = result;
}

#define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32 - n)))

void helper_vshasigmaw(ppc_avr_t *r,  ppc_avr_t *a, uint32_t st_six)
{
    int st = (st_six & 0x10) != 0;
@@ -3317,32 +3315,28 @@ void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        if (st == 0) {
            if ((six & (0x8 >> i)) == 0) {
                r->VsrW(i) = ROTRu32(a->VsrW(i), 7) ^
                             ROTRu32(a->VsrW(i), 18) ^
                r->VsrW(i) = ror32(a->VsrW(i), 7) ^
                             ror32(a->VsrW(i), 18) ^
                             (a->VsrW(i) >> 3);
            } else { /* six.bit[i] == 1 */
                r->VsrW(i) = ROTRu32(a->VsrW(i), 17) ^
                             ROTRu32(a->VsrW(i), 19) ^
                r->VsrW(i) = ror32(a->VsrW(i), 17) ^
                             ror32(a->VsrW(i), 19) ^
                             (a->VsrW(i) >> 10);
            }
        } else { /* st == 1 */
            if ((six & (0x8 >> i)) == 0) {
                r->VsrW(i) = ROTRu32(a->VsrW(i), 2) ^
                             ROTRu32(a->VsrW(i), 13) ^
                             ROTRu32(a->VsrW(i), 22);
                r->VsrW(i) = ror32(a->VsrW(i), 2) ^
                             ror32(a->VsrW(i), 13) ^
                             ror32(a->VsrW(i), 22);
            } else { /* six.bit[i] == 1 */
                r->VsrW(i) = ROTRu32(a->VsrW(i), 6) ^
                             ROTRu32(a->VsrW(i), 11) ^
                             ROTRu32(a->VsrW(i), 25);
                r->VsrW(i) = ror32(a->VsrW(i), 6) ^
                             ror32(a->VsrW(i), 11) ^
                             ror32(a->VsrW(i), 25);
            }
        }
    }
}

#undef ROTRu32

#define ROTRu64(v, n) (((v) >> (n)) | ((v) << (64-n)))

void helper_vshasigmad(ppc_avr_t *r,  ppc_avr_t *a, uint32_t st_six)
{
    int st = (st_six & 0x10) != 0;
@@ -3352,30 +3346,28 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        if (st == 0) {
            if ((six & (0x8 >> (2*i))) == 0) {
                r->VsrD(i) = ROTRu64(a->VsrD(i), 1) ^
                             ROTRu64(a->VsrD(i), 8) ^
                r->VsrD(i) = ror64(a->VsrD(i), 1) ^
                             ror64(a->VsrD(i), 8) ^
                             (a->VsrD(i) >> 7);
            } else { /* six.bit[2*i] == 1 */
                r->VsrD(i) = ROTRu64(a->VsrD(i), 19) ^
                             ROTRu64(a->VsrD(i), 61) ^
                r->VsrD(i) = ror64(a->VsrD(i), 19) ^
                             ror64(a->VsrD(i), 61) ^
                             (a->VsrD(i) >> 6);
            }
        } else { /* st == 1 */
            if ((six & (0x8 >> (2*i))) == 0) {
                r->VsrD(i) = ROTRu64(a->VsrD(i), 28) ^
                             ROTRu64(a->VsrD(i), 34) ^
                             ROTRu64(a->VsrD(i), 39);
                r->VsrD(i) = ror64(a->VsrD(i), 28) ^
                             ror64(a->VsrD(i), 34) ^
                             ror64(a->VsrD(i), 39);
            } else { /* six.bit[2*i] == 1 */
                r->VsrD(i) = ROTRu64(a->VsrD(i), 14) ^
                             ROTRu64(a->VsrD(i), 18) ^
                             ROTRu64(a->VsrD(i), 41);
                r->VsrD(i) = ror64(a->VsrD(i), 14) ^
                             ror64(a->VsrD(i), 18) ^
                             ror64(a->VsrD(i), 41);
            }
        }
    }
}

#undef ROTRu64

void helper_vpermxor(ppc_avr_t *r,  ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;