Commit f5583c52 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20180510' into staging



target-arm queue:
 * hw/arm/iotkit.c: fix minor memory leak
 * softfloat: fix wrong-exception-flags bug for multiply-add corner case
 * arm: isolate and clean up DTB generation
 * implement Arm v8.1-Atomics extension
 * Fix some bugs and missing instructions in the v8.2-FP16 extension

# gpg: Signature made Thu 10 May 2018 18:44:34 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20180510: (21 commits)
  target/arm: Clear SVE high bits for FMOV
  target/arm: Fix float16 to/from int16
  target/arm: Implement vector shifted FCVT for fp16
  target/arm: Implement vector shifted SCVF/UCVF for fp16
  target/arm: Enable ARM_FEATURE_V8_ATOMICS for user-only
  target/arm: Implement CAS and CASP
  target/arm: Fill in disas_ldst_atomic
  target/arm: Introduce ARM_FEATURE_V8_ATOMICS and initial decode
  target/riscv: Use new atomic min/max expanders
  tcg: Use GEN_ATOMIC_HELPER_FN for opposite endian atomic add
  tcg: Introduce atomic helpers for integer min/max
  target/xtensa: Use new min/max expanders
  target/arm: Use new min/max expanders
  tcg: Introduce helpers for integer min/max
  atomic.h: Work around gcc spurious "unused value" warning
  make sure that we aren't overwriting mc->get_hotplug_handler by accident
  arm/boot: split load_dtb() from arm_load_kernel()
  platform-bus-device: use device plug callback instead of machine_done notifier
  pc: simplify MachineClass::get_hotplug_handler handling
  softfloat: Handle default NaN mode after pickNaNMulAdd, not before
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#	target/riscv/translate.c
parents c74e62ee 9a9f1f59
Loading
Loading
Loading
Loading
+74 −38
Original line number Diff line number Diff line
@@ -25,18 +25,22 @@
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  uint64_t
# define SDATA_TYPE int64_t
# define BSWAP      bswap64
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
#else
# error unsupported data size
@@ -118,6 +122,39 @@ GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE cmp, old, new, val = xval;                           \
    smp_mb();                                                       \
    cmp = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = atomic_cmpxchg__nocheck(haddr, old, new);             \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA SIZE >= 16 */

#undef END
@@ -192,47 +229,45 @@ GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    smp_mb();                                                       \
    ldn = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));      \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN,  DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX,  DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN,  DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers.  */
ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr,
                         ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ldo, ldn, ret, sto;

    ldo = atomic_read__nocheck(haddr);
    while (1) {
        ret = BSWAP(ldo);
        sto = BSWAP(ret + val);
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
        if (ldn == ldo) {
            ATOMIC_MMU_CLEANUP;
            return ret;
        }
        ldo = ldn;
    }
}
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
                         ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ldo, ldn, ret, sto;

    ldo = atomic_read__nocheck(haddr);
    while (1) {
        ret = BSWAP(ldo) + val;
        sto = BSWAP(ret);
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
        if (ldn == ldo) {
            ATOMIC_MMU_CLEANUP;
            return ret;
        }
        ldo = ldn;
    }
}
#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
@@ -241,5 +276,6 @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
+8 −0
Original line number Diff line number Diff line
@@ -125,11 +125,19 @@ GEN_ATOMIC_HELPERS(fetch_add)
GEN_ATOMIC_HELPERS(fetch_and)
GEN_ATOMIC_HELPERS(fetch_or)
GEN_ATOMIC_HELPERS(fetch_xor)
GEN_ATOMIC_HELPERS(fetch_smin)
GEN_ATOMIC_HELPERS(fetch_umin)
GEN_ATOMIC_HELPERS(fetch_smax)
GEN_ATOMIC_HELPERS(fetch_umax)

GEN_ATOMIC_HELPERS(add_fetch)
GEN_ATOMIC_HELPERS(and_fetch)
GEN_ATOMIC_HELPERS(or_fetch)
GEN_ATOMIC_HELPERS(xor_fetch)
GEN_ATOMIC_HELPERS(smin_fetch)
GEN_ATOMIC_HELPERS(umin_fetch)
GEN_ATOMIC_HELPERS(smax_fetch)
GEN_ATOMIC_HELPERS(umax_fetch)

GEN_ATOMIC_HELPERS(xchg)

+28 −20
Original line number Diff line number Diff line
@@ -602,17 +602,26 @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
                                  bool inf_zero, float_status *s)
{
    int which;

    if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
        s->float_exception_flags |= float_flag_invalid;
    }

    if (s->default_nan_mode) {
        a.cls = float_class_dnan;
    } else {
        switch (pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
    which = pickNaNMulAdd(is_qnan(a.cls), is_snan(a.cls),
                          is_qnan(b.cls), is_snan(b.cls),
                          is_qnan(c.cls), is_snan(c.cls),
                              inf_zero, s)) {
                          inf_zero, s);

    if (s->default_nan_mode) {
        /* Note that this check is after pickNaNMulAdd so that function
         * has an opportunity to set the Invalid flag.
         */
        a.cls = float_class_dnan;
        return a;
    }

    switch (which) {
    case 0:
        break;
    case 1:
@@ -627,9 +636,8 @@ static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
    default:
        g_assert_not_reached();
    }

    a.cls = float_class_msnan;
    }

    return a;
}

+19 −53
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@
#define ARM64_TEXT_OFFSET_OFFSET    8
#define ARM64_MAGIC_OFFSET          56

static AddressSpace *arm_boot_address_space(ARMCPU *cpu,
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
                                     const struct arm_boot_info *info)
{
    /* Return the address space to use for bootloader reads and writes.
@@ -486,28 +486,7 @@ static void fdt_add_psci_node(void *fdt)
    qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
}

/**
 * load_dtb() - load a device tree binary image into memory
 * @addr:       the address to load the image at
 * @binfo:      struct describing the boot environment
 * @addr_limit: upper limit of the available memory area at @addr
 * @as:         address space to load image to
 *
 * Load a device tree supplied by the machine or by the user  with the
 * '-dtb' command line option, and put it at offset @addr in target
 * memory.
 *
 * If @addr_limit contains a meaningful value (i.e., it is strictly greater
 * than @addr), the device tree is only loaded if its size does not exceed
 * the limit.
 *
 * Returns: the size of the device tree image on success,
 *          0 if the image size exceeds the limit,
 *          -1 on errors.
 *
 * Note: Must not be called unless have_dtb(binfo) is true.
 */
static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
                 hwaddr addr_limit, AddressSpace *as)
{
    void *fdt = NULL;
@@ -935,7 +914,7 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
    return size;
}

static void arm_load_kernel_notify(Notifier *notifier, void *data)
void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
{
    CPUState *cs;
    int kernel_size;
@@ -945,11 +924,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
    int elf_machine;
    hwaddr entry;
    static const ARMInsnFixup *primary_loader;
    ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier,
                                         notifier, notifier);
    ARMCPU *cpu = n->cpu;
    struct arm_boot_info *info =
        container_of(n, struct arm_boot_info, load_kernel_notifier);
    AddressSpace *as = arm_boot_address_space(cpu, info);

    /* The board code is not supposed to set secure_board_setup unless
@@ -959,6 +933,7 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
    assert(!(info->secure_board_setup && kvm_enabled()));

    info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
    info->dtb_limit = 0;

    /* Load the kernel.  */
    if (!info->kernel_filename || info->firmware_loaded) {
@@ -968,9 +943,7 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
             * the kernel is supposed to be loaded by the bootloader), copy the
             * DTB to the base of RAM for the bootloader to pick up.
             */
            if (load_dtb(info->loader_start, info, 0, as) < 0) {
                exit(1);
            }
            info->dtb_start = info->loader_start;
        }

        if (info->kernel_filename) {
@@ -1050,15 +1023,14 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
         */
        if (elf_low_addr > info->loader_start
            || elf_high_addr < info->loader_start) {
            /* Pass elf_low_addr as address limit to load_dtb if it may be
            /* Set elf_low_addr as address limit for arm_load_dtb if it may be
             * pointing into RAM, otherwise pass '0' (no limit)
             */
            if (elf_low_addr < info->loader_start) {
                elf_low_addr = 0;
            }
            if (load_dtb(info->loader_start, info, elf_low_addr, as) < 0) {
                exit(1);
            }
            info->dtb_start = info->loader_start;
            info->dtb_limit = elf_low_addr;
        }
    }
    entry = elf_entry;
@@ -1116,7 +1088,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
         */
        if (have_dtb(info)) {
            hwaddr align;
            hwaddr dtb_start;

            if (elf_machine == EM_AARCH64) {
                /*
@@ -1136,11 +1107,9 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
            }

            /* Place the DTB after the initrd in memory with alignment. */
            dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size, align);
            if (load_dtb(dtb_start, info, 0, as) < 0) {
                exit(1);
            }
            fixupcontext[FIXUP_ARGPTR] = dtb_start;
            info->dtb_start = QEMU_ALIGN_UP(info->initrd_start + initrd_size,
                                           align);
            fixupcontext[FIXUP_ARGPTR] = info->dtb_start;
        } else {
            fixupcontext[FIXUP_ARGPTR] = info->loader_start + KERNEL_ARGS_ADDR;
            if (info->ram_size >= (1ULL << 32)) {
@@ -1173,15 +1142,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data)
    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
        ARM_CPU(cs)->env.boot_info = info;
    }
}

void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
{
    CPUState *cs;

    info->load_kernel_notifier.cpu = cpu;
    info->load_kernel_notifier.notifier.notify = arm_load_kernel_notify;
    qemu_add_machine_init_done_notifier(&info->load_kernel_notifier.notifier);

    /* CPU objects (unlike devices) are not automatically reset on system
     * reset, so we must always register a handler to do so. If we're
@@ -1191,6 +1151,12 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
        qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
    }

    if (!info->skip_dtb_autoload && have_dtb(info)) {
        if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as) < 0) {
            exit(1);
        }
    }
}

static const TypeInfo arm_linux_boot_if_info = {
+1 −0
Original line number Diff line number Diff line
@@ -517,6 +517,7 @@ static void iotkit_realize(DeviceState *dev, Error **errp)
                              qdev_get_gpio_in(DEVICE(&s->ppc_irq_orgate), i));
        qdev_connect_gpio_out_named(DEVICE(ppc), "irq", 0,
                                    qdev_get_gpio_in(devs, 0));
        g_free(gpioname);
    }

    iotkit_forward_sec_resp_cfg(s);
Loading