Commit e60b38f4 authored by Peter Maydell
Browse files

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20181024' into staging



target-arm queue:
 * ssi-sd: Make devices picking up backends unavailable with -device
 * Add support for VCPU event states
 * Move towards making ID registers the source of truth for
   whether a guest CPU implements a feature, rather than having
   parallel ID registers and feature bit flags
 * Implement various HCR hypervisor trap/config bits
 * Get IL bit correct for v7 syndrome values
 * Report correct syndrome for FP/SIMD traps to Hyp mode
 * hw/arm/boot: Increase compliance with kernel arm64 boot protocol
 * Refactor A32 Neon to use generic vector infrastructure
 * Fix a bug in A32 VLD2 "(multiple 2-element structures)" insn
 * net: cadence_gem: Report features correctly in ID register
 * Avoid some unnecessary TLB flushes on TTBR register writes

# gpg: Signature made Wed 24 Oct 2018 10:46:01 BST
# gpg:                using RSA key 3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20181024: (44 commits)
  target/arm: Only flush tlb if ASID changes
  target/arm: Remove writefn from TTBR0_EL3
  net: cadence_gem: Announce 64bit addressing support
  net: cadence_gem: Announce availability of priority queues
  target/arm: Reorg NEON VLD/VST single element to one lane
  target/arm: Promote consecutive memory ops for aa32
  target/arm: Reorg NEON VLD/VST all elements
  target/arm: Use gvec for NEON VLD all lanes
  target/arm: Use gvec for NEON_3R_VTST_VCEQ, NEON_3R_VCGT, NEON_3R_VCGE
  target/arm: Use gvec for NEON_3R_VML
  target/arm: Use gvec for VSRI, VSLI
  target/arm: Use gvec for VSRA
  target/arm: Use gvec for VSHR, VSHL
  target/arm: Use gvec for NEON_3R_VMUL
  target/arm: Use gvec for NEON_2RM_VMN, NEON_2RM_VNEG
  target/arm: Use gvec for NEON_3R_VADD_VSUB insns
  target/arm: Use gvec for NEON_3R_LOGIC insns
  target/arm: Use gvec for NEON VMOV, VMVN, VBIC & VORR (immediate)
  target/arm: Use gvec for NEON VDUP
  target/arm: Mark some arrays const
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 13399aad 93f379b0
Loading
Loading
Loading
Loading
+18 −0
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "exec/address-spaces.h"
#include "qemu/units.h"

/* Kernel boot protocol is specified in the kernel docs
 * Documentation/arm/Booting and Documentation/arm64/booting.txt
@@ -36,6 +37,8 @@
#define ARM64_TEXT_OFFSET_OFFSET    8
#define ARM64_MAGIC_OFFSET          56

#define BOOTLOADER_MAX_SIZE         (4 * KiB)

AddressSpace *arm_boot_address_space(ARMCPU *cpu,
                                     const struct arm_boot_info *info)
{
@@ -184,6 +187,8 @@ static void write_bootloader(const char *name, hwaddr addr,
        code[i] = tswap32(insn);
    }

    assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE);

    rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);

    g_free(code);
@@ -919,6 +924,19 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
        memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
        if (hdrvals[1] != 0) {
            kernel_load_offset = le64_to_cpu(hdrvals[0]);

            /*
             * We write our startup "bootloader" at the very bottom of RAM,
             * so that bit can't be used for the image. Luckily the Image
             * format specification is that the image requests only an offset
             * from a 2MB boundary, not an absolute load address. So if the
             * image requests an offset that might mean it overlaps with the
             * bootloader, we can just load it starting at 2MB+offset rather
             * than 0MB + offset.
             */
            if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
                kernel_load_offset += 2 * MiB;
            }
        }
    }

+6 −6
Original line number Diff line number Diff line
@@ -1055,17 +1055,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
    case 0xd5c: /* MMFR3.  */
        return cpu->id_mmfr3;
    case 0xd60: /* ISAR0.  */
        return cpu->id_isar0;
        return cpu->isar.id_isar0;
    case 0xd64: /* ISAR1.  */
        return cpu->id_isar1;
        return cpu->isar.id_isar1;
    case 0xd68: /* ISAR2.  */
        return cpu->id_isar2;
        return cpu->isar.id_isar2;
    case 0xd6c: /* ISAR3.  */
        return cpu->id_isar3;
        return cpu->isar.id_isar3;
    case 0xd70: /* ISAR4.  */
        return cpu->id_isar4;
        return cpu->isar.id_isar4;
    case 0xd74: /* ISAR5.  */
        return cpu->id_isar5;
        return cpu->isar.id_isar5;
    case 0xd78: /* CLIDR */
        return cpu->clidr;
    case 0xd7c: /* CTR */
+8 −1
Original line number Diff line number Diff line
@@ -142,6 +142,7 @@
#define GEM_DESCONF4      (0x0000028C/4)
#define GEM_DESCONF5      (0x00000290/4)
#define GEM_DESCONF6      (0x00000294/4)
#define GEM_DESCONF6_64B_MASK (1U << 23)
#define GEM_DESCONF7      (0x00000298/4)

#define GEM_INT_Q1_STATUS               (0x00000400 / 4)
@@ -1283,6 +1284,7 @@ static void gem_reset(DeviceState *d)
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

@@ -1299,7 +1301,12 @@ static void gem_reset(DeviceState *d)
    s->regs[GEM_DESCONF] = 0x02500111;
    s->regs[GEM_DESCONF2] = 0x2ab13fff;
    s->regs[GEM_DESCONF5] = 0x002f2045;
    s->regs[GEM_DESCONF6] = 0x00000200;
    s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;

    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[GEM_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
+2 −0
Original line number Diff line number Diff line
@@ -284,6 +284,8 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
    k->cs_polarity = SSI_CS_LOW;
    dc->vmsd = &vmstate_ssi_sd;
    dc->reset = ssi_sd_reset;
    /* Reason: init() method uses drive_get_next() */
    dc->user_creatable = false;
}

static const TypeInfo ssi_sd_info = {
+2 −2
Original line number Diff line number Diff line
@@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env,
            break;

        case TARGET_SVE_MAGIC:
            if (arm_feature(env, ARM_FEATURE_SVE)) {
            if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
                vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                if (!sve && size == sve_size) {
@@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (arm_feature(env, ARM_FEATURE_SVE)) {
    if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
Loading