Commit 88ddf0df authored by Will Deacon's avatar Will Deacon
Browse files

Merge branch 'for-next/cpufeature' into for-next/core

Support for overriding CPU ID register fields on the command-line, which
allows us to disable certain features which the kernel would otherwise
use unconditionally when detected.

* for-next/cpufeature: (22 commits)
  arm64: cpufeatures: Allow disabling of Pointer Auth from the command-line
  arm64: Defer enabling pointer authentication on boot core
  arm64: cpufeatures: Allow disabling of BTI from the command-line
  arm64: Move "nokaslr" over to the early cpufeature infrastructure
  KVM: arm64: Document HVC_VHE_RESTART stub hypercall
  arm64: Make kvm-arm.mode={nvhe, protected} an alias of id_aa64mmfr1.vh=0
  arm64: Add an aliasing facility for the idreg override
  arm64: Honor VHE being disabled from the command-line
  arm64: Allow ID_AA64MMFR1_EL1.VH to be overridden from the command line
  arm64: cpufeature: Add an early command-line cpufeature override facility
  arm64: Extract early FDT mapping from kaslr_early_init()
  arm64: cpufeature: Use IDreg override in __read_sysreg_by_encoding()
  arm64: cpufeature: Add global feature override facility
  arm64: Move SCTLR_EL1 initialisation to EL-agnostic code
  arm64: Simplify init_el2_state to be non-VHE only
  arm64: Move VHE-specific SPE setup to mutate_to_vhe()
  arm64: Drop early setting of MDSCR_EL2.TPMS
  arm64: Initialise as nVHE before switching to VHE
  arm64: Provide an 'upgrade to VHE' stub hypercall
  arm64: Turn the MMU-on sequence into a macro
  ...
parents bab8443b f8da5752
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -373,6 +373,12 @@
	arcrimi=	[HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards
			Format: <io>,<irq>,<nodeID>

	arm64.nobti	[ARM64] Unconditionally disable Branch Target
			Identification support

	arm64.nopauth	[ARM64] Unconditionally disable Pointer Authentication
			support

	ataflop=	[HW,M68k]

	atarimouse=	[HW,MOUSE] Atari Mouse
@@ -2257,6 +2263,9 @@
	kvm-arm.mode=
			[KVM,ARM] Select one of KVM/arm64's modes of operation.

			nvhe: Standard nVHE-based mode, without support for
			      protected guests.

			protected: nVHE-based mode with support for guests whose
				   state is kept private from the host.
				   Not valid if the kernel is running in EL2.
+9 −0
Original line number Diff line number Diff line
@@ -58,6 +58,15 @@ these functions (see arch/arm{,64}/include/asm/virt.h):
  into place (arm64 only), and jump to the restart address while at HYP/EL2.
  This hypercall is not expected to return to its caller.

* ::

    x0 = HVC_VHE_RESTART (arm64 only)

  Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling
  the VHE mode. This is conditional on the CPU supporting VHE, the EL2 MMU
  being off, and VHE not being disabled by any other means (command line
  option, for example).

Any other value of r0/x0 triggers a hypervisor-specific handling,
which is not documented here.

+17 −0
Original line number Diff line number Diff line
@@ -675,6 +675,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
	.endif
	.endm

/*
 * Set SCTLR_EL1 to the passed value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 *
 * Clobbers: nothing beyond \reg's value being written to SCTLR_EL1.
 * The barrier sequence below is order-critical — do not reorder.
 */
.macro set_sctlr_el1, reg
	msr	sctlr_el1, \reg
	isb				// synchronise the SCTLR_EL1 write
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh			// complete the invalidation (local CPU)
	isb				// refetch instructions post-invalidate
.endm

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
+11 −0
Original line number Diff line number Diff line
@@ -63,6 +63,11 @@ struct arm64_ftr_bits {
	s64		safe_val; /* safe value for FTR_EXACT features */
};

/*
 * Command-line override state for a CPU ID feature register.
 * NOTE(review): presumably @mask selects which register bits are being
 * overridden and @val supplies the replacement values for those bits —
 * confirm against the consumer in cpufeature.c (not visible here).
 */
struct arm64_ftr_override {
	u64		val;	/* override values for the bits set in @mask */
	u64		mask;	/* bits of @val that are valid */
};

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask		Bits which should match across all CPUs for sanity.
@@ -74,6 +79,7 @@ struct arm64_ftr_reg {
	u64				user_mask;
	u64				sys_val;
	u64				user_val;
	struct arm64_ftr_override	*override;
	const struct arm64_ftr_bits	*ftr_bits;
};

@@ -600,6 +606,7 @@ void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);
u64 __read_sysreg_by_encoding(u32 sys_id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
	return 8;
}

extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64pfr1_override;
extern struct arm64_ftr_override id_aa64isar1_override;

u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);

+20 −40
Original line number Diff line number Diff line
@@ -32,46 +32,39 @@
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers mode
.ifeqs "\mode", "nvhe"
.macro __init_el2_timers
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
.endif
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm

.macro __init_el2_debug mode
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	1f				// Skip if no PMU present
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
1:
.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 3f				// Skip if SPE not present
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

.ifeqs "\mode", "nvhe"
	mrs_s	x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x0, 2f				// then permit sampling of physical
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
2:
.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.
.else
	orr	x2, x2, #MDCR_EL2_TPMS		// For VHE, use EL2 translation
						// and disable access from EL1
.endif

3:
.Lskip_spe_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

@@ -79,9 +72,9 @@
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
1:
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
@@ -93,7 +86,7 @@
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, 1f
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
@@ -103,7 +96,7 @@
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 1f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults
1:
.Lskip_gicv3_\@:
.endm

.macro __init_el2_hstr
@@ -128,14 +121,14 @@
.macro __init_el2_nvhe_sve
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 1f
	cbz	x1, .Lskip_sve_\@

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
1:
.Lskip_sve_\@:
.endm

.macro __init_el2_nvhe_prepare_eret
@@ -145,37 +138,24 @@

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2.
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state mode
.ifnes "\mode", "vhe"
.ifnes "\mode", "nvhe"
.error "Invalid 'mode' argument"
.endif
.endif

.macro init_el2_state
	__init_el2_sctlr
	__init_el2_timers \mode
	__init_el2_debug \mode
	__init_el2_timers
	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr

	/*
	 * When VHE is not in use, early init of EL2 needs to be done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
.ifeqs "\mode", "nvhe"
	__init_el2_nvhe_idregs
	__init_el2_nvhe_cptr
	__init_el2_nvhe_sve
	__init_el2_nvhe_prepare_eret
.endif
.endm

#endif /* __ARM_KVM_INIT_H__ */
Loading