Commit c4792b6d authored by Will Deacon, committed by Marc Zyngier
Browse files

arm64: spectre: Rename ARM64_HARDEN_EL2_VECTORS to ARM64_SPECTRE_V3A



Since ARM64_HARDEN_EL2_VECTORS is really a mitigation for Spectre-v3a,
rename it accordingly for consistency with the v2 and v4 mitigations.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-9-will@kernel.org
parent b881cdce
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ hypervisor maps kernel pages in EL2 at a fixed (and potentially
random) offset from the linear mapping. See the kern_hyp_va macro and
kvm_update_va_mask function for more details. MMIO devices such as
GICv2 gets mapped next to the HYP idmap page, as do vectors when
ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
ARM64_SPECTRE_V3A is enabled for particular CPUs.

When using KVM with the Virtualization Host Extensions, no additional
mappings are created, since the host kernel runs directly in EL2.
+1 −1
Original line number Diff line number Diff line
@@ -21,7 +21,7 @@
#define ARM64_HAS_VIRT_HOST_EXTN		11
#define ARM64_WORKAROUND_CAVIUM_27456		12
#define ARM64_HAS_32BIT_EL0			13
#define ARM64_HARDEN_EL2_VECTORS		14
#define ARM64_SPECTRE_V3A			14
#define ARM64_HAS_CNP				15
#define ARM64_HAS_NO_FPSIMD			16
#define ARM64_WORKAROUND_REPEAT_TLBI		17
+1 −1
Original line number Diff line number Diff line
@@ -83,7 +83,7 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
+3 −3
Original line number Diff line number Diff line
@@ -460,10 +460,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_RANDOMIZE_BASE
	{
	/* Must come after the Spectre-v2 entry */
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		ERRATA_MIDR_RANGE_LIST(ca57_a72),
		.cpu_enable = cpu_el2_vector_harden_enable,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
+10 −3
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
@@ -270,11 +270,18 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
	update_mitigation_state(&spectre_v2_state, state);
}

void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused)
/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS))
	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

Loading