Commit dff9b3dd authored by James Morse, committed by Zheng Zengkai
Browse files

KVM: arm64: Allow indirect vectors to be used without SPECTRE_V3A

stable inclusion
from stable-v5.10.105
commit 192023e6baf7cce7fb76ff3a5c24c55968c774ff
category: bugfix
bugzilla: 186460 https://gitee.com/src-openeuler/kernel/issues/I53MHA
CVE: CVE-2022-23960

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=192023e6baf7



--------------------------------

commit 5bdf3437 upstream.

CPUs vulnerable to Spectre-BHB either need to make an SMC-CC firmware
call from the vectors, or run a sequence of branches. This gets added
to the hyp vectors. If there is no support for arch-workaround-1 in
firmware, the indirect vector will be used.

kvm_init_vector_slots() only initialises the two indirect slots if
the platform is vulnerable to Spectre-v3a. pKVM's hyp_map_vectors()
only initialises __hyp_bp_vect_base if the platform is vulnerable to
Spectre-v3a.

As there are about to be more users of the indirect vectors, ensure
their entries in hyp_spectre_vector_selector[] are always initialised,
and __hyp_bp_vect_base defaults to the regular VA mapping.

The Spectre-v3a check is moved to a helper
kvm_system_needs_idmapped_vectors(), and merged with the code
that creates the hyp mappings.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Chen Jiahao <chenjiahao16@huawei.com>
Reviewed-by: Liao Chang <liaochang1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent adad10b3
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -71,6 +71,7 @@
#define ARM64_WORKAROUND_HISILICON_1980005	63
#define ARM64_HAS_ECV				64
#define ARM64_HAS_EPAN				65
#define ARM64_SPECTRE_BHB			66

#define ARM64_NCAPS				80

+6 −0
Original line number Diff line number Diff line
@@ -35,6 +35,8 @@
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36
#define __SMCCC_WORKAROUND_3_SMC_SZ 36
#define __SPECTRE_BHB_LOOP_SZ       44

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
@@ -199,6 +201,10 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ];
extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ];
extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ];
extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol
+2 −1
Original line number Diff line number Diff line
@@ -237,7 +237,8 @@ static inline void *kvm_get_hyp_vector(void)
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
	if ((cpus_have_const_cap(ARM64_SPECTRE_V2) ||
	     cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}
+6 −0
Original line number Diff line number Diff line
@@ -67,6 +67,12 @@ typedef void (*bp_hardening_cb_t)(void);
struct bp_hardening_data {
	/* EL2 vector slot this CPU's mitigation sequence was copied into */
	int			hyp_vectors_slot;
	/* per-CPU branch-predictor hardening callback (may be NULL) */
	bp_hardening_cb_t	fn;

	/*
	 * template_start is only used by the BHB mitigation to identify the
	 * hyp_vectors_slot sequence.
	 */
	const char *template_start;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+46 −1
Original line number Diff line number Diff line
@@ -221,9 +221,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_RAW_SPINLOCK(bp_lock);
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
@@ -254,6 +254,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	raw_spin_unlock(&bp_lock);
}
#else
@@ -820,3 +821,47 @@ enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

/*
 * Map a Spectre-BHB mitigation template to the size of the vector
 * sequence that must be copied into a hyp vector slot.
 *
 * Returns 0 when @start is not one of the known mitigation templates.
 */
static int kvm_bhb_get_vecs_size(const char *start)
{
	if (start == __spectre_bhb_loop_k8 ||
	    start == __spectre_bhb_loop_k24 ||
	    start == __spectre_bhb_loop_k32)
		return __SPECTRE_BHB_LOOP_SZ;

	if (start == __smccc_workaround_3_smc)
		return __SMCCC_WORKAROUND_3_SMC_SZ;

	return 0;
}

/*
 * Install the Spectre-BHB mitigation template @hyp_vecs_start into an EL2
 * vector slot for the calling CPU, reusing a slot if any other CPU already
 * published the same template. Runs under bp_lock to serialise slot
 * allocation against install_bp_hardening_cb().
 */
void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1, size;
	const char *hyp_vecs_end;

	/* Nothing to do without KVM, or when EL2 is not usable by the host */
	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	/* Unknown template (size == 0) or NULL is a caller bug; warn once */
	size = kvm_bhb_get_vecs_size(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !size))
		return;
	hyp_vecs_end = hyp_vecs_start + size;

	raw_spin_lock(&bp_lock);
	/* Reuse the slot if any CPU already copied this exact template */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	/* First user of this template: claim a fresh slot and copy it in */
	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	/*
	 * Publish the slot and template for this CPU; template_start is what
	 * the reuse scan above matches on for subsequent CPUs.
	 */
	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	raw_spin_unlock(&bp_lock);
}
Loading