Commit 3864d17f authored by Marc Zyngier

Merge branch kvm-arm64/pkvm/restrict-hypercalls into kvmarm-master/next



* kvm-arm64/pkvm/restrict-hypercalls:
  : .
  : Restrict the use of some hypercalls as well as kexec once
  : the protected KVM mode has been initialised.
  : .
  KVM: arm64: Disable privileged hypercalls after pKVM finalisation
  KVM: arm64: Prevent re-finalisation of pKVM for a given CPU
  KVM: arm64: Propagate errors from __pkvm_prot_finalize hypercall
  KVM: arm64: Reject stub hypercalls after pKVM has been initialised
  arm64: Prevent kexec and hibernation if is_protected_kvm_enabled()
  KVM: arm64: Turn __KVM_HOST_SMCCC_FUNC_* into an enum (mostly)

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 9e1ff307 057bed20
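
The common thread in this series is an ordered dispatch table with a privilege floor: the host hypercall IDs are renumbered so that every call that is only legal before pKVM finalisation sorts below __pkvm_prot_finalize, and once the kvm_protected_mode_initialized static key is set, handle_host_hcall() refuses any ID under that boundary. A minimal userspace sketch of the same pattern (hypothetical names; a plain int stands in for the static key):

#include <stdio.h>

/* Hypothetical call IDs, ordered by privilege like enum __kvm_host_smccc_func:
 * everything below the FINALIZE boundary is only legal before finalisation. */
enum call_id {
	CALL_EARLY_SETUP,	/* pre-finalisation only */
	CALL_FINALIZE,		/* the boundary itself */
	CALL_RUN_VCPU,		/* still allowed afterwards */
	NR_CALLS,
};

typedef void (*handler_t)(void);

static void handle_early_setup(void) { puts("early setup"); }
static void handle_finalize(void)    { puts("finalize");    }
static void handle_run_vcpu(void)    { puts("run vcpu");    }

static const handler_t handlers[NR_CALLS] = {
	[CALL_EARLY_SETUP] = handle_early_setup,
	[CALL_FINALIZE]    = handle_finalize,
	[CALL_RUN_VCPU]    = handle_run_vcpu,
};

static int finalised;	/* stand-in for the kvm_protected_mode_initialized key */

static int dispatch(unsigned int id)
{
	/* After finalisation, raise the floor to the boundary ID */
	unsigned int min = finalised ? CALL_FINALIZE : 0;

	if (id < min || id >= NR_CALLS || !handlers[id])
		return -1;	/* ~SMCCC_RET_NOT_SUPPORTED */

	handlers[id]();
	return 0;
}

int main(void)
{
	dispatch(CALL_EARLY_SETUP);	/* accepted: not yet finalised */
	finalised = 1;
	dispatch(CALL_EARLY_SETUP);	/* now rejected: privileged call */
	dispatch(CALL_RUN_VCPU);	/* still accepted */
	return 0;
}

In the diffs below, the enum in kvm_asm.h supplies the ordering and hyp-main.c applies the floor.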
arch/arm64/include/asm/kvm_asm.h (+27 −20)
@@ -44,31 +44,38 @@
 #define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
 
 #define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
-#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context		5
-#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
-#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config		8
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
-#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14
-#define __KVM_HOST_SMCCC_FUNC___pkvm_init			15
-#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp		16
-#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping	17
-#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector		18
-#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize		19
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc			20
 
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
 
+enum __kvm_host_smccc_func {
+	/* Hypercalls available only prior to pKVM finalisation */
+	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
+	__KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
+	__KVM_HOST_SMCCC_FUNC___pkvm_init,
+	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
+	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
+	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
+
+	/* Hypercalls available after pKVM finalisation */
+	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
+	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
+	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
+	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
+	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
+	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+};
+
 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
 #define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
 
arch/arm64/kernel/smp.c (+2 −1)
@@ -1128,5 +1128,6 @@ bool cpus_are_stuck_in_kernel(void)
 {
 	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
 
-	return !!cpus_stuck_in_kernel || smp_spin_tables;
+	return !!cpus_stuck_in_kernel || smp_spin_tables ||
+		is_protected_kvm_enabled();
 }
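
For context, hooking cpus_are_stuck_in_kernel() is enough to block both reboot paths named in the merge description: the kexec path already refuses to proceed when it returns true, and hibernation performs an equivalent check. Roughly, paraphrasing machine_kexec_prepare() in arch/arm64/kernel/machine_kexec.c (not part of this diff):

int machine_kexec_prepare(struct kimage *kimage)
{
	/* With pKVM finalised there is no way to tear down EL2, so the
	 * secondary CPUs count as stuck and a non-crash kexec is refused. */
	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
		return -EBUSY;
	}

	return 0;
}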
arch/arm64/kvm/arm.c (+42 −19)
@@ -1579,25 +1579,33 @@ static void cpu_set_hyp_vector(void)
 		kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
 }
 
-static void cpu_hyp_reinit(void)
+static void cpu_hyp_init_context(void)
 {
 	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
 
-	cpu_hyp_reset();
-
-	if (is_kernel_in_hyp_mode())
-		kvm_timer_init_vhe();
-	else
+	if (!is_kernel_in_hyp_mode())
 		cpu_init_hyp_mode();
+}
 
+static void cpu_hyp_init_features(void)
+{
 	cpu_set_hyp_vector();
-
 	kvm_arm_init_debug();
 
+	if (is_kernel_in_hyp_mode())
+		kvm_timer_init_vhe();
+
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
 }
 
+static void cpu_hyp_reinit(void)
+{
+	cpu_hyp_reset();
+	cpu_hyp_init_context();
+	cpu_hyp_init_features();
+}
+
 static void _kvm_arch_hardware_enable(void *discard)
 {
 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1788,10 +1796,17 @@ static int do_pkvm_init(u32 hyp_va_bits)
 	int ret;
 
 	preempt_disable();
-	hyp_install_host_vector();
+	cpu_hyp_init_context();
 	ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
 				num_possible_cpus(), kern_hyp_va(per_cpu_base),
 				hyp_va_bits);
+	cpu_hyp_init_features();
+
+	/*
+	 * The stub hypercalls are now disabled, so set our local flag to
+	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
+	 */
+	__this_cpu_write(kvm_arm_hardware_enabled, 1);
 	preempt_enable();
 
 	return ret;
@@ -1971,9 +1986,25 @@ static int init_hyp_mode(void)
 	return err;
 }
 
-static void _kvm_host_prot_finalize(void *discard)
+static void _kvm_host_prot_finalize(void *arg)
 {
-	WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
+	int *err = arg;
+
+	if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
+		WRITE_ONCE(*err, -EINVAL);
+}
+
+static int pkvm_drop_host_privileges(void)
+{
+	int ret = 0;
+
+	/*
+	 * Flip the static key upfront as that may no longer be possible
+	 * once the host stage 2 is installed.
+	 */
+	static_branch_enable(&kvm_protected_mode_initialized);
+	on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
+	return ret;
+}
 
 static int finalize_hyp_mode(void)
@@ -1987,15 +2018,7 @@ static int finalize_hyp_mode(void)
 	 * None of other sections should ever be introspected.
 	 */
 	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-
-	/*
-	 * Flip the static key upfront as that may no longer be possible
-	 * once the host stage 2 is installed.
-	 */
-	static_branch_enable(&kvm_protected_mode_initialized);
-	on_each_cpu(_kvm_host_prot_finalize, NULL, 1);
-
-	return 0;
+	return pkvm_drop_host_privileges();
 }
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
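
pkvm_drop_host_privileges() shows a common on_each_cpu() idiom: the IPI callback returns void, so per-CPU failures are funnelled back through a shared int via WRITE_ONCE() (the key is flipped first because, as the comment notes, that may no longer be possible once the host stage 2 is installed). A rough userspace analogy of the error plumbing (hypothetical, with threads standing in for CPUs):

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static int err;	/* shared error slot, like the &ret passed to on_each_cpu() */

/* Per-"CPU" work: the callback cannot return a value to the caller,
 * so a failure is recorded through the shared pointer instead. */
static void *finalize_one(void *arg)
{
	int *e = arg;
	int failed = 0;	/* stand-in for a failing __pkvm_prot_finalize */

	if (failed)
		__atomic_store_n(e, -1, __ATOMIC_RELAXED);	/* ~WRITE_ONCE() */
	return NULL;
}

int main(void)
{
	pthread_t t[NR_WORKERS];

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_create(&t[i], NULL, finalize_one, &err);
	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(t[i], NULL);

	/* Like pkvm_drop_host_privileges(): one shared result for all CPUs */
	printf("finalise: %s\n", err ? "failed" : "ok");
	return 0;
}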
arch/arm64/kvm/hyp/nvhe/host.S (+17 −9)
@@ -110,17 +110,14 @@ SYM_FUNC_START(__hyp_do_panic)
 	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)
 
-.macro host_el1_sync_vect
-	.align 7
-.L__vect_start\@:
-	stp	x0, x1, [sp, #-16]!
-	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
-	cmp	x0, #ESR_ELx_EC_HVC64
-	b.ne	__host_exit
-
+SYM_FUNC_START(__host_hvc)
 	ldp	x0, x1, [sp]		// Don't fixup the stack yet
 
+	/* No stub for you, sonny Jim */
+alternative_if ARM64_KVM_PROTECTED_MODE
+	b	__host_exit
+alternative_else_nop_endif
+
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.hs	__host_exit
@@ -137,6 +134,17 @@ SYM_FUNC_END(__hyp_do_panic)
 	ldr	x5, =__kvm_handle_stub_hvc
 	hyp_pa	x5, x6
 	br	x5
+SYM_FUNC_END(__host_hvc)
+
+.macro host_el1_sync_vect
+	.align 7
+.L__vect_start\@:
+	stp	x0, x1, [sp, #-16]!
+	mrs	x0, esr_el2
+	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	cmp	x0, #ESR_ELx_EC_HVC64
+	b.eq	__host_hvc
+	b	__host_exit
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
 	.error "host_el1_sync_vect larger than vector entry"
arch/arm64/kvm/hyp/nvhe/hyp-main.c (+26 −11)
@@ -165,36 +165,51 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
 #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
 
 static const hcall_t host_hcall[] = {
-	HANDLE_FUNC(__kvm_vcpu_run),
+	/* ___kvm_hyp_init */
+	HANDLE_FUNC(__kvm_get_mdcr_el2),
+	HANDLE_FUNC(__pkvm_init),
+	HANDLE_FUNC(__pkvm_create_private_mapping),
+	HANDLE_FUNC(__pkvm_cpu_set_vector),
+	HANDLE_FUNC(__kvm_enable_ssbs),
+	HANDLE_FUNC(__vgic_v3_init_lrs),
+	HANDLE_FUNC(__vgic_v3_get_gic_config),
+	HANDLE_FUNC(__pkvm_prot_finalize),
+
+	HANDLE_FUNC(__pkvm_host_share_hyp),
+	HANDLE_FUNC(__kvm_adjust_pc),
+	HANDLE_FUNC(__kvm_vcpu_run),
 	HANDLE_FUNC(__kvm_flush_vm_context),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
 	HANDLE_FUNC(__kvm_flush_cpu_context),
 	HANDLE_FUNC(__kvm_timer_set_cntvoff),
-	HANDLE_FUNC(__kvm_enable_ssbs),
-	HANDLE_FUNC(__vgic_v3_get_gic_config),
 	HANDLE_FUNC(__vgic_v3_read_vmcr),
 	HANDLE_FUNC(__vgic_v3_write_vmcr),
-	HANDLE_FUNC(__vgic_v3_init_lrs),
-	HANDLE_FUNC(__kvm_get_mdcr_el2),
 	HANDLE_FUNC(__vgic_v3_save_aprs),
 	HANDLE_FUNC(__vgic_v3_restore_aprs),
-	HANDLE_FUNC(__pkvm_init),
-	HANDLE_FUNC(__pkvm_cpu_set_vector),
-	HANDLE_FUNC(__pkvm_host_share_hyp),
-	HANDLE_FUNC(__pkvm_create_private_mapping),
-	HANDLE_FUNC(__pkvm_prot_finalize),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(unsigned long, id, host_ctxt, 0);
+	unsigned long hcall_min = 0;
 	hcall_t hfn;
 
+	/*
+	 * If pKVM has been initialised then reject any calls to the
+	 * early "privileged" hypercalls. Note that we cannot reject
+	 * calls to __pkvm_prot_finalize for two reasons: (1) The static
+	 * key used to determine initialisation must be toggled prior to
+	 * finalisation and (2) finalisation is performed on a per-CPU
+	 * basis. This is all fine, however, since __pkvm_prot_finalize
+	 * returns -EPERM after the first call for a given CPU.
+	 */
+	if (static_branch_unlikely(&kvm_protected_mode_initialized))
+		hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+
 	id -= KVM_HOST_SMCCC_ID(0);
 
-	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
+	if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
 		goto inval;
 
 	hfn = host_hcall[id];
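
The range check above only stays correct as long as enum __kvm_host_smccc_func keeps every pre-finalisation ID below __pkvm_prot_finalize and every post-finalisation ID above it. A hypothetical compile-time guard (not part of the series) could pin that invariant down:

#include <linux/build_bug.h>

/* Hypothetical guards, not in the patch: the hcall_min range check in
 * handle_host_hcall() assumes this ordering of the enum. */
static_assert(__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector <
	      __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
	      "privileged hypercalls must sort below __pkvm_prot_finalize");
static_assert(__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run >
	      __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
	      "post-finalisation hypercalls must sort above the boundary");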