Commit a027b2ec authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull kvm fixes from Paolo Bonzini:
 "x86:

   - Fix SEV race condition

  ARM:

   - Fixes for the configuration of SVE/SME traps when hVHE mode is in
     use

   - Allow use of pKVM on systems with FF-A implementations that are
     v1.0 compatible

   - Request/release percpu IRQs (arch timer, vGIC maintenance)
     correctly when pKVM is in use

   - Fix function prototype after __kvm_host_psci_cpu_entry() rename

   - Skip to the next instruction when emulating writes to TCR_EL1 on
     AmpereOne systems

  Selftests:

   - Fix missing include"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests/rseq: Fix build with undefined __weak
  KVM: SEV: remove ghcb variable declarations
  KVM: SEV: only access GHCB fields once
  KVM: SEV: snapshot the GHCB before accessing it
  KVM: arm64: Skip instruction after emulating write to TCR_EL1
  KVM: arm64: fix __kvm_host_psci_cpu_entry() prototype
  KVM: arm64: Fix resetting SME trap values on reset for (h)VHE
  KVM: arm64: Fix resetting SVE trap values on reset for hVHE
  KVM: arm64: Use the appropriate feature trap register when activating traps
  KVM: arm64: Helper to write to appropriate feature trap register based on mode
  KVM: arm64: Disable SME traps for (h)VHE at setup
  KVM: arm64: Use the appropriate feature trap register for SVE at EL2 setup
  KVM: arm64: Factor out code for checking (h)VHE mode into a macro
  KVM: arm64: Rephrase percpu enable/disable tracking in terms of hyp
  KVM: arm64: Fix hardware enable/disable flows for pKVM
  KVM: arm64: Allow pKVM on v1.0 compatible FF-A implementations
parents 016ce297 d5ad9aae
Loading
Loading
Loading
Loading
+29 −15
Original line number Diff line number Diff line
@@ -31,6 +31,13 @@
.Lskip_hcrx_\@:
.endm

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2		// read the EL2 configuration register
	and	\tmp, \tmp, #HCR_E2H	// isolate the E2H ("EL2 Host") bit
	cbz	\tmp, \fail		// E2H clear => nVHE; branch to \fail
.endm

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
@@ -43,9 +50,7 @@
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	mrs	x1, hcr_el2
	and	x1, x1, #HCR_E2H
	cbz	x1, .LnVHE_\@
	__check_hvhe .LnVHE_\@, x1
	lsl	x0, x0, #10
.LnVHE_\@:
	msr	cnthctl_el2, x0
@@ -139,15 +144,14 @@

/* Coprocessor traps */
.macro __init_el2_cptr
	mrs	x1, hcr_el2
	and	x1, x1, #HCR_E2H
	cbz	x1, .LnVHE_\@
	__check_hvhe .LnVHE_\@, x1
	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
	b	.Lset_cptr_\@
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
.Lset_cptr_\@:
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm

/* Disable any fine grained traps */
@@ -268,19 +272,19 @@
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	mrs	x0, cptr_el2			// Disable SVE traps
	mrs	x1, hcr_el2
	and	x1, x1, #HCR_E2H
	cbz	x1, .Lcptr_nvhe_\@
	__check_hvhe .Lcptr_nvhe_\@, x1

	// VHE case
	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
	b	.Lset_cptr_\@
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
.Lset_cptr_\@:
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
@@ -289,9 +293,19 @@
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
+1 −1
Original line number Diff line number Diff line
@@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
+17 −4
Original line number Diff line number Diff line
@@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
	return test_bit(feature, vcpu->arch.features);
}

/*
 * Write the feature-trap control value to whichever register governs
 * traps for the mode KVM is running in: with VHE or hVHE the controls
 * live in CPACR_EL1, otherwise (nVHE) in CPTR_EL2.  Callers must pass
 * @val already encoded for the register that will be written.
 */
static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);	/* (h)VHE: CPACR_EL1 layout */
	else
		write_sysreg(val, cptr_el2);	/* nVHE: CPTR_EL2 layout */
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;
@@ -578,8 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

@@ -597,9 +613,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
	kvm_write_cptr_el2(val);
}
#endif /* __ARM64_KVM_EMULATE_H__ */
+27 −34
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@ DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present, kvm_arm_initialised;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

bool is_kvm_arm_initialised(void)
@@ -1864,18 +1864,24 @@ static void cpu_hyp_reinit(void)
	cpu_hyp_init_features();
}

static void _kvm_arch_hardware_enable(void *discard)
static void cpu_hyp_init(void *discard)
{
	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
	if (!__this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reinit();
		__this_cpu_write(kvm_arm_hardware_enabled, 1);
		__this_cpu_write(kvm_hyp_initialized, 1);
	}
}

int kvm_arch_hardware_enable(void)
static void cpu_hyp_uninit(void *discard)
{
	int was_enabled;
	if (__this_cpu_read(kvm_hyp_initialized)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_hyp_initialized, 0);
	}
}

int kvm_arch_hardware_enable(void)
{
	/*
	 * Most calls to this function are made with migration
	 * disabled, but not with preemption disabled. The former is
@@ -1884,36 +1890,23 @@ int kvm_arch_hardware_enable(void)
	 */
	preempt_disable();

	was_enabled = __this_cpu_read(kvm_arm_hardware_enabled);
	_kvm_arch_hardware_enable(NULL);
	cpu_hyp_init(NULL);

	if (!was_enabled) {
	kvm_vgic_cpu_up();
	kvm_timer_cpu_up();
	}

	preempt_enable();

	return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
		cpu_hyp_reset();
		__this_cpu_write(kvm_arm_hardware_enabled, 0);
	}
}

void kvm_arch_hardware_disable(void)
{
	if (__this_cpu_read(kvm_arm_hardware_enabled)) {
	kvm_timer_cpu_down();
	kvm_vgic_cpu_down();
	}

	if (!is_protected_kvm_enabled())
		_kvm_arch_hardware_disable(NULL);
		cpu_hyp_uninit(NULL);
}

#ifdef CONFIG_CPU_PM
@@ -1922,16 +1915,16 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    void *v)
{
	/*
	 * kvm_arm_hardware_enabled is left with its old value over
	 * kvm_hyp_initialized is left with its old value over
	 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
	 * re-enable hyp.
	 */
	switch (cmd) {
	case CPU_PM_ENTER:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
		if (__this_cpu_read(kvm_hyp_initialized))
			/*
			 * don't update kvm_arm_hardware_enabled here
			 * so that the hardware will be re-enabled
			 * don't update kvm_hyp_initialized here
			 * so that the hyp will be re-enabled
			 * when we resume. See below.
			 */
			cpu_hyp_reset();
@@ -1939,8 +1932,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
		return NOTIFY_OK;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (__this_cpu_read(kvm_arm_hardware_enabled))
			/* The hardware was enabled before suspend. */
		if (__this_cpu_read(kvm_hyp_initialized))
			/* The hyp was enabled before suspend. */
			cpu_hyp_reinit();

		return NOTIFY_OK;
@@ -2021,7 +2014,7 @@ static int __init init_subsystems(void)
	/*
	 * Enable hardware so that subsystem initialisation can access EL2.
	 */
	on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
	on_each_cpu(cpu_hyp_init, NULL, 1);

	/*
	 * Register CPU lower-power notifier
@@ -2059,7 +2052,7 @@ static int __init init_subsystems(void)
		hyp_cpu_pm_exit();

	if (err || !is_protected_kvm_enabled())
		on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
		on_each_cpu(cpu_hyp_uninit, NULL, 1);

	return err;
}
@@ -2097,7 +2090,7 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
	 * The stub hypercalls are now disabled, so set our local flag to
	 * prevent a later re-init attempt in kvm_arch_hardware_enable().
	 */
	__this_cpu_write(kvm_arm_hardware_enabled, 1);
	__this_cpu_write(kvm_hyp_initialized, 1);
	preempt_enable();

	return ret;
+1 −0
Original line number Diff line number Diff line
@@ -457,6 +457,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}

Loading