Commit 4efc0ede authored by Marc Zyngier

KVM: arm64: Unify stage-2 programming behind __load_stage2()



The protected mode relies on a separate helper to load the
S2 context. Move over to the __load_guest_stage2() helper
instead, and rename it to __load_stage2() to present a unified
interface.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210806113109.2475-5-will@kernel.org
parent 923a547d
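
For reference, the unified helper after this change looks as follows; this is reconstructed from the kvm_mmu.h hunk below (the elided comment body above the asm() is not reproduced), so treat it as an illustrative sketch rather than verbatim file contents:

static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/* Emit an ISB only when the speculative AT workaround is applied. */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}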
+3 −8
@@ -267,9 +267,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+					  struct kvm_arch *arch)
 {
-	write_sysreg(vtcr, vtcr_el2);
+	write_sysreg(arch->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
@@ -280,12 +281,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
-						struct kvm_arch *arch)
-{
-	__load_stage2(mmu, arch->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
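
Taken together with the call-site hunks that follow, the conversion amounts to the following (an illustrative summary; the calls are lifted from the hunks below, where host_kvm and mmu are the respective local contexts):

	/* Before: two helpers with different second arguments. */
	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);		/* host  */
	__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));		/* guest */

	/* After: one entry point taking a struct kvm_arch pointer. */
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);		/* host  */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));			/* guest */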
+1 −1
@@ -29,7 +29,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }
+1 −1
@@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
+1 −1
@@ -215,7 +215,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_state_nvhe(guest_ctxt);
 
 	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-	__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	__activate_traps(vcpu);
 
 	__hyp_vgic_restore_state(vcpu);
+2 −2
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	}
 
 	/*
-	 * __load_guest_stage2() includes an ISB only when the AT
+	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
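
The ISB placement this hunk preserves is split between the helper and its caller; the two complementary ALTERNATIVE sequences (both taken from the hunks in this commit) guarantee exactly one ISB on either configuration:

	/* In __load_stage2(): ISB only when the AT workaround is applied. */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* In __tlb_switch_to_guest(): the opposite condition, so there is
	 * always exactly one ISB and never two back to back. */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));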
