Commit deb151a5 authored by Marc Zyngier

Merge branch kvm-arm64/mmu/vmid-cleanups into kvmarm-master/next

* kvm-arm64/mmu/vmid-cleanups:
  : Cleanup the stage-2 configuration by providing a single helper,
  : and tidy up some of the ordering requirements for the VMID
  : allocator.
  KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE
  KVM: arm64: Unify stage-2 programming behind __load_stage2()
  KVM: arm64: Move kern_hyp_va() usage in __load_guest_stage2() into the callers

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents ca3385a5 cf364e08
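
For context, the ordering tidy-up in this branch boils down to pairing a WRITE_ONCE() in the VMID allocator with a READ_ONCE() in kvm_get_vttbr(), so the compiler can neither tear nor silently re-fetch the VMID while the VTTBR value is being composed. The following is a minimal userspace sketch of that pattern, not kernel code: the READ_ONCE()/WRITE_ONCE() stand-ins, the trimmed-down structs and the hard-coded shift are assumptions made purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

#define VTTBR_VMID_SHIFT	48	/* hard-coded here for the sketch */

struct kvm_vmid {
	uint64_t vmid_gen;
	uint32_t vmid;
};

struct kvm_s2_mmu {
	uint64_t pgd_phys;
	struct kvm_vmid vmid;
};

/* Writer side: roughly what update_vmid() does once a fresh VMID is picked. */
static void publish_vmid(struct kvm_s2_mmu *mmu, uint32_t next)
{
	WRITE_ONCE(mmu->vmid.vmid, next);
}

/* Reader side: the shape of kvm_get_vttbr() after this merge (CnP omitted). */
static uint64_t get_vttbr(struct kvm_s2_mmu *mmu)
{
	uint64_t vmid_field = (uint64_t)READ_ONCE(mmu->vmid.vmid) << VTTBR_VMID_SHIFT;

	return mmu->pgd_phys | vmid_field;
}

int main(void)
{
	struct kvm_s2_mmu mmu = { .pgd_phys = 0x40000000 };

	publish_vmid(&mmu, 5);
	printf("vttbr = 0x%llx\n", (unsigned long long)get_vttbr(&mmu));
	return 0;
}

Note that these accessors only rule out compiler tearing and re-ordering; as the comment added above kvm_get_vttbr() spells out, ordering against page table updates on the TLB invalidation path still relies on a previously issued DSB.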
+9 −8
@@ -252,6 +252,11 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,

 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

+/*
+ * When this is (directly or indirectly) used on the TLB invalidation
+ * path, we rely on a previously issued DSB so that page table updates
+ * and VMID reads are correctly ordered.
+ */
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
 	struct kvm_vmid *vmid = &mmu->vmid;
@@ -259,7 +264,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

 	baddr = mmu->pgd_phys;
-	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }

@@ -267,9 +272,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+					  struct kvm_arch *arch)
 {
-	write_sysreg(vtcr, vtcr_el2);
+	write_sysreg(arch->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

 	/*
@@ -280,11 +286,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }

-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
-{
-	__load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
+1 −1
@@ -573,7 +573,7 @@ static void update_vmid(struct kvm_vmid *vmid)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}

-	vmid->vmid = kvm_next_vmid;
+	WRITE_ONCE(vmid->vmid, kvm_next_vmid);
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

+1 −1
@@ -60,7 +60,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }
+3 −3
@@ -112,8 +112,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
 	mmu->arch = &host_kvm.arch;
 	mmu->pgt = &host_kvm.pgt;
-	mmu->vmid.vmid_gen = 0;
-	mmu->vmid.vmid = 0;
+	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
+	WRITE_ONCE(mmu->vmid.vmid, 0);

 	return 0;
 }
@@ -129,7 +129,7 @@ int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));

 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
+3 −1
@@ -170,6 +170,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
+	struct kvm_s2_mmu *mmu;
 	bool pmu_switch_needed;
 	u64 exit_code;

@@ -213,7 +214,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_state_nvhe(guest_ctxt);

-	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
+	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	__activate_traps(vcpu);

 	__hyp_vgic_restore_state(vcpu);