Commit 61b05a9f authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86: Don't unload MMU in kvm_vcpu_flush_tlb_guest()



kvm_mmu_unload() destroys all the PGD caches.  Use the lighter
kvm_mmu_sync_roots() and kvm_mmu_sync_prev_roots() instead.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211019110154.4091-5-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 264d3dc1
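
For context, a sketch of how kvm_vcpu_flush_tlb_guest() reads once this patch is applied, stitched together from the x86.c hunk below. The !tdp_enabled guard and the ++vcpu->stat.tlb_flush line are outside the visible hunk and are assumptions about the surrounding code, not part of this diff.

	static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
	{
		++vcpu->stat.tlb_flush;

		if (!tdp_enabled) {
			/* Sync the current shadow root in place... */
			kvm_mmu_sync_roots(vcpu);
			/* ...and "sync" the cached previous roots by freeing the unsync ones. */
			kvm_mmu_sync_prev_roots(vcpu);
		}

		/* Flush the hardware TLB on behalf of the guest via the vendor hook. */
		static_call(kvm_x86_tlb_flush_guest)(vcpu);
	}

With TDP the KVM page tables do not shadow the guest's, so only the shadow-paging case needs the sync before the hardware flush.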
arch/x86/kvm/mmu.h  +1 −0
@@ -79,6 +79,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
arch/x86/kvm/mmu/mmu.c  +16 −0
@@ -3647,6 +3647,9 @@ static bool is_unsync_root(hpa_t root)
 {
 	struct kvm_mmu_page *sp;
 
+	if (!VALID_PAGE(root))
+		return false;
+
 	/*
 	 * The read barrier orders the CPU's read of SPTE.W during the page table
 	 * walk before the reads of sp->unsync/sp->unsync_children here.
@@ -3714,6 +3717,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	write_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
+{
+	unsigned long roots_to_free = 0;
+	int i;
+
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
+			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+	/* sync prev_roots by simply freeing them */
+	kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
 				  u32 access, struct x86_exception *exception)
 {
arch/x86/kvm/x86.c  +5 −6
@@ -3248,12 +3248,11 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 		/*
 		 * A TLB flush on behalf of the guest is equivalent to
 		 * INVPCID(all), toggling CR4.PGE, etc., which requires
-		 * a forced sync of the shadow page tables.  Unload the
-		 * entire MMU here and the subsequent load will sync the
-		 * shadow page tables, and also flush the TLB.
+		 * a forced sync of the shadow page tables.  Ensure all the
+		 * roots are synced and the guest TLB in hardware is clean.
		 */
-		kvm_mmu_unload(vcpu);
-		return;
+		kvm_mmu_sync_roots(vcpu);
+		kvm_mmu_sync_prev_roots(vcpu);
 	}
 
 	static_call(kvm_x86_tlb_flush_guest)(vcpu);