Commit 20ec3ebd authored by Chao Peng, committed by Paolo Bonzini

KVM: Rename mmu_notifier_* to mmu_invalidate_*



The motivation for this renaming is to make these variables and related
helper functions less mmu_notifier-bound, so that they can also be used
for page invalidation that is not based on mmu_notifier. mmu_invalidate_*
was chosen because it better describes the purpose these variables serve:
invalidating a page.

  - mmu_notifier_seq/range_start/range_end are renamed to
    mmu_invalidate_seq/range_start/range_end.

  - mmu_notifier_retry{_hva} helper functions are renamed to
    mmu_invalidate_retry{_hva}.

  - mmu_notifier_count is renamed to mmu_invalidate_in_progress to
    avoid confusion with mn_active_invalidate_count.

  - While here, also update kvm_inc/dec_notifier_count() to
    kvm_mmu_invalidate_begin/end() to match the change for
    mmu_notifier_count.

No functional change intended.

Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com>
Message-Id: <20220816125322.1110439-3-chao.p.peng@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent bdd1c37a
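
For context, the protocol behind the renamed fields is a sequence count plus
an in-progress counter. The following is a minimal user-space sketch of that
protocol, not kernel code: struct kvm_model and the *_model helpers are
hypothetical stand-ins, and C11 atomics stand in for mmu_lock, smp_rmb() and
smp_wmb().

/*
 * Minimal user-space model of the mmu_invalidate_* protocol; not kernel
 * code.  C11 atomics stand in for mmu_lock and the kernel's barriers.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct kvm_model {
	atomic_ulong mmu_invalidate_seq;         /* bumped when an invalidation finishes */
	atomic_long  mmu_invalidate_in_progress; /* > 0 while a range is being zapped */
};

/* Snapshot the sequence count before resolving a pfn ... */
static unsigned long snapshot_seq(struct kvm_model *kvm)
{
	return atomic_load(&kvm->mmu_invalidate_seq);
}

/* ... and retry if an invalidation ran, or is still running, since then. */
static bool mmu_invalidate_retry_model(struct kvm_model *kvm, unsigned long mmu_seq)
{
	if (atomic_load(&kvm->mmu_invalidate_in_progress))
		return true;
	return atomic_load(&kvm->mmu_invalidate_seq) != mmu_seq;
}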
+4 −4
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
-		 * We are sure this doesn't happen, because mmu_notifier_retry
+		 * We are sure this doesn't happen, because mmu_invalidate_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			return ret;
	}

-	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	/*
-	 * Ensure the read of mmu_notifier_seq happens before we call
+	 * Ensure the read of mmu_invalidate_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	else
		write_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	/*
+6 −6
@@ -615,17 +615,17 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by pfn_to_pfn_prot below.
	 */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
	/*
-	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
-	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+	 * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have a
-	 * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
-	 * mmu_notifier_seq is incremented.
+	 * mmu_invalidate_seq is incremented.
	 */
	smp_rmb();

@@ -638,7 +638,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
-	if (mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/*
		 * This can happen when mappings are changed asynchronously, but
		 * also synchronously if a COW is triggered by
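
The smp_rmb() in the hunk above pairs with the writer side of the protocol.
Below is a sketch of that side, mirroring the shape of the renamed
kvm_mmu_invalidate_begin/end() helpers; it reuses struct kvm_model from the
earlier sketch and is a simplification, not the actual kernel implementation.

static void kvm_mmu_invalidate_begin_model(struct kvm_model *kvm)
{
	/* Readers that sample in_progress > 0 will retry. */
	atomic_fetch_add(&kvm->mmu_invalidate_in_progress, 1);
}

static void kvm_mmu_invalidate_end_model(struct kvm_model *kvm)
{
	/*
	 * Bump the sequence before dropping in_progress, so a reader that
	 * samples in_progress == 0 after we finish still sees a changed
	 * sequence count and retries.
	 */
	atomic_fetch_add(&kvm->mmu_invalidate_seq, 1);
	atomic_fetch_sub(&kvm->mmu_invalidate_in_progress, 1);
}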
+1 −1
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);

-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
		return NULL;

	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
+2 −2
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
	unsigned long pfn;

	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
-	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+	if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}
+2 −2
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
		return -EFAULT;

	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}
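
All of the call sites touched by this commit share the same reader-side
shape, sketched here with the model helpers from the earlier snippets
(hypothetical names; the comments note where the kernel takes real locks
and barriers):

static bool map_one_page_model(struct kvm_model *kvm)
{
	unsigned long mmu_seq = snapshot_seq(kvm);
	/* kernel: smp_rmb() here, before any PTE/pfn reads */

	/* ... resolve the pfn, possibly sleeping (e.g. gfn_to_pfn_prot) ... */

	/* kernel: spin_lock(&kvm->mmu_lock) here */
	if (mmu_invalidate_retry_model(kvm, mmu_seq))
		return false;	/* an invalidation raced with us; caller retries */

	/* ... safe to install the mapping while the lock is held ... */
	return true;
}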