Commit 706c9c55 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Don't treat fully writable SPTEs as volatile (modulo A/D)

Don't treat SPTEs that are truly writable, i.e. writable in hardware, as
being volatile (unless they're volatile for other reasons, e.g. A/D bits).
KVM _sets_ the WRITABLE bit out of mmu_lock, but never _clears_ the bit
out of mmu_lock, so if the WRITABLE bit is set, it cannot magically get
cleared just because the SPTE is MMU-writable.
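
To make the rule concrete, here is a minimal sketch of the volatility reasoning; the mask values and helper name below are hypothetical stand-ins chosen for illustration, not KVM's actual SPTE bit layout:

/*
 * Sketch of the rule described above (hypothetical bit positions).
 * Lockless page faults may _set_ the hardware-writable bit on an
 * MMU-writable SPTE, but only mmu_lock holders ever _clear_ it, so
 * the bit is volatile only while it is still clear: a racing fast
 * fault can flip it 0 -> 1, never 1 -> 0.
 */
#include <stdbool.h>
#include <stdint.h>

#define SPTE_HW_WRITABLE   (1ull << 1)   /* hypothetical bit position */
#define SPTE_MMU_WRITABLE  (1ull << 57)  /* hypothetical bit position */

static bool writable_bit_is_volatile(uint64_t spte)
{
	/* Set and stable: nothing outside mmu_lock will clear it. */
	if (spte & SPTE_HW_WRITABLE)
		return false;

	/* Clear but MMU-writable: a concurrent fast page fault may
	 * set the hardware-writable bit at any time. */
	return spte & SPTE_MMU_WRITABLE;
}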

Rename the wrapper of MMU-writable to be more literal; the previous name,
spte_can_locklessly_be_made_writable(), is wrong and misleading.

Fixes: c7ba5b48 ("KVM: MMU: fast path of handling guest page fault")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 44187235
arch/x86/kvm/mmu/mmu.c: +9 −8
@@ -484,13 +484,15 @@ static bool spte_has_volatile_bits(u64 spte)
 	 * also, it can help us to get a stable is_writable_pte()
 	 * to ensure tlb flush is not missed.
 	 */
-	if (spte_can_locklessly_be_made_writable(spte) ||
-	    is_access_track_spte(spte))
+	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
+		return true;
+
+	if (is_access_track_spte(spte))
 		return true;
 
 	if (spte_ad_enabled(spte)) {
-		if ((spte & shadow_accessed_mask) == 0 ||
-		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
+		if (!(spte & shadow_accessed_mask) ||
+		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
 			return true;
 	}

@@ -557,7 +559,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
-	if (spte_can_locklessly_be_made_writable(old_spte) &&
+	if (is_mmu_writable_spte(old_spte) &&
 	      !is_writable_pte(new_spte))
 		flush = true;

@@ -1187,7 +1189,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	u64 spte = *sptep;
 
 	if (!is_writable_pte(spte) &&
-	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
+	    !(pt_protect && is_mmu_writable_spte(spte)))
 		return false;
 
 	rmap_printk("spte %p %llx\n", sptep, *sptep);
@@ -3196,8 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * be removed in the fast path only if the SPTE was
 		 * write-protected for dirty-logging or access tracking.
 		 */
-		if (fault->write &&
-		    spte_can_locklessly_be_made_writable(spte)) {
+		if (fault->write && is_mmu_writable_spte(spte)) {
 			new_spte |= PT_WRITABLE_MASK;
 
 			/*
arch/x86/kvm/mmu/spte.h: +1 −1
@@ -390,7 +390,7 @@ static inline void check_spte_writable_invariants(u64 spte)
 			  "kvm: Writable SPTE is not MMU-writable: %llx", spte);
 }
 
-static inline bool spte_can_locklessly_be_made_writable(u64 spte)
+static inline bool is_mmu_writable_spte(u64 spte)
 {
 	return spte & shadow_mmu_writable_mask;
 }