Commit 41e07665 authored by Vipin Sharma, committed by Sean Christopherson

KVM: x86/mmu: Add a helper function to check if an SPTE needs atomic write



Move the conditions in kvm_tdp_mmu_write_spte() that check whether an
SPTE should be written atomically into a separate function.

This new function, kvm_tdp_mmu_spte_need_atomic_write(), will be used
in future commits to optimize clearing bits in SPTEs.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Link: https://lore.kernel.org/r/20230321220021.2119033-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 50f13998
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -29,23 +29,29 @@ static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
 	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
 }
 
-static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
-					 u64 new_spte, int level)
-{
-	/*
-	 * Atomically write the SPTE if it is a shadow-present, leaf SPTE with
-	 * volatile bits, i.e. has bits that can be set outside of mmu_lock.
-	 * The Writable bit can be set by KVM's fast page fault handler, and
-	 * Accessed and Dirty bits can be set by the CPU.
-	 *
-	 * Note, non-leaf SPTEs do have Accessed bits and those bits are
-	 * technically volatile, but KVM doesn't consume the Accessed bit of
-	 * non-leaf SPTEs, i.e. KVM treats them as non-volatile, and so the
-	 * logic needs to be reassessed if KVM were to use non-leaf Accessed
-	 * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
-	 */
-	if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
-	    spte_has_volatile_bits(old_spte))
+/*
+ * SPTEs must be modified atomically if they are shadow-present, leaf
+ * SPTEs, and have volatile bits, i.e. has bits that can be set outside
+ * of mmu_lock.  The Writable bit can be set by KVM's fast page fault
+ * handler, and Accessed and Dirty bits can be set by the CPU.
+ *
+ * Note, non-leaf SPTEs do have Accessed bits and those bits are
+ * technically volatile, but KVM doesn't consume the Accessed bit of
+ * non-leaf SPTEs, i.e. KVM treats them as non-volatile, and so the
+ * logic needs to be reassessed if KVM were to use non-leaf Accessed
+ * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
+ */
+static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
+{
+	return is_shadow_present_pte(old_spte) &&
+	       is_last_spte(old_spte, level) &&
+	       spte_has_volatile_bits(old_spte);
+}
+
+static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
+					 u64 new_spte, int level)
+{
+	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
 		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
 
 	__kvm_tdp_mmu_write_spte(sptep, new_spte);
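
For context on the bit-clearing optimization the commit message alludes to: a
follow-up user of the new helper would decide between an atomic
read-modify-write and a plain write based on the same predicate. Below is a
minimal sketch of such a user; tdp_mmu_clear_spte_bits() is an illustrative
name, not code from this commit:

/*
 * Illustrative sketch (not part of this commit): clear the bits in @mask
 * and return the old SPTE value.  Use an atomic fetch-and only when the
 * SPTE can be modified outside of mmu_lock; otherwise a plain write is
 * sufficient.
 */
static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
					  u64 mask, int level)
{
	atomic64_t *sptep_atomic;

	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
	}

	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
	return old_spte & ~mask;
}

An atomic fetch-and both clears the masked bits and returns the prior SPTE
value in a single step, so Writable/Accessed/Dirty updates made outside of
mmu_lock cannot be lost between a read and a write; the plain-write path is
safe precisely because kvm_tdp_mmu_spte_need_atomic_write() has ruled out
concurrent setters.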