arch/x86/kvm/mmu/mmu.c +4 −4

```diff
@@ -2683,7 +2683,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	int was_rmapped = 0;
 	int ret = RET_PF_FIXED;
 	bool flush = false;
-	int make_spte_ret;
+	bool wrprot;
 	u64 spte;
 
 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
@@ -2715,7 +2715,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		was_rmapped = 1;
 	}
 
-	make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-				  true, host_writable, sp_ad_disabled(sp), &spte);
+	wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+			   true, host_writable, sp_ad_disabled(sp), &spte);
 
 	if (*sptep == spte) {
@@ -2725,7 +2725,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		flush |= mmu_spte_update(sptep, spte);
 	}
 
-	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
+	if (wrprot) {
 		if (write_fault)
 			ret = RET_PF_EMULATE;
 	}
```
arch/x86/kvm/mmu/mmu_internal.h +0 −5

```diff
@@ -150,11 +150,6 @@ enum {
 	RET_PF_SPURIOUS,
 };
 
-/* Bits which may be returned by set_spte() */
-#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
-#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
-#define SET_SPTE_SPURIOUS		BIT(2)
-
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn,
 			      kvm_pfn_t pfn, int max_level);
```
arch/x86/kvm/mmu/spte.c +4 −4

```diff
@@ -89,13 +89,13 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 			     E820_TYPE_RAM);
 }
 
-int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 	       gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
 	       bool can_unsync, bool host_writable, bool ad_disabled,
 	       u64 *new_spte)
 {
 	u64 spte = SPTE_MMU_PRESENT_MASK;
-	int ret = 0;
+	bool wrprot = false;
 
 	if (ad_disabled)
 		spte |= SPTE_TDP_AD_DISABLED_MASK;
@@ -162,7 +162,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
 				 __func__, gfn);
-			ret |= SET_SPTE_WRITE_PROTECTED_PT;
+			wrprot = true;
 			pte_access &= ~ACC_WRITE_MASK;
 			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
 		}
@@ -183,7 +183,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
 
 	*new_spte = spte;
-	return ret;
+	return wrprot;
 }
 
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
```
arch/x86/kvm/mmu/spte.h +1 −6

```diff
@@ -334,12 +334,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 	return gen;
 }
 
-/* Bits which may be returned by set_spte() */
-#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
-#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
-#define SET_SPTE_SPURIOUS		BIT(2)
-
-int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 	       gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
 	       bool can_unsync, bool host_writable, bool ad_disabled,
 	       u64 *new_spte);
```
arch/x86/kvm/mmu/tdp_mmu.c +3 −3

```diff
@@ -898,12 +898,12 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 {
 	u64 new_spte;
 	int ret = RET_PF_FIXED;
-	int make_spte_ret = 0;
+	bool wrprot = false;
 
 	if (unlikely(!fault->slot))
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
-		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
+		wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
 					 fault->pfn, iter->old_spte, fault->prefault, true,
 					 fault->map_writable, !shadow_accessed_mask,
 					 &new_spte);
@@ -918,7 +918,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	 * protected, emulation is needed. If the emulation was skipped,
 	 * the vCPU would have the same fault again.
 	 */
-	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
+	if (wrprot) {
 		if (fault->write)
 			ret = RET_PF_EMULATE;
 	}
```
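Taken together, the hunks above change make_spte() from returning an int bitmask of SET_SPTE_* flags (of which only SET_SPTE_WRITE_PROTECTED_PT still had a consumer) to returning a single bool that says whether the gfn had to be write-protected, while the computed SPTE is still handed back through the new_spte out-parameter; both callers then turn a write fault against a write-protected gfn into RET_PF_EMULATE. The snippet below is a minimal standalone sketch of that calling convention, not kernel code: the demo_* names, the bit positions, and the return-code values are hypothetical stand-ins for illustration only.

```c
/*
 * Standalone sketch of the "bool wrprot" contract this diff moves to.
 * Everything prefixed demo_ is hypothetical; only the shape of the API
 * (bool return + out-parameter for the new SPTE) mirrors the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RET_PF_FIXED	0
#define DEMO_RET_PF_EMULATE	1

/*
 * Stand-in for make_spte(): builds an SPTE value, reports via the return
 * value whether the gfn had to be write-protected (which forces emulation
 * on write faults), and stores the SPTE through the out-parameter.
 */
static bool demo_make_spte(uint64_t old_spte, bool must_write_protect,
			   uint64_t *new_spte)
{
	bool wrprot = false;
	uint64_t spte = old_spte | 1;	/* pretend "present" bit */

	if (must_write_protect) {
		wrprot = true;
		spte &= ~(uint64_t)2;	/* pretend "writable" bit */
	}

	*new_spte = spte;
	return wrprot;
}

int main(void)
{
	uint64_t spte;
	int ret = DEMO_RET_PF_FIXED;
	bool write_fault = true;

	/*
	 * Caller-side pattern from mmu_set_spte() and
	 * tdp_mmu_map_handle_target_level(): a plain bool check replaces
	 * the old "make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT" mask test.
	 */
	bool wrprot = demo_make_spte(0, true, &spte);

	if (wrprot && write_fault)
		ret = DEMO_RET_PF_EMULATE;

	printf("spte=%#llx ret=%d\n", (unsigned long long)spte, ret);
	return 0;
}
```

The design point the diff makes is that once only one flag bit survives, a named bool reads more clearly at the call sites than a mask test, and the unused SET_SPTE_NEED_REMOTE_TLB_FLUSH / SET_SPTE_SPURIOUS definitions can be dropped from both headers.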