arch/x86/kvm/mmu/mmu.c  +1 −2

@@ -3988,8 +3988,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		goto out_unlock;
 
 	if (is_tdp_mmu_fault)
-		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable,
-				    fault->max_level, fault->pfn, fault->prefault);
+		r = kvm_tdp_mmu_map(vcpu, fault);
 	else
 		r = __direct_map(vcpu, fault);
 
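Note: direct_page_fault() no longer unpacks the fault into scalar arguments; the whole struct kvm_page_fault travels down to kvm_tdp_mmu_map(). For context, a rough sketch of how the caller plausibly builds that struct earlier in this series (in kvm_mmu_do_page_fault(); the initializer below is an assumption for illustration, not a quote of the real code):

	/* Hypothetical sketch: packaging the fault once, at entry. */
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,			/* faulting GPA/GVA */
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,		/* derived once, reused below */
		.write = err & PFERR_WRITE_MASK,
		.prefault = prefault,
	};

Deriving the PFERR_* booleans at one spot is what lets this patch delete the local write/exec computations in kvm_tdp_mmu_map() below.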
arch/x86/kvm/mmu/tdp_mmu.c  +9 −14

@@ -985,35 +985,30 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
  * page tables and SPTEs to translate the faulting guest physical address.
  */
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-		    int map_writable, int max_level, kvm_pfn_t pfn,
-		    bool prefault)
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
-	bool write = error_code & PFERR_WRITE_MASK;
-	bool exec = error_code & PFERR_FETCH_MASK;
-	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
+	bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	struct tdp_iter iter;
 	struct kvm_mmu_page *sp;
 	u64 *child_pt;
 	u64 new_spte;
 	int ret;
-	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int level;
 	int req_level;
 
-	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
+	level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn,
 					huge_page_disallowed, &req_level);
 
-	trace_kvm_mmu_spte_requested(gpa, level, pfn);
+	trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn);
 
 	rcu_read_lock();
 
-	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
 		if (nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(iter.old_spte, gfn,
-						   iter.level, &pfn, &level);
+			disallowed_hugepage_adjust(iter.old_spte, fault->gfn,
						   iter.level, &fault->pfn, &level);
 
 		if (iter.level == level)
 			break;

@@ -1069,8 +1064,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 		return RET_PF_RETRY;
 	}
 
-	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
-					      pfn, prefault);
+	ret = tdp_mmu_map_handle_target_level(vcpu, fault->write, fault->map_writable,
+					      &iter, fault->pfn, fault->prefault);
 	rcu_read_unlock();
 
 	return ret;
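For reference, a minimal sketch of the kvm_page_fault fields kvm_tdp_mmu_map() now reads. The real definition lives in mmu_internal.h and carries more state (present, rsvd, user, hva, ...); the names below mirror the accesses in this hunk, and the const qualifiers are an assumption:

	/* Sketch only: fields of struct kvm_page_fault used by this function. */
	struct kvm_page_fault {
		const gpa_t addr;	/* replaces the old 'gpa' parameter */
		const u32 error_code;
		const bool prefault;
		const bool exec;	/* error_code & PFERR_FETCH_MASK */
		const bool write;	/* error_code & PFERR_WRITE_MASK */
		int max_level;		/* input to kvm_mmu_hugepage_adjust() */
		gfn_t gfn;		/* replaces the local 'gfn = gpa >> PAGE_SHIFT' */
		kvm_pfn_t pfn;		/* updated via &fault->pfn on huge-page adjust */
		bool map_writable;
	};

Note that kvm_mmu_hugepage_adjust() and disallowed_hugepage_adjust() take &fault->pfn, so pfn (unlike the input-only fields) must remain mutable.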
arch/x86/kvm/mmu/tdp_mmu.h  +1 −3

@@ -48,9 +48,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
 
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-		    int map_writable, int max_level, kvm_pfn_t pfn,
-		    bool prefault);
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 				 bool flush);
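Since the header only names struct kvm_page_fault through a pointer, the declaration compiles without the struct's full definition. Whether tdp_mmu.h relies on an incomplete type or on including mmu_internal.h (where the struct is defined) is an assumption here, but the minimal form is plain C:

	struct kvm_page_fault;	/* incomplete type: only ever passed by pointer here */

	int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

This keeps the TDP MMU header decoupled from the fault structure's layout, so later patches can grow the struct without touching this declaration.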