Commit 43b74355 authored by Paolo Bonzini

KVM: MMU: change __direct_map() arguments to kvm_page_fault

Pass struct kvm_page_fault to __direct_map() instead of
extracting the arguments from the struct.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3a13f4fe
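
For reference, a minimal sketch of the fault fields that __direct_map() now consumes, inferred from the hunks below rather than from the struct's actual definition (the real struct kvm_page_fault, defined earlier in this series, carries more fields, e.g. the raw error code):

	struct kvm_page_fault {
		const gpa_t addr;	/* faulting guest address */
		const bool prefault;
		const bool exec;	/* derived from the error code */
		const bool write;	/* derived from the error code */
		const bool is_tdp;	/* fault on a TDP (EPT/NPT) MMU */
		int max_level;		/* largest allowed mapping level */
		gfn_t gfn;		/* addr >> PAGE_SHIFT for direct MMUs */
		kvm_pfn_t pfn;		/* host pfn backing gfn */
		bool map_writable;	/* host mapping allows write */
	};

Beyond shortening the argument list, passing the struct lets callees update fields in place: kvm_mmu_hugepage_adjust() and disallowed_hugepage_adjust() now write through &fault->pfn, where the old calling convention only updated a local copy inside __direct_map().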
+13 −19
@@ -2982,34 +2982,29 @@ void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
 	}
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-			int map_writable, int max_level, kvm_pfn_t pfn,
-			bool prefault, bool is_tdp)
+static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
-	bool write = error_code & PFERR_WRITE_MASK;
-	bool exec = error_code & PFERR_FETCH_MASK;
-	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
+	bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled;
 	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
 	int level, req_level, ret;
-	gfn_t gfn = gpa >> PAGE_SHIFT;
-	gfn_t base_gfn = gfn;
+	gfn_t base_gfn = fault->gfn;
 
-	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
+	level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn,
 					huge_page_disallowed, &req_level);
 
-	trace_kvm_mmu_spte_requested(gpa, level, pfn);
-	for_each_shadow_entry(vcpu, gpa, it) {
+	trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn);
+	for_each_shadow_entry(vcpu, fault->addr, it) {
 		/*
 		 * We cannot overwrite existing page tables with an NX
 		 * large page, as the leaf could be executable.
 		 */
 		if (nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
-						   &pfn, &level);
+			disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
+						   &fault->pfn, &level);
 
-		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 		if (it.level == level)
 			break;
 
@@ -3021,14 +3016,14 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 				      it.level - 1, true, ACC_ALL);
 
 		link_shadow_page(vcpu, it.sptep, sp);
-		if (is_tdp && huge_page_disallowed &&
+		if (fault->is_tdp && huge_page_disallowed &&
 		    req_level >= it.level)
 			account_huge_nx_page(vcpu->kvm, sp);
 	}
 
 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
-			   write, level, base_gfn, pfn, prefault,
-			   map_writable);
+			   fault->write, level, base_gfn, fault->pfn,
+			   fault->prefault, fault->map_writable);
 	if (ret == RET_PF_SPURIOUS)
 		return ret;
 
@@ -3996,8 +3991,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level,
 				    fault->pfn, fault->prefault);
 	else
-		r = __direct_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level,
-				 fault->pfn, fault->prefault, fault->is_tdp);
+		r = __direct_map(vcpu, fault);
 
 out_unlock:
 	if (is_tdp_mmu_fault)