Commit 9c03b182 authored by Paolo Bonzini

KVM: MMU: change FNAME(fetch)() arguments to kvm_page_fault



Pass struct kvm_page_fault to FNAME(fetch)() instead of
extracting the arguments from the struct.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 43b74355
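
[Editor's note] The shape of this change is the classic parameter-object pattern: the page-fault error code is decoded once when the kvm_page_fault struct is built, and the fetch path then receives a single pointer instead of six scalar arguments. Below is a minimal, self-contained C sketch of that pattern. The field names mirror the ones this diff touches (addr, gfn, pfn, error_code, max_level, write, exec, map_writable, prefault); the types, mask values, and helper function are simplified stand-ins, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative mask values; the kernel's PFERR_* masks differ. */
    #define PFERR_WRITE_MASK (1u << 1)
    #define PFERR_FETCH_MASK (1u << 4)

    /*
     * Simplified stand-in for the kernel's struct kvm_page_fault.
     * Only the fields that this diff reads are modeled.
     */
    struct kvm_page_fault {
        uint64_t addr;       /* faulting guest address */
        uint64_t gfn;        /* guest frame number, set after the guest walk */
        uint64_t pfn;        /* backing host page frame number */
        uint32_t error_code;
        int max_level;
        bool write;          /* decoded once from PFERR_WRITE_MASK */
        bool exec;           /* decoded once from PFERR_FETCH_MASK */
        bool map_writable;
        bool prefault;
    };

    /* Decode the error code a single time, when the fault is constructed. */
    static struct kvm_page_fault make_fault(uint64_t addr, uint32_t error_code)
    {
        struct kvm_page_fault fault = {
            .addr = addr,
            .error_code = error_code,
            .write = error_code & PFERR_WRITE_MASK,
            .exec = error_code & PFERR_FETCH_MASK,
            .max_level = 3,
        };
        return fault;
    }

    /*
     * After the refactoring, the fetch path takes one pointer and never
     * re-derives write/exec from the raw error code.
     */
    static int fetch(struct kvm_page_fault *fault)
    {
        /* Return 1 to ask the caller to emulate a write-protected write. */
        return fault->write && !fault->map_writable;
    }

    int main(void)
    {
        struct kvm_page_fault fault = make_fault(0xdead000, PFERR_WRITE_MASK);

        printf("emulate=%d\n", fetch(&fault));
        return 0;
    }

This is also why the first hunk below can delete the local write_fault and exec booleans in FNAME(fetch): earlier patches in this series already decode PFERR_WRITE_MASK and PFERR_FETCH_MASK into fault->write and fault->exec when the struct is constructed.
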
arch/x86/kvm/mmu/paging_tmpl.h +22 −30
@@ -655,21 +655,18 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
-static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
-			 struct guest_walker *gw, u32 error_code,
-			 int max_level, kvm_pfn_t pfn, bool map_writable,
-			 bool prefault)
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			 struct guest_walker *gw)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
-	bool write_fault = error_code & PFERR_WRITE_MASK;
-	bool exec = error_code & PFERR_FETCH_MASK;
-	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
+	bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned int direct_access, access;
	int top_level, level, req_level, ret;
-	gfn_t base_gfn = gw->gfn;
+	gfn_t base_gfn = fault->gfn;

+	WARN_ON_ONCE(gw->gfn != base_gfn);
	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
@@ -687,7 +684,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		goto out_gpte_changed;

-	for (shadow_walk_init(&it, vcpu, addr);
+	for (shadow_walk_init(&it, vcpu, fault->addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;
@@ -699,7 +696,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			access = gw->pt_access[it.level - 2];
-			sp = kvm_mmu_get_page(vcpu, table_gfn, addr,
+			sp = kvm_mmu_get_page(vcpu, table_gfn, fault->addr,
					      it.level-1, false, access);
			/*
			 * We must synchronize the pagetable before linking it
@@ -733,10 +730,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			link_shadow_page(vcpu, it.sptep, sp);
	}

-	level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
+	level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, fault->max_level, &fault->pfn,
					huge_page_disallowed, &req_level);

-	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+	trace_kvm_mmu_spte_requested(fault->addr, gw->level, fault->pfn);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);
@@ -746,10 +743,10 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
-						   &pfn, &level);
+			disallowed_hugepage_adjust(*it.sptep, fault->gfn, it.level,
+						   &fault->pfn, &level);

-		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

@@ -758,7 +755,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
-			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
+			sp = kvm_mmu_get_page(vcpu, base_gfn, fault->addr,
					      it.level - 1, true, direct_access);
			link_shadow_page(vcpu, it.sptep, sp);
			if (huge_page_disallowed && req_level >= it.level)
@@ -766,8 +763,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
		}
	}

-	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
-			   it.level, base_gfn, pfn, prefault, map_writable);
+	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, fault->write,
+			   it.level, base_gfn, fault->pfn, fault->prefault,
+			   fault->map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

@@ -835,26 +833,21 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
-	gpa_t addr = fault->addr;
-	u32 error_code = fault->error_code;
	struct guest_walker walker;
	int r;
	unsigned long mmu_seq;
	bool is_self_change_mapping;

-	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
+	pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
	WARN_ON_ONCE(fault->is_tdp);

	/*
+	 * Look up the guest pte for the faulting address.
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
-	error_code &= ~PFERR_RSVD_MASK;
-
-	/*
-	 * Look up the guest pte for the faulting address.
-	 */
-	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
+	r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
+			     fault->error_code & ~PFERR_RSVD_MASK);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
@@ -869,7 +862,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault

	fault->gfn = walker.gfn;
	if (page_fault_handle_page_track(vcpu, fault)) {
-		shadow_page_table_clear_flood(vcpu, addr);
+		shadow_page_table_clear_flood(vcpu, fault->addr);
		return RET_PF_EMULATE;
	}

@@ -924,8 +917,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;
-	r = FNAME(fetch)(vcpu, addr, &walker, error_code, fault->max_level, fault->pfn,
-			 fault->map_writable, fault->prefault);
+	r = FNAME(fetch)(vcpu, fault, &walker);
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock: