Commit 887c08ac authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: introduce hva_to_pfn_atomic function

Introduce hva_to_pfn_atomic(); it is the fast path and can be used in
atomic context. A later patch will use it.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 45888a0c
include/linux/kvm_host.h +7 −0
@@ -296,6 +296,7 @@ void kvm_release_page_dirty(struct page *page);
 void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
+pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
@@ -518,6 +519,12 @@ static inline void kvm_guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
+static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
+					       gfn_t gfn)
+{
+	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
+
 static inline gpa_t gfn_to_gpa(gfn_t gfn)
 {
 	return (gpa_t)gfn << PAGE_SHIFT;
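
Moving gfn_to_hva_memslot() into kvm_host.h makes the gfn-to-hva
translation available outside kvm_main.c, so atomic-context callers can
resolve an hva themselves before handing it to hva_to_pfn_atomic(). The
standalone snippet below only illustrates the arithmetic the inline
performs; the struct name and slot values are invented for the example.

/* Standalone illustration of the gfn -> hva arithmetic above.
 * The slot values are made up, not taken from a real VM. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct example_memslot {
	unsigned long base_gfn;        /* first guest frame in the slot */
	unsigned long userspace_addr;  /* hva that backs base_gfn */
};

int main(void)
{
	struct example_memslot slot = {
		.base_gfn       = 0x100,
		.userspace_addr = 0x7f0000000000UL,
	};
	unsigned long gfn = 0x105;

	/* Same formula as gfn_to_hva_memslot(): page offset within the
	 * slot, scaled to bytes, added to the start of the mapping. */
	unsigned long hva = slot.userspace_addr +
			    (gfn - slot.base_gfn) * PAGE_SIZE;

	printf("gfn 0x%lx -> hva 0x%lx\n", gfn, hva);
	/* prints: gfn 0x105 -> hva 0x7f0000005000 */
	return 0;
}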
virt/kvm/kvm_main.c +19 −11
@@ -927,11 +927,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
-}
-
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
@@ -943,19 +938,25 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
 {
 	struct page *page[1];
 	int npages;
 	pfn_t pfn;
 
-	might_sleep();
-
-	npages = get_user_pages_fast(addr, 1, 1, page);
+	if (atomic)
+		npages = __get_user_pages_fast(addr, 1, 1, page);
+	else {
+		might_sleep();
+
+		npages = get_user_pages_fast(addr, 1, 1, page);
+	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
+		if (atomic)
+			goto return_fault_page;
+
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
 			up_read(&current->mm->mmap_sem);
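
The split above relies on the differing guarantees of the two pinning
helpers; the prototypes below are the ones from linux/mm.h that the
code calls, quoted here only as a reminder of the contract.

/* int get_user_pages_fast(unsigned long start, int nr_pages,
 *                         int write, struct page **pages);
 * int __get_user_pages_fast(unsigned long start, int nr_pages,
 *                           int write, struct page **pages);
 *
 * Both return the number of pages pinned. The __ variant walks the
 * page tables locklessly and never sleeps, so it can return 0 where
 * the plain variant would fault the page in (possibly sleeping) --
 * which is why the atomic branch bails out to return_fault_page
 * instead of taking mmap_sem. */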
@@ -968,6 +969,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
 			up_read(&current->mm->mmap_sem);
+return_fault_page:
 			get_page(fault_page);
 			return page_to_pfn(fault_page);
 		}
@@ -981,6 +983,12 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 	return pfn;
 }
 
+pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
+{
+	return hva_to_pfn(kvm, addr, true);
+}
+EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
+
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned long addr;
@@ -991,7 +999,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, false);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);

@@ -999,7 +1007,7 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr);
+	return hva_to_pfn(kvm, addr, false);
 }
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
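
The changelog leaves the future caller unnamed, so the function below
is a hypothetical sketch of the intended pattern rather than code from
this series: pin the pfn without sleeping while a spinlock such as
kvm->mmu_lock is held, and retry through the sleeping gfn_to_pfn() path
once the lock is dropped. It assumes is_error_pfn() recognizes the
fault_page pfn that hva_to_pfn_atomic() returns on failure.

/* Hypothetical caller -- not part of this commit. */
static pfn_t example_pin_pfn(struct kvm *kvm,
			     struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	pfn_t pfn;

	spin_lock(&kvm->mmu_lock);
	/* Safe under a spinlock: hva_to_pfn_atomic() only tries
	 * __get_user_pages_fast() and never sleeps. */
	pfn = hva_to_pfn_atomic(kvm, addr);
	spin_unlock(&kvm->mmu_lock);

	if (is_error_pfn(pfn)) {
		/* Drop the reference hva_to_pfn() took on fault_page,
		 * then retry via the path that is allowed to sleep,
		 * now that the lock is dropped. */
		kvm_release_pfn_clean(pfn);
		pfn = gfn_to_pfn(kvm, gfn);
	}

	return pfn;
}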