Commit 8a6e85f7 authored by Yonghua Huang, committed by Greg Kroah-Hartman
Browse files

virt: acrn: obtain pa from VMA with PFNMAP flag



 acrn_vm_ram_map() cannot pin user pages in a VMA carrying the VM_PFNMAP
 flag via get_user_pages_fast(), because the physical pages (PA) may have
 been mapped by a kernel driver, which sets the PFNMAP flag on the VMA.

 This patch fixes the logic for setting up the EPT mapping of a PFN-mapped
 RAM region by checking the VMA's memory attributes before adding the EPT
 mapping for it.

Fixes: 88f537d5 ("virt: acrn: Introduce EPT mapping management")
Signed-off-by: Yonghua Huang <yonghua.huang@intel.com>
Signed-off-by: Fei Li <fei1.li@intel.com>
Link: https://lore.kernel.org/r/20220228022212.419406-1-yonghua.huang@intel.com


Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fbeac3df
Loading
Loading
Loading
Loading
+24 −0
Original line number Diff line number Diff line
@@ -162,10 +162,34 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	unsigned long pfn;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		ret = follow_pfn(vma, memmap->vma_base, &pfn);
		mmap_read_unlock(current->mm);
		if (ret < 0) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
			 PFN_PHYS(pfn), memmap->len,
			 ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

	/* Get the page number of the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	pages = vzalloc(nr_pages * sizeof(struct page *));