Commit 07956b62 authored by Alex Williamson
Browse files

vfio/type1: Use follow_pte()



follow_pfn() doesn't make sure that we're using the correct page
protections, get the pte with follow_pte() so that we can test
protections and get the pfn from the pte.

Fixes: 5cbf3264 ("vfio/type1: Fix VA->PA translation for PFNMAP VMAs in vaddr_get_pfn()")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 72d6e487
Loading
Loading
Loading
Loading
+13 −2
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -462,9 +463,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
			    unsigned long vaddr, unsigned long *pfn,
			    bool write_fault)
{
	pte_t *ptep;
	spinlock_t *ptl;
	int ret;

	ret = follow_pfn(vma, vaddr, pfn);
	ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
	if (ret) {
		bool unlocked = false;

@@ -478,9 +481,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
		if (ret)
			return ret;

		ret = follow_pfn(vma, vaddr, pfn);
		ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
		if (ret)
			return ret;
	}

	if (write_fault && !pte_write(*ptep))
		ret = -EFAULT;
	else
		*pfn = pte_pfn(*ptep);

	pte_unmap_unlock(ptep, ptl);
	return ret;
}