Unverified Commit bcc22f76 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!8212 fix CVE-2024-35877

Merge Pull Request from: @ci-robot 
 
PR sync from: Wupeng Ma <mawupeng1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/DZHPHLFQPKLDAFECMRJU7VXPRNDRXBS5/ 
From: Ma Wupeng <mawupeng1@huawei.com>

backport "x86/mm/pat: fix VM_PAT handling in COW mappings" to fix
CVE-2024-35877

David Hildenbrand (1):
  x86/mm/pat: fix VM_PAT handling in COW mappings


-- 
2.25.1
 
https://gitee.com/src-openeuler/kernel/issues/I9QG8B 
 
Link: https://gitee.com/openeuler/kernel/pulls/8212

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents df5858b1 4dbd7ad1
Loading
Loading
Loading
Loading
+36 −14
Original line number Diff line number Diff line
@@ -56,6 +56,7 @@

#include "memtype.h"
#include "../mm_internal.h"
#include "../../../mm/internal.h"	/* is_cow_mapping() */

#undef pr_fmt
#define pr_fmt(fmt) "" fmt
@@ -987,6 +988,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
		memtype_free(paddr, paddr + size);
}

/*
 * get_pat_info - recover the PFN range start (and optionally the pgprot)
 * that track_pfn_remap() covered for the whole VMA.
 *
 * @vma:    the VM_PAT-tracked VMA being copied or untracked
 * @paddr:  out: starting physical address of the tracked range
 * @pgprot: out (may be NULL): protection/cachemode used for the range;
 *          pass NULL when the caller only needs the starting address
 *
 * Returns 0 on success with *paddr (and *pgprot, if requested) filled in,
 * or -EINVAL when the information cannot be recovered.
 */
static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
		pgprot_t *pgprot)
{
	unsigned long prot;

	/* Callers must only use this on VMAs that PAT is actually tracking. */
	VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));

	/*
	 * We need the starting PFN and cachemode used for track_pfn_remap()
	 * that covered the whole VMA. For most mappings, we can obtain that
	 * information from the page tables. For COW mappings, we might now
	 * suddenly have anon folios mapped and follow_phys() will fail.
	 *
	 * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
	 * detect the PFN. If we need the cachemode as well, we're out of luck
	 * for now and have to fail fork().
	 */
	if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
		/* Fast path: page tables still hold the original PFN mapping. */
		if (pgprot)
			*pgprot = __pgprot(prot);
		return 0;
	}
	if (is_cow_mapping(vma->vm_flags)) {
		/*
		 * The vm_pgoff fallback recovers only the PFN, not the
		 * cachemode — callers that need pgprot must fail here.
		 */
		if (pgprot)
			return -EINVAL;
		*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return 0;
	}
	/* Unexpected: follow_phys() failed on a non-COW VM_PAT mapping. */
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/*
 * track_pfn_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
@@ -997,20 +1030,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
		if (get_pat_info(vma, &paddr, &pgprot))
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		/* reserve the whole chunk covered by vma. */
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

@@ -1085,7 +1111,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;
@@ -1093,11 +1118,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
		if (get_pat_info(vma, &paddr, NULL))
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
+4 −0
Original line number Diff line number Diff line
@@ -5074,6 +5074,10 @@ int follow_phys(struct vm_area_struct *vma,
		goto out;
	pte = *ptep;

	/* Never return PFNs of anon folios in COW mappings. */
	if (vm_normal_page(vma, address, pte))
		goto unlock;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;