Unverified Commit d87fcf95 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!15124 v2 fix CVE-2024-53056 for olk-5.10

Merge Pull Request from: @ci-robot 
 
PR sync from: Wupeng Ma <mawupeng1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/TIJ3J45YJJ3GEWFFKAVCGATZ7PUI5DJO/ 
From: Ma Wupeng <mawupeng1@huawei.com>

fix CVE-2024-53056 for olk-5.10.

changelog since v1:
 - drop commit("mm: refactor arch_calc_vm_flag_bits() and arm64 MTE
   handling") which is not related to this cve and will lead to
   kabi changes.

Lorenzo Stoakes (3):
  mm: avoid unsafe VMA hook invocation when error arises on mmap hook
  mm: unconditionally close VMAs on error
  mm: resolve faulty mmap_region() error path behaviour


-- 
2.43.0
 
https://gitee.com/src-openeuler/kernel/issues/IB7051 
 
Link: https://gitee.com/openeuler/kernel/pulls/15124

 

Reviewed-by: default avatarLi Nan <linan122@huawei.com>
Signed-off-by: default avatarLi Nan <linan122@huawei.com>
parents df85e99b 75e26b59
Loading
Loading
Loading
Loading
+19 −0
Original line number Diff line number Diff line
@@ -49,6 +49,25 @@

void page_writeback_init(void);

/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke its
 * mmap hook and safely handle error conditions. On error, VMA hooks will be
 * mutated.
 *
 * @file: File which backs the mapping.
 * @vma:  VMA which we are mapping.
 *
 * Returns: 0 if success, error otherwise.
 */
int mmap_file(struct file *file, struct vm_area_struct *vma);

/*
 * If the VMA has a close hook then close it, and since closing it might leave
 * it in an inconsistent state which makes the use of any hooks suspect, clear
 * them down by installing dummy empty hooks.
 */
void vma_close(struct vm_area_struct *vma);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+58 −39
Original line number Diff line number Diff line
@@ -185,8 +185,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
@@ -1412,7 +1411,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
	return true;
}

static unsigned long __mmap_region(struct mm_struct *mm,
static unsigned long __mmap_region_ext(struct mm_struct *mm,
				   struct file *file, unsigned long addr,
				   unsigned long len, vm_flags_t vm_flags,
				   unsigned long pgoff, struct list_head *uf);
@@ -1607,7 +1606,7 @@ unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file,
	if (flags & MAP_CHECKNODE)
		set_vm_checknode(&vm_flags, flags);

	addr = __mmap_region(mm, file, addr, len, vm_flags, pgoff, uf);
	addr = __mmap_region_ext(mm, file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
@@ -1853,11 +1852,6 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/* ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is set
@@ -1865,9 +1859,9 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
		 * new file must not have been exposed to user-space, yet.
		 */
		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		error = mmap_file(file, vma);
		if (error)
			goto unmap_and_free_vma;
			goto unmap_and_free_file_vma;

		/* Can addr have changed??
		 *
@@ -1878,9 +1872,17 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		/*
		 * Drivers should not permit writability when previously it was
		 * disallowed.
		 */
		VM_WARN_ON_ONCE(vm_flags != vma->vm_flags &&
				!(vm_flags & VM_MAYWRITE) &&
				(vma->vm_flags & VM_MAYWRITE));

		addr = vma->vm_start;

		/* If vm_flags changed after call_mmap(), we should try merge vma again
		/* If vm_flags changed after mmap_file(), we should try merge vma again
		 * as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
@@ -1909,21 +1911,14 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
		vma_set_anonymous(vma);
	}

	/* Allow architectures to sanity-check the vm_flags */
	if (!arch_validate_flags(vma->vm_flags)) {
		error = -EINVAL;
		if (file)
			goto close_and_free_vma;
		else
			goto free_vma;
	}
#ifdef CONFIG_SPARC64
	/* TODO: Fix SPARC ADI! */
	WARN_ON_ONCE(!arch_validate_flags(vm_flags));
#endif

	vma_link(mm, vma, prev, rb_link, rb_parent);
	/* Once vma denies write, undo our temporary denial count */
	if (file) {
unmap_writable:
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
@@ -1958,18 +1953,12 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,

	return addr;

close_and_free_vma:
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
unmap_and_free_vma:
unmap_and_free_file_vma:
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
@@ -1980,13 +1969,6 @@ static unsigned long __mmap_region(struct mm_struct *mm, struct file *file,
	return error;
}

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	return __mmap_region(current->mm, file, addr, len, vm_flags, pgoff, uf);
}

static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	/*
@@ -2885,8 +2867,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		return 0;

	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)
		new->vm_ops->close(new);
	vma_close(new);
	if (new->vm_file)
		fput(new->vm_file);
	unlink_anon_vmas(new);
@@ -3031,6 +3012,44 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	return __do_munmap(mm, start, len, uf, false);
}

/*
 * Wrapper around __mmap_region() that performs the up-front checks and
 * acquires/releases the file's writable-mapping count, so the core mapping
 * routine never has to unwind them on its error paths (part of the
 * CVE-2024-53056 error-path hardening).
 *
 * @mm:       Target address space; may differ from current->mm (e.g. when
 *            reached via __do_mmap_mm()).
 * @file:     Backing file, or NULL for an anonymous mapping.
 * @addr:     Requested start address.
 * @len:      Length of the mapping.
 * @vm_flags: VM_* flags for the new mapping.
 * @pgoff:    Page offset into @file.
 * @uf:       List for userfaultfd unmap notifications.
 *
 * Returns: the mapped address on success, or a negative errno encoded as
 * an unsigned long (IS_ERR_VALUE) on failure.
 */
static unsigned long __mmap_region_ext(struct mm_struct *mm, struct file *file,
				       unsigned long addr, unsigned long len,
				       vm_flags_t vm_flags, unsigned long pgoff,
				       struct list_head *uf)
{
	unsigned long ret;
	bool writable_file_mapping = false;

	/* Allow architectures to sanity-check the vm_flags. */
	if (!arch_validate_flags(vm_flags))
		return -EINVAL;

	/* Map writable and ensure this isn't a sealed memfd. */
	if (file && (vm_flags & VM_SHARED)) {
		int error = mapping_map_writable(file->f_mapping);

		if (error)
			return error;
		writable_file_mapping = true;
	}

	ret = __mmap_region(mm, file, addr, len, vm_flags, pgoff, uf);

	/* Clear our write mapping regardless of error. */
	if (writable_file_mapping)
		mapping_unmap_writable(file->f_mapping);

	/*
	 * Validate the mm we actually operated on: callers may pass an mm
	 * other than current->mm here, so checking current->mm would
	 * validate the wrong address space.
	 */
	validate_mm(mm);
	return ret;
}

/*
 * Public entry point: establish a mapping in the current task's address
 * space via the checked wrapper around the core mapping routine.
 */
unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	unsigned long ret;

	ret = __mmap_region_ext(current->mm, file, addr, len, vm_flags,
				pgoff, uf);
	return ret;
}

static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
	int ret;
+3 −4
Original line number Diff line number Diff line
@@ -662,8 +662,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
@@ -955,7 +954,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	ret = mmap_file(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
@@ -986,7 +985,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		ret = mmap_file(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+33 −0
Original line number Diff line number Diff line
@@ -1079,3 +1079,36 @@ int __weak memcmp_pages(struct page *page1, struct page *page2)
	kunmap_atomic(addr1);
	return ret;
}

/*
 * Invoke the file's ->mmap() hook on behalf of a file-backed mapping and,
 * if that call fails, neutralise the VMA's hooks so no further ops (such as
 * ->close()) can be invoked on the now-inconsistent VMA.
 *
 * @file: File which backs the mapping.
 * @vma:  VMA which we are mapping.
 *
 * Returns: 0 on success, otherwise the error from the ->mmap() hook.
 */
int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	static const struct vm_operations_struct dummy_vm_ops = {};
	int err = call_mmap(file, vma);

	if (likely(!err))
		return 0;

	/*
	 * OK, we tried to call the file hook for mmap(), but an error
	 * arose. The mapping is in an inconsistent state and we must not invoke
	 * any further hooks on it.
	 */
	vma->vm_ops = &dummy_vm_ops;

	return err;
}

void vma_close(struct vm_area_struct *vma)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);

		/*
		 * The mapping is in an inconsistent state, and no further hooks
		 * may be invoked upon it.
		 */
		vma->vm_ops = &dummy_vm_ops;
	}
}