Commit fa8596f2 authored by Guo Mengqi, committed by Zheng Zengkai
Browse files

mm: sharepool: sp_alloc_mmap_populate bugfix

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5J0Z9


CVE: NA

--------------------------------

When there is only one mm in a group allocating memory and the process
is killed, the error path in sp_alloc_mmap_populate tries to access the
next spg_node->master->mm in the group's proc list. However, in this
case the next spg_node in the proc list is the list head, so
spg_node->master is NULL, which leads to the log below:

[file:test_sp_alloc.c, func:alloc_large_repeat, line:437] start to alloc...
[  264.699086][ T1772] share pool: gonna sp_alloc_unmap...
[  264.699939][ T1772] share pool: list_next_entry(spg_node, proc_node) is ffff0004c4907028
[  264.700380][ T1772] share pool: master is 0
[  264.701240][ T1772] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000018
...
[  264.704764][ T1772] Internal error: Oops: 96000006 [#1] SMP
[  264.705166][ T1772] Modules linked in: sharepool_dev(OE)
[  264.705823][ T1772] CPU: 3 PID: 1772 Comm: test_sp_alloc Tainted: G           OE     5.10.0+ #23
...
[  264.712513][ T1772] Call trace:
[  264.713057][ T1772]  sp_alloc+0x528/0xa88
[  264.713740][ T1772]  dev_ioctl+0x6ec/0x1d00 [sharepool_dev]
[  264.714035][ T1772]  __arm64_sys_ioctl+0xb0/0xe8
...
[  264.716891][ T1772] ---[ end trace 1587677032f666c6 ]---
[  264.717457][ T1772] Kernel panic - not syncing: Oops: Fatal exception
[  264.717961][ T1772] SMP: stopping secondary CPUs
[  264.718787][ T1772] Kernel Offset: disabled
[  264.719718][ T1772] Memory Limit: none
[  264.720333][ T1772] ---[ end Kernel panic - not syncing: Oops: Fatal exception ]---

Add a list_is_last() check to avoid this NULL pointer dereference.

Signed-off-by: Guo Mengqi <guomengqi3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent f30182b3
Loading
Loading
Loading
Loading
+24 −15
Original line number Diff line number Diff line
@@ -2631,6 +2631,7 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
		if (ret)
			sp_add_work_compact();
	}

	return ret;
}

@@ -2651,14 +2652,8 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);
	if (ret < 0) {
		if (ac->need_fallocate) {
			/* e.g. second sp_mmap fail */
			sp_fallocate(spa);
			ac->need_fallocate = false;
		}
	if (ret < 0)
		return ret;
	}

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
@@ -2673,18 +2668,13 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
err:
		sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);

		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
					ret);
		sp_fallocate(spa);  /* need this, otherwise memleak */
		sp_alloc_fallback(spa, ac);
	} else
		ac->need_fallocate = true;

	return ret;
}

@@ -2693,7 +2683,7 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
@@ -2702,7 +2692,7 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {
			if (ac->state != ALLOC_COREDUMP)
				return mmap_ret;
				goto unmap;
			ac->state = ALLOC_NORMAL;
			continue;
		}
@@ -2710,6 +2700,25 @@ static int sp_alloc_mmap_populate(struct sp_area *spa,
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/* only fallocate spa if physical memory had been allocated */
	if (ac->need_fallocate) {
		sp_fallocate(spa);
		ac->need_fallocate = false;
	}

	/* if hugepage allocation fails, this will transfer to normal page
	 * and try again. (only if SP_HUGEPAGE_ONLY is not flagged
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa maybe an error pointer, so introduce variable spg */