Commit 74bfdf15 authored by Ma Wupeng's avatar Ma Wupeng Committed by Wang Wensheng
Browse files

mm/hugetlb: Hugetlb use non-mirrored memory if memory reliable is enabled

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S


CVE: NA

--------------------------------

Previously, memblock allocations for hugetlb could come from either
mirrored or non-mirrored memory, depending on the system's memory layout.
However, this is not suitable when a hugetlb user wants to allocate memory
exclusively from non-mirrored memory while memory reliable is enabled.

To solve this problem, hugetlb now uses the MEMBLOCK_NOMIRROR flag to
allocate memory from non-mirrored regions without falling back to
mirrored regions.

Signed-off-by: default avatarMa Wupeng <mawupeng1@huawei.com>
Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
parent 8525dfb2
Loading
Loading
Loading
Loading
+16 −2
Original line number Diff line number Diff line
@@ -2697,6 +2697,20 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
	return ERR_PTR(-ENOSPC);
}

/*
 * Bootmem allocator for gigantic hugetlb pages.
 *
 * When memory reliable is disabled this degenerates to a plain
 * memblock_alloc_try_nid_raw() call.  When it is enabled, the
 * MEMBLOCK_NOMIRROR flag is passed so the huge page is carved out of
 * non-mirrored memory only, with no fallback to mirrored regions.
 *
 * @size:     allocation size in bytes
 * @align:    required alignment
 * @min_addr: lower bound of the allowed physical range
 * @max_addr: upper bound of the allowed physical range
 *            (MEMBLOCK_ALLOC_ACCESSIBLE for "anywhere accessible")
 * @nid:      preferred node, or NUMA_NO_NODE
 *
 * Returns the virtual address of the allocated (uninitialized) range,
 * or NULL on failure.
 */
static void *__init __alloc_bootmem_huge_page_inner(phys_addr_t size,
						    phys_addr_t align,
						    phys_addr_t min_addr,
						    phys_addr_t max_addr,
						    int nid)
{
	if (!mem_reliable_is_enabled())
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);

	/*
	 * Fix: the original passed max_addr for both the min_addr and
	 * max_addr arguments, silently ignoring the caller's lower bound.
	 * Current callers pass min_addr == 0, so behavior is unchanged for
	 * them, but the parameter is now honored.
	 */
	return memblock_alloc_try_nid_raw_flags(size, align, min_addr, max_addr,
						nid, MEMBLOCK_NOMIRROR);
}

int alloc_bootmem_huge_page(struct hstate *h, int nid)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
@@ -2712,7 +2726,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)

	/* do node specific alloc */
	if (nid != NUMA_NO_NODE) {
		m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
		m = __alloc_bootmem_huge_page_inner(huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
		if (!m)
			return 0;
@@ -2720,7 +2734,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
	}
	/* allocate from next node when distributing huge pages */
	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		m = memblock_alloc_try_nid_raw(
		m = __alloc_bootmem_huge_page_inner(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		/*