Commit 739e49e0 authored by Kefeng Wang's avatar Kefeng Wang Committed by Catalin Marinas
Browse files

arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()



Directly check ARM64_KERNEL_USES_PMD_MAPS to choose between base page
and PMD-level huge page mapping in vmemmap_populate(), which simplifies
the code a bit by removing the duplicated #if/#else function definitions.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20220920014951.196191-1-wangkefeng.wang@huawei.com


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent c44094ee
Loading
Loading
Loading
Loading
+4 −9
Original line number Diff line number Diff line
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif

#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
	return vmemmap_populate_basepages(start, end, node, altmap);
}
#else	/* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
	pmd_t *pmdp;

	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	if (!ARM64_KERNEL_USES_PMD_MAPS)
		return vmemmap_populate_basepages(start, end, node, altmap);

	do {
		next = pmd_addr_end(addr, end);

@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,

	return 0;
}
#endif	/* !ARM64_KERNEL_USES_PMD_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,