Commit 3382bbee authored by Christophe Leroy, committed by Linus Torvalds
Browse files

mm/vmalloc: enable mapping of huge pages at pte level in vmalloc

On some architectures like powerpc, there are huge pages that are mapped
at pte level.

Enable it in vmalloc.

For that, architectures can provide arch_vmap_pte_supported_shift() that
returns the shift for pages to map at pte level.

Link: https://lkml.kernel.org/r/2c717e3b1fba1894d890feb7669f83025bfa314d.1620795204.git.christophe.leroy@csgroup.eu


Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f7ee1f13
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -112,6 +112,13 @@ static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, uns
}
#endif

#ifndef arch_vmap_pte_supported_shift
/*
 * Fallback for architectures that do not provide their own
 * arch_vmap_pte_supported_shift(): without arch support for huge pages
 * mapped at pte level, the only shift usable for mapping @size bytes at
 * pte level is the base page shift.
 */
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

/*
 *	Highlevel APIs for driver use
 */
+7 −6
Original line number Diff line number Diff line
@@ -2927,8 +2927,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
		return NULL;
	}

	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
			arch_vmap_pmd_supported(prot)) {
	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
		unsigned long size_per_node;

		/*
@@ -2941,12 +2940,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
		size_per_node = size;
		if (node == NUMA_NO_NODE)
			size_per_node /= num_online_nodes();
		if (size_per_node >= PMD_SIZE) {
		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
			shift = PMD_SHIFT;
		else
			shift = arch_vmap_pte_supported_shift(size_per_node);

		align = max(real_align, 1UL << shift);
		size = ALIGN(real_size, 1UL << shift);
	}
	}

again:
	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |