Commit bbc180a5 authored by Nicholas Piggin, committed by Linus Torvalds
Browse files

mm: HUGE_VMAP arch support cleanup

This changes the awkward approach where architectures provide init
functions to determine which levels they can provide large mappings for,
to one where the arch is queried for each call.

This removes code and indirection, and allows constant-folding of dead
code for unsupported levels.

This also adds a prot argument to the arch query.  This is unused
currently but could help with some architectures (e.g., some powerpc
processors can't map uncacheable memory with large pages).

Link: https://lkml.kernel.org/r/20210317062402.533919-7-npiggin@gmail.com


Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 95f0ddf0
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H

#include <asm/page.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Per-level huge-vmap capability queries: each returns true if arm64 can
 * create a huge mapping at that page-table level (p4d/pud/pmd) in the
 * vmalloc/ioremap area.  Core mm queries these on each call instead of
 * caching arch capabilities at init, which allows dead-code elimination
 * for unsupported levels.  The prot argument is currently unused; it lets
 * an architecture refuse large pages for particular memory types.
 */
bool arch_vmap_p4d_supported(pgprot_t prot);
bool arch_vmap_pud_supported(pgprot_t prot);
bool arch_vmap_pmd_supported(pgprot_t prot);
#endif

#endif /* _ASM_ARM64_VMALLOC_H */
+5 −5
Original line number Diff line number Diff line
@@ -1339,12 +1339,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
	return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return 0;
	return false;
}

int __init arch_ioremap_pud_supported(void)
bool arch_vmap_pud_supported(pgprot_t prot)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
@@ -1354,9 +1354,9 @@ int __init arch_ioremap_pud_supported(void)
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* See arch_ioremap_pud_supported() */
	/* See arch_vmap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

+8 −0
Original line number Diff line number Diff line
#ifndef _ASM_POWERPC_VMALLOC_H
#define _ASM_POWERPC_VMALLOC_H

#include <asm/page.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Per-level huge-vmap capability queries: each returns true if powerpc
 * can create a huge mapping at that page-table level (p4d/pud/pmd) in
 * the vmalloc/ioremap area.  Queried by core mm on each call; the prot
 * argument is currently unused, but some powerpc processors cannot map
 * uncacheable memory with large pages, so it may be consulted later.
 */
bool arch_vmap_p4d_supported(pgprot_t prot);
bool arch_vmap_pud_supported(pgprot_t prot);
bool arch_vmap_pmd_supported(pgprot_t prot);
#endif

#endif /* _ASM_POWERPC_VMALLOC_H */
+4 −4
Original line number Diff line number Diff line
@@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
	set_pte_at(mm, addr, ptep, pte);
}

int __init arch_ioremap_pud_supported(void)
bool arch_vmap_pud_supported(pgprot_t prot)
{
	/* HPT does not cope with large pages in the vmalloc area */
	return radix_enabled();
}

int __init arch_ioremap_pmd_supported(void)
bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return radix_enabled();
}
@@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
	return 1;
}

int __init arch_ioremap_p4d_supported(void)
bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return 0;
	return false;
}
+7 −0
Original line number Diff line number Diff line
#ifndef _ASM_X86_VMALLOC_H
#define _ASM_X86_VMALLOC_H

#include <asm/page.h>
#include <asm/pgtable_areas.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Per-level huge-vmap capability queries: each returns true if x86 can
 * create a huge mapping at that page-table level (p4d/pud/pmd) in the
 * vmalloc/ioremap area.  Core mm queries these on each call instead of
 * caching arch capabilities at init.  The prot argument is currently
 * unused; it allows an architecture to refuse large pages for
 * particular memory types.
 */
bool arch_vmap_p4d_supported(pgprot_t prot);
bool arch_vmap_pud_supported(pgprot_t prot);
bool arch_vmap_pmd_supported(pgprot_t prot);
#endif

#endif /* _ASM_X86_VMALLOC_H */
Loading