Commit ef26b76d authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman
Browse files

powerpc/hugetlb/cma: Allocate gigantic hugetlb pages using CMA



commit: cf11e85f ("mm: hugetlb: optionally allocate gigantic hugepages using cma")
added support for allocating gigantic hugepages using CMA. This patch
enables the same for powerpc

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200713150749.25245-1-aneesh.kumar@linux.ibm.com
parent 81a41325
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
@@ -71,6 +72,12 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
{
	return NULL;
}


/*
 * No-op stub used when CONFIG_HUGETLB_PAGE is disabled (see the
 * surrounding #else / #endif), so setup_arch() can call this
 * unconditionally without an #ifdef at the call site.
 */
static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */
+3 −0
Original line number Diff line number Diff line
@@ -928,6 +928,9 @@ void __init setup_arch(char **cmdline_p)
	/* Reserve large chunks of memory for use by CMA for KVM. */
	kvm_cma_reserve();

	/* Reserve large chunks of memory for use by CMA for hugetlb */
	gigantic_hugetlb_cma_reserve();

	klp_init_thread_info(&init_task);

	init_mm.start_code = (unsigned long)_stext;
+18 −0
Original line number Diff line number Diff line
@@ -684,3 +684,21 @@ void flush_dcache_icache_hugepage(struct page *page)
		}
	}
}

/*
 * Reserve a CMA region for gigantic hugetlb pages at early boot,
 * when the running configuration supports them.
 */
void __init gigantic_hugetlb_cma_reserve(void)
{
	unsigned long order = 0;

	if (radix_enabled()) {
		/* With the radix MMU a gigantic page maps one PUD entry. */
		order = PUD_SHIFT - PAGE_SHIFT;
	} else if (!firmware_has_feature(FW_FEATURE_LPAR) &&
		   mmu_psize_defs[MMU_PAGE_16G].shift) {
		/*
		 * For pseries we do use ibm,expected#pages for reserving
		 * 16G pages, so only reserve via CMA outside LPAR when the
		 * hash MMU reports 16G page support.
		 */
		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
	}

	if (!order)
		return;

	/* CMA is only needed for orders the buddy allocator cannot serve. */
	VM_WARN_ON(order < MAX_ORDER);
	hugetlb_cma_reserve(order);
}