Commit b8d33f48 authored by Matthew Wilcox, committed by Kefeng Wang
Browse files

mm: simplify thp_vma_allowable_order

mainline inclusion
from mainline-v6.9-rc2
commit e0ffb29bc54d86b9ab10ebafc66eb1b7229e0cd7
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9S4Z4


CVE: NA

-------------------------------------------------

Combine the three boolean arguments into one flags argument for
readability.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit e0ffb29bc54d86b9ab10ebafc66eb1b7229e0cd7)
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
parent e0e45311
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -869,8 +869,8 @@ static int show_smap(struct seq_file *m, void *v)
	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible:    %8u\n",
		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
					      true, THP_ORDERS_ALL));
		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
+15 −14
Original line number Diff line number Diff line
@@ -89,8 +89,12 @@ extern struct kobj_attribute shmem_enabled_attr;
 */
#define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)

#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
@@ -216,17 +220,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @smaps: whether answer will be used for smaps file
 * @in_pf: whether answer will be used by page fault handler
 * @enforce_sysfs: whether sysfs config should be taken into account
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
@@ -239,12 +241,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags, bool smaps,
				       bool in_pf, bool enforce_sysfs,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if (enforce_sysfs && vma_is_anonymous(vma)) {
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
@@ -258,8 +260,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
					  enforce_sysfs, orders);
	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}

enum mthp_stat_item {
@@ -437,8 +438,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags, bool smaps,
					bool in_pf, bool enforce_sysfs,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
+5 −2
Original line number Diff line number Diff line
@@ -77,10 +77,13 @@ unsigned long huge_anon_orders_inherit __read_mostly;
unsigned long huge_pcp_allow_orders __read_mostly;

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders)
{
	bool smaps = tva_flags & TVA_SMAPS;
	bool in_pf = tva_flags & TVA_IN_PF;
	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
	/* Check the intersection of requested and supported orders. */
	orders &= vma_is_anonymous(vma) ?
			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+7 −9
Original line number Diff line number Diff line
@@ -459,7 +459,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
		if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
					    PMD_ORDER))
			__khugepaged_enter(vma->vm_mm);
	}
@@ -925,6 +925,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;
	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;

	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
		return SCAN_ANY_PROCESS;
@@ -935,8 +936,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,

	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
		return SCAN_ADDRESS_RANGE;
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
				     cc->is_khugepaged, PMD_ORDER))
	if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
@@ -1527,8 +1527,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2403,8 +2402,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
			progress++;
			break;
		}
		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
					     true, PMD_ORDER)) {
		if (!thp_vma_allowable_order(vma, vma->vm_flags,
					TVA_ENFORCE_SYSFS, PMD_ORDER)) {
skip:
			progress++;
			continue;
@@ -2741,8 +2740,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,

	*prev = vma;

	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
		return -EINVAL;

	if (task_in_dynamic_pool(current))
+6 −4
Original line number Diff line number Diff line
@@ -4343,8 +4343,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
	 * for this vma. Then filter out the orders that can't be allocated over
	 * the faulting address and still be fully contained in the vma.
	 */
	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
					  BIT(PMD_ORDER) - 1);
	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
	orders = thp_vma_suitable_orders(vma, vmf->address, orders);

	if (!orders)
@@ -5445,7 +5445,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
		return VM_FAULT_OOM;
retry_pud:
	if (pud_none(*vmf.pud) &&
	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER) &&
	    thp_vma_allowable_order(vma, vm_flags,
				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER) &&
	    !task_in_dynamic_pool(current)) {
		ret = create_huge_pud(&vmf);
		if (!(ret & VM_FAULT_FALLBACK))
@@ -5480,7 +5481,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
		goto retry_pud;

	if (pmd_none(*vmf.pmd) &&
	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER) &&
	    thp_vma_allowable_order(vma, vm_flags,
				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER) &&
	    !task_in_dynamic_pool(current)) {
		ret = create_huge_pmd(&vmf);
		if (!(ret & VM_FAULT_FALLBACK))