Commit 4fa6893f authored by Yang Shi, committed by Andrew Morton

mm: thp: consolidate vma size check to transhuge_vma_suitable

There are a couple of places that check whether the vma size is OK for THP
or whether the address fits; they are open coded and duplicated.  Use
transhuge_vma_suitable() to do the job by passing in (vma->vm_end -
HPAGE_PMD_SIZE).
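
For illustration (not part of the patch): (vma->vm_end - HPAGE_PMD_SIZE) is
the start of the last HPAGE_PMD_SIZE-sized range that could end inside the
vma, so a single transhuge_vma_suitable() call also rejects vmas that are
too small.  A minimal sketch of the consolidated check, assuming 2MB huge
pages:

	/*
	 * For a vma spanning [0x201000, 0x3ff000), i.e. smaller than 2MB:
	 *   addr  = vm_end - HPAGE_PMD_SIZE = 0x3ff000 - 0x200000 = 0x1ff000
	 *   haddr = addr & HPAGE_PMD_MASK   = 0x0
	 * haddr falls below vm_start, so the check fails: no aligned 2MB
	 * range fits in this vma.
	 */
	if (!transhuge_vma_suitable(vma, vma->vm_end - HPAGE_PMD_SIZE))
		return false;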

Move the vma size check into hugepage_vma_check().  This makes
khugepaged_enter() the same as khugepaged_enter_vma().  There is just
one caller of khugepaged_enter(); replace it with khugepaged_enter_vma()
and remove khugepaged_enter().

Link: https://lkml.kernel.org/r/20220616174840.1202070-3-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 66137fb3
include/linux/huge_mm.h  +11 −0
@@ -116,6 +116,17 @@ extern struct kobj_attribute shmem_enabled_attr;
 
 extern unsigned long transparent_hugepage_flags;
 
+/*
+ * Do the below checks:
+ *   - For file vma, check if the linear page offset of vma is
+ *     HPAGE_PMD_NR aligned within the file.  The hugepage is
+ *     guaranteed to be hugepage-aligned within the file, but we must
+ *     check that the PMD-aligned addresses in the VMA map to
+ *     PMD-aligned offsets within the file, else the hugepage will
+ *     not be PMD-mappable.
+ *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ *     area.
+ */
 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 		unsigned long addr)
 {
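
For reference (not part of this hunk; paraphrased from the kernel around
this series, so the exact body may differ by version), the helper being
documented is roughly:

	static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
			unsigned long addr)
	{
		unsigned long haddr;

		/* Don't have to check pgoff for anonymous vma */
		if (!vma_is_anonymous(vma)) {
			if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
					HPAGE_PMD_NR))
				return false;
		}

		haddr = addr & HPAGE_PMD_MASK;

		/* The PMD-aligned range around addr must lie within the vma */
		if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
			return false;
		return true;
	}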
include/linux/khugepaged.h  +0 −14
@@ -51,16 +51,6 @@ static inline void khugepaged_exit(struct mm_struct *mm)
 	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
 		__khugepaged_exit(mm);
 }
-
-static inline void khugepaged_enter(struct vm_area_struct *vma,
-				   unsigned long vm_flags)
-{
-	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-	    khugepaged_enabled()) {
-		if (hugepage_vma_check(vma, vm_flags))
-			__khugepaged_enter(vma->vm_mm);
-	}
-}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
@@ -68,10 +58,6 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline void khugepaged_enter(struct vm_area_struct *vma,
-				    unsigned long vm_flags)
-{
-}
 static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
 					unsigned long vm_flags)
 {
mm/huge_memory.c  +1 −1
@@ -726,7 +726,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
-	khugepaged_enter(vma, vma->vm_flags);
+	khugepaged_enter_vma(vma, vma->vm_flags);
 
 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm) &&
mm/khugepaged.c  +6 −13
@@ -443,8 +443,8 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
 	if (vma_is_dax(vma))
 		return false;
 
-	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
-				vma->vm_pgoff, HPAGE_PMD_NR))
+	/* Check alignment for file vma and size for both file and anon vma */
+	if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
 		return false;
 
 	/* Enabled via shmem mount options or sysfs settings. */
@@ -505,9 +505,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 			  unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-	    khugepaged_enabled() &&
-	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-	     (vma->vm_end & HPAGE_PMD_MASK))) {
+	    khugepaged_enabled()) {
 		if (hugepage_vma_check(vma, vm_flags))
 			__khugepaged_enter(vma->vm_mm);
 	}
@@ -948,7 +946,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 		struct vm_area_struct **vmap)
 {
 	struct vm_area_struct *vma;
-	unsigned long hstart, hend;
 
 	if (unlikely(khugepaged_test_exit(mm)))
 		return SCAN_ANY_PROCESS;
@@ -957,9 +954,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	if (!vma)
 		return SCAN_VMA_NULL;
 
-	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-	hend = vma->vm_end & HPAGE_PMD_MASK;
-	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+	if (!transhuge_vma_suitable(vma, address))
 		return SCAN_ADDRESS_RANGE;
 	if (!hugepage_vma_check(vma, vma->vm_flags))
 		return SCAN_VMA_CHECK;
@@ -2135,10 +2130,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			progress++;
 			continue;
 		}
-		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-		hend = vma->vm_end & HPAGE_PMD_MASK;
-		if (hstart >= hend)
-			goto skip;
+		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
+		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
 		if (khugepaged_scan.address > hend)
 			goto skip;
 		if (khugepaged_scan.address < hstart)
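
A quick equivalence check on the last hunk (illustrative only): the new
round_up()/round_down() calls compute the same bounds as the old open-coded
masking, since ~HPAGE_PMD_MASK == HPAGE_PMD_SIZE - 1.  With 2MB huge pages:

	/*
	 * Old: (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK rounds up:
	 *      vm_start = 0x201000  ->  hstart = 0x400000
	 * New: round_up(0x201000, 0x200000)   =   0x400000
	 *
	 * Old: vm_end & HPAGE_PMD_MASK rounds down:
	 *      vm_end   = 0x7ff000  ->  hend   = 0x600000
	 * New: round_down(0x7ff000, 0x200000) =   0x600000
	 */

The separate "if (hstart >= hend) goto skip" is no longer needed because
hugepage_vma_check(), via transhuge_vma_suitable(), already rejects vmas too
small to hold an aligned huge page.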