Commit 330c367c authored by Kefeng Wang's avatar Kefeng Wang Committed by Euler
Browse files

mm: shmem: remove __shmem_huge_global_enabled()

mainline inclusion
from mainline-v6.12-rc1
commit 4a9a27fdf7bfd29013491aea45e3512988cc5876
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBG3J8

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4a9a27fdf7bfd29013491aea45e3512988cc5876

--------------------------------

Remove __shmem_huge_global_enabled() since it has only one caller, and
remove the repeated check of VM_NOHUGEPAGE/MMF_DISABLE_THP as they are
already checked in shmem_allowable_huge_orders(); also remove the
unnecessary vma parameter.

Link: https://lkml.kernel.org/r/20241017141457.1169092-2-wangkefeng.wang@huawei.com


Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Wang Lian <dev01404@linx-info.com>
--
parent 8be4868c
Loading
Loading
Loading
Loading
+10 −23
Original line number Diff line number Diff line
@@ -543,17 +543,15 @@ static bool shmem_confirm_swap(struct address_space *mapping,

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
					struct vm_area_struct *vma,
				      unsigned long vm_flags)
{
	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
@@ -571,7 +569,7 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
		if (vm_flags & VM_HUGEPAGE)
			return true;
		fallthrough;
	default:
@@ -579,17 +577,6 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
	}
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
		   loff_t write_end, bool shmem_huge_force,
		   struct vm_area_struct *vma, unsigned long vm_flags)
{
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;

	return __shmem_huge_global_enabled(inode, index, write_end,
					   shmem_huge_force, vma, vm_flags);
}

static int shmem_parse_huge(const char *str)
{
	int huge;
@@ -788,7 +775,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
		struct vm_area_struct *vma, unsigned long vm_flags)
				      unsigned long vm_flags)
{
	return false;
}
@@ -1185,7 +1172,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
			STATX_ATTR_NODUMP);
	generic_fillattr(idmap, request_mask, inode, stat);

	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
	if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
		stat->blksize = HPAGE_PMD_SIZE;

	if (request_mask & STATX_BTIME) {
@@ -1706,7 +1693,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
		return 0;

	global_huge = shmem_huge_global_enabled(inode, index, write_end,
					shmem_huge_force, vma, vm_flags);
						shmem_huge_force, vm_flags);
	if (!vma || !vma_is_anon_shmem(vma)) {
		/*
		 * For tmpfs, we now only support PMD sized THP if huge page