Unverified Commit ed17ddff authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!14823 mm: some shmem bugfixes

Merge Pull Request from: @wedm23414 
 
Baolin Wang (3):
  mm: shmem: fix incorrect index alignment for within_size policy
  mm: shmem: fix the update of 'shmem_falloc->nr_unswapped'
  docs: mm: fix the incorrect 'FileHugeMapped' field

Hugh Dickins (3):
  mm: shmem: fix minor off-by-one in shrinkable calculation
  mm: shmem: extend shmem_unused_huge_shrink() to all sizes
  mm: shmem: fix ShmemHugePages at swapout

-- 
2.34.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/14823

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: default avatarZhang Peng <zhangpeng362@huawei.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents ecd893e8 89e8f601
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -459,7 +459,7 @@ AnonHugePmdMapped).
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
To identify what applications are mapping file transparent huge pages, it
is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
for each mapping.

Note that reading the smaps file is expensive and reading it
+37 −39
Original line number Diff line number Diff line
@@ -635,15 +635,14 @@ static const char *shmem_format_huge(int huge)
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
		struct shrink_control *sc, unsigned long nr_to_free)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;
	unsigned long split = 0, freed = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;
@@ -661,13 +660,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
@@ -676,34 +668,36 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		pgoff_t next, end;
		loff_t i_size;
		int ret;
		pgoff_t index;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
		if (nr_to_free && freed >= nr_to_free)
			goto move_back;

		index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
		folio = filemap_get_folio(inode->i_mapping, index);
		if (IS_ERR(folio))
		i_size = i_size_read(inode);
		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
		if (!folio || xa_is_value(folio))
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		/* No large folio at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/* Check if there is anything to gain from splitting */
		next = folio_next_index(folio);
		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
		if (end <= folio->index || end >= next) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
@@ -724,6 +718,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		if (ret)
			goto move_back;

		freed += next - end;
		split++;
drop:
		list_del_init(&info->shrinklist);
@@ -768,7 +763,7 @@ static long shmem_unused_huge_count(struct super_block *sb,
#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
		struct shrink_control *sc, unsigned long nr_to_free)
{
	return 0;
}
@@ -781,6 +776,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Adjust the per-lruvec shmem page-cache statistics for @folio by
 * @nr_pages (positive when the folio is added to the page cache,
 * negative when it is removed).  A PMD-mappable folio is additionally
 * accounted in the shmem-THP counter.
 */
static void shmem_update_stats(struct folio *folio, int nr_pages)
{
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
	if (folio_test_pmd_mappable(folio))
		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
}

/*
 * Somewhat like filemap_add_folio, but error if expected item has gone.
 */
@@ -815,10 +818,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
		shmem_update_stats(folio, nr);
		shmem_reliable_folio_add(folio, nr);
		mapping->nrpages += nr;
unlock:
@@ -847,8 +847,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	shmem_update_stats(folio, -nr);
	shmem_reliable_folio_add(folio, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put_refs(folio, nr);
@@ -1527,7 +1526,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
				shmem_falloc->nr_unswapped += nr_pages;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
@@ -1685,6 +1684,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
	unsigned long vm_flags = vma ? vma->vm_flags : 0;
	pgoff_t aligned_index;
	bool global_huge;
	loff_t i_size;
	int order;
@@ -1719,9 +1719,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
	/* Allow mTHP that will be fully within i_size. */
	order = highest_order(within_size_orders);
	while (within_size_orders) {
		index = round_up(index + 1, order);
		aligned_index = round_up(index + 1, 1 << order);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index) {
		if (i_size >> PAGE_SHIFT >= aligned_index) {
			mask |= within_size_orders;
			break;
		}
@@ -1872,7 +1872,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
		 * Try to reclaim some space by splitting a few
		 * large folios beyond i_size on the filesystem.
		 */
		shmem_unused_huge_shrink(sbinfo, NULL, 2);
		shmem_unused_huge_shrink(sbinfo, NULL, pages);
		/*
		 * And do a shmem_recalc_inode() to account for freed pages:
		 * except our folio is there in cache, so not quite balanced.
@@ -1973,11 +1973,9 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
	}
	if (!error) {
		mem_cgroup_replace_folio(old, new);
		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
		shmem_update_stats(new, nr_pages);
		shmem_reliable_folio_add(new, nr_pages);
		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
		shmem_update_stats(old, -nr_pages);
		shmem_reliable_folio_add(old, -nr_pages);
	}
	xa_unlock_irq(&swap_mapping->i_pages);
@@ -2372,7 +2370,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
	alloced = true;
	if (folio_test_large(folio) &&
	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
					folio_next_index(folio) - 1) {
					folio_next_index(folio)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		struct shmem_inode_info *info = SHMEM_I(inode);
		/*