Unverified Commit 27e277a7 authored by openeuler-ci-bot, committed by Gitee
Browse files

!14611 merge mainline patches into OLK-6.6

Merge Pull Request from: @ci-robot 
 
PR sync from: Kaixiong Yu <yukaixiong@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/DZ6X4TAPOCKGF3QLCAXM3BI7IRHJQHPP/ 
merge mainline patches into OLK-6.6

Jeongjun Park (1):
  mm: migrate: annotate data-race in migrate_folio_unmap()

Kefeng Wang (1):
  tmpfs: don't enable large folios if not supported

Pei Li (1):
  mm: ignore data-race in __swap_writepage

Sergey Senozhatsky (1):
  mm: Kconfig: fixup zsmalloc configuration

Wei Xu (1):
  mm/mglru: reset page lru tier bits when activating

Zeng Jingxiang (1):
  mm/vmscan: wake up flushers conditionally to avoid cgroup OOM


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/IBET92 
 
Link: https://gitee.com/openeuler/kernel/pulls/14611

 

Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 4a0348cc 038ff4e1
Loading
Loading
Loading
Loading
+14 −1
Original line number Diff line number Diff line
@@ -155,6 +155,11 @@ static inline int folio_lru_refs(struct folio *folio)
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline void folio_clear_lru_refs(struct folio *folio)
{
	set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
@@ -224,6 +229,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
{
	unsigned long seq;
	unsigned long flags;
	unsigned long mask;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
@@ -259,7 +265,14 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
	mask = LRU_GEN_MASK;
	/*
	 * Don't clear PG_workingset here because it can affect PSI accounting
	 * if the activation is due to workingset refault.
	 */
	if (folio_test_active(folio))
		mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active);
	set_mask_bits(&folio->flags, mask, flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
+2 −0
Original line number Diff line number Diff line
@@ -404,6 +404,8 @@ enum {
	NR_LRU_GEN_CAPS
};

#define LRU_REFS_FLAGS		(BIT(PG_referenced) | BIT(PG_workingset))

#define MIN_LRU_BATCH		BITS_PER_LONG
#define MAX_LRU_BATCH		(MIN_LRU_BATCH * 64)

+1 −1
Original line number Diff line number Diff line
@@ -190,7 +190,7 @@ config Z3FOLD

config ZSMALLOC
	tristate
	prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
	prompt "N:1 compression allocator (zsmalloc)" if (ZSWAP || ZRAM)
	depends on MMU
	help
	  zsmalloc is a slab-based memory allocator designed to store
+1 −1
Original line number Diff line number Diff line
@@ -1137,7 +1137,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
	int rc = -EAGAIN;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	bool is_lru = data_race(!__folio_test_movable(src));
	bool locked = false;
	bool dst_locked = false;

+6 −1
Original line number Diff line number Diff line
@@ -381,7 +381,12 @@ void __swap_writepage(struct page *page, struct writeback_control *wbc)
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(page, wbc);
	else if (sis->flags & SWP_SYNCHRONOUS_IO)
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(page, wbc, sis);
	else
		swap_writepage_bdev_async(page, wbc, sis);
Loading