Commit 86140453 authored by Yu Zhao's avatar Yu Zhao Committed by Linus Torvalds
Browse files

mm/swap.c: don't pass "enum lru_list" to trace_mm_lru_insertion()

The parameter is redundant in the sense that it can be extracted
from the "struct page" parameter by page_lru() correctly.

Link: https://lore.kernel.org/linux-mm/20201207220949.830352-5-yuzhao@google.com/
Link: https://lkml.kernel.org/r/20210122220600.906146-5-yuzhao@google.com


Signed-off-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3a9c9788
Loading
Loading
Loading
Loading
+4 −7
Original line number Diff line number Diff line
@@ -27,24 +27,21 @@

TRACE_EVENT(mm_lru_insertion,

	TP_PROTO(
		struct page *page,
		int lru
	),
	TP_PROTO(struct page *page),

	TP_ARGS(page, lru),
	TP_ARGS(page),

	TP_STRUCT__entry(
		__field(struct page *,	page	)
		__field(unsigned long,	pfn	)
		__field(int,		lru	)
		__field(enum lru_list,	lru	)
		__field(unsigned long,	flags	)
	),

	TP_fast_assign(
		__entry->page	= page;
		__entry->pfn	= page_to_pfn(page);
		__entry->lru	= lru;
		__entry->lru	= page_lru(page);
		__entry->flags	= trace_pagemap_flags(page);
	),

+1 −4
Original line number Diff line number Diff line
@@ -957,7 +957,6 @@ EXPORT_SYMBOL(__pagevec_release);

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

@@ -993,11 +992,9 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		lru = page_lru(page);
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
@@ -1005,7 +1002,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
	}

	add_page_to_lru_list(page, lruvec);
	trace_mm_lru_insertion(page, lru);
	trace_mm_lru_insertion(page);
}

/*