Commit b918653b, authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka
Browse files

mm: Convert [un]account_slab_page() to struct slab



Convert the parameter of these functions to struct slab instead of
struct page and drop _page from the names. For now their callers just
convert page to slab.

[ vbabka@suse.cz: replace existing functions instead of calling them ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
parent d122019b
Loading
Loading
Loading
Loading
mm/slab.c (+2 −2):

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1380,7 +1380,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	account_slab_page(page, cachep->gfporder, cachep, flags);
+	account_slab(page_slab(page), cachep->gfporder, cachep, flags);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1405,7 +1405,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	unaccount_slab_page(page, order, cachep);
+	unaccount_slab(page_slab(page), order, cachep);
 	__free_pages(page, order);
 }

mm/slab.h (+8 −9):

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -583,24 +583,23 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 	return page->slab_cache;
 }
 
-static __always_inline void account_slab_page(struct page *page, int order,
-					      struct kmem_cache *s,
-					      gfp_t gfp)
+static __always_inline void account_slab(struct slab *slab, int order,
+					 struct kmem_cache *s, gfp_t gfp)
 {
 	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
-		memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+		memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }
 
-static __always_inline void unaccount_slab_page(struct page *page, int order,
-						struct kmem_cache *s)
+static __always_inline void unaccount_slab(struct slab *slab, int order,
+					   struct kmem_cache *s)
 {
 	if (memcg_kmem_enabled())
-		memcg_free_page_obj_cgroups(page);
+		memcg_free_page_obj_cgroups(slab_page(slab));
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
 }

mm/slub.c (+2 −2):

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1943,7 +1943,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	account_slab_page(page, oo_order(oo), s, flags);
+	account_slab(page_slab(page), oo_order(oo), s, flags);
 
 	page->slab_cache = s;
 	__SetPageSlab(page);
@@ -2014,7 +2014,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	page->slab_cache = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	unaccount_slab_page(page, order, s);
+	unaccount_slab(page_slab(page), order, s);
 	__free_pages(page, order);
 }