Commit 88f2ef73 authored by Muchun Song, committed by Linus Torvalds

mm: introduce kmem_cache_alloc_lru

We currently allocate scope for every memcg to be tracked on every
superblock instantiated in the system, regardless of whether that
superblock is even accessible to that memcg.

These huge memcg counts come from container hosts where memcgs are
confined to just a small subset of the total number of superblocks
instantiated at any given point in time.

For these systems with huge container counts, list_lru does not need the
capability of tracking every memcg on every superblock.  What it comes
down to is that the memcg need only be added to a list_lru at the first
insert.  So introduce kmem_cache_alloc_lru, which allocates an object
and, on first use, the per-memcg list_lru structures it will be tracked
on.  In a later patch, we will convert all inode and dentry allocations
from kmem_cache_alloc to kmem_cache_alloc_lru.
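
As an illustrative sketch (not part of this patch), a filesystem's inode
allocation would change roughly as follows; foo_alloc_inode, struct
foo_inode and foo_inode_cachep are hypothetical names, and only the
allocation call changes:

	/*
	 * Hypothetical conversion: pass the superblock's inode LRU so the
	 * allocation path can create this memcg's per-memcg list on demand.
	 */
	static struct inode *foo_alloc_inode(struct super_block *sb)
	{
		struct foo_inode *fi;

		/* was: fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL); */
		fi = kmem_cache_alloc_lru(foo_inode_cachep, &sb->s_inode_lru,
					  GFP_KERNEL);
		if (!fi)
			return NULL;
		return &fi->vfs_inode;
	}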

Link: https://lkml.kernel.org/r/20220228122126.37293-3-songmuchun@bytedance.com


Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alex Shi <alexs@kernel.org>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kari Argillander <kari.argillander@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/list_lru.h +4 −0
@@ -56,6 +56,8 @@ struct list_lru {
 	struct list_head	list;
 	int			shrinker_id;
 	bool			memcg_aware;
+	/* protects ->mlrus->mlru[i] */
+	spinlock_t		lock;
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	__rcu *mlrus;
 #endif
@@ -72,6 +74,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 #define list_lru_init_memcg(lru, shrinker)		\
 	__list_lru_init((lru), true, NULL, shrinker)
 
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+			 gfp_t gfp);
 int memcg_update_all_list_lrus(int num_memcgs);
 void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
include/linux/memcontrol.h +14 −0
@@ -524,6 +524,20 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
 	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 }
 
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+retry:
+	memcg = obj_cgroup_memcg(objcg);
+	if (unlikely(!css_tryget(&memcg->css)))
+		goto retry;
+	rcu_read_unlock();
+
+	return memcg;
+}
+
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
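
The helper moves to the header so callers outside mm/memcontrol.c can use
it. A sketch of the pattern this enables (prealloc_memcg_lru is a
hypothetical name; in this series the real caller is the slab
pre-allocation hook, which is not shown in this view):

	/*
	 * Illustrative only: resolve the objcg to its memcg (which takes a
	 * css reference), ensure the lru has per-memcg lists for it, then
	 * drop the reference.
	 */
	static int prealloc_memcg_lru(struct obj_cgroup *objcg,
				      struct list_lru *lru, gfp_t flags)
	{
		struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);
		int ret = memcg_list_lru_alloc(memcg, lru, flags);

		css_put(&memcg->css);
		return ret;
	}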
include/linux/slab.h +3 −0
@@ -135,6 +135,7 @@
 
 #include <linux/kasan.h>
 
+struct list_lru;
 struct mem_cgroup;
 /*
  * struct kmem_cache related prototypes
@@ -416,6 +417,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+			   gfp_t gfpflags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
 /*
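
Passing a NULL lru is intended to behave exactly like kmem_cache_alloc();
a minimal sketch of that equivalence (foo_alloc is a hypothetical wrapper;
the real implementations live in the slab allocators, outside this view):

	/* Sketch: with no list_lru involved, both entry points behave the same. */
	static void *foo_alloc(struct kmem_cache *cachep)
	{
		return kmem_cache_alloc_lru(cachep, NULL, GFP_KERNEL);
	}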
mm/list_lru.c +95 −9
@@ -13,6 +13,7 @@
 #include <linux/mutex.h>
 #include <linux/memcontrol.h>
 #include "slab.h"
+#include "internal.h"
 
 #ifdef CONFIG_MEMCG_KMEM
 static LIST_HEAD(memcg_list_lrus);
@@ -338,22 +339,30 @@ static void memcg_destroy_list_lru_range(struct list_lru_memcg *mlrus,
 		kfree(mlrus->mlru[i]);
 }
 
+static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp)
+{
+	int nid;
+	struct list_lru_per_memcg *mlru;
+
+	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
+	if (!mlru)
+		return NULL;
+
+	for_each_node(nid)
+		init_one_lru(&mlru->node[nid]);
+
+	return mlru;
+}
+
 static int memcg_init_list_lru_range(struct list_lru_memcg *mlrus,
 				     int begin, int end)
 {
 	int i;
 
 	for (i = begin; i < end; i++) {
-		int nid;
-		struct list_lru_per_memcg *mlru;
-
-		mlru = kmalloc(struct_size(mlru, node, nr_node_ids), GFP_KERNEL);
-		if (!mlru)
+		mlrus->mlru[i] = memcg_init_list_lru_one(GFP_KERNEL);
+		if (!mlrus->mlru[i])
 			goto fail;
-
-		for_each_node(nid)
-			init_one_lru(&mlru->node[nid]);
-		mlrus->mlru[i] = mlru;
 	}
 	return 0;
 fail:
@@ -370,6 +379,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 	if (!memcg_aware)
 		return 0;
 
+	spin_lock_init(&lru->lock);
+
 	mlrus = kvmalloc(struct_size(mlrus, mlru, size), GFP_KERNEL);
 	if (!mlrus)
 		return -ENOMEM;
@@ -416,8 +427,11 @@ static int memcg_update_list_lru(struct list_lru *lru, int old_size, int new_siz
 		return -ENOMEM;
 	}
 
+	spin_lock_irq(&lru->lock);
 	memcpy(&new->mlru, &old->mlru, flex_array_size(new, mlru, old_size));
 	rcu_assign_pointer(lru->mlrus, new);
+	spin_unlock_irq(&lru->lock);
+
 	kvfree_rcu(old, rcu);
 	return 0;
 }
@@ -502,6 +516,78 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
 		memcg_drain_list_lru(lru, src_idx, dst_memcg);
 	mutex_unlock(&list_lrus_mutex);
 }
+
+static bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
+				     struct list_lru *lru)
+{
+	bool allocated;
+	int idx;
+
+	idx = memcg->kmemcg_id;
+	if (unlikely(idx < 0))
+		return true;
+
+	rcu_read_lock();
+	allocated = !!rcu_dereference(lru->mlrus)->mlru[idx];
+	rcu_read_unlock();
+
+	return allocated;
+}
+
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+			 gfp_t gfp)
+{
+	int i;
+	unsigned long flags;
+	struct list_lru_memcg *mlrus;
+	struct list_lru_memcg_table {
+		struct list_lru_per_memcg *mlru;
+		struct mem_cgroup *memcg;
+	} *table;
+
+	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
+		return 0;
+
+	gfp &= GFP_RECLAIM_MASK;
+	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
+	if (!table)
+		return -ENOMEM;
+
+	/*
+	 * Because the list_lru can be reparented to the parent cgroup's
+	 * list_lru, we should make sure that this cgroup and all its
+	 * ancestors have allocated list_lru_per_memcg.
+	 */
+	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
+		if (memcg_list_lru_allocated(memcg, lru))
+			break;
+
+		table[i].memcg = memcg;
+		table[i].mlru = memcg_init_list_lru_one(gfp);
+		if (!table[i].mlru) {
+			while (i--)
+				kfree(table[i].mlru);
+			kfree(table);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_irqsave(&lru->lock, flags);
+	mlrus = rcu_dereference_protected(lru->mlrus, true);
+	while (i--) {
+		int index = table[i].memcg->kmemcg_id;
+
+		if (mlrus->mlru[index])
+			kfree(table[i].mlru);
+		else
+			mlrus->mlru[index] = table[i].mlru;
+	}
+	spin_unlock_irqrestore(&lru->lock, flags);
+
+	kfree(table);
+
+	return 0;
+}
 #else
 static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
mm/memcontrol.c +0 −14
@@ -2805,20 +2805,6 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 	folio->memcg_data = (unsigned long)memcg;
 }
 
-static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
-{
-	struct mem_cgroup *memcg;
-
-	rcu_read_lock();
-retry:
-	memcg = obj_cgroup_memcg(objcg);
-	if (unlikely(!css_tryget(&memcg->css)))
-		goto retry;
-	rcu_read_unlock();
-
-	return memcg;
-}
-
 #ifdef CONFIG_MEMCG_KMEM
 /*
  * The allocated objcg pointers array is not accounted directly.