Commit becaba65 authored by Roman Gushchin, committed by Linus Torvalds
Browse files

mm: memcg/slab: fix obj_cgroup_charge() return value handling



Commit 10befea9 ("mm: memcg/slab: use a single set of kmem_caches
for all allocations") introduced a regression into the handling of the
obj_cgroup_charge() return value.  If a non-zero value is returned
(indicating that one of the memory.max limits has been exceeded), the
allocation should fail instead of falling back to non-accounted mode.

To make the code more readable, move memcg_slab_pre_alloc_hook() and
memcg_slab_post_alloc_hook() calling conditions into bodies of these
hooks.

Fixes: 10befea9 ("mm: memcg/slab: use a single set of kmem_caches for all allocations")
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/20201127161828.GD840171@carbon.dhcp.thefacebook.com


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2bf509d9
Loading
Loading
Loading
Loading
+24 −16
Original line number Original line Diff line number Diff line
@@ -274,22 +274,32 @@ static inline size_t obj_full_size(struct kmem_cache *s)
	return s->size + sizeof(struct obj_cgroup *);
	return s->size + sizeof(struct obj_cgroup *);
}
}


/*
 * Pre-allocation memcg hook: charge @objects objects of cache @s to the
 * current obj_cgroup before the slab allocation proceeds.
 *
 * Returns false if the allocation should fail, i.e. the charge was
 * rejected (one of the memory.max limits was exceeded) — the caller must
 * NOT fall back to non-accounted mode.
 *
 * Returns true otherwise.  On a successful charge *objcgp is set to the
 * charged obj_cgroup (a reference is held); when no accounting applies
 * (kmem accounting disabled, allocation neither __GFP_ACCOUNT nor from a
 * SLAB_ACCOUNT cache, or no current obj_cgroup) *objcgp is left untouched.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	/*
	 * The "should this allocation be accounted?" conditions were moved
	 * from the caller (slab_pre_alloc_hook) into this hook for
	 * readability.
	 */
	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		/* Charge failed: drop the reference and fail the allocation. */
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}


static inline void mod_objcg_state(struct obj_cgroup *objcg,
static inline void mod_objcg_state(struct obj_cgroup *objcg,
@@ -315,7 +325,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
	unsigned long off;
	unsigned long off;
	size_t i;
	size_t i;


	if (!objcg)
	if (!memcg_kmem_enabled() || !objcg)
		return;
		return;


	flags &= ~__GFP_ACCOUNT;
	flags &= ~__GFP_ACCOUNT;
@@ -400,11 +410,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
{
{
}
}


/*
 * No-op stub used when kmem cgroup accounting is compiled out
 * (presumably the !CONFIG_MEMCG_KMEM branch — the #ifdef is outside this
 * view).  Never charges anything and never fails: always returns true,
 * leaving *objcgp untouched.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}


static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
@@ -508,9 +518,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
	if (should_failslab(s, flags))
	if (should_failslab(s, flags))
		return NULL;
		return NULL;


	if (memcg_kmem_enabled() &&
	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return NULL;
		*objcgp = memcg_slab_pre_alloc_hook(s, size, flags);


	return s;
	return s;
}
}
@@ -529,7 +538,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
					 s->flags, flags);
					 s->flags, flags);
	}
	}


	if (memcg_kmem_enabled())
	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
}