Commit c31a910c authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/slab: move NUMA-related code to __do_cache_alloc()

To implement slab_alloc_node() independent of NUMA configuration,
move NUMA fallback/alternate allocation code into __do_cache_alloc().

One functional change is that the availability of the node is no longer
checked when allocating from the local node.
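
As an illustration only, here is a minimal userspace sketch of the
node-selection flow __do_cache_alloc() ends up with after this patch.
Every helper below (node_online, cache_alloc_local, cache_alloc_node,
fallback) is a hypothetical stub standing in for the real slab
internals; only the branch structure follows the diff.

/*
 * Minimal sketch of the patched __do_cache_alloc() dispatch.
 * Stubs only; compile with: cc -o sketch sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

#define NUMA_NO_NODE	(-1)

static int numa_mem_id(void) { return 0; }		/* stub: local node id */
static int node_online(int n) { return n == 0; }	/* stub: get_node() */
static void *cache_alloc_local(void) { return NULL; }	/* stub: cpu-cache miss */
static void *cache_alloc_node(int n)			/* stub: may fall back */
{
	printf("allocating on node %d\n", n);
	return malloc(32);
}
static void *fallback(void)				/* stub: fallback_alloc() */
{
	puts("node not bootstrapped, falling back");
	return malloc(32);
}

static void *do_cache_alloc(int nodeid)
{
	void *objp = NULL;
	int slab_node = numa_mem_id();

	if (nodeid == NUMA_NO_NODE) {
		objp = cache_alloc_local();	/* try local caches first */
		nodeid = slab_node;
	} else if (nodeid == slab_node) {
		objp = cache_alloc_local();	/* note: no availability check */
	} else if (!node_online(nodeid)) {
		return fallback();		/* remote node not set up yet */
	}

	if (!objp)				/* local caches were empty */
		objp = cache_alloc_node(nodeid);
	return objp;
}

int main(void)
{
	free(do_cache_alloc(NUMA_NO_NODE));	/* caller passed no node */
	free(do_cache_alloc(0));		/* explicit local node */
	free(do_cache_alloc(2));		/* offline node -> fallback */
	return 0;
}

Note how an explicit nodeid equal to the local node goes straight to
the local caches, matching the removed get_node() check called out
above.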

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 1c23f9e6
+31 −37
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3180,13 +3180,14 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
 
+static void *__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid);
+
 static __always_inline void *
 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size,
 		   unsigned long caller)
 {
 	unsigned long save_flags;
 	void *ptr;
-	int slab_node = numa_mem_id();
 	struct obj_cgroup *objcg = NULL;
 	bool init = false;
 
@@ -3200,30 +3201,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 		goto out_hooks;
 
 	local_irq_save(save_flags);
-
-	if (nodeid == NUMA_NO_NODE)
-		nodeid = slab_node;
-
-	if (unlikely(!get_node(cachep, nodeid))) {
-		/* Node not bootstrapped yet */
-		ptr = fallback_alloc(cachep, flags);
-		goto out;
-	}
-
-	if (nodeid == slab_node) {
-		/*
-		 * Use the locally cached objects if possible.
-		 * However ____cache_alloc does not allow fallback
-		 * to other nodes. It may fail while we still have
-		 * objects on other nodes available.
-		 */
-		ptr = ____cache_alloc(cachep, flags);
-		if (ptr)
-			goto out;
-	}
-	/* ___cache_alloc_node can fall back to other nodes */
-	ptr = ____cache_alloc_node(cachep, flags, nodeid);
-out:
+	ptr = __do_cache_alloc(cachep, flags, nodeid);
 	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	init = slab_want_init_on_alloc(flags, cachep);
@@ -3234,31 +3212,46 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 }
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	void *objp;
+	void *objp = NULL;
+	int slab_node = numa_mem_id();
 
-	if (current->mempolicy || cpuset_do_slab_mem_spread()) {
-		objp = alternate_node_alloc(cache, flags);
-		if (objp)
-			goto out;
+	if (nodeid == NUMA_NO_NODE) {
+		if (current->mempolicy || cpuset_do_slab_mem_spread()) {
+			objp = alternate_node_alloc(cachep, flags);
+			if (objp)
+				goto out;
+		}
+		/*
+		 * Use the locally cached objects if possible.
+		 * However ____cache_alloc does not allow fallback
+		 * to other nodes. It may fail while we still have
+		 * objects on other nodes available.
+		 */
+		objp = ____cache_alloc(cachep, flags);
+		nodeid = slab_node;
+	} else if (nodeid == slab_node) {
+		objp = ____cache_alloc(cachep, flags);
+	} else if (!get_node(cachep, nodeid)) {
+		/* Node not bootstrapped yet */
+		objp = fallback_alloc(cachep, flags);
+		goto out;
 	}
-	objp = ____cache_alloc(cache, flags);
 
 	/*
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
 	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
-
+		objp = ____cache_alloc_node(cachep, flags, nodeid);
 out:
 	return objp;
 }
 #else
 
 static __always_inline void *
-__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused)
 {
 	return ____cache_alloc(cachep, flags);
 }
@@ -3284,7 +3277,7 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 		goto out;
 
 	local_irq_save(save_flags);
-	objp = __do_cache_alloc(cachep, flags);
+	objp = __do_cache_alloc(cachep, flags, NUMA_NO_NODE);
 	local_irq_restore(save_flags);
 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 	prefetchw(objp);
@@ -3521,7 +3514,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
-		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);
+		void *objp = kfence_alloc(s, s->object_size, flags) ?:
+			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
 
 		if (unlikely(!objp))
 			goto error;