Commit fa417ab7 authored by Vlastimil Babka's avatar Vlastimil Babka
Browse files

mm, slub: move disabling irqs closer to get_partial() in ___slab_alloc()



Continue reducing the irq disabled scope. Check for per-cpu partial slabs
first with irqs enabled and then recheck with irqs disabled before grabbing
the slab page. Mostly preparatory for the following patches.

Signed-off-by: default avatarVlastimil Babka <vbabka@suse.cz>
parent 0b303fb4
Loading
Loading
Loading
Loading
+25 −9
Original line number Diff line number Diff line
@@ -2706,11 +2706,6 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		if (unlikely(node != NUMA_NO_NODE &&
			     !node_isset(node, slab_nodes)))
			node = NUMA_NO_NODE;
		local_irq_save(flags);
		if (unlikely(c->page)) {
			local_irq_restore(flags);
			goto reread_page;
		}
		goto new_slab;
	}
redo:
@@ -2751,6 +2746,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

	if (!freelist) {
		c->page = NULL;
		local_irq_restore(flags);
		stat(s, DEACTIVATE_BYPASS);
		goto new_slab;
	}
@@ -2780,12 +2776,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		goto reread_page;
	}
	deactivate_slab(s, page, c->freelist, c);
	local_irq_restore(flags);

new_slab:

	lockdep_assert_irqs_disabled();

	if (slub_percpu_partial(c)) {
		local_irq_save(flags);
		if (unlikely(c->page)) {
			local_irq_restore(flags);
			goto reread_page;
		}
		if (unlikely(!slub_percpu_partial(c)))
			goto new_objects; /* stolen by an IRQ handler */

		page = c->page = slub_percpu_partial(c);
		slub_set_percpu_partial(c, page);
		local_irq_restore(flags);
@@ -2793,6 +2796,16 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		goto redo;
	}

	local_irq_save(flags);
	if (unlikely(c->page)) {
		local_irq_restore(flags);
		goto reread_page;
	}

new_objects:

	lockdep_assert_irqs_disabled();

	freelist = get_partial(s, gfpflags, node, &page);
	if (freelist) {
		c->page = page;
@@ -2825,16 +2838,19 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
check_new_page:

	if (kmem_cache_debug(s)) {
		if (!alloc_debug_processing(s, page, freelist, addr))
		if (!alloc_debug_processing(s, page, freelist, addr)) {
			/* Slab failed checks. Next slab needed */
			c->page = NULL;
			local_irq_restore(flags);
			goto new_slab;
		else
		} else {
			/*
			 * For debug case, we don't load freelist so that all
			 * allocations go through alloc_debug_processing()
			 */
			goto return_single;
		}
	}

	if (unlikely(!pfmemalloc_match(page, gfpflags)))
		/*