Commit 94fa31e9 authored by Vlastimil Babka

Merge branch 'slab/for-5.18/cleanups' into slab/for-linus

Non-trivial SLUB code cleanups, notably refactoring of deactivate_slab().
parents acbfab16 6d3a16d0
mm/slub.c: +42 −63
@@ -2348,10 +2348,10 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 			    void *freelist)
 {
-	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
+	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
-	int lock = 0, free_delta = 0;
-	enum slab_modes l = M_NONE, m = M_NONE;
+	int free_delta = 0;
+	enum slab_modes mode = M_NONE;
 	void *nextfree, *freelist_iter, *freelist_tail;
 	int tail = DEACTIVATE_TO_HEAD;
 	unsigned long flags = 0;
@@ -2393,14 +2393,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 	 * Ensure that the slab is unfrozen while the list presence
 	 * reflects the actual number of objects during unfreeze.
 	 *
-	 * We setup the list membership and then perform a cmpxchg
-	 * with the count. If there is a mismatch then the slab
-	 * is not unfrozen but the slab is on the wrong list.
-	 *
-	 * Then we restart the process which may have to remove
-	 * the slab from the list that we just put it on again
-	 * because the number of objects in the slab may have
-	 * changed.
+	 * We first perform cmpxchg holding lock and insert to list
+	 * when it succeed. If there is mismatch then the slab is not
+	 * unfrozen and number of objects in the slab may have changed.
+	 * Then release lock and retry cmpxchg again.
 	 */
 redo:
 
@@ -2419,61 +2415,52 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial >= s->min_partial)
-		m = M_FREE;
-	else if (new.freelist) {
-		m = M_PARTIAL;
-		if (!lock) {
-			lock = 1;
+	if (!new.inuse && n->nr_partial >= s->min_partial) {
+		mode = M_FREE;
+	} else if (new.freelist) {
+		mode = M_PARTIAL;
 		/*
 		 * Taking the spinlock removes the possibility that
 		 * acquire_slab() will see a slab that is frozen
 		 */
 		spin_lock_irqsave(&n->list_lock, flags);
-		}
-	} else {
-		m = M_FULL;
-		if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
-			lock = 1;
+	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
+		mode = M_FULL;
 		/*
 		 * This also ensures that the scanning of full
 		 * slabs from diagnostic functions will not see
 		 * any frozen slabs.
 		 */
 		spin_lock_irqsave(&n->list_lock, flags);
-		}
+	} else {
+		mode = M_FULL_NOLIST;
 	}
 
-	if (l != m) {
-		if (l == M_PARTIAL)
-			remove_partial(n, slab);
-		else if (l == M_FULL)
-			remove_full(s, n, slab);
-
-		if (m == M_PARTIAL)
-			add_partial(n, slab, tail);
-		else if (m == M_FULL)
-			add_full(s, n, slab);
-	}
-
-	l = m;
 	if (!cmpxchg_double_slab(s, slab,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
-				"unfreezing slab"))
+				"unfreezing slab")) {
+		if (mode == M_PARTIAL || mode == M_FULL)
+			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
+	}
 
-	if (lock)
-		spin_unlock_irqrestore(&n->list_lock, flags);
-
-	if (m == M_PARTIAL)
+	if (mode == M_PARTIAL) {
+		add_partial(n, slab, tail);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 		stat(s, tail);
-	else if (m == M_FULL)
-		stat(s, DEACTIVATE_FULL);
-	else if (m == M_FREE) {
+	} else if (mode == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
 		discard_slab(s, slab);
 		stat(s, FREE_SLAB);
+	} else if (mode == M_FULL) {
+		add_full(s, n, slab);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		stat(s, DEACTIVATE_FULL);
+	} else if (mode == M_FULL_NOLIST) {
+		stat(s, DEACTIVATE_FULL);
+	}
 }

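Taken together, the deactivate_slab() hunks above replace the old lock/l/m bookkeeping with a single mode decision: n->list_lock is taken only for M_PARTIAL and M_FULL, the cmpxchg is attempted while holding it, and the slab is added to a list only after the cmpxchg has succeeded; on a conflict the lock is dropped and the whole decision is redone. The stand-alone user-space model below sketches just that retry protocol under simplifying assumptions (a pthread mutex stands in for the spinlock, a plain atomic counter for slab->counters, the lock is taken unconditionally, and all names are hypothetical); it is not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ n->list_lock */
static _Atomic unsigned long counters;				/* ~ slab->counters */
static int nr_partial;						/* ~ n->nr_partial */

/* New protocol: lock, try the cmpxchg, and only then touch the list. */
static void deactivate_model(unsigned long unfrozen)
{
	unsigned long old;

redo:
	old = atomic_load(&counters);		/* re-read the state on every retry */
	pthread_mutex_lock(&list_lock);

	/* cmpxchg_double_slab() stand-in: publish the unfrozen state */
	if (!atomic_compare_exchange_strong(&counters, &old, unfrozen)) {
		/* conflict: release the lock and redo the whole decision */
		pthread_mutex_unlock(&list_lock);
		goto redo;
	}

	/* success: list membership is updated only after the cmpxchg */
	nr_partial++;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	atomic_store(&counters, 1);	/* "frozen" initial state */
	deactivate_model(0);		/* "unfrozen" target state */
	printf("counters=%lu nr_partial=%d\n",
	       (unsigned long)atomic_load(&counters), nr_partial);
	return 0;
}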
@@ -4000,15 +3987,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 	return 1;
 }
 
-static void set_min_partial(struct kmem_cache *s, unsigned long min)
-{
-	if (min < MIN_PARTIAL)
-		min = MIN_PARTIAL;
-	else if (min > MAX_PARTIAL)
-		min = MAX_PARTIAL;
-	s->min_partial = min;
-}
-
 static void set_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -4212,7 +4190,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	 * The larger the object size is, the more slabs we want on the partial
 	 * list to avoid pounding the page allocator excessively.
 	 */
-	set_min_partial(s, ilog2(s->size) / 2);
+	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
 
 	set_cpu_partial(s);
 
@@ -5391,7 +5370,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
 	if (err)
 		return err;
 
-	set_min_partial(s, min);
+	s->min_partial = min;
 	return length;
 }
 SLAB_ATTR(min_partial);
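
The remaining hunks fold set_min_partial() into its two callers: kmem_cache_open() now open-codes the clamp of ilog2(size)/2 to the [MIN_PARTIAL, MAX_PARTIAL] range, while min_partial_store() writes the user-supplied value unclamped, so only the computed default is limited. The small user-space check below reproduces that clamp; the MIN_PARTIAL/MAX_PARTIAL values of 5 and 10 and the ilog2() stand-in are assumptions mirroring mm/slub.c, not part of this diff.

#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed mm/slub.c default */
#define MAX_PARTIAL 10	/* assumed mm/slub.c default */

/* user-space stand-in for the kernel's ilog2() */
static unsigned long ilog2_ul(unsigned long x)
{
	unsigned long log = 0;

	while (x >>= 1)
		log++;
	return log;
}

/* mirrors the two statements added to kmem_cache_open() */
static unsigned long default_min_partial(unsigned long size)
{
	unsigned long min = ilog2_ul(size) / 2;

	if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	return min;
}

int main(void)
{
	unsigned long sizes[] = { 32, 4096, 4UL << 20 };

	for (int i = 0; i < 3; i++)
		printf("object size %8lu -> min_partial %lu\n",
		       sizes[i], default_min_partial(sizes[i]));
	return 0;
}

For a 4 KiB object, for example, ilog2(4096)/2 = 6 falls inside the range and is used as-is; very small objects are raised to the assumed floor of 5 and very large ones capped at 10.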