Commit 1f04b07d authored by Thomas Gleixner, committed by Vlastimil Babka

slub: Make PREEMPT_RT support less convoluted



The slub code already has a few helpers depending on PREEMPT_RT. Add a few
more and get rid of the CONFIG_PREEMPT_RT conditionals all over the place.

No functional change.
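
For orientation, here is a condensed sketch of the helper block this patch settles on, assembled from the hunks below. The PREEMPT_RT bodies of the pre-existing slub_get_cpu_ptr()/slub_put_cpu_ptr() helpers are only partly visible in the diff context and are filled in here from the surrounding mm/slub.c for readability; only the USE_LOCKLESS_FAST_PATH() lines are new:

  #ifndef CONFIG_PREEMPT_RT
  #define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
  #define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
  #define USE_LOCKLESS_FAST_PATH()	(true)
  #else
  #define slub_get_cpu_ptr(var)		\
  ({					\
  	migrate_disable();		\
  	this_cpu_ptr(var);		\
  })
  #define slub_put_cpu_ptr(var)		\
  do {					\
  	(void)(var);			\
  	migrate_enable();		\
  } while (0)
  #define USE_LOCKLESS_FAST_PATH()	(false)
  #endif

Call sites can then test USE_LOCKLESS_FAST_PATH() directly instead of carrying #ifdef CONFIG_PREEMPT_RT blocks, as in the lockdep assertion in __cmpxchg_double_slab() below.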

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: linux-mm@kvack.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 5875e598
mm/slub.c: +24 −32
@@ -104,9 +104,11 @@
  *   except the stat counters. This is a percpu structure manipulated only by
  *   the local cpu, so the lock protects against being preempted or interrupted
  *   by an irq. Fast path operations rely on lockless operations instead.
- *   On PREEMPT_RT, the local lock does not actually disable irqs (and thus
- *   prevent the lockless operations), so fastpath operations also need to take
- *   the lock and are no longer lockless.
+ *
+ *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption
+ *   which means the lockless fastpath cannot be used as it might interfere with
+ *   an in-progress slow path operations. In this case the local lock is always
+ *   taken but it still utilizes the freelist for the common operations.
  *
  *   lockless fastpaths
  *
@@ -169,6 +171,7 @@
 #ifndef CONFIG_PREEMPT_RT
 #define slub_get_cpu_ptr(var)		get_cpu_ptr(var)
 #define slub_put_cpu_ptr(var)		put_cpu_ptr(var)
+#define USE_LOCKLESS_FAST_PATH()	(true)
 #else
 #define slub_get_cpu_ptr(var)		\
 ({					\
@@ -180,6 +183,7 @@ do { \
 	(void)(var);			\
 	migrate_enable();		\
 } while (0)
+#define USE_LOCKLESS_FAST_PATH()	(false)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -474,7 +478,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
 		void *freelist_new, unsigned long counters_new,
 		const char *n)
 {
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+	if (USE_LOCKLESS_FAST_PATH())
 		lockdep_assert_irqs_disabled();
 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
@@ -3288,14 +3292,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l

 	object = c->freelist;
 	slab = c->slab;
-	/*
-	 * We cannot use the lockless fastpath on PREEMPT_RT because if a
-	 * slowpath has taken the local_lock_irqsave(), it is not protected
-	 * against a fast path operation in an irq handler. So we need to take
-	 * the slow path which uses local_lock. It is still relatively fast if
-	 * there is a suitable cpu freelist.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
+
+	if (!USE_LOCKLESS_FAST_PATH() ||
 	    unlikely(!object || !slab || !node_match(slab, node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 	} else {
@@ -3555,6 +3553,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	void *tail_obj = tail ? : head;
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
+	void **freelist;
 
 redo:
 	/*
@@ -3569,9 +3568,13 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	/* Same with comment on barrier() in slab_alloc_node() */
 	barrier();
 
-	if (likely(slab == c->slab)) {
-#ifndef CONFIG_PREEMPT_RT
-		void **freelist = READ_ONCE(c->freelist);
+	if (unlikely(slab != c->slab)) {
+		__slab_free(s, slab, head, tail_obj, cnt, addr);
+		return;
+	}
+
+	if (USE_LOCKLESS_FAST_PATH()) {
+		freelist = READ_ONCE(c->freelist);
 
 		set_freepointer(s, tail_obj, freelist);
 
@@ -3583,16 +3586,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 			note_cmpxchg_failure("slab_free", s, tid);
 			goto redo;
 		}
-#else /* CONFIG_PREEMPT_RT */
-		/*
-		 * We cannot use the lockless fastpath on PREEMPT_RT because if
-		 * a slowpath has taken the local_lock_irqsave(), it is not
-		 * protected against a fast path operation in an irq handler. So
-		 * we need to take the local_lock. We shouldn't simply defer to
-		 * __slab_free() as that wouldn't use the cpu freelist at all.
-		 */
-		void **freelist;
-
+	} else {
+		/* Update the free list under the local lock */
 		local_lock(&s->cpu_slab->lock);
 		c = this_cpu_ptr(s->cpu_slab);
 		if (unlikely(slab != c->slab)) {
@@ -3607,11 +3602,8 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 		c->tid = next_tid(tid);
 
 		local_unlock(&s->cpu_slab->lock);
-#endif
+	}
 	stat(s, FREE_FASTPATH);
-	} else
-		__slab_free(s, slab, head, tail_obj, cnt, addr);
-
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
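
To make the do_slab_free() hunks above easier to follow, here is an abbreviated sketch of the resulting fast path after this patch. It is a reconstruction for readability, not part of the patch; the elided freelist bookkeeping is unchanged context from mm/slub.c:

	/* Abbreviated reconstruction of do_slab_free() after this patch */
	if (unlikely(slab != c->slab)) {
		/* Object is not on the current cpu slab: take the slow path */
		__slab_free(s, slab, head, tail_obj, cnt, addr);
		return;
	}

	if (USE_LOCKLESS_FAST_PATH()) {
		/* !PREEMPT_RT: lockless update of the cpu freelist */
		freelist = READ_ONCE(c->freelist);
		set_freepointer(s, tail_obj, freelist);
		/* ... cmpxchg of the freelist/tid pair; on failure
		 * note_cmpxchg_failure() and goto redo ... */
	} else {
		/* PREEMPT_RT: update the free list under the local lock */
		local_lock(&s->cpu_slab->lock);
		c = this_cpu_ptr(s->cpu_slab);
		if (unlikely(slab != c->slab)) {
			/* ... cpu slab changed: drop the lock and goto redo ... */
		}
		/* ... link the freed objects into c->freelist ... */
		c->tid = next_tid(tid);
		local_unlock(&s->cpu_slab->lock);
	}
	stat(s, FREE_FASTPATH);

Either way the cpu freelist is still used for the common case; only the means of protecting it (lockless cmpxchg vs. the local lock) differs between the two configurations.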