Commit 5875e598 authored by Vlastimil Babka

mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()



The PREEMPT_RT-specific disabling of irqs in __cmpxchg_double_slab()
(through slab_[un]lock()) is unnecessary: bit_spin_lock() disables
preemption, and that is sufficient on PREEMPT_RT, where no allocation/free
operation is performed in hardirq context and thus nothing can interrupt
the current operation.
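
For reference, bit_spin_lock() takes preemption off before it ever touches the
lock bit and keeps it off while the bit is held; a trimmed sketch of the helper
from include/linux/bit_spinlock.h (its header comment and the sparse
__acquire() annotation elided):

static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/* Preemption goes off first and stays off while the bit is held. */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/* Re-enable preemption while spinning, retake it before retrying. */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
}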

That means we no longer need the slab_[un]lock() wrappers, so delete
them and rename the current __slab_[un]lock() to slab_[un]lock().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
parent 4ef3f5a3
mm/slub.c: +12 −27
@@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
/*
 * Per slab locking using the pagelock
 */
-static __always_inline void __slab_lock(struct slab *slab)
+static __always_inline void slab_lock(struct slab *slab)
{
	struct page *page = slab_page(slab);

@@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
	bit_spin_lock(PG_locked, &page->flags);
}

-static __always_inline void __slab_unlock(struct slab *slab)
+static __always_inline void slab_unlock(struct slab *slab)
{
	struct page *page = slab_page(slab);

@@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
	__bit_spin_unlock(PG_locked, &page->flags);
}

-static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_save(*flags);
-	__slab_lock(slab);
-}
-
-static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
-{
-	__slab_unlock(slab);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_restore(*flags);
-}
-
/*
 * Interrupts must be disabled (for the fallback code to work right), typically
- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
- * so we disable interrupts as part of slab_[un]lock().
+ * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
+ * part of bit_spin_lock(), is sufficient because the policy is not to allow any
+ * allocation/ free operation in hardirq context. Therefore nothing can
+ * interrupt the operation.
 */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
@@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
	} else
#endif
	{
-		/* init to 0 to prevent spurious warnings */
-		unsigned long flags = 0;
-
-		slab_lock(slab, &flags);
+		slab_lock(slab);
		if (slab->freelist == freelist_old &&
					slab->counters == counters_old) {
			slab->freelist = freelist_new;
			slab->counters = counters_new;
-			slab_unlock(slab, &flags);
+			slab_unlock(slab);
			return true;
		}
-		slab_unlock(slab, &flags);
+		slab_unlock(slab);
	}

	cpu_relax();
@@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
		unsigned long flags;

		local_irq_save(flags);
-		__slab_lock(slab);
+		slab_lock(slab);
		if (slab->freelist == freelist_old &&
					slab->counters == counters_old) {
			slab->freelist = freelist_new;
			slab->counters = counters_new;
-			__slab_unlock(slab);
+			slab_unlock(slab);
			local_irq_restore(flags);
			return true;
		}
-		__slab_unlock(slab);
+		slab_unlock(slab);
		local_irq_restore(flags);
	}
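
After this change, the two helpers' fallback paths differ only in who disables
interrupts around the bit lock. A hedged caller sketch follows; the function and
its arguments are hypothetical (not actual SLUB call sites), and the trailing
freelist_new/counters_new/name parameters are assumed to match the full
signatures in mm/slub.c:

/* Hypothetical illustration only; names and context are made up. */
static bool example_swap(struct kmem_cache *s, struct slab *slab,
			 void *old_fl, unsigned long old_cnt,
			 void *new_fl, unsigned long new_cnt)
{
	unsigned long flags;
	bool ok;

	/*
	 * __cmpxchg_double_slab(): on !PREEMPT_RT the caller must already
	 * have irqs off (typically via an _irqsave() lock); on PREEMPT_RT
	 * the preempt_disable() inside bit_spin_lock() is sufficient.
	 */
	local_irq_save(flags);
	ok = __cmpxchg_double_slab(s, slab, old_fl, old_cnt,
				   new_fl, new_cnt, "example");
	local_irq_restore(flags);

	/*
	 * cmpxchg_double_slab(): may be called with irqs enabled; its
	 * fallback disables irqs itself around slab_lock()/slab_unlock().
	 */
	ok = cmpxchg_double_slab(s, slab, old_fl, old_cnt,
				 new_fl, new_cnt, "example");
	return ok;
}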