Commit 7a9f50a0 authored by Peter Zijlstra

irq_work: Cleanup



Get rid of the __call_single_node union and clean up the API a little
to avoid external code relying on the structure layout as much.
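
As an illustration (not part of the patch): with the new helpers, external
code initialises an irq_work and checks its state as below, instead of
touching work->flags or work->llnode directly. The callback and variable
names are made up for the example.

#include <linux/irq_work.h>

/* Hypothetical callback, for illustration only. */
static void example_fn(struct irq_work *work)
{
	/* runs from hard interrupt context once the work is raised */
}

/* Static initialisation via the new initialiser macros ... */
static DEFINE_IRQ_WORK(example_work, example_fn);

static void example_queue(void)
{
	/* ... and state checks via the new accessors, instead of
	 * open-coding atomic_read(&work->flags) & IRQ_WORK_BUSY */
	if (!irq_work_is_busy(&example_work))
		irq_work_queue(&example_work);
}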

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
parent 23e6082a
drivers/gpu/drm/i915/i915_request.c  +2 −2
@@ -197,7 +197,7 @@ __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 
 	llist_for_each_entry_safe(cb, cn,
 				  llist_del_all(&rq->execute_cb),
-				  work.llnode)
+				  work.node.llist)
 		fn(&cb->work);
 }
 
@@ -460,7 +460,7 @@ __await_execution(struct i915_request *rq,
 	 * callback first, then checking the ACTIVE bit, we serialise with
 	 * the completed/retired request.
 	 */
-	if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
 		if (i915_request_is_active(signal) ||
 		    __request_in_flight(signal))
 			__notify_execute_cb_imm(signal);
include/linux/irq_work.h  +21 −12
@@ -14,28 +14,37 @@
  */
 
 struct irq_work {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llnode;
-			atomic_t flags;
-		};
-	};
+	struct __call_single_node node;
 	void (*func)(struct irq_work *);
 };
 
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.node = { .u_flags = (_flags), },			\
+	.func = (_func),					\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f)				\
+	struct irq_work name = IRQ_WORK_INIT(_f)
+
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	atomic_set(&work->flags, 0);
-	work->func = func;
+	*work = IRQ_WORK_INIT(func);
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
-		.flags = ATOMIC_INIT(0),			\
-		.func  = (_f)					\
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
 }
 
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
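
For reference (not part of this diff): the members irq_work now relies on
come from the shared call-single node type in include/linux/smp_types.h,
which looks roughly like this around this series.

/* Sketch of the relevant members; see include/linux/smp_types.h. */
struct __call_single_node {
	struct llist_node	llist;		/* was irq_work::llnode */
	union {
		unsigned int	u_flags;	/* plain access (initialisers) */
		atomic_t	a_flags;	/* atomic access (claim/run) */
	};
};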
include/linux/irqflags.h  +2 −2
@@ -109,12 +109,12 @@ do { \
 
 # define lockdep_irq_work_enter(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 1;			\
 	  } while (0)
 # define lockdep_irq_work_exit(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 0;			\
 	  } while (0)

kernel/bpf/stackmap.c  +1 −1
@@ -298,7 +298,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	if (irqs_disabled()) {
 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 			work = this_cpu_ptr(&up_read_work);
-			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+			if (irq_work_is_busy(&work->irq_work)) {
 				/* cannot queue more up_read, fallback */
 				irq_work_busy = true;
 			}
kernel/irq_work.c  +9 −9
@@ -31,7 +31,7 @@ static bool irq_work_claim(struct irq_work *work)
 {
 	int oflags;
 
-	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
 	/*
 	 * If the work is already pending, no need to raise the IPI.
 	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
 	/* If the work is "lazy", handle it from next tick if any */
-	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
 }
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		__smp_call_single_queue(cpu, &work->llnode);
+		__smp_call_single_queue(cpu, &work->node.llist);
 	} else {
 		__irq_work_queue_local(work);
 	}
@@ -142,7 +142,7 @@ void irq_work_single(void *arg)
 	 * to claim that work don't rely on us to handle their data
 	 * while we are in the middle of the func.
 	 */
-	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->node.a_flags);
 
 	lockdep_irq_work_enter(work);
 	work->func(work);
@@ -152,7 +152,7 @@ void irq_work_single(void *arg)
 	 * no-one else claimed it meanwhile.
 	 */
 	flags &= ~IRQ_WORK_PENDING;
-	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 }
 
 static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +166,7 @@ static void irq_work_run_list(struct llist_head *list)
 		return;
 
 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
 		irq_work_single(work);
 }
 
@@ -198,7 +198,7 @@ void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
 
-	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+	while (irq_work_is_busy(work))
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);