/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>
/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */
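/*
 * The low two bits of work->flags encode these states: both bits set means
 * the entry has been claimed (and, once its llnode is on a list, is pending),
 * BUSY alone means the callback is currently executing, and 0 means free.
 */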
#define IRQ_WORK_PENDING        1UL
#define IRQ_WORK_BUSY           2UL
#define IRQ_WORK_FLAGS          3UL
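/* Per-CPU lockless list of pending entries; safe to add to from NMI context. */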
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_FLAGS;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}
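/*
 * Architectures that can raise a self-interrupt from this context are
 * expected to override the weak arch_irq_work_raise() above, so that queued
 * work runs promptly instead of waiting for the next timer tick.
 */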
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
        bool empty;

        preempt_disable();

        empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
        /* The list was empty, raise self-interrupt to start processing. */
        if (empty)
                arch_irq_work_raise();

        preempt_enable();
}
/*
 * Enqueue the irq_work @entry, returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
        if (!irq_work_claim(work)) {
                /*
                 * Already enqueued, can't do!
                 */
                return false;
        }

        __irq_work_queue(work);
        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
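/*
 * Example usage (hypothetical caller, not part of this file): initialize an
 * irq_work with its callback once, then queue it from NMI or hardirq
 * context; the callback later runs from irq_work_run(), in hardirq context.
 *
 *      static void my_irq_work_func(struct irq_work *work)
 *      {
 *              // runs from the irq_work interrupt, outside NMI context
 *      }
 *
 *      static struct irq_work my_irq_work;
 *
 *      init_irq_work(&my_irq_work, my_irq_work_func);
 *      ...
 *      irq_work_queue(&my_irq_work);
 */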
bool irq_work_needs_cpu(void)
{
        struct llist_head *this_list;

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}
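/*
 * __irq_work_run() flushes every entry currently queued on this CPU: each
 * work is flipped from pending to busy before its callback runs, so it can
 * be claimed and re-queued while the callback is still executing.
 */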
static void __irq_work_run(void)
{
        struct irq_work *work;
        struct llist_head *this_list;
        struct llist_node *llnode;

        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return;

        BUG_ON(!irqs_disabled());

        llnode = llist_del_all(this_list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);

                llnode = llist_next(llnode);

                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                xchg(&work->flags, IRQ_WORK_BUSY);
                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
        }
}
/*
 * Run the irq_work entries on this cpu. Must be called from hardirq context
 * with local IRQs disabled.
 */
void irq_work_run(void)
{
        BUG_ON(!in_irq());
        __irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);
/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        WARN_ON_ONCE(irqs_disabled());

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
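/*
 * A caller about to free or reuse the memory backing an irq_work (module
 * unload, object teardown) would typically call irq_work_sync() first so
 * that no CPU is still executing the callback. It waits by spinning on the
 * BUSY bit, so it must not be called with IRQs disabled (hence the
 * WARN_ON_ONCE above).
 */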
#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_DYING:
                /* Called from stop_machine */
                if (WARN_ON_ONCE(cpu != smp_processor_id()))
                        break;
                /* Flush the pending list before this CPU goes offline */
                __irq_work_run();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}
static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
        cpu_notify.notifier_call = irq_work_cpu_notify;
        cpu_notify.priority = 0;
        register_cpu_notifier(&cpu_notify);
        return 0;
}
device_initcall(irq_work_init_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */