kernel/irq_work.c
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
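/*
 * raised_list is flushed from IRQ context (the irq_work interrupt or the
 * hotplug flush path), lazy_list normally from the next timer tick; see
 * irq_work_run() and irq_work_tick() below.
 */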

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise, but only trust the
         * flag value returned by cmpxchg().
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
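/*
 * Note the flag lifecycle implied above: a claimed work has both
 * IRQ_WORK_PENDING and IRQ_WORK_BUSY set (IRQ_WORK_CLAIMED is the OR of
 * the two in <linux/irq_work.h>). PENDING is dropped just before the
 * callback runs and BUSY once it has finished, so claiming fails only
 * while the work is still queued, but succeeds again while the callback
 * is running.
 */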

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}
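/*
 * Architectures that can raise a dedicated irq_work interrupt override
 * this weak stub (and advertise it via arch_irq_work_has_interrupt());
 * with the stub, newly raised work is only noticed at the next tick.
 */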

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

        /* Arch remote IPI send/receive backends aren't NMI-safe */
        WARN_ON_ONCE(in_nmi());

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
        irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

        return true;
}
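/*
 * Note that the !SMP fallback above discards the irq_work_queue() return
 * value: irq_work_queue_on() reports true even if the work was already
 * pending locally.
 */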

/* Enqueue the irq_work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();

        /* If the work is "lazy", handle it from the next tick, if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }

        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
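/*
 * Minimal usage sketch (hypothetical caller, not part of this file);
 * init_irq_work() comes from <linux/irq_work.h>:
 *
 *	static void my_hard_work(struct irq_work *work)
 *	{
 *		pr_info("ran from hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_hard_work);
 *	irq_work_queue(&my_work);	// NMI-safe enqueue
 */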

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}
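/*
 * irq_work_needs_cpu() is consulted by the nohz code: the tick cannot be
 * stopped while lazy works are pending, or while raised works are pending
 * on an architecture without a dedicated irq_work interrupt.
 */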

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit; after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 *	hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
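/*
 * irq_work_tick() is the tick-side counterpart of irq_work_run(): it is
 * called from the timer tick path (update_process_times()) to run the
 * lazy_list, and also flushes the raised_list on architectures that lack
 * an irq_work interrupt.
 */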

/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
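/*
 * A typical irq_work_sync() caller (hypothetical, not part of this file)
 * waits for the callback to finish before freeing the containing object:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */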