/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_CWQ_BIT     = 1,    /* data points to cwq */
        WORK_STRUCT_LINKED_BIT  = 2,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 3,    /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC      = 0,
#endif

        /*
         * The last color is "no color", used for work items which don't
         * participate in workqueue flushing.
         */
        WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR           = WORK_NR_COLORS,

        /* special cpu IDs */
        WORK_CPU_UNBOUND        = NR_CPUS,
        WORK_CPU_NONE           = NR_CPUS + 1,
        WORK_CPU_LAST           = WORK_CPU_NONE,

        /*
         * Reserve 7 bits off of cwq pointer w/ debugobjects turned
         * off.  This makes cwqs aligned to 128 bytes which isn't too
         * excessive while allowing 15 workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,

        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
        WORK_STRUCT_NO_CPU      = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
        WORK_BUSY_RUNNING       = 1 << 1,
};

struct work_struct {
        atomic_long_t data;
        struct list_head entry;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT() \
        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}
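
/*
 * Illustrative sketch, not part of this header: a delayed-work handler
 * usually recovers its containing object with to_delayed_work() plus
 * container_of().  The struct and function names below are made up.
 */
struct wq_example_device {                      /* hypothetical */
        int pending_events;
        struct delayed_work poll_dwork;
};

static void wq_example_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct wq_example_device *dev =
                container_of(dwork, struct wq_example_device, poll_dwork);

        dev->pending_events = 0;                /* runs in process context */
}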

struct execute_work {
        struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                              \
        .data = WORK_DATA_STATIC_INIT(),                        \
        .entry = { &(n).entry, &(n).entry },                    \
        .func = (f),                                            \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
        }

#define __DELAYED_WORK_INITIALIZER(n, f) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),              \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }

#define DECLARE_WORK(n, f)                                      \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                              \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                              \
        do {                                                    \
                (_work)->func = (_func);                        \
        } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                      \
        PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif


/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                static struct lock_class_key __key;             \
                                                                \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#endif

#define INIT_WORK(_work, _func)                                 \
        do {                                                    \
                __INIT_WORK((_work), (_func), 0);               \
        } while (0)

#define INIT_WORK_ON_STACK(_work, _func)                        \
        do {                                                    \
                __INIT_WORK((_work), (_func), 1);               \
        } while (0)

#define INIT_DELAYED_WORK(_work, _func)                         \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer(&(_work)->timer);                    \
        } while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)                \
        do {                                                    \
                INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
                init_timer_on_stack(&(_work)->timer);           \
        } while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer_deferrable(&(_work)->timer);         \
        } while (0)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
        test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The delayed work item in question
 */
#define delayed_work_pending(w) \
        work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

enum {
        WQ_NON_REENTRANT        = 1 << 0, /* guarantee non-reentrance */
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
        WQ_FREEZEABLE           = 1 << 2, /* freeze during suspend */
        WQ_RESCUER              = 1 << 3, /* has a rescue worker */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */

        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
        max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;

extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
                      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)                \
({                                                              \
        static struct lock_class_key __key;                     \
        const char *__lock_name;                                \
                                                                \
        if (__builtin_constant_p(name))                         \
                __lock_name = (name);                           \
        else                                                    \
                __lock_name = #name;                            \
                                                                \
        __alloc_workqueue_key((name), (flags), (max_active),    \
                              &__key, __lock_name);             \
})
#else
#define alloc_workqueue(name, flags, max_active)                \
        __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

#define create_workqueue(name)                                  \
        alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name)                       \
        alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name)                     \
        alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
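
/*
 * Illustrative sketch, not part of this header: allocating a dedicated
 * workqueue with explicit WQ_* flags from the enum above.  The identifier
 * example_wq and the chosen flags are assumptions made for illustration.
 */
static struct workqueue_struct *example_wq;     /* hypothetical */

static int example_setup_wq(void)
{
        /* unbound: not tied to any CPU; at most one work item in flight */
        example_wq = alloc_workqueue("example", WQ_UNBOUND, 1);
        if (!example_wq)
                return -ENOMEM;                 /* assumes <linux/errno.h> */
        /* pair with destroy_workqueue(example_wq), declared below */
        return 0;
}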

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);
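
/*
 * Illustrative sketch, not part of this header: initializing a work item at
 * runtime with INIT_WORK() and queueing it on a driver-private workqueue.
 * All identifiers (wq_example_ctx, wq_example_*) are hypothetical.
 */
struct wq_example_ctx {                         /* hypothetical */
        struct workqueue_struct *wq;
        struct work_struct io_work;
};

static void wq_example_io_fn(struct work_struct *work)
{
        /* deferred I/O would go here; runs in worker-thread (process) context */
}

static int wq_example_start(struct wq_example_ctx *ctx)
{
        ctx->wq = create_singlethread_workqueue("wq_example");
        if (!ctx->wq)
                return -ENOMEM;
        INIT_WORK(&ctx->io_work, wq_example_io_fn);
        queue_work(ctx->wq, &ctx->io_work);     /* returns 0 if already pending */
        return 0;
}

static void wq_example_stop(struct wq_example_ctx *ctx)
{
        flush_workqueue(ctx->wq);               /* wait for queued work to finish */
        destroy_workqueue(ctx->wq);
}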

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
                                        unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
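
/*
 * Illustrative sketch, not part of this header: a statically declared work
 * item pushed onto the system workqueue, e.g. from an interrupt handler.
 * example_event_work/example_event_fn/example_on_interrupt are made up.
 */
static void example_event_fn(struct work_struct *unused)
{
        /* handle the event in sleepable process context */
}
static DECLARE_WORK(example_event_work, example_event_fn);

static void example_on_interrupt(void)
{
        schedule_work(&example_event_work);     /* cheap and safe from atomic context */
}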
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);
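
/*
 * Illustrative sketch, not part of this header: before freeing an object that
 * embeds a work_struct, make sure the work item can no longer run.  This
 * reuses the hypothetical wq_example_ctx from the sketch above.
 */
static void wq_example_release(struct wq_example_ctx *ctx)
{
        cancel_work_sync(&ctx->io_work);        /* cancels and waits if running */
        kfree(ctx);                             /* assumes <linux/slab.h> */
}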

extern void workqueue_set_max_active(struct workqueue_struct *wq,
                                     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer_sync(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means
 * that if it returns 0, the timer function may be running and the queueing
 * is in progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);
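
/*
 * Illustrative sketch, not part of this header: a self-rearming delayed work
 * item and the safe way to stop it.  Because the handler re-queues itself,
 * plain cancel_delayed_work() is not sufficient; cancel_delayed_work_sync()
 * waits for a running instance and keeps it from re-arming.  All names below
 * are hypothetical.
 */
static void example_poll_fn(struct work_struct *work)
{
        /* sample some state here, then re-arm to run again in ~1s (HZ jiffies) */
        schedule_delayed_work(to_delayed_work(work), HZ);
}
static DECLARE_DELAYED_WORK(example_poll_dwork, example_poll_fn);

static void example_poll_stop(void)
{
        cancel_delayed_work_sync(&example_poll_dwork); /* no new runs after this */
}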

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                        struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
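
/*
 * Illustrative sketch, not part of this header: work_on_cpu() runs a function
 * in process context on a specific CPU and waits for its return value.  The
 * function and caller below are made up.
 */
static long example_read_state(void *arg)
{
        /* executes in a workqueue worker bound to the requested CPU */
        return 0;
}

static long example_query_cpu0(void)
{
        return work_on_cpu(0, example_read_state, NULL);
}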

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_LOCKDEP
int in_workqueue_context(struct workqueue_struct *wq);
#endif

#endif /* _LINUX_WORKQUEUE_H */