workqueue: reimplement workqueue flushing using color coded works
1/*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/signal.h>
24#include <linux/completion.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h>
27#include <linux/cpu.h>
28#include <linux/notifier.h>
29#include <linux/kthread.h>
1fa44eca 30#include <linux/hardirq.h>
46934023 31#include <linux/mempolicy.h>
341a5958 32#include <linux/freezer.h>
d5abe669
PZ
33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h>
4e6045f1 35#include <linux/lockdep.h>
1da177e4 36
37/*
38 * Structure fields follow one of the following exclusion rules.
39 *
40 * I: Set during initialization and read-only afterwards.
41 *
42 * L: cwq->lock protected.  Access with cwq->lock held.
43 *
44 * F: wq->flush_mutex protected.
45 *
46 * W: workqueue_lock protected.
47 */
48
49/*
50 * The per-CPU workqueue (if single thread, we always use the first
51 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS bits of
52 * work_struct->data are used for flags, so cwqs need to be
53 * aligned to at least 1 << WORK_STRUCT_FLAG_BITS bytes.
54 */
55struct cpu_workqueue_struct {
56
57 spinlock_t lock;
58
1da177e4
LT
59 struct list_head worklist;
60 wait_queue_head_t more_work;
3af24433 61 struct work_struct *current_work;
1537663f 62 unsigned int cpu;
1da177e4 63
4690c4ab 64 struct workqueue_struct *wq; /* I: the owning workqueue */
73f53c4a
TH
65 int work_color; /* L: current color */
66 int flush_color; /* L: flushing color */
67 int nr_in_flight[WORK_NR_COLORS];
68 /* L: nr of in_flight works */
4690c4ab 69 struct task_struct *thread;
0f900049 70};
1da177e4 71
73f53c4a
TH
72/*
73 * Structure used to wait for workqueue flush.
74 */
75struct wq_flusher {
76 struct list_head list; /* F: list of flushers */
77 int flush_color; /* F: flush color waiting for */
78 struct completion done; /* flush completion */
79};
80
1da177e4
LT
81/*
82 * The externally visible workqueue abstraction is an array of
83 * per-CPU workqueues:
84 */
85struct workqueue_struct {
97e37d7b 86 unsigned int flags; /* I: WQ_* flags */
4690c4ab
TH
87 struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */
88 struct list_head list; /* W: list of all workqueues */
73f53c4a
TH
89
90 struct mutex flush_mutex; /* protects wq flushing */
91 int work_color; /* F: current work color */
92 int flush_color; /* F: current flush color */
93 atomic_t nr_cwqs_to_flush; /* flush in progress */
94 struct wq_flusher *first_flusher; /* F: first flusher */
95 struct list_head flusher_queue; /* F: flush waiters */
96 struct list_head flusher_overflow; /* F: flush overflow list */
97
4690c4ab 98 const char *name; /* I: workqueue name */
4e6045f1 99#ifdef CONFIG_LOCKDEP
4690c4ab 100 struct lockdep_map lockdep_map;
4e6045f1 101#endif
1da177e4
LT
102};
103
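/*
 * Illustrative sketch (annotation added here, not in the original
 * source): how the flushing fields above interact.  "w1" and "w2" are
 * arbitrary, already-initialized work items used only for this example.
 *
 *	// assume wq->work_color == wq->flush_color == 0 initially
 *	queue_work(wq, &w1);		// w1 is tagged with color 0,
 *					// cwq->nr_in_flight[0]++
 *	flush_workqueue(wq);		// claims color 0, advances
 *					// wq->work_color to 1, then sleeps
 *
 *	// meanwhile, from another context:
 *	queue_work(wq, &w2);		// w2 is tagged with color 1 and
 *					// does not hold up the color-0 flush
 *
 * When the last color-0 work finishes, cwq_dec_nr_in_flight() drops the
 * last reference in nr_cwqs_to_flush and completes first_flusher->done.
 */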
dc186ad7
TG
104#ifdef CONFIG_DEBUG_OBJECTS_WORK
105
106static struct debug_obj_descr work_debug_descr;
107
108/*
109 * fixup_init is called when:
110 * - an active object is initialized
111 */
112static int work_fixup_init(void *addr, enum debug_obj_state state)
113{
114 struct work_struct *work = addr;
115
116 switch (state) {
117 case ODEBUG_STATE_ACTIVE:
118 cancel_work_sync(work);
119 debug_object_init(work, &work_debug_descr);
120 return 1;
121 default:
122 return 0;
123 }
124}
125
126/*
127 * fixup_activate is called when:
128 * - an active object is activated
129 * - an unknown object is activated (might be a statically initialized object)
130 */
131static int work_fixup_activate(void *addr, enum debug_obj_state state)
132{
133 struct work_struct *work = addr;
134
135 switch (state) {
136
137 case ODEBUG_STATE_NOTAVAILABLE:
138 /*
139 * This is not really a fixup. The work struct was
140 * statically initialized. We just make sure that it
141 * is tracked in the object tracker.
142 */
22df02bb 143 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
dc186ad7
TG
144 debug_object_init(work, &work_debug_descr);
145 debug_object_activate(work, &work_debug_descr);
146 return 0;
147 }
148 WARN_ON_ONCE(1);
149 return 0;
150
151 case ODEBUG_STATE_ACTIVE:
152 WARN_ON(1);
153
154 default:
155 return 0;
156 }
157}
158
159/*
160 * fixup_free is called when:
161 * - an active object is freed
162 */
163static int work_fixup_free(void *addr, enum debug_obj_state state)
164{
165 struct work_struct *work = addr;
166
167 switch (state) {
168 case ODEBUG_STATE_ACTIVE:
169 cancel_work_sync(work);
170 debug_object_free(work, &work_debug_descr);
171 return 1;
172 default:
173 return 0;
174 }
175}
176
177static struct debug_obj_descr work_debug_descr = {
178 .name = "work_struct",
179 .fixup_init = work_fixup_init,
180 .fixup_activate = work_fixup_activate,
181 .fixup_free = work_fixup_free,
182};
183
184static inline void debug_work_activate(struct work_struct *work)
185{
186 debug_object_activate(work, &work_debug_descr);
187}
188
189static inline void debug_work_deactivate(struct work_struct *work)
190{
191 debug_object_deactivate(work, &work_debug_descr);
192}
193
194void __init_work(struct work_struct *work, int onstack)
195{
196 if (onstack)
197 debug_object_init_on_stack(work, &work_debug_descr);
198 else
199 debug_object_init(work, &work_debug_descr);
200}
201EXPORT_SYMBOL_GPL(__init_work);
202
203void destroy_work_on_stack(struct work_struct *work)
204{
205 debug_object_free(work, &work_debug_descr);
206}
207EXPORT_SYMBOL_GPL(destroy_work_on_stack);
208
209#else
210static inline void debug_work_activate(struct work_struct *work) { }
211static inline void debug_work_deactivate(struct work_struct *work) { }
212#endif
213
95402b38
GS
214/* Serializes the accesses to the list of workqueues. */
215static DEFINE_SPINLOCK(workqueue_lock);
1da177e4
LT
216static LIST_HEAD(workqueues);
217
3af24433 218static int singlethread_cpu __read_mostly;
1da177e4 219
1537663f
TH
220static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
221 struct workqueue_struct *wq)
b1f4ec17 222{
1537663f 223 return per_cpu_ptr(wq->cpu_wq, cpu);
b1f4ec17
ON
224}
225
1537663f
TH
226static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
227 struct workqueue_struct *wq)
a848e3b6 228{
1537663f 229 if (unlikely(wq->flags & WQ_SINGLE_THREAD))
a848e3b6 230 cpu = singlethread_cpu;
1537663f 231 return get_cwq(cpu, wq);
a848e3b6
ON
232}
233
73f53c4a
TH
234static unsigned int work_color_to_flags(int color)
235{
236 return color << WORK_STRUCT_COLOR_SHIFT;
237}
238
239static int get_work_color(struct work_struct *work)
240{
241 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
242 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
243}
244
245static int work_next_color(int color)
246{
247 return (color + 1) % WORK_NR_COLORS;
248}
249
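/*
 * Example of the color helpers above (annotation added here; the bit
 * positions come from WORK_STRUCT_* in linux/workqueue.h, the concrete
 * color value is for illustration only):
 *
 *	flags = work_color_to_flags(5);
 *	// flags == 5 << WORK_STRUCT_COLOR_SHIFT, OR'd into work->data
 *	// by insert_work() alongside WORK_STRUCT_PENDING
 *
 *	color = get_work_color(work);	// recovers 5 from work->data
 *	next  = work_next_color(5);	// 6, wrapping at WORK_NR_COLORS
 *
 * WORK_NO_COLOR is reserved for barrier works, which must not take part
 * in the in-flight accounting.
 */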
4594bf15
DH
250/*
251 * Set the workqueue on which a work item is to be run
252 * - Must *only* be called if the pending flag is set
253 */
ed7c0fee 254static inline void set_wq_data(struct work_struct *work,
4690c4ab
TH
255 struct cpu_workqueue_struct *cwq,
256 unsigned long extra_flags)
365970a1 257{
4594bf15 258 BUG_ON(!work_pending(work));
365970a1 259
4690c4ab 260 atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
22df02bb 261 WORK_STRUCT_PENDING | extra_flags);
365970a1
DH
262}
263
4d707b9f
ON
264/*
265 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
266 */
267static inline void clear_wq_data(struct work_struct *work)
268{
4690c4ab 269 atomic_long_set(&work->data, work_static(work));
4d707b9f
ON
270}
271
64166699 272static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
365970a1 273{
64166699
TH
274 return (void *)(atomic_long_read(&work->data) &
275 WORK_STRUCT_WQ_DATA_MASK);
365970a1
DH
276}
277
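/*
 * Sketch of the work->data layout assumed by the helpers above
 * (annotation added here; the masks are defined in linux/workqueue.h):
 *
 *	[ cwq pointer, aligned to 1 << WORK_STRUCT_FLAG_BITS ]
 *	[ flag bits: PENDING, STATIC, color, ...             ]
 *
 *	set_wq_data(work, cwq, extra);	// data = cwq | flags
 *	cwq = get_wq_data(work);	// data & WORK_STRUCT_WQ_DATA_MASK
 *
 * This is why alloc_cwqs() below forces cwq alignment: the low
 * WORK_STRUCT_FLAG_BITS bits of the pointer must be free for flags.
 */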
4690c4ab
TH
278/**
279 * insert_work - insert a work into cwq
280 * @cwq: cwq @work belongs to
281 * @work: work to insert
282 * @head: insertion point
283 * @extra_flags: extra WORK_STRUCT_* flags to set
284 *
285 * Insert @work into @cwq after @head.
286 *
287 * CONTEXT:
288 * spin_lock_irq(cwq->lock).
289 */
b89deed3 290static void insert_work(struct cpu_workqueue_struct *cwq,
4690c4ab
TH
291 struct work_struct *work, struct list_head *head,
292 unsigned int extra_flags)
b89deed3 293{
4690c4ab
TH
294 /* we own @work, set data and link */
295 set_wq_data(work, cwq, extra_flags);
296
6e84d644
ON
297 /*
298 * Ensure that we get the right work->data if we see the
299 * result of list_add() below, see try_to_grab_pending().
300 */
301 smp_wmb();
4690c4ab 302
1a4d9b0a 303 list_add_tail(&work->entry, head);
b89deed3
ON
304 wake_up(&cwq->more_work);
305}
306
4690c4ab 307static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1da177e4
LT
308 struct work_struct *work)
309{
1537663f 310 struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
1da177e4
LT
311 unsigned long flags;
312
dc186ad7 313 debug_work_activate(work);
1da177e4 314 spin_lock_irqsave(&cwq->lock, flags);
4690c4ab 315 BUG_ON(!list_empty(&work->entry));
73f53c4a
TH
316 cwq->nr_in_flight[cwq->work_color]++;
317 insert_work(cwq, work, &cwq->worklist,
318 work_color_to_flags(cwq->work_color));
1da177e4
LT
319 spin_unlock_irqrestore(&cwq->lock, flags);
320}
321
0fcb78c2
REB
322/**
323 * queue_work - queue work on a workqueue
324 * @wq: workqueue to use
325 * @work: work to queue
326 *
057647fc 327 * Returns 0 if @work was already on a queue, non-zero otherwise.
1da177e4 328 *
00dfcaf7
ON
329 * We queue the work to the CPU on which it was submitted, but if the CPU dies
330 * it can be processed by another CPU.
1da177e4 331 */
7ad5b3a5 332int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1da177e4 333{
ef1ca236
ON
334 int ret;
335
336 ret = queue_work_on(get_cpu(), wq, work);
337 put_cpu();
338
1da177e4
LT
339 return ret;
340}
ae90dd5d 341EXPORT_SYMBOL_GPL(queue_work);
1da177e4 342
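/*
 * Typical usage of queue_work() (example annotation, not from the
 * original file; "my_wq", "my_work" and my_work_fn() are made-up names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		// runs in process context, on the CPU that queued it
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	struct workqueue_struct *my_wq = create_workqueue("my_wq");
 *	queue_work(my_wq, &my_work);	// returns 0 if already pending
 *	flush_workqueue(my_wq);		// wait for it to finish
 *	destroy_workqueue(my_wq);
 */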
c1a220e7
ZR
343/**
344 * queue_work_on - queue work on specific cpu
345 * @cpu: CPU number to execute work on
346 * @wq: workqueue to use
347 * @work: work to queue
348 *
349 * Returns 0 if @work was already on a queue, non-zero otherwise.
350 *
351 * We queue the work to a specific CPU, the caller must ensure it
352 * can't go away.
353 */
354int
355queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
356{
357 int ret = 0;
358
22df02bb 359 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
4690c4ab 360 __queue_work(cpu, wq, work);
c1a220e7
ZR
361 ret = 1;
362 }
363 return ret;
364}
365EXPORT_SYMBOL_GPL(queue_work_on);
366
6d141c3f 367static void delayed_work_timer_fn(unsigned long __data)
1da177e4 368{
52bad64d 369 struct delayed_work *dwork = (struct delayed_work *)__data;
ed7c0fee 370 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
1da177e4 371
4690c4ab 372 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1da177e4
LT
373}
374
0fcb78c2
REB
375/**
376 * queue_delayed_work - queue work on a workqueue after delay
377 * @wq: workqueue to use
af9997e4 378 * @dwork: delayable work to queue
0fcb78c2
REB
379 * @delay: number of jiffies to wait before queueing
380 *
057647fc 381 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
0fcb78c2 382 */
7ad5b3a5 383int queue_delayed_work(struct workqueue_struct *wq,
52bad64d 384 struct delayed_work *dwork, unsigned long delay)
1da177e4 385{
52bad64d 386 if (delay == 0)
63bc0362 387 return queue_work(wq, &dwork->work);
1da177e4 388
63bc0362 389 return queue_delayed_work_on(-1, wq, dwork, delay);
1da177e4 390}
ae90dd5d 391EXPORT_SYMBOL_GPL(queue_delayed_work);
1da177e4 392
0fcb78c2
REB
393/**
394 * queue_delayed_work_on - queue work on specific CPU after delay
395 * @cpu: CPU number to execute work on
396 * @wq: workqueue to use
af9997e4 397 * @dwork: work to queue
0fcb78c2
REB
398 * @delay: number of jiffies to wait before queueing
399 *
057647fc 400 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
0fcb78c2 401 */
7a6bc1cd 402int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
52bad64d 403 struct delayed_work *dwork, unsigned long delay)
7a6bc1cd
VP
404{
405 int ret = 0;
52bad64d
DH
406 struct timer_list *timer = &dwork->timer;
407 struct work_struct *work = &dwork->work;
7a6bc1cd 408
22df02bb 409 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
7a6bc1cd
VP
410 BUG_ON(timer_pending(timer));
411 BUG_ON(!list_empty(&work->entry));
412
8a3e77cc
AL
413 timer_stats_timer_set_start_info(&dwork->timer);
414
ed7c0fee 415 /* This stores cwq for the moment, for the timer_fn */
1537663f 416 set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
7a6bc1cd 417 timer->expires = jiffies + delay;
52bad64d 418 timer->data = (unsigned long)dwork;
7a6bc1cd 419 timer->function = delayed_work_timer_fn;
63bc0362
ON
420
421 if (unlikely(cpu >= 0))
422 add_timer_on(timer, cpu);
423 else
424 add_timer(timer);
7a6bc1cd
VP
425 ret = 1;
426 }
427 return ret;
428}
ae90dd5d 429EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1da177e4 430
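/*
 * Typical usage of the delayed-work variants above (example annotation;
 * "my_dwork" and my_timeout_fn() are made-up names):
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		// ...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(wq, &my_dwork, HZ);	// run ~1 second from now
 *	...
 *	cancel_delayed_work_sync(&my_dwork);	// kill timer and work
 */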
73f53c4a
TH
431/**
432 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
433 * @cwq: cwq of interest
434 * @color: color of work which left the queue
435 *
436 * A work either has completed or has been removed from the pending queue;
437 * decrement nr_in_flight of its cwq and handle workqueue flushing.
438 *
439 * CONTEXT:
440 * spin_lock_irq(cwq->lock).
441 */
442static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
443{
444 /* ignore uncolored works */
445 if (color == WORK_NO_COLOR)
446 return;
447
448 cwq->nr_in_flight[color]--;
449
450 /* is flush in progress and are we at the flushing tip? */
451 if (likely(cwq->flush_color != color))
452 return;
453
454 /* are there still in-flight works? */
455 if (cwq->nr_in_flight[color])
456 return;
457
458 /* this cwq is done, clear flush_color */
459 cwq->flush_color = -1;
460
461 /*
462 * If this was the last cwq, wake up the first flusher. It
463 * will handle the rest.
464 */
465 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
466 complete(&cwq->wq->first_flusher->done);
467}
468
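/*
 * Worked example for the function above (annotation added here; the
 * counts are illustrative):
 *
 *	cwq->flush_color == 2, cwq->nr_in_flight[2] == 3
 *	// three color-2 works finish (or are cancelled) one by one:
 *	cwq_dec_nr_in_flight(cwq, 2);	// 2 left, nothing else happens
 *	cwq_dec_nr_in_flight(cwq, 2);	// 1 left
 *	cwq_dec_nr_in_flight(cwq, 2);	// 0 left: flush_color reset to -1,
 *					// nr_cwqs_to_flush dropped; if this
 *					// was the last busy cwq, the first
 *					// flusher is woken
 */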
a62428c0
TH
469/**
470 * process_one_work - process single work
471 * @cwq: cwq to process work for
472 * @work: work to process
473 *
474 * Process @work.  This function contains all the logic necessary to
475 * process a single work item, including synchronization against and
476 * interaction with other workers on the same cpu, queueing and
477 * flushing.  As long as the context requirement is met, any worker can
478 * call this function to process a work item.
479 *
480 * CONTEXT:
481 * spin_lock_irq(cwq->lock) which is released and regrabbed.
482 */
483static void process_one_work(struct cpu_workqueue_struct *cwq,
484 struct work_struct *work)
485{
486 work_func_t f = work->func;
73f53c4a 487 int work_color;
a62428c0
TH
488#ifdef CONFIG_LOCKDEP
489 /*
490 * It is permissible to free the struct work_struct from
491 * inside the function that is called from it, this we need to
492 * take into account for lockdep too. To avoid bogus "held
493 * lock freed" warnings as well as problems when looking into
494 * work->lockdep_map, make a copy and use that here.
495 */
496 struct lockdep_map lockdep_map = work->lockdep_map;
497#endif
498 /* claim and process */
a62428c0
TH
499 debug_work_deactivate(work);
500 cwq->current_work = work;
73f53c4a 501 work_color = get_work_color(work);
a62428c0
TH
502 list_del_init(&work->entry);
503
504 spin_unlock_irq(&cwq->lock);
505
506 BUG_ON(get_wq_data(work) != cwq);
507 work_clear_pending(work);
508 lock_map_acquire(&cwq->wq->lockdep_map);
509 lock_map_acquire(&lockdep_map);
510 f(work);
511 lock_map_release(&lockdep_map);
512 lock_map_release(&cwq->wq->lockdep_map);
513
514 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
515 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
516 "%s/0x%08x/%d\n",
517 current->comm, preempt_count(), task_pid_nr(current));
518 printk(KERN_ERR " last function: ");
519 print_symbol("%s\n", (unsigned long)f);
520 debug_show_held_locks(current);
521 dump_stack();
522 }
523
524 spin_lock_irq(&cwq->lock);
525
526 /* we're done with it, release */
527 cwq->current_work = NULL;
73f53c4a 528 cwq_dec_nr_in_flight(cwq, work_color);
a62428c0
TH
529}
530
858119e1 531static void run_workqueue(struct cpu_workqueue_struct *cwq)
1da177e4 532{
f293ea92 533 spin_lock_irq(&cwq->lock);
1da177e4
LT
534 while (!list_empty(&cwq->worklist)) {
535 struct work_struct *work = list_entry(cwq->worklist.next,
536 struct work_struct, entry);
a62428c0 537 process_one_work(cwq, work);
1da177e4 538 }
f293ea92 539 spin_unlock_irq(&cwq->lock);
1da177e4
LT
540}
541
4690c4ab
TH
542/**
543 * worker_thread - the worker thread function
544 * @__cwq: cwq to serve
545 *
546 * The cwq worker thread function.
547 */
1da177e4
LT
548static int worker_thread(void *__cwq)
549{
550 struct cpu_workqueue_struct *cwq = __cwq;
3af24433 551 DEFINE_WAIT(wait);
1da177e4 552
97e37d7b 553 if (cwq->wq->flags & WQ_FREEZEABLE)
83144186 554 set_freezable();
1da177e4 555
3af24433 556 for (;;) {
3af24433 557 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
14441960
ON
558 if (!freezing(current) &&
559 !kthread_should_stop() &&
560 list_empty(&cwq->worklist))
1da177e4 561 schedule();
3af24433
ON
562 finish_wait(&cwq->more_work, &wait);
563
85f4186a
ON
564 try_to_freeze();
565
14441960 566 if (kthread_should_stop())
3af24433 567 break;
1da177e4 568
1537663f
TH
569 if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed,
570 get_cpu_mask(cwq->cpu))))
571 set_cpus_allowed_ptr(cwq->thread,
572 get_cpu_mask(cwq->cpu));
3af24433 573 run_workqueue(cwq);
1da177e4 574 }
3af24433 575
1da177e4
LT
576 return 0;
577}
578
fc2e4d70
ON
579struct wq_barrier {
580 struct work_struct work;
581 struct completion done;
582};
583
584static void wq_barrier_func(struct work_struct *work)
585{
586 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
587 complete(&barr->done);
588}
589
4690c4ab
TH
590/**
591 * insert_wq_barrier - insert a barrier work
592 * @cwq: cwq to insert barrier into
593 * @barr: wq_barrier to insert
594 * @head: insertion point
595 *
596 * Insert barrier @barr into @cwq before @head.
597 *
598 * CONTEXT:
599 * spin_lock_irq(cwq->lock).
600 */
83c22520 601static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1a4d9b0a 602 struct wq_barrier *barr, struct list_head *head)
fc2e4d70 603{
dc186ad7
TG
604 /*
605 * debugobject calls are safe here even with cwq->lock locked
606 * as we know for sure that this will not trigger any of the
607 * checks and call back into the fixup functions where we
608 * might deadlock.
609 */
610 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
22df02bb 611 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
fc2e4d70 612 init_completion(&barr->done);
83c22520 613
dc186ad7 614 debug_work_activate(&barr->work);
73f53c4a 615 insert_work(cwq, &barr->work, head, work_color_to_flags(WORK_NO_COLOR));
fc2e4d70
ON
616}
617
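/*
 * Note on the barrier above (annotation added here): the barrier work is
 * inserted with WORK_NO_COLOR, so cwq_dec_nr_in_flight() ignores it and a
 * flush in progress is never blocked on, or confused by, barriers injected
 * by flush_work()/wait_on_work().  A minimal sketch of how a caller waits
 * on a work item already on @cwq->worklist:
 *
 *	struct wq_barrier barr;
 *
 *	// with cwq->lock held, queue the barrier right after @work:
 *	insert_wq_barrier(cwq, &barr, work->entry.next);
 *	spin_unlock_irq(&cwq->lock);
 *	wait_for_completion(&barr.done);	// wq_barrier_func() ran
 *	destroy_work_on_stack(&barr.work);
 */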
73f53c4a
TH
618/**
619 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
620 * @wq: workqueue being flushed
621 * @flush_color: new flush color, < 0 for no-op
622 * @work_color: new work color, < 0 for no-op
623 *
624 * Prepare cwqs for workqueue flushing.
625 *
626 * If @flush_color is non-negative, flush_color on all cwqs should be
627 * -1.  If no cwq has in-flight works at the specified color, all
628 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
629 * has in-flight works, its cwq->flush_color is set to
630 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
631 * wakeup logic is armed and %true is returned.
632 *
633 * The caller should have initialized @wq->first_flusher prior to
634 * calling this function with non-negative @flush_color. If
635 * @flush_color is negative, no flush color update is done and %false
636 * is returned.
637 *
638 * If @work_color is non-negative, all cwqs should have the same
639 * work_color which is previous to @work_color and all will be
640 * advanced to @work_color.
641 *
642 * CONTEXT:
643 * mutex_lock(wq->flush_mutex).
644 *
645 * RETURNS:
646 * %true if @flush_color >= 0 and there's something to flush. %false
647 * otherwise.
648 */
649static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
650 int flush_color, int work_color)
1da177e4 651{
73f53c4a
TH
652 bool wait = false;
653 unsigned int cpu;
1da177e4 654
73f53c4a
TH
655 if (flush_color >= 0) {
656 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
657 atomic_set(&wq->nr_cwqs_to_flush, 1);
1da177e4 658 }
2355b70f 659
73f53c4a
TH
660 for_each_possible_cpu(cpu) {
661 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
662
663 spin_lock_irq(&cwq->lock);
664
665 if (flush_color >= 0) {
666 BUG_ON(cwq->flush_color != -1);
667
668 if (cwq->nr_in_flight[flush_color]) {
669 cwq->flush_color = flush_color;
670 atomic_inc(&wq->nr_cwqs_to_flush);
671 wait = true;
672 }
673 }
674
675 if (work_color >= 0) {
676 BUG_ON(work_color != work_next_color(cwq->work_color));
677 cwq->work_color = work_color;
678 }
679
680 spin_unlock_irq(&cwq->lock);
dc186ad7 681 }
14441960 682
73f53c4a
TH
683 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
684 complete(&wq->first_flusher->done);
685
686 return wait;
1da177e4
LT
687}
688
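/*
 * Walk-through of the helper above (annotation added here; two CPUs and
 * the specific counts are assumptions for the example):
 *
 *	flush_workqueue_prep_cwqs(wq, 3, 4);
 *	// cpu0: nr_in_flight[3] == 2  ->  flush_color = 3, counted
 *	// cpu1: nr_in_flight[3] == 0  ->  flush_color stays -1
 *	// both cwqs: work_color 3 -> 4
 *	// returns true because cpu0 still has color-3 works in flight
 *
 * The extra reference taken via atomic_set(..., 1) before the loop is
 * dropped at the end, so the first flusher can only be completed once
 * every busy cwq has actually been accounted for.
 */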
0fcb78c2 689/**
1da177e4 690 * flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c2 691 * @wq: workqueue to flush
1da177e4
LT
692 *
693 * Forces execution of the workqueue and blocks until its completion.
694 * This is typically used in driver shutdown handlers.
695 *
fc2e4d70
ON
696 * We sleep until all works which were queued on entry have been handled,
697 * but we are not livelocked by new incoming ones.
1da177e4 698 */
7ad5b3a5 699void flush_workqueue(struct workqueue_struct *wq)
1da177e4 700{
73f53c4a
TH
701 struct wq_flusher this_flusher = {
702 .list = LIST_HEAD_INIT(this_flusher.list),
703 .flush_color = -1,
704 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
705 };
706 int next_color;
1da177e4 707
3295f0ef
IM
708 lock_map_acquire(&wq->lockdep_map);
709 lock_map_release(&wq->lockdep_map);
73f53c4a
TH
710
711 mutex_lock(&wq->flush_mutex);
712
713 /*
714 * Start-to-wait phase
715 */
716 next_color = work_next_color(wq->work_color);
717
718 if (next_color != wq->flush_color) {
719 /*
720 * Color space is not full. The current work_color
721 * becomes our flush_color and work_color is advanced
722 * by one.
723 */
724 BUG_ON(!list_empty(&wq->flusher_overflow));
725 this_flusher.flush_color = wq->work_color;
726 wq->work_color = next_color;
727
728 if (!wq->first_flusher) {
729 /* no flush in progress, become the first flusher */
730 BUG_ON(wq->flush_color != this_flusher.flush_color);
731
732 wq->first_flusher = &this_flusher;
733
734 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
735 wq->work_color)) {
736 /* nothing to flush, done */
737 wq->flush_color = next_color;
738 wq->first_flusher = NULL;
739 goto out_unlock;
740 }
741 } else {
742 /* wait in queue */
743 BUG_ON(wq->flush_color == this_flusher.flush_color);
744 list_add_tail(&this_flusher.list, &wq->flusher_queue);
745 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
746 }
747 } else {
748 /*
749 * Oops, color space is full, wait on overflow queue.
750 * The next flush completion will assign us
751 * flush_color and transfer to flusher_queue.
752 */
753 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
754 }
755
756 mutex_unlock(&wq->flush_mutex);
757
758 wait_for_completion(&this_flusher.done);
759
760 /*
761 * Wake-up-and-cascade phase
762 *
763 * First flushers are responsible for cascading flushes and
764 * handling overflow. Non-first flushers can simply return.
765 */
766 if (wq->first_flusher != &this_flusher)
767 return;
768
769 mutex_lock(&wq->flush_mutex);
770
771 wq->first_flusher = NULL;
772
773 BUG_ON(!list_empty(&this_flusher.list));
774 BUG_ON(wq->flush_color != this_flusher.flush_color);
775
776 while (true) {
777 struct wq_flusher *next, *tmp;
778
779 /* complete all the flushers sharing the current flush color */
780 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
781 if (next->flush_color != wq->flush_color)
782 break;
783 list_del_init(&next->list);
784 complete(&next->done);
785 }
786
787 BUG_ON(!list_empty(&wq->flusher_overflow) &&
788 wq->flush_color != work_next_color(wq->work_color));
789
790 /* this flush_color is finished, advance by one */
791 wq->flush_color = work_next_color(wq->flush_color);
792
793 /* one color has been freed, handle overflow queue */
794 if (!list_empty(&wq->flusher_overflow)) {
795 /*
796 * Assign the same color to all overflowed
797 * flushers, advance work_color and append to
798 * flusher_queue. This is the start-to-wait
799 * phase for these overflowed flushers.
800 */
801 list_for_each_entry(tmp, &wq->flusher_overflow, list)
802 tmp->flush_color = wq->work_color;
803
804 wq->work_color = work_next_color(wq->work_color);
805
806 list_splice_tail_init(&wq->flusher_overflow,
807 &wq->flusher_queue);
808 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
809 }
810
811 if (list_empty(&wq->flusher_queue)) {
812 BUG_ON(wq->flush_color != wq->work_color);
813 break;
814 }
815
816 /*
817 * Need to flush more colors. Make the next flusher
818 * the new first flusher and arm cwqs.
819 */
820 BUG_ON(wq->flush_color == wq->work_color);
821 BUG_ON(wq->flush_color != next->flush_color);
822
823 list_del_init(&next->list);
824 wq->first_flusher = next;
825
826 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
827 break;
828
829 /*
830 * Meh... this color is already done, clear first
831 * flusher and repeat cascading.
832 */
833 wq->first_flusher = NULL;
834 }
835
836out_unlock:
837 mutex_unlock(&wq->flush_mutex);
1da177e4 838}
ae90dd5d 839EXPORT_SYMBOL_GPL(flush_workqueue);
1da177e4 840
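/*
 * A compact picture of flush_workqueue() above (annotation added here):
 *
 *	mutex_lock(&wq->flush_mutex);
 *	  grab a color (or park on flusher_overflow if all colors are busy)
 *	mutex_unlock(&wq->flush_mutex);
 *	wait_for_completion(&this_flusher.done);
 *	if we were first_flusher:
 *	  cascade: complete queued flushers, refill from the overflow list,
 *	  promote the next flusher and re-arm the cwqs
 *
 * Callers only need flush_workqueue(wq); the coloring and cascading are
 * internal.
 */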
db700897
ON
841/**
842 * flush_work - block until a work_struct's callback has terminated
843 * @work: the work which is to be flushed
844 *
a67da70d
ON
845 * Returns false if @work has already terminated.
846 *
db700897
ON
847 * It is expected that, prior to calling flush_work(), the caller has
848 * arranged for the work to not be requeued, otherwise it doesn't make
849 * sense to use this function.
850 */
851int flush_work(struct work_struct *work)
852{
853 struct cpu_workqueue_struct *cwq;
854 struct list_head *prev;
855 struct wq_barrier barr;
856
857 might_sleep();
858 cwq = get_wq_data(work);
859 if (!cwq)
860 return 0;
861
3295f0ef
IM
862 lock_map_acquire(&cwq->wq->lockdep_map);
863 lock_map_release(&cwq->wq->lockdep_map);
a67da70d 864
db700897
ON
865 spin_lock_irq(&cwq->lock);
866 if (!list_empty(&work->entry)) {
867 /*
868 * See the comment near try_to_grab_pending()->smp_rmb().
869 * If it was re-queued under us we are not going to wait.
870 */
871 smp_rmb();
872 if (unlikely(cwq != get_wq_data(work)))
4690c4ab 873 goto already_gone;
db700897
ON
874 prev = &work->entry;
875 } else {
876 if (cwq->current_work != work)
4690c4ab 877 goto already_gone;
db700897
ON
878 prev = &cwq->worklist;
879 }
880 insert_wq_barrier(cwq, &barr, prev->next);
db700897 881
4690c4ab 882 spin_unlock_irq(&cwq->lock);
db700897 883 wait_for_completion(&barr.done);
dc186ad7 884 destroy_work_on_stack(&barr.work);
db700897 885 return 1;
4690c4ab
TH
886already_gone:
887 spin_unlock_irq(&cwq->lock);
888 return 0;
db700897
ON
889}
890EXPORT_SYMBOL_GPL(flush_work);
891
6e84d644 892/*
1f1f642e 893 * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
6e84d644
ON
894 * so this work can't be re-armed in any way.
895 */
896static int try_to_grab_pending(struct work_struct *work)
897{
898 struct cpu_workqueue_struct *cwq;
1f1f642e 899 int ret = -1;
6e84d644 900
22df02bb 901 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1f1f642e 902 return 0;
6e84d644
ON
903
904 /*
905 * The queueing is in progress, or it is already queued. Try to
906 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
907 */
908
909 cwq = get_wq_data(work);
910 if (!cwq)
911 return ret;
912
913 spin_lock_irq(&cwq->lock);
914 if (!list_empty(&work->entry)) {
915 /*
916 * This work is queued, but perhaps we locked the wrong cwq.
917 * In that case we must see the new value after rmb(), see
918 * insert_work()->wmb().
919 */
920 smp_rmb();
921 if (cwq == get_wq_data(work)) {
dc186ad7 922 debug_work_deactivate(work);
6e84d644 923 list_del_init(&work->entry);
73f53c4a 924 cwq_dec_nr_in_flight(cwq, get_work_color(work));
6e84d644
ON
925 ret = 1;
926 }
927 }
928 spin_unlock_irq(&cwq->lock);
929
930 return ret;
931}
932
933static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
b89deed3
ON
934 struct work_struct *work)
935{
936 struct wq_barrier barr;
937 int running = 0;
938
939 spin_lock_irq(&cwq->lock);
940 if (unlikely(cwq->current_work == work)) {
1a4d9b0a 941 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
b89deed3
ON
942 running = 1;
943 }
944 spin_unlock_irq(&cwq->lock);
945
dc186ad7 946 if (unlikely(running)) {
b89deed3 947 wait_for_completion(&barr.done);
dc186ad7
TG
948 destroy_work_on_stack(&barr.work);
949 }
b89deed3
ON
950}
951
6e84d644 952static void wait_on_work(struct work_struct *work)
b89deed3
ON
953{
954 struct cpu_workqueue_struct *cwq;
28e53bdd 955 struct workqueue_struct *wq;
b1f4ec17 956 int cpu;
b89deed3 957
f293ea92
ON
958 might_sleep();
959
3295f0ef
IM
960 lock_map_acquire(&work->lockdep_map);
961 lock_map_release(&work->lockdep_map);
4e6045f1 962
b89deed3 963 cwq = get_wq_data(work);
b89deed3 964 if (!cwq)
3af24433 965 return;
b89deed3 966
28e53bdd 967 wq = cwq->wq;
28e53bdd 968
1537663f 969 for_each_possible_cpu(cpu)
4690c4ab 970 wait_on_cpu_work(get_cwq(cpu, wq), work);
6e84d644
ON
971}
972
1f1f642e
ON
973static int __cancel_work_timer(struct work_struct *work,
974 struct timer_list* timer)
975{
976 int ret;
977
978 do {
979 ret = (timer && likely(del_timer(timer)));
980 if (!ret)
981 ret = try_to_grab_pending(work);
982 wait_on_work(work);
983 } while (unlikely(ret < 0));
984
4d707b9f 985 clear_wq_data(work);
1f1f642e
ON
986 return ret;
987}
988
6e84d644
ON
989/**
990 * cancel_work_sync - block until a work_struct's callback has terminated
991 * @work: the work which is to be flushed
992 *
1f1f642e
ON
993 * Returns true if @work was pending.
994 *
6e84d644
ON
995 * cancel_work_sync() will cancel the work if it is queued. If the work's
996 * callback appears to be running, cancel_work_sync() will block until it
997 * has completed.
998 *
999 * It is possible to use this function if the work re-queues itself. It can
1000 * cancel the work even if it migrates to another workqueue, however in that
1001 * case it only guarantees that work->func() has completed on the last queued
1002 * workqueue.
1003 *
1004 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
1005 * pending, otherwise it goes into a busy-wait loop until the timer expires.
1006 *
1007 * The caller must ensure that workqueue_struct on which this work was last
1008 * queued can't be destroyed before this function returns.
1009 */
1f1f642e 1010int cancel_work_sync(struct work_struct *work)
6e84d644 1011{
1f1f642e 1012 return __cancel_work_timer(work, NULL);
b89deed3 1013}
28e53bdd 1014EXPORT_SYMBOL_GPL(cancel_work_sync);
b89deed3 1015
6e84d644 1016/**
f5a421a4 1017 * cancel_delayed_work_sync - reliably kill off a delayed work.
6e84d644
ON
1018 * @dwork: the delayed work struct
1019 *
1f1f642e
ON
1020 * Returns true if @dwork was pending.
1021 *
6e84d644
ON
1022 * It is possible to use this function if @dwork rearms itself via queue_work()
1023 * or queue_delayed_work(). See also the comment for cancel_work_sync().
1024 */
1f1f642e 1025int cancel_delayed_work_sync(struct delayed_work *dwork)
6e84d644 1026{
1f1f642e 1027 return __cancel_work_timer(&dwork->work, &dwork->timer);
6e84d644 1028}
f5a421a4 1029EXPORT_SYMBOL(cancel_delayed_work_sync);
1da177e4 1030
6e84d644 1031static struct workqueue_struct *keventd_wq __read_mostly;
1da177e4 1032
0fcb78c2
REB
1033/**
1034 * schedule_work - put work task in global workqueue
1035 * @work: job to be done
1036 *
5b0f437d
BVA
1037 * Returns zero if @work was already on the kernel-global workqueue and
1038 * non-zero otherwise.
1039 *
1040 * This puts a job in the kernel-global workqueue if it was not already
1041 * queued and leaves it in the same position on the kernel-global
1042 * workqueue otherwise.
0fcb78c2 1043 */
7ad5b3a5 1044int schedule_work(struct work_struct *work)
1da177e4
LT
1045{
1046 return queue_work(keventd_wq, work);
1047}
ae90dd5d 1048EXPORT_SYMBOL(schedule_work);
1da177e4 1049
c1a220e7
ZR
1050/*
1051 * schedule_work_on - put work task on a specific cpu
1052 * @cpu: cpu to put the work task on
1053 * @work: job to be done
1054 *
1055 * This puts a job on a specific cpu
1056 */
1057int schedule_work_on(int cpu, struct work_struct *work)
1058{
1059 return queue_work_on(cpu, keventd_wq, work);
1060}
1061EXPORT_SYMBOL(schedule_work_on);
1062
0fcb78c2
REB
1063/**
1064 * schedule_delayed_work - put work task in global workqueue after delay
52bad64d
DH
1065 * @dwork: job to be done
1066 * @delay: number of jiffies to wait or 0 for immediate execution
0fcb78c2
REB
1067 *
1068 * After waiting for a given time this puts a job in the kernel-global
1069 * workqueue.
1070 */
7ad5b3a5 1071int schedule_delayed_work(struct delayed_work *dwork,
82f67cd9 1072 unsigned long delay)
1da177e4 1073{
52bad64d 1074 return queue_delayed_work(keventd_wq, dwork, delay);
1da177e4 1075}
ae90dd5d 1076EXPORT_SYMBOL(schedule_delayed_work);
1da177e4 1077
8c53e463
LT
1078/**
1079 * flush_delayed_work - block until a dwork_struct's callback has terminated
1080 * @dwork: the delayed work which is to be flushed
1081 *
1082 * Any timeout is cancelled, and any pending work is run immediately.
1083 */
1084void flush_delayed_work(struct delayed_work *dwork)
1085{
1086 if (del_timer_sync(&dwork->timer)) {
4690c4ab
TH
1087 __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
1088 &dwork->work);
8c53e463
LT
1089 put_cpu();
1090 }
1091 flush_work(&dwork->work);
1092}
1093EXPORT_SYMBOL(flush_delayed_work);
1094
0fcb78c2
REB
1095/**
1096 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
1097 * @cpu: cpu to use
52bad64d 1098 * @dwork: job to be done
0fcb78c2
REB
1099 * @delay: number of jiffies to wait
1100 *
1101 * After waiting for a given time this puts a job in the kernel-global
1102 * workqueue on the specified CPU.
1103 */
1da177e4 1104int schedule_delayed_work_on(int cpu,
52bad64d 1105 struct delayed_work *dwork, unsigned long delay)
1da177e4 1106{
52bad64d 1107 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
1da177e4 1108}
ae90dd5d 1109EXPORT_SYMBOL(schedule_delayed_work_on);
1da177e4 1110
b6136773
AM
1111/**
1112 * schedule_on_each_cpu - call a function on each online CPU from keventd
1113 * @func: the function to call
b6136773
AM
1114 *
1115 * Returns zero on success.
1116 * Returns a negative errno on failure.
1117 *
b6136773
AM
1118 * schedule_on_each_cpu() is very slow.
1119 */
65f27f38 1120int schedule_on_each_cpu(work_func_t func)
15316ba8
CL
1121{
1122 int cpu;
65a64464 1123 int orig = -1;
b6136773 1124 struct work_struct *works;
15316ba8 1125
b6136773
AM
1126 works = alloc_percpu(struct work_struct);
1127 if (!works)
15316ba8 1128 return -ENOMEM;
b6136773 1129
93981800
TH
1130 get_online_cpus();
1131
65a64464 1132 /*
93981800
TH
1133 * When running in keventd don't schedule a work item on
1134 * itself. Can just call directly because the work queue is
1135 * already bound. This also is faster.
65a64464 1136 */
93981800 1137 if (current_is_keventd())
65a64464 1138 orig = raw_smp_processor_id();
65a64464 1139
15316ba8 1140 for_each_online_cpu(cpu) {
9bfb1839
IM
1141 struct work_struct *work = per_cpu_ptr(works, cpu);
1142
1143 INIT_WORK(work, func);
65a64464 1144 if (cpu != orig)
93981800 1145 schedule_work_on(cpu, work);
65a64464 1146 }
93981800
TH
1147 if (orig >= 0)
1148 func(per_cpu_ptr(works, orig));
1149
1150 for_each_online_cpu(cpu)
1151 flush_work(per_cpu_ptr(works, cpu));
1152
95402b38 1153 put_online_cpus();
b6136773 1154 free_percpu(works);
15316ba8
CL
1155 return 0;
1156}
1157
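/*
 * Example use of schedule_on_each_cpu() (annotation added here;
 * drain_local_caches() is a made-up callback):
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		// runs once on every online CPU, in keventd context
 *	}
 *
 *	int ret = schedule_on_each_cpu(drain_local_caches);
 *	// returns 0 on success, -ENOMEM if the per-cpu works could not be
 *	// allocated; blocks until every CPU has run the callback
 */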
eef6a7d5
AS
1158/**
1159 * flush_scheduled_work - ensure that any scheduled work has run to completion.
1160 *
1161 * Forces execution of the kernel-global workqueue and blocks until its
1162 * completion.
1163 *
1164 * Think twice before calling this function! It's very easy to get into
1165 * trouble if you don't take great care. Either of the following situations
1166 * will lead to deadlock:
1167 *
1168 * One of the work items currently on the workqueue needs to acquire
1169 * a lock held by your code or its caller.
1170 *
1171 * Your code is running in the context of a work routine.
1172 *
1173 * They will be detected by lockdep when they occur, but the first might not
1174 * occur very often. It depends on what work items are on the workqueue and
1175 * what locks they need, which you have no control over.
1176 *
1177 * In most situations flushing the entire workqueue is overkill; you merely
1178 * need to know that a particular work item isn't queued and isn't running.
1179 * In such cases you should use cancel_delayed_work_sync() or
1180 * cancel_work_sync() instead.
1181 */
1da177e4
LT
1182void flush_scheduled_work(void)
1183{
1184 flush_workqueue(keventd_wq);
1185}
ae90dd5d 1186EXPORT_SYMBOL(flush_scheduled_work);
1da177e4 1187
1fa44eca
JB
1188/**
1189 * execute_in_process_context - reliably execute the routine with user context
1190 * @fn: the function to execute
1fa44eca
JB
1191 * @ew: guaranteed storage for the execute work structure (must
1192 * be available when the work executes)
1193 *
1194 * Executes the function immediately if process context is available,
1195 * otherwise schedules the function for delayed execution.
1196 *
1197 * Returns: 0 - function was executed
1198 * 1 - function was scheduled for execution
1199 */
65f27f38 1200int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1fa44eca
JB
1201{
1202 if (!in_interrupt()) {
65f27f38 1203 fn(&ew->work);
1fa44eca
JB
1204 return 0;
1205 }
1206
65f27f38 1207 INIT_WORK(&ew->work, fn);
1fa44eca
JB
1208 schedule_work(&ew->work);
1209
1210 return 1;
1211}
1212EXPORT_SYMBOL_GPL(execute_in_process_context);
1213
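/*
 * Example use of execute_in_process_context() (annotation added here;
 * "struct my_obj" and release_fn() are made-up names):
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		// ...
 *	};
 *
 *	static void release_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	// from possibly-atomic context:
 *	execute_in_process_context(release_fn, &obj->ew);
 *	// runs release_fn() immediately when not in interrupt context,
 *	// otherwise defers it to keventd (returns 0 or 1 accordingly)
 */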
1da177e4
LT
1214int keventd_up(void)
1215{
1216 return keventd_wq != NULL;
1217}
1218
1219int current_is_keventd(void)
1220{
1221 struct cpu_workqueue_struct *cwq;
d243769d 1222 int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
1da177e4
LT
1223 int ret = 0;
1224
1225 BUG_ON(!keventd_wq);
1226
1537663f 1227 cwq = get_cwq(cpu, keventd_wq);
1da177e4
LT
1228 if (current == cwq->thread)
1229 ret = 1;
1230
1231 return ret;
1232
1233}
1234
0f900049
TH
1235static struct cpu_workqueue_struct *alloc_cwqs(void)
1236{
1237 /*
1238 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
1239 * Make sure that the alignment isn't lower than that of
1240 * unsigned long long.
1241 */
1242 const size_t size = sizeof(struct cpu_workqueue_struct);
1243 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
1244 __alignof__(unsigned long long));
1245 struct cpu_workqueue_struct *cwqs;
1246#ifndef CONFIG_SMP
1247 void *ptr;
1248
1249 /*
1250 * On UP, percpu allocator doesn't honor alignment parameter
1251 * and simply uses arch-dependent default. Allocate enough
1252 * room to align cwq and put an extra pointer at the end
1253 * pointing back to the originally allocated pointer which
1254 * will be used for free.
1255 *
1256 * FIXME: This really belongs to UP percpu code. Update UP
1257 * percpu code to honor alignment and remove this ugliness.
1258 */
1259 ptr = __alloc_percpu(size + align + sizeof(void *), 1);
1260 cwqs = PTR_ALIGN(ptr, align);
1261 *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
1262#else
1263 /* On SMP, percpu allocator can do it itself */
1264 cwqs = __alloc_percpu(size, align);
1265#endif
1266 /* just in case, make sure it's actually aligned */
1267 BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
1268 return cwqs;
1269}
1270
1271static void free_cwqs(struct cpu_workqueue_struct *cwqs)
1272{
1273#ifndef CONFIG_SMP
1274 /* on UP, the pointer to free is stored right after the cwq */
1275 if (cwqs)
1276 free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
1277#else
1278 free_percpu(cwqs);
1279#endif
1280}
1281
3af24433
ON
1282static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
1283{
1284 struct workqueue_struct *wq = cwq->wq;
3af24433
ON
1285 struct task_struct *p;
1286
1537663f 1287 p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
3af24433
ON
1288 /*
1289 * Nobody can add the work_struct to this cwq,
1290 * if (caller is __create_workqueue)
1291 * nobody should see this wq
1292 * else // caller is CPU_UP_PREPARE
1293 * cpu is not on cpu_online_map
1294 * so we can abort safely.
1295 */
1296 if (IS_ERR(p))
1297 return PTR_ERR(p);
3af24433 1298 cwq->thread = p;
3af24433
ON
1299
1300 return 0;
1301}
1302
06ba38a9
ON
1303static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
1304{
1305 struct task_struct *p = cwq->thread;
1306
1307 if (p != NULL) {
1308 if (cpu >= 0)
1309 kthread_bind(p, cpu);
1310 wake_up_process(p);
1311 }
1312}
1313
4e6045f1 1314struct workqueue_struct *__create_workqueue_key(const char *name,
97e37d7b 1315 unsigned int flags,
eb13ba87
JB
1316 struct lock_class_key *key,
1317 const char *lock_name)
1da177e4 1318{
1537663f 1319 bool singlethread = flags & WQ_SINGLE_THREAD;
1da177e4 1320 struct workqueue_struct *wq;
3af24433 1321 int err = 0, cpu;
1da177e4 1322
3af24433
ON
1323 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1324 if (!wq)
4690c4ab 1325 goto err;
3af24433 1326
0f900049 1327 wq->cpu_wq = alloc_cwqs();
4690c4ab
TH
1328 if (!wq->cpu_wq)
1329 goto err;
3af24433 1330
97e37d7b 1331 wq->flags = flags;
73f53c4a
TH
1332 mutex_init(&wq->flush_mutex);
1333 atomic_set(&wq->nr_cwqs_to_flush, 0);
1334 INIT_LIST_HEAD(&wq->flusher_queue);
1335 INIT_LIST_HEAD(&wq->flusher_overflow);
3af24433 1336 wq->name = name;
eb13ba87 1337 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
cce1a165 1338 INIT_LIST_HEAD(&wq->list);
3af24433 1339
1537663f
TH
1340 cpu_maps_update_begin();
1341 /*
1342 * We must initialize cwqs for each possible cpu even if we
1343 * are going to call destroy_workqueue() finally. Otherwise
1344 * cpu_up() can hit the uninitialized cwq once we drop the
1345 * lock.
1346 */
1347 for_each_possible_cpu(cpu) {
1348 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1349
0f900049 1350 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
1537663f
TH
1351 cwq->wq = wq;
1352 cwq->cpu = cpu;
73f53c4a 1353 cwq->flush_color = -1;
1537663f
TH
1354 spin_lock_init(&cwq->lock);
1355 INIT_LIST_HEAD(&cwq->worklist);
1356 init_waitqueue_head(&cwq->more_work);
1357
1358 if (err)
1359 continue;
1360 err = create_workqueue_thread(cwq, cpu);
1361 if (cpu_online(cpu) && !singlethread)
06ba38a9 1362 start_workqueue_thread(cwq, cpu);
1537663f
TH
1363 else
1364 start_workqueue_thread(cwq, -1);
3af24433
ON
1365 }
1366
1537663f
TH
1367 spin_lock(&workqueue_lock);
1368 list_add(&wq->list, &workqueues);
1369 spin_unlock(&workqueue_lock);
1370
1371 cpu_maps_update_done();
1372
3af24433
ON
1373 if (err) {
1374 destroy_workqueue(wq);
1375 wq = NULL;
1376 }
1377 return wq;
4690c4ab
TH
1378err:
1379 if (wq) {
0f900049 1380 free_cwqs(wq->cpu_wq);
4690c4ab
TH
1381 kfree(wq);
1382 }
1383 return NULL;
3af24433 1384}
4e6045f1 1385EXPORT_SYMBOL_GPL(__create_workqueue_key);
1da177e4 1386
3af24433
ON
1387/**
1388 * destroy_workqueue - safely terminate a workqueue
1389 * @wq: target workqueue
1390 *
1391 * Safely destroy a workqueue. All work currently pending will be done first.
1392 */
1393void destroy_workqueue(struct workqueue_struct *wq)
1394{
b1f4ec17 1395 int cpu;
3af24433 1396
3da1c84c 1397 cpu_maps_update_begin();
95402b38 1398 spin_lock(&workqueue_lock);
b1f4ec17 1399 list_del(&wq->list);
95402b38 1400 spin_unlock(&workqueue_lock);
1537663f 1401 cpu_maps_update_done();
3af24433 1402
73f53c4a
TH
1403 flush_workqueue(wq);
1404
1405 for_each_possible_cpu(cpu) {
1406 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1407 int i;
1408
1409 if (cwq->thread) {
1410 kthread_stop(cwq->thread);
1411 cwq->thread = NULL;
1412 }
1413
1414 for (i = 0; i < WORK_NR_COLORS; i++)
1415 BUG_ON(cwq->nr_in_flight[i]);
1416 }
9b41ea72 1417
0f900049 1418 free_cwqs(wq->cpu_wq);
3af24433
ON
1419 kfree(wq);
1420}
1421EXPORT_SYMBOL_GPL(destroy_workqueue);
1422
1423static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1424 unsigned long action,
1425 void *hcpu)
1426{
1427 unsigned int cpu = (unsigned long)hcpu;
1428 struct cpu_workqueue_struct *cwq;
1429 struct workqueue_struct *wq;
1430
8bb78442
RW
1431 action &= ~CPU_TASKS_FROZEN;
1432
3af24433 1433 list_for_each_entry(wq, &workqueues, list) {
1537663f
TH
1434 if (wq->flags & WQ_SINGLE_THREAD)
1435 continue;
3af24433 1436
1537663f 1437 cwq = get_cwq(cpu, wq);
3af24433 1438
1537663f 1439 switch (action) {
3da1c84c 1440 case CPU_POST_DEAD:
73f53c4a 1441 flush_workqueue(wq);
3af24433
ON
1442 break;
1443 }
1da177e4
LT
1444 }
1445
1537663f 1446 return notifier_from_errno(0);
1da177e4 1447}
1da177e4 1448
2d3854a3 1449#ifdef CONFIG_SMP
8ccad40d 1450
2d3854a3 1451struct work_for_cpu {
6b44003e 1452 struct completion completion;
2d3854a3
RR
1453 long (*fn)(void *);
1454 void *arg;
1455 long ret;
1456};
1457
6b44003e 1458static int do_work_for_cpu(void *_wfc)
2d3854a3 1459{
6b44003e 1460 struct work_for_cpu *wfc = _wfc;
2d3854a3 1461 wfc->ret = wfc->fn(wfc->arg);
6b44003e
AM
1462 complete(&wfc->completion);
1463 return 0;
2d3854a3
RR
1464}
1465
1466/**
1467 * work_on_cpu - run a function in user context on a particular cpu
1468 * @cpu: the cpu to run on
1469 * @fn: the function to run
1470 * @arg: the function arg
1471 *
31ad9081
RR
1472 * This will return the value @fn returns.
1473 * It is up to the caller to ensure that the cpu doesn't go offline.
6b44003e 1474 * The caller must not hold any locks which would prevent @fn from completing.
2d3854a3
RR
1475 */
1476long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1477{
6b44003e
AM
1478 struct task_struct *sub_thread;
1479 struct work_for_cpu wfc = {
1480 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
1481 .fn = fn,
1482 .arg = arg,
1483 };
1484
1485 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
1486 if (IS_ERR(sub_thread))
1487 return PTR_ERR(sub_thread);
1488 kthread_bind(sub_thread, cpu);
1489 wake_up_process(sub_thread);
1490 wait_for_completion(&wfc.completion);
2d3854a3
RR
1491 return wfc.ret;
1492}
1493EXPORT_SYMBOL_GPL(work_on_cpu);
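/*
 * Example use of work_on_cpu() (annotation added here; read_msr_on(),
 * do_something() and "data" are made-up names):
 *
 *	static long read_msr_on(void *arg)
 *	{
 *		// runs in a kthread bound to the requested CPU
 *		return do_something(arg);
 *	}
 *
 *	long ret = work_on_cpu(3, read_msr_on, &data);
 *	// blocks until the bound kthread has run read_msr_on(&data) and
 *	// returns its return value; the caller must keep cpu 3 online
 */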
1494#endif /* CONFIG_SMP */
1495
c12920d1 1496void __init init_workqueues(void)
1da177e4 1497{
e7577c50 1498 singlethread_cpu = cpumask_first(cpu_possible_mask);
1da177e4
LT
1499 hotcpu_notifier(workqueue_cpu_callback, 0);
1500 keventd_wq = create_workqueue("events");
1501 BUG_ON(!keventd_wq);
1502}