/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which come in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map; the cpumask below is more documentation
 * than an optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
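
/*
 * Illustrative usage sketch, not part of this file: a typical caller
 * defines a handler with the work_func_t signature plus a work item and
 * submits it.  "my_wq" and "my_work_fn" are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_DEBUG "my_work_fn: running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work(my_wq, &my_work))
 *		printk(KERN_DEBUG "my_work was already pending\n");
 */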

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
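
/*
 * Illustrative usage sketch, not part of this file: a delayed work item
 * bundles a work_struct with a timer.  "my_wq" and "my_dwork_fn" are
 * hypothetical names.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = container_of(work,
 *					struct delayed_work, work);
 *		printk(KERN_DEBUG "delayed handler ran for %p\n", dwork);
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ / 2);	// runs ~500ms later
 */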

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
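
/*
 * Illustrative usage sketch, not part of this file: a driver's shutdown
 * path typically quiesces its submitters first and then flushes its
 * private queue.  "my_wq" is a hypothetical workqueue pointer.
 *
 *	// no new work can be queued from here on (irqs/timers stopped)
 *	flush_workqueue(my_wq);	// may sleep; all previously queued work has run
 */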

/*
 * Upon a successful return, the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 1;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
void cancel_work_sync(struct work_struct *work)
{
	while (!try_to_grab_pending(work))
		cpu_relax();
	wait_on_work(work);
	work_clear_pending(work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
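
/*
 * Illustrative usage sketch, not part of this file: cancel a possibly
 * queued and possibly running work item before freeing the object that
 * embeds it.  "my_dev" and its members are hypothetical.
 *
 *	cancel_work_sync(&my_dev->reset_work);	// may sleep
 *	kfree(my_dev);				// safe: the handler has finished
 */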

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	while (!del_timer(&dwork->timer) &&
	       !try_to_grab_pending(&dwork->work))
		cpu_relax();
	wait_on_work(&dwork->work);
	work_clear_pending(&dwork->work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
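
/*
 * Illustrative usage sketch, not part of this file: stop a self-rearming
 * poll routine for good.  "my_dev->poll_dwork" is a hypothetical
 * delayed_work whose handler re-queues itself.
 *
 *	cancel_rearming_delayed_work(&my_dev->poll_dwork);
 *	// neither the timer nor the handler is pending or running any more
 */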

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
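
/*
 * Illustrative usage sketch, not part of this file: an interrupt handler
 * defers the slow part of its processing to keventd.  "my_irq_work" and
 * "my_bottom_half" are hypothetical names.
 *
 *	static DECLARE_WORK(my_irq_work, my_bottom_half);
 *
 *	static irqreturn_t my_isr(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_irq_work);	// safe from hard irq context
 *		return IRQ_HANDLED;
 *	}
 */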

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
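
/*
 * Illustrative usage sketch, not part of this file: run a per-CPU drain
 * everywhere and wait for it to finish.  "drain_local_caches" is a
 * hypothetical work_func_t.
 *
 *	if (schedule_on_each_cpu(drain_local_caches))
 *		printk(KERN_WARNING "could not drain caches on all CPUs\n");
 */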

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
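
/*
 * Illustrative usage sketch, not part of this file: a release path that may
 * be entered from either process or interrupt context hands the caller-owned
 * execute_work storage to this helper.  "my_release_fn" and "my_obj" are
 * hypothetical, and my_obj must stay valid until the handler has run.
 *
 *	execute_in_process_context(my_release_fn, &my_obj->ew);
 */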

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 * if (caller is __create_workqueue)
	 *	nobody should see this wq
	 * else // caller is CPU_UP_PREPARE
	 *	cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
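
/*
 * Illustrative usage sketch, not part of this file: drivers normally reach
 * this through the create_workqueue()/create_singlethread_workqueue()
 * wrappers in linux/workqueue.h rather than calling it directly.  "my_wq"
 * is a hypothetical pointer.
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 */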

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	/*
	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
	 * is not enough, a concurrent flush_workqueue() can insert a
	 * barrier after us.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	while (flush_cpu_workqueue(cwq))
		;

	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
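
/*
 * Illustrative usage sketch, not part of this file: tear-down mirrors the
 * creation sketch above; make sure nothing can re-queue work once the
 * destruction starts.  "my_wq" and "my_work" are hypothetical.
 *
 *	cancel_work_sync(&my_work);	// or otherwise stop all submitters
 *	destroy_workqueue(my_wq);	// runs remaining work, then frees wq
 */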

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}