make queue_delayed_work() friendly to flush_fork()
[linux-2.6-block.git] kernel/workqueue.c
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
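
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * declares a work item bound to a handler and queues it on its own
 * workqueue.  The names my_wq, my_work and my_work_handler are invented
 * for the sketch.
 *
 *	static void my_work_handler(struct work_struct *work)
 *	{
 *		... runs in process context on a workqueue thread ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_handler);
 *
 *	queue_work(my_wq, &my_work);
 *
 * A second queue_work() while the item is still pending returns 0 and does
 * nothing; handlers that need "run once more" semantics requeue themselves.
 */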

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
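
/*
 * Illustrative usage sketch (not part of this file): delayed work uses
 * struct delayed_work, which embeds the timer that delayed_work_timer_fn()
 * above fires on.  my_wq, my_dwork and my_poll are invented names.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		... do the periodic job, then rearm ...
 *		queue_delayed_work(my_wq, dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	first run in ~1 second
 */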

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
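
/*
 * Illustrative sketch (not part of this file): queue_delayed_work_on() is
 * the same as queue_delayed_work() except that the timer, and hence the
 * eventual execution, is pinned to the given CPU.  Names are invented.
 *
 *	queue_delayed_work_on(cpu, my_wq, &my_dwork, msecs_to_jiffies(100));
 */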

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
				struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
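
/*
 * Illustrative sketch (not part of this file): drivers typically flush in
 * their teardown path, after making sure nothing will requeue work.  The
 * my_dev fields and my_remove() are invented names.
 *
 *	static void my_remove(struct my_dev *my_dev)
 *	{
 *		my_dev->shutting_down = 1;	stop new submissions first
 *		flush_workqueue(my_dev->wq);	wait for everything queued
 *		destroy_workqueue(my_dev->wq);
 *	}
 */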

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
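
/*
 * Illustrative sketch (not part of this file): flush_work() is the per-item
 * counterpart of flush_workqueue(), for teardown paths where only one work
 * item matters.  The caller must already have prevented requeueing, e.g. by
 * clearing a flag the handler checks.  Names are invented.
 *
 *	my_dev->active = 0;			handler checks this flag
 *	flush_work(my_dev->wq, &my_dev->work);
 *	kfree(my_dev->buffer);			safe: the callback has finished
 */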


static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
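
/*
 * Illustrative sketch (not part of this file): most users do not create
 * their own workqueue; they push onto keventd_wq via schedule_work(), often
 * from an interrupt handler.  struct my_dev and my_bottom_half are invented.
 *
 *	static void my_bottom_half(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... process dev->rx_buf in process context ...
 *	}
 *
 *	In the interrupt handler:
 *		schedule_work(&dev->work);
 */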

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
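
/*
 * Illustrative sketch (not part of this file): schedule_on_each_cpu() runs
 * @func once on every online CPU and waits for all of them, so it suits
 * global state changes such as draining per-cpu caches.  The function name
 * below is invented.
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... drain this CPU's private cache ...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_cache);
 */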

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued ? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
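
/*
 * Illustrative sketch (not part of this file): for a handler that rearms
 * itself (as in the queue_delayed_work() sketch above), a single
 * cancel_delayed_work() can race with the rearm; the helpers above loop
 * until the cancel sticks.  Names are invented.
 *
 *	cancel_rearming_delayed_workqueue(my_wq, &my_dwork);	own workqueue
 *	cancel_rearming_delayed_work(&my_dwork);		keventd work
 */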

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
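
/*
 * Illustrative sketch (not part of this file): callers that may or may not
 * be in interrupt context, such as a release path that can run from a
 * timer, pass caller-provided storage for the work item.  struct my_obj
 * and my_release are invented names.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */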

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
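
/*
 * Illustrative sketch (not part of this file): the usual lifecycle built on
 * the helpers above; create_workqueue() and create_singlethread_workqueue()
 * are macros around __create_workqueue().  my_wq and my_work are invented.
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);	pending work is run before teardown
 */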

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}