/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;		/* Least-recently added (next to run) */
	long insert_sequence;		/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;			/* Detect run_workqueue() recursion depth */

	int freezeable;			/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;		/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 *  - the wq_data still matches the cpu_workqueue_struct
	 *  - AND the work is still marked pending
	 *  - AND the work is still on a list (which will be this
	 *    workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
	    && work_pending(work)
	    && !list_empty(&work->entry)) {
		work_func_t f = work->func;
		list_del_init(&work->entry);
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
	for (;;) {
		struct cpu_workqueue_struct *cwq;

		if (!work_pending(work))
			return 0;
		if (list_empty(&work->entry))
			return 0;
		/* NOTE! This depends intimately on __queue_work! */
		cwq = get_wq_data(work);
		if (!cwq)
			return 0;
		if (__run_work(cwq, work))
			return 1;
	}
}
EXPORT_SYMBOL(run_scheduled_work);

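/*
 * Usage sketch (illustrative only; example_flush_fn and example_flush_work
 * are made-up names, not anything defined in this file): a caller that has
 * queued a plain work_struct can force it to run synchronously if it is
 * still pending, e.g. on an urgent shutdown path.
 *
 *	static void example_flush_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "flushing state\n");
 *	}
 *	static DECLARE_WORK(example_flush_work, example_flush_fn);
 *
 *	schedule_work(&example_flush_work);
 *	...
 *	if (run_scheduled_work(&example_flush_work))
 *		printk(KERN_INFO "ran it ourselves\n");
 *
 * As the comment above warns, this must not be used with a
 * struct delayed_work.
 */
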
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

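/*
 * Usage sketch for queue_work() (illustrative only; example_wq,
 * example_work and example_work_fn are made-up names, not defined
 * in this file):
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "running in worker-thread context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	queue_work(example_wq, &example_work);
 *
 * The handler runs later in process context on one of the per-CPU
 * worker threads created by __create_workqueue() below.  A second
 * queue_work() call while the item is still pending returns 0 and
 * the handler runs only once.
 */
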
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

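/*
 * Usage sketch for queue_delayed_work() (illustrative only; example_wq,
 * example_dwork and example_timeout_fn are made-up names):
 *
 *	static void example_timeout_fn(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "ran about one second after queueing\n");
 *	}
 *	static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);
 *
 *	queue_delayed_work(example_wq, &example_dwork, HZ);
 *
 * The delay is expressed in jiffies; msecs_to_jiffies() is the usual
 * way to derive it from a millisecond value.
 */
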
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

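/*
 * Usage sketch for flush_workqueue() (illustrative only; example_wq and
 * example_work are made-up names): a typical driver teardown path makes
 * sure nothing queued earlier is still pending or running before the
 * surrounding object goes away.
 *
 *	queue_work(example_wq, &example_work);
 *	...
 *	flush_workqueue(example_wq);	// from process context only
 *
 * Note that a handler which requeues itself can be pending again by the
 * time the flush returns; see cancel_rearming_delayed_workqueue() below
 * for that case.
 */
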
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

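/*
 * Lifecycle sketch (illustrative only; example_wq is a made-up name):
 * create_workqueue() and create_singlethread_workqueue(), declared in
 * <linux/workqueue.h>, are the usual wrappers around __create_workqueue()
 * above, and destroy_workqueue() is their counterpart.
 *
 *	static struct workqueue_struct *example_wq;
 *
 *	example_wq = create_singlethread_workqueue("example");
 *	if (!example_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(example_wq);	// flushes pending work first
 */
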
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

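/*
 * Usage sketch for schedule_work() (illustrative only; the example_*
 * names are made up): deferring work from an interrupt handler to the
 * shared keventd workqueue ("events/N" threads).
 *
 *	static void example_bh_fn(struct work_struct *work)
 *	{
 *		// runs later in process context
 *	}
 *	static DECLARE_WORK(example_bh_work, example_bh_fn);
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&example_bh_work);
 *		return IRQ_HANDLED;
 *	}
 */
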
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

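/*
 * Usage sketch for schedule_on_each_cpu() (illustrative only;
 * example_drain_fn is a made-up name): run a function once on every
 * online CPU and wait for all of them to finish.
 *
 *	static void example_drain_fn(struct work_struct *unused)
 *	{
 *		printk(KERN_INFO "draining on CPU %d\n", smp_processor_id());
 *	}
 *
 *	if (schedule_on_each_cpu(example_drain_fn))
 *		printk(KERN_ERR "allocating per-cpu works failed\n");
 *
 * The call sleeps in flush_workqueue(), so it must not be used from
 * atomic context.
 */
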
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

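/*
 * Usage sketch for cancel_rearming_delayed_work() (illustrative only;
 * example_poll_fn and example_poll_work are made-up names): a handler
 * that rearms itself cannot be stopped by cancel_delayed_work() alone,
 * because it may already be running and about to requeue.
 *
 *	static void example_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);
 *
 *	static void example_poll_fn(struct work_struct *work)
 *	{
 *		// ... poll the hardware ...
 *		schedule_delayed_work(&example_poll_work, HZ);
 *	}
 *
 *	// in the teardown path, from process context:
 *	cancel_rearming_delayed_work(&example_poll_work);
 */
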
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

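/*
 * Usage sketch for execute_in_process_context() (illustrative only;
 * struct example_dev and example_release_fn are made-up names): a caller
 * that may be in either interrupt or process context can use this to run
 * a routine needing process context, providing storage for the deferred
 * case itself.
 *
 *	struct example_dev {
 *		struct execute_work release_ew;
 *	};
 *
 *	static void example_release_fn(struct work_struct *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, release_ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(example_release_fn, &dev->release_ew);
 */
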
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}