workqueue: make init_workqueues() __init
[linux-2.6-block.git] / kernel / workqueue.c
1/*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton <andrewm@uow.edu.au>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/init.h>
23#include <linux/signal.h>
24#include <linux/completion.h>
25#include <linux/workqueue.h>
26#include <linux/slab.h>
27#include <linux/cpu.h>
28#include <linux/notifier.h>
29#include <linux/kthread.h>
30#include <linux/hardirq.h>
31#include <linux/mempolicy.h>
32#include <linux/freezer.h>
33#include <linux/kallsyms.h>
34#include <linux/debug_locks.h>
35
36/*
37 * The per-CPU workqueue (if single thread, we always use the first
38 * possible cpu).
39 */
40struct cpu_workqueue_struct {
41
42 spinlock_t lock;
43
44 struct list_head worklist;
45 wait_queue_head_t more_work;
46 struct work_struct *current_work;
47
48 struct workqueue_struct *wq;
49 struct task_struct *thread;
50 int should_stop;
51
52 int run_depth; /* Detect run_workqueue() recursion depth */
53} ____cacheline_aligned;
54
55/*
56 * The externally visible workqueue abstraction is an array of
57 * per-CPU workqueues:
58 */
59struct workqueue_struct {
60 struct cpu_workqueue_struct *cpu_wq;
61 struct list_head list;
62 const char *name;
63 int singlethread;
64 int freezeable; /* Freeze threads during suspend */
65};
66
 67/* All the per-cpu workqueues on the system, so that CPU hotplug can add/remove
 68   threads to each one as CPUs come and go. */
69static DEFINE_MUTEX(workqueue_mutex);
70static LIST_HEAD(workqueues);
71
72static int singlethread_cpu __read_mostly;
73static cpumask_t cpu_singlethread_map __read_mostly;
 74/* optimization: track only CPUs that have been brought up; cpu_possible_map would also work */
75static cpumask_t cpu_populated_map __read_mostly;
76
77/* If it's single threaded, it isn't in the list of workqueues. */
78static inline int is_single_threaded(struct workqueue_struct *wq)
79{
80 return wq->singlethread;
81}
82
83static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
84{
85 return is_single_threaded(wq)
86 ? &cpu_singlethread_map : &cpu_populated_map;
87}
88
89/*
90 * Set the workqueue on which a work item is to be run
91 * - Must *only* be called if the pending flag is set
92 */
93static inline void set_wq_data(struct work_struct *work, void *wq)
94{
95 unsigned long new;
96
97 BUG_ON(!work_pending(work));
98
99 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
100 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
101 atomic_long_set(&work->data, new);
102}
103
104static inline void *get_wq_data(struct work_struct *work)
105{
106 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
107}
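 
/*
 * An illustrative aside on the packing above: every cpu_workqueue_struct is
 * cacheline-aligned, so the two low bits of its address are free for the
 * WORK_STRUCT_* flags. Assuming the flag values from this kernel
 * generation's <linux/workqueue.h> (WORK_STRUCT_PENDING == bit 0,
 * WORK_STRUCT_NOAUTOREL == bit 1, WORK_STRUCT_FLAG_MASK == 3UL,
 * WORK_STRUCT_WQ_DATA_MASK == ~3UL):
 *
 *	data  = (unsigned long)cwq | flag_bits;
 *	cwq   = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	flags = data & WORK_STRUCT_FLAG_MASK;
 */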
108
109static void insert_work(struct cpu_workqueue_struct *cwq,
110 struct work_struct *work, int tail)
111{
112 set_wq_data(work, cwq);
113 if (tail)
114 list_add_tail(&work->entry, &cwq->worklist);
115 else
116 list_add(&work->entry, &cwq->worklist);
117 wake_up(&cwq->more_work);
118}
119
120/* Preempt must be disabled. */
121static void __queue_work(struct cpu_workqueue_struct *cwq,
122 struct work_struct *work)
123{
124 unsigned long flags;
125
126 spin_lock_irqsave(&cwq->lock, flags);
127 insert_work(cwq, work, 1);
128 spin_unlock_irqrestore(&cwq->lock, flags);
129}
130
131/**
132 * queue_work - queue work on a workqueue
133 * @wq: workqueue to use
134 * @work: work to queue
135 *
136 * Returns 0 if @work was already on a queue, non-zero otherwise.
137 *
 138 * We queue the work to the CPU on which it was submitted, but there is no
 139 * guarantee that it will be processed by that CPU.
140 */
141int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
142{
143 int ret = 0, cpu = get_cpu();
144
145 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
146 if (unlikely(is_single_threaded(wq)))
147 cpu = singlethread_cpu;
148 BUG_ON(!list_empty(&work->entry));
149 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
150 ret = 1;
151 }
152 put_cpu();
153 return ret;
154}
155EXPORT_SYMBOL_GPL(queue_work);
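 
/*
 * Usage sketch for queue_work() (illustrative only; the foo_* names are
 * hypothetical and assume a driver that created foo_wq with
 * create_workqueue("foo")):
 */
static struct workqueue_struct *foo_wq;

static void foo_work_fn(struct work_struct *work)
{
	/* runs later, in process context, in a foo_wq worker thread */
}

static DECLARE_WORK(foo_work, foo_work_fn);

static void foo_kick(void)
{
	/* returns 1 if we queued it here, 0 if it was already pending */
	if (!queue_work(foo_wq, &foo_work))
		printk(KERN_DEBUG "foo_work was already queued\n");
}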
156
157void delayed_work_timer_fn(unsigned long __data)
158{
159 struct delayed_work *dwork = (struct delayed_work *)__data;
160 struct workqueue_struct *wq = get_wq_data(&dwork->work);
161 int cpu = smp_processor_id();
162
163 if (unlikely(is_single_threaded(wq)))
164 cpu = singlethread_cpu;
165
166 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
167}
168
169/**
170 * queue_delayed_work - queue work on a workqueue after delay
171 * @wq: workqueue to use
172 * @dwork: delayable work to queue
173 * @delay: number of jiffies to wait before queueing
174 *
 175 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
176 */
177int fastcall queue_delayed_work(struct workqueue_struct *wq,
178 struct delayed_work *dwork, unsigned long delay)
179{
180 int ret = 0;
181 struct timer_list *timer = &dwork->timer;
182 struct work_struct *work = &dwork->work;
183
184 timer_stats_timer_set_start_info(timer);
185 if (delay == 0)
186 return queue_work(wq, work);
187
188 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
189 BUG_ON(timer_pending(timer));
190 BUG_ON(!list_empty(&work->entry));
191
192 /* This stores wq for the moment, for the timer_fn */
193 set_wq_data(work, wq);
194 timer->expires = jiffies + delay;
195 timer->data = (unsigned long)dwork;
196 timer->function = delayed_work_timer_fn;
197 add_timer(timer);
198 ret = 1;
199 }
200 return ret;
201}
202EXPORT_SYMBOL_GPL(queue_delayed_work);
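 
/*
 * Usage sketch for queue_delayed_work() (illustrative only; the bar_* names
 * are hypothetical, and bar_wq is assumed to come from create_workqueue()).
 * queue_delayed_work_on(), below, is the same except that the caller picks
 * the CPU explicitly:
 */
static struct workqueue_struct *bar_wq;

static void bar_poll_fn(struct work_struct *work)
{
	/* runs roughly one second after queueing, in process context */
}

static DECLARE_DELAYED_WORK(bar_poll, bar_poll_fn);

static void bar_start_poll(void)
{
	/* a delay of 0 degenerates to a plain queue_work() */
	queue_delayed_work(bar_wq, &bar_poll, HZ);
}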
203
204/**
205 * queue_delayed_work_on - queue work on specific CPU after delay
206 * @cpu: CPU number to execute work on
207 * @wq: workqueue to use
208 * @dwork: work to queue
209 * @delay: number of jiffies to wait before queueing
210 *
 211 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
212 */
213int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
214 struct delayed_work *dwork, unsigned long delay)
215{
216 int ret = 0;
217 struct timer_list *timer = &dwork->timer;
218 struct work_struct *work = &dwork->work;
219
220 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
221 BUG_ON(timer_pending(timer));
222 BUG_ON(!list_empty(&work->entry));
223
224 /* This stores wq for the moment, for the timer_fn */
225 set_wq_data(work, wq);
226 timer->expires = jiffies + delay;
227 timer->data = (unsigned long)dwork;
228 timer->function = delayed_work_timer_fn;
229 add_timer_on(timer, cpu);
230 ret = 1;
231 }
232 return ret;
233}
234EXPORT_SYMBOL_GPL(queue_delayed_work_on);
235
236static void run_workqueue(struct cpu_workqueue_struct *cwq)
237{
238 spin_lock_irq(&cwq->lock);
239 cwq->run_depth++;
240 if (cwq->run_depth > 3) {
241 /* morton gets to eat his hat */
242 printk("%s: recursion depth exceeded: %d\n",
243 __FUNCTION__, cwq->run_depth);
244 dump_stack();
245 }
246 while (!list_empty(&cwq->worklist)) {
247 struct work_struct *work = list_entry(cwq->worklist.next,
248 struct work_struct, entry);
249 work_func_t f = work->func;
250
251 cwq->current_work = work;
252 list_del_init(cwq->worklist.next);
253 spin_unlock_irq(&cwq->lock);
254
255 BUG_ON(get_wq_data(work) != cwq);
256 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
257 work_release(work);
258 f(work);
259
260 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
261 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
262 "%s/0x%08x/%d\n",
263 current->comm, preempt_count(),
264 current->pid);
265 printk(KERN_ERR " last function: ");
266 print_symbol("%s\n", (unsigned long)f);
267 debug_show_held_locks(current);
268 dump_stack();
269 }
270
271 spin_lock_irq(&cwq->lock);
272 cwq->current_work = NULL;
273 }
274 cwq->run_depth--;
275 spin_unlock_irq(&cwq->lock);
276}
277
278/*
279 * NOTE: the caller must not touch *cwq if this func returns true
280 */
281static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
282{
283 int should_stop = cwq->should_stop;
284
285 if (unlikely(should_stop)) {
286 spin_lock_irq(&cwq->lock);
287 should_stop = cwq->should_stop && list_empty(&cwq->worklist);
288 if (should_stop)
289 cwq->thread = NULL;
290 spin_unlock_irq(&cwq->lock);
291 }
292
293 return should_stop;
294}
295
296static int worker_thread(void *__cwq)
297{
298 struct cpu_workqueue_struct *cwq = __cwq;
299 DEFINE_WAIT(wait);
300 struct k_sigaction sa;
301 sigset_t blocked;
302
303 if (!cwq->wq->freezeable)
304 current->flags |= PF_NOFREEZE;
305
306 set_user_nice(current, -5);
307
308 /* Block and flush all signals */
309 sigfillset(&blocked);
310 sigprocmask(SIG_BLOCK, &blocked, NULL);
311 flush_signals(current);
312
313 /*
314 * We inherited MPOL_INTERLEAVE from the booting kernel.
 315 * Set MPOL_DEFAULT to ensure node-local allocations.
316 */
317 numa_default_policy();
318
319 /* SIG_IGN makes children autoreap: see do_notify_parent(). */
320 sa.sa.sa_handler = SIG_IGN;
321 sa.sa.sa_flags = 0;
322 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
323 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
324
325 for (;;) {
326 if (cwq->wq->freezeable)
327 try_to_freeze();
328
329 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
330 if (!cwq->should_stop && list_empty(&cwq->worklist))
331 schedule();
332 finish_wait(&cwq->more_work, &wait);
333
334 if (cwq_should_stop(cwq))
335 break;
336
337 run_workqueue(cwq);
338 }
339
340 return 0;
341}
342
343struct wq_barrier {
344 struct work_struct work;
345 struct completion done;
346};
347
348static void wq_barrier_func(struct work_struct *work)
349{
350 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
351 complete(&barr->done);
352}
353
354static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
355 struct wq_barrier *barr, int tail)
356{
357 INIT_WORK(&barr->work, wq_barrier_func);
358 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
359
360 init_completion(&barr->done);
361
362 insert_work(cwq, &barr->work, tail);
363}
364
365static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
366{
367 if (cwq->thread == current) {
368 /*
369 * Probably keventd trying to flush its own queue. So simply run
370 * it by hand rather than deadlocking.
371 */
372 run_workqueue(cwq);
373 } else {
374 struct wq_barrier barr;
375 int active = 0;
376
377 spin_lock_irq(&cwq->lock);
378 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
379 insert_wq_barrier(cwq, &barr, 1);
380 active = 1;
381 }
382 spin_unlock_irq(&cwq->lock);
383
384 if (active)
385 wait_for_completion(&barr.done);
386 }
387}
388
389/**
390 * flush_workqueue - ensure that any scheduled work has run to completion.
391 * @wq: workqueue to flush
392 *
393 * Forces execution of the workqueue and blocks until its completion.
394 * This is typically used in driver shutdown handlers.
395 *
 396 * We sleep until all work items which were queued on entry have been handled,
397 * but we are not livelocked by new incoming ones.
398 *
399 * This function used to run the workqueues itself. Now we just wait for the
400 * helper threads to do it.
401 */
402void fastcall flush_workqueue(struct workqueue_struct *wq)
403{
404 const cpumask_t *cpu_map = wq_cpu_map(wq);
405 int cpu;
406
407 might_sleep();
408 for_each_cpu_mask(cpu, *cpu_map)
409 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
410}
411EXPORT_SYMBOL_GPL(flush_workqueue);
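 
/*
 * Usage sketch for flush_workqueue() (illustrative only; struct baz_device
 * is hypothetical). The typical pattern is flush-before-teardown: stop
 * requeueing first, then wait for everything already queued:
 */
struct baz_device {
	struct workqueue_struct *wq;
	struct work_struct reset_work;
};

static void baz_shutdown(struct baz_device *dev)
{
	/* after this returns, nothing queued on dev->wq before the call is left */
	flush_workqueue(dev->wq);
}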
412
413static void wait_on_work(struct cpu_workqueue_struct *cwq,
414 struct work_struct *work)
415{
416 struct wq_barrier barr;
417 int running = 0;
418
419 spin_lock_irq(&cwq->lock);
420 if (unlikely(cwq->current_work == work)) {
421 insert_wq_barrier(cwq, &barr, 0);
422 running = 1;
423 }
424 spin_unlock_irq(&cwq->lock);
425
426 if (unlikely(running))
427 wait_for_completion(&barr.done);
428}
429
430/**
431 * flush_work - block until a work_struct's callback has terminated
432 * @wq: the workqueue on which the work is queued
433 * @work: the work which is to be flushed
434 *
435 * flush_work() will attempt to cancel the work if it is queued. If the work's
436 * callback appears to be running, flush_work() will block until it has
437 * completed.
438 *
439 * flush_work() is designed to be used when the caller is tearing down data
440 * structures which the callback function operates upon. It is expected that,
441 * prior to calling flush_work(), the caller has arranged for the work to not
442 * be requeued.
443 */
444void flush_work(struct workqueue_struct *wq, struct work_struct *work)
445{
446 const cpumask_t *cpu_map = wq_cpu_map(wq);
447 struct cpu_workqueue_struct *cwq;
448 int cpu;
449
450 might_sleep();
451
452 cwq = get_wq_data(work);
453 /* Was it ever queued ? */
454 if (!cwq)
455 return;
456
457 /*
458 * This work can't be re-queued, no need to re-check that
459 * get_wq_data() is still the same when we take cwq->lock.
460 */
461 spin_lock_irq(&cwq->lock);
462 list_del_init(&work->entry);
463 work_release(work);
464 spin_unlock_irq(&cwq->lock);
465
466 for_each_cpu_mask(cpu, *cpu_map)
467 wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
468}
469EXPORT_SYMBOL_GPL(flush_work);
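 
/*
 * Usage sketch for flush_work() (illustrative only; struct qux is
 * hypothetical). It is meant for tearing down a single embedded work item
 * once the caller has made sure it can no longer be requeued:
 */
struct qux {
	struct workqueue_struct *wq;
	struct work_struct refresh;
};

static void qux_destroy(struct qux *q)
{
	/* dequeue it if still pending, or wait for a running callback */
	flush_work(q->wq, &q->refresh);
	kfree(q);
}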
470
471
472static struct workqueue_struct *keventd_wq;
473
474/**
475 * schedule_work - put work task in global workqueue
476 * @work: job to be done
477 *
478 * This puts a job in the kernel-global workqueue.
479 */
480int fastcall schedule_work(struct work_struct *work)
481{
482 return queue_work(keventd_wq, work);
483}
484EXPORT_SYMBOL(schedule_work);
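 
/*
 * Usage sketch for schedule_work(): the classic "defer the sleepy half of an
 * interrupt" pattern (illustrative only; the quux_* names are hypothetical
 * and the handler assumes <linux/interrupt.h>). schedule_delayed_work(),
 * below, is the delayed variant of the same idea.
 */
static void quux_deferred_fn(struct work_struct *work)
{
	/* the part that may sleep runs here, in keventd's process context */
}

static DECLARE_WORK(quux_deferred, quux_deferred_fn);

static irqreturn_t quux_isr(int irq, void *dev_id)
{
	schedule_work(&quux_deferred);	/* cheap, and safe from hard-irq context */
	return IRQ_HANDLED;
}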
485
486/**
487 * schedule_delayed_work - put work task in global workqueue after delay
488 * @dwork: job to be done
489 * @delay: number of jiffies to wait or 0 for immediate execution
490 *
491 * After waiting for a given time this puts a job in the kernel-global
492 * workqueue.
493 */
494int fastcall schedule_delayed_work(struct delayed_work *dwork,
495 unsigned long delay)
496{
497 timer_stats_timer_set_start_info(&dwork->timer);
498 return queue_delayed_work(keventd_wq, dwork, delay);
499}
500EXPORT_SYMBOL(schedule_delayed_work);
501
502/**
503 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
504 * @cpu: cpu to use
505 * @dwork: job to be done
506 * @delay: number of jiffies to wait
507 *
508 * After waiting for a given time this puts a job in the kernel-global
509 * workqueue on the specified CPU.
510 */
511int schedule_delayed_work_on(int cpu,
512 struct delayed_work *dwork, unsigned long delay)
513{
514 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
515}
516EXPORT_SYMBOL(schedule_delayed_work_on);
517
518/**
519 * schedule_on_each_cpu - call a function on each online CPU from keventd
520 * @func: the function to call
521 *
522 * Returns zero on success.
 523 * Returns a negative errno on failure.
524 *
525 * Appears to be racy against CPU hotplug.
526 *
527 * schedule_on_each_cpu() is very slow.
528 */
529int schedule_on_each_cpu(work_func_t func)
530{
531 int cpu;
532 struct work_struct *works;
533
534 works = alloc_percpu(struct work_struct);
535 if (!works)
536 return -ENOMEM;
537
538 preempt_disable(); /* CPU hotplug */
539 for_each_online_cpu(cpu) {
540 struct work_struct *work = per_cpu_ptr(works, cpu);
541
542 INIT_WORK(work, func);
543 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
544 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
545 }
546 preempt_enable();
547 flush_workqueue(keventd_wq);
548 free_percpu(works);
549 return 0;
550}
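 
/*
 * Usage sketch for schedule_on_each_cpu() (illustrative only; the counter
 * drain is hypothetical). The call sleeps until the handler has run once on
 * every online CPU:
 */
static void drain_local_counters(struct work_struct *unused)
{
	/* executes on each online CPU, in that CPU's events/N thread */
}

static int drain_all_counters(void)
{
	return schedule_on_each_cpu(drain_local_counters);
}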
551
552void flush_scheduled_work(void)
553{
554 flush_workqueue(keventd_wq);
555}
556EXPORT_SYMBOL(flush_scheduled_work);
557
558void flush_work_keventd(struct work_struct *work)
559{
560 flush_work(keventd_wq, work);
561}
562EXPORT_SYMBOL(flush_work_keventd);
563
564/**
565 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
566 * @wq: the controlling workqueue structure
567 * @dwork: the delayed work struct
568 */
569void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
570 struct delayed_work *dwork)
571{
572 /* Was it ever queued ? */
573 if (!get_wq_data(&dwork->work))
574 return;
575
576 while (!cancel_delayed_work(dwork))
577 flush_workqueue(wq);
578}
579EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
580
581/**
582 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
583 * @dwork: the delayed work struct
584 */
585void cancel_rearming_delayed_work(struct delayed_work *dwork)
586{
587 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
588}
589EXPORT_SYMBOL(cancel_rearming_delayed_work);
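 
/*
 * Usage sketch of the self-rearming pattern these helpers exist for
 * (illustrative only; the corge_* names are hypothetical):
 */
static void corge_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(corge_poll, corge_poll_fn);

static void corge_poll_fn(struct work_struct *work)
{
	/* ... sample the hardware ..., then rearm ourselves */
	schedule_delayed_work(&corge_poll, HZ);
}

static void corge_stop(void)
{
	/*
	 * A bare cancel_delayed_work() can lose the race against a handler
	 * that is just about to requeue; this helper retries until the work
	 * item is really gone.
	 */
	cancel_rearming_delayed_work(&corge_poll);
}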
590
591/**
592 * execute_in_process_context - reliably execute the routine with user context
593 * @fn: the function to execute
594 * @ew: guaranteed storage for the execute work structure (must
595 * be available when the work executes)
596 *
597 * Executes the function immediately if process context is available,
598 * otherwise schedules the function for delayed execution.
599 *
600 * Returns: 0 - function was executed
601 * 1 - function was scheduled for execution
602 */
603int execute_in_process_context(work_func_t fn, struct execute_work *ew)
604{
605 if (!in_interrupt()) {
606 fn(&ew->work);
607 return 0;
608 }
609
610 INIT_WORK(&ew->work, fn);
611 schedule_work(&ew->work);
612
613 return 1;
614}
615EXPORT_SYMBOL_GPL(execute_in_process_context);
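 
/*
 * Usage sketch for execute_in_process_context() (illustrative only; the
 * grault_* names are hypothetical). The execute_work storage must outlive
 * the call, which is why it is embedded in the object being released:
 */
struct grault {
	struct execute_work release_work;
	/* ... driver state ... */
};

static void grault_release(struct work_struct *work)
{
	struct grault *g = container_of(work, struct grault, release_work.work);

	kfree(g);
}

static void grault_put_final(struct grault *g)
{
	/* runs the release inline unless we are in interrupt context */
	execute_in_process_context(grault_release, &g->release_work);
}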
616
617int keventd_up(void)
618{
619 return keventd_wq != NULL;
620}
621
622int current_is_keventd(void)
623{
624 struct cpu_workqueue_struct *cwq;
625 int cpu = smp_processor_id(); /* preempt-safe: keventd is per-cpu */
626 int ret = 0;
627
628 BUG_ON(!keventd_wq);
629
630 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
631 if (current == cwq->thread)
632 ret = 1;
633
634 return ret;
635
636}
637
638static struct cpu_workqueue_struct *
639init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
640{
641 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
642
643 cwq->wq = wq;
644 spin_lock_init(&cwq->lock);
645 INIT_LIST_HEAD(&cwq->worklist);
646 init_waitqueue_head(&cwq->more_work);
647
648 return cwq;
649}
650
651static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
652{
653 struct workqueue_struct *wq = cwq->wq;
654 const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
655 struct task_struct *p;
656
657 p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
658 /*
659 * Nobody can add the work_struct to this cwq,
660 * if (caller is __create_workqueue)
661 * nobody should see this wq
662 * else // caller is CPU_UP_PREPARE
663 * cpu is not on cpu_online_map
664 * so we can abort safely.
665 */
666 if (IS_ERR(p))
667 return PTR_ERR(p);
668
669 cwq->thread = p;
670 cwq->should_stop = 0;
671 if (!is_single_threaded(wq))
672 kthread_bind(p, cpu);
673
674 if (is_single_threaded(wq) || cpu_online(cpu))
675 wake_up_process(p);
676
677 return 0;
678}
679
680struct workqueue_struct *__create_workqueue(const char *name,
681 int singlethread, int freezeable)
682{
683 struct workqueue_struct *wq;
684 struct cpu_workqueue_struct *cwq;
685 int err = 0, cpu;
686
687 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
688 if (!wq)
689 return NULL;
690
691 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
692 if (!wq->cpu_wq) {
693 kfree(wq);
694 return NULL;
695 }
696
697 wq->name = name;
698 wq->singlethread = singlethread;
699 wq->freezeable = freezeable;
700 INIT_LIST_HEAD(&wq->list);
701
702 if (singlethread) {
703 cwq = init_cpu_workqueue(wq, singlethread_cpu);
704 err = create_workqueue_thread(cwq, singlethread_cpu);
705 } else {
706 mutex_lock(&workqueue_mutex);
707 list_add(&wq->list, &workqueues);
708
709 for_each_possible_cpu(cpu) {
710 cwq = init_cpu_workqueue(wq, cpu);
711 if (err || !cpu_online(cpu))
712 continue;
713 err = create_workqueue_thread(cwq, cpu);
714 }
715 mutex_unlock(&workqueue_mutex);
716 }
717
718 if (err) {
719 destroy_workqueue(wq);
720 wq = NULL;
721 }
722 return wq;
723}
724EXPORT_SYMBOL_GPL(__create_workqueue);
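 
/*
 * Callers normally reach __create_workqueue() through the wrappers in
 * <linux/workqueue.h>: create_workqueue() for one thread per CPU,
 * create_singlethread_workqueue() for a single thread, and
 * create_freezeable_workqueue() for workers frozen across suspend.
 * A sketch (illustrative only; the garply names are hypothetical):
 */
static struct workqueue_struct *garply_wq;

static int garply_setup(void)
{
	garply_wq = create_singlethread_workqueue("garply");
	if (!garply_wq)
		return -ENOMEM;
	return 0;
}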
725
726static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
727{
728 struct wq_barrier barr;
729 int alive = 0;
730
731 spin_lock_irq(&cwq->lock);
732 if (cwq->thread != NULL) {
733 insert_wq_barrier(cwq, &barr, 1);
734 cwq->should_stop = 1;
735 alive = 1;
736 }
737 spin_unlock_irq(&cwq->lock);
738
739 if (alive) {
740 wait_for_completion(&barr.done);
741
742 while (unlikely(cwq->thread != NULL))
743 cpu_relax();
744 /*
745 * Wait until cwq->thread unlocks cwq->lock,
746 * it won't touch *cwq after that.
747 */
748 smp_rmb();
749 spin_unlock_wait(&cwq->lock);
750 }
751}
752
753/**
754 * destroy_workqueue - safely terminate a workqueue
755 * @wq: target workqueue
756 *
757 * Safely destroy a workqueue. All work currently pending will be done first.
758 */
759void destroy_workqueue(struct workqueue_struct *wq)
760{
761 const cpumask_t *cpu_map = wq_cpu_map(wq);
762 struct cpu_workqueue_struct *cwq;
763 int cpu;
764
765 mutex_lock(&workqueue_mutex);
766 list_del(&wq->list);
767 mutex_unlock(&workqueue_mutex);
768
769 for_each_cpu_mask(cpu, *cpu_map) {
770 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
771 cleanup_workqueue_thread(cwq, cpu);
772 }
773
774 free_percpu(wq->cpu_wq);
775 kfree(wq);
776}
777EXPORT_SYMBOL_GPL(destroy_workqueue);
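 
/*
 * Usage sketch of the usual create/destroy lifecycle (illustrative only; the
 * waldo module is hypothetical). destroy_workqueue() runs any pending work
 * first, but the caller must already have stopped requeueing:
 */
static struct workqueue_struct *waldo_wq;

static int __init waldo_init(void)
{
	waldo_wq = create_workqueue("waldo");
	return waldo_wq ? 0 : -ENOMEM;
}

static void __exit waldo_exit(void)
{
	destroy_workqueue(waldo_wq);
}

module_init(waldo_init);
module_exit(waldo_exit);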
778
779static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
780 unsigned long action,
781 void *hcpu)
782{
783 unsigned int cpu = (unsigned long)hcpu;
784 struct cpu_workqueue_struct *cwq;
785 struct workqueue_struct *wq;
786
787 switch (action) {
788 case CPU_LOCK_ACQUIRE:
789 mutex_lock(&workqueue_mutex);
790 return NOTIFY_OK;
791
792 case CPU_LOCK_RELEASE:
793 mutex_unlock(&workqueue_mutex);
794 return NOTIFY_OK;
795
796 case CPU_UP_PREPARE:
797 cpu_set(cpu, cpu_populated_map);
798 }
799
800 list_for_each_entry(wq, &workqueues, list) {
801 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
802
803 switch (action) {
804 case CPU_UP_PREPARE:
805 if (!create_workqueue_thread(cwq, cpu))
806 break;
807 printk(KERN_ERR "workqueue for %i failed\n", cpu);
808 return NOTIFY_BAD;
809
810 case CPU_ONLINE:
811 wake_up_process(cwq->thread);
812 break;
813
814 case CPU_UP_CANCELED:
815 if (cwq->thread)
816 wake_up_process(cwq->thread);
817 case CPU_DEAD:
818 cleanup_workqueue_thread(cwq, cpu);
819 break;
820 }
821 }
822
823 return NOTIFY_OK;
824}
825
826void __init init_workqueues(void)
827{
828 cpu_populated_map = cpu_online_map;
829 singlethread_cpu = first_cpu(cpu_possible_map);
830 cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
831 hotcpu_notifier(workqueue_cpu_callback, 0);
832 keventd_wq = create_workqueue("events");
833 BUG_ON(!keventd_wq);
834}