2 * kernel/workqueue.c - generic async execution with shared worker pool
4 * Copyright (C) 2002 Ingo Molnar
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
12 * Made to use alloc_percpu by Christoph Lameter.
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
23 * Please read Documentation/workqueue.txt for details.
26 #include <linux/export.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/completion.h>
32 #include <linux/workqueue.h>
33 #include <linux/slab.h>
34 #include <linux/cpu.h>
35 #include <linux/notifier.h>
36 #include <linux/kthread.h>
37 #include <linux/hardirq.h>
38 #include <linux/mempolicy.h>
39 #include <linux/freezer.h>
40 #include <linux/kallsyms.h>
41 #include <linux/debug_locks.h>
42 #include <linux/lockdep.h>
43 #include <linux/idr.h>
45 #include "workqueue_sched.h"
51 * A bound gcwq is either associated or disassociated with its CPU.
52 * While associated (!DISASSOCIATED), all workers are bound to the
53 * CPU and none has %WORKER_UNBOUND set and concurrency management is in effect.
56 * While DISASSOCIATED, the cpu may be offline and all workers have
57 * %WORKER_UNBOUND set and concurrency management disabled, and may
58 * be executing on any CPU. The gcwq behaves as an unbound one.
60 * Note that DISASSOCIATED can be flipped only while holding
61 * managership of all pools on the gcwq to avoid changing binding
62 * state while create_worker() is in progress.
64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
65 GCWQ_FREEZING = 1 << 1, /* freeze in progress */
68 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
71 WORKER_STARTED = 1 << 0, /* started */
72 WORKER_DIE = 1 << 1, /* die die die */
73 WORKER_IDLE = 1 << 2, /* is idle */
74 WORKER_PREP = 1 << 3, /* preparing to run works */
75 WORKER_REBIND = 1 << 5, /* mom is home, come back */
76 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
77 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
79 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
82 /* gcwq->trustee_state */
83 TRUSTEE_START = 0, /* start */
84 TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
85 TRUSTEE_BUTCHER = 2, /* butcher workers */
86 TRUSTEE_RELEASE = 3, /* release workers */
87 TRUSTEE_DONE = 4, /* trustee is done */
89 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
91 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
92 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
93 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
95 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
96 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
98 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
99 /* call for help after 10ms (min two ticks) */
101 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
102 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
103 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
106 * Rescue workers are used only in emergencies and shared by
107 * all cpus. Give -20.
109 RESCUER_NICE_LEVEL = -20,
110 HIGHPRI_NICE_LEVEL = -20,
114 * Structure fields follow one of the following exclusion rules.
116 * I: Modifiable by initialization/destruction paths and read-only for everyone else.
119 * P: Preemption protected. Disabling preemption is enough and should
120 * only be modified and accessed from the local cpu.
122 * L: gcwq->lock protected. Access with gcwq->lock held.
124 * X: During normal operation, modification requires gcwq->lock and
125 * should be done only from local cpu. Either disabling preemption
126 * on local cpu or grabbing gcwq->lock is enough for read access.
127 * If GCWQ_DISASSOCIATED is set, it's identical to L.
129 * F: wq->flush_mutex protected.
131 * W: workqueue_lock protected.
139 * The poor guys doing the actual heavy lifting. All on-duty workers
140 * are either serving the manager role, on idle list or on busy hash.
143 /* on idle list while idle, on busy hash table while busy */
145 struct list_head entry; /* L: while idle */
146 struct hlist_node hentry; /* L: while busy */
149 struct work_struct *current_work; /* L: work being processed */
150 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
151 struct list_head scheduled; /* L: scheduled works */
152 struct task_struct *task; /* I: worker task */
153 struct worker_pool *pool; /* I: the associated pool */
154 /* 64 bytes boundary on 64bit, 32 on 32bit */
155 unsigned long last_active; /* L: last active timestamp */
156 unsigned int flags; /* X: flags */
157 int id; /* I: worker id */
159 /* for rebinding worker to CPU */
160 struct idle_rebind *idle_rebind; /* L: for idle worker */
161 struct work_struct rebind_work; /* L: for busy worker */
165 struct global_cwq *gcwq; /* I: the owning gcwq */
166 unsigned int flags; /* X: flags */
168 struct list_head worklist; /* L: list of pending works */
169 int nr_workers; /* L: total number of workers */
170 int nr_idle; /* L: currently idle ones */
172 struct list_head idle_list; /* X: list of idle workers */
173 struct timer_list idle_timer; /* L: worker idle timeout */
174 struct timer_list mayday_timer; /* L: SOS timer for workers */
176 struct mutex manager_mutex; /* mutex manager should hold */
177 struct ida worker_ida; /* L: for worker IDs */
181 * Global per-cpu workqueue. There's one and only one for each cpu
182 * and all works are queued and processed here regardless of their target workqueues.
186 spinlock_t lock; /* the gcwq lock */
187 unsigned int cpu; /* I: the associated cpu */
188 unsigned int flags; /* L: GCWQ_* flags */
190 /* workers are chained either in busy_hash or pool idle_list */
191 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
192 /* L: hash of busy workers */
194 struct worker_pool pools[2]; /* normal and highpri pools */
196 wait_queue_head_t rebind_hold; /* rebind hold wait */
198 struct task_struct *trustee; /* L: for gcwq shutdown */
199 unsigned int trustee_state; /* L: trustee state */
200 wait_queue_head_t trustee_wait; /* trustee wait */
201 } ____cacheline_aligned_in_smp;
204 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
205 * work_struct->data are used for flags and thus cwqs need to be
206 * aligned to at least 1 << WORK_STRUCT_FLAG_BITS so the pointer's low bits stay free for the flags.
208 struct cpu_workqueue_struct {
209 struct worker_pool *pool; /* I: the associated pool */
210 struct workqueue_struct *wq; /* I: the owning workqueue */
211 int work_color; /* L: current color */
212 int flush_color; /* L: flushing color */
213 int nr_in_flight[WORK_NR_COLORS];
214 /* L: nr of in_flight works */
215 int nr_active; /* L: nr of active works */
216 int max_active; /* L: max active works */
217 struct list_head delayed_works; /* L: delayed works */
221 * Structure used to wait for workqueue flush.
224 struct list_head list; /* F: list of flushers */
225 int flush_color; /* F: flush color waiting for */
226 struct completion done; /* flush completion */
230 * All cpumasks are assumed to be always set on UP and thus can't be
231 * used to determine whether there's something to be done.
234 typedef cpumask_var_t mayday_mask_t;
235 #define mayday_test_and_set_cpu(cpu, mask) \
236 cpumask_test_and_set_cpu((cpu), (mask))
237 #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
238 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
239 #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
240 #define free_mayday_mask(mask) free_cpumask_var((mask))
242 typedef unsigned long mayday_mask_t;
243 #define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
244 #define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
245 #define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
246 #define alloc_mayday_mask(maskp, gfp) true
247 #define free_mayday_mask(mask) do { } while (0)
251 * The externally visible workqueue abstraction is an array of
252 * per-CPU workqueues:
254 struct workqueue_struct {
255 unsigned int flags; /* W: WQ_* flags */
257 struct cpu_workqueue_struct __percpu *pcpu;
258 struct cpu_workqueue_struct *single;
260 } cpu_wq; /* I: cwq's */
261 struct list_head list; /* W: list of all workqueues */
263 struct mutex flush_mutex; /* protects wq flushing */
264 int work_color; /* F: current work color */
265 int flush_color; /* F: current flush color */
266 atomic_t nr_cwqs_to_flush; /* flush in progress */
267 struct wq_flusher *first_flusher; /* F: first flusher */
268 struct list_head flusher_queue; /* F: flush waiters */
269 struct list_head flusher_overflow; /* F: flush overflow list */
271 mayday_mask_t mayday_mask; /* cpus requesting rescue */
272 struct worker *rescuer; /* I: rescue worker */
274 int nr_drainers; /* W: drain in progress */
275 int saved_max_active; /* W: saved cwq max_active */
276 #ifdef CONFIG_LOCKDEP
277 struct lockdep_map lockdep_map;
279 char name[]; /* I: workqueue name */
282 struct workqueue_struct *system_wq __read_mostly;
283 struct workqueue_struct *system_long_wq __read_mostly;
284 struct workqueue_struct *system_nrt_wq __read_mostly;
285 struct workqueue_struct *system_unbound_wq __read_mostly;
286 struct workqueue_struct *system_freezable_wq __read_mostly;
287 struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
288 EXPORT_SYMBOL_GPL(system_wq);
289 EXPORT_SYMBOL_GPL(system_long_wq);
290 EXPORT_SYMBOL_GPL(system_nrt_wq);
291 EXPORT_SYMBOL_GPL(system_unbound_wq);
292 EXPORT_SYMBOL_GPL(system_freezable_wq);
293 EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
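/*
 * Illustrative sketch, not part of the original file: typical driver-side
 * use of the system workqueues exported above.  example_work_fn and
 * example_work are hypothetical names used only for this example.
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("workqueue example: running in process context\n");
}
static DECLARE_WORK(example_work, example_work_fn);

static void __maybe_unused example_queue_on_system_wq(void)
{
	/* short items go to system_wq, long-running ones to system_long_wq */
	if (!queue_work(system_wq, &example_work))
		pr_info("workqueue example: work was already pending\n");
}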
295 #define CREATE_TRACE_POINTS
296 #include <trace/events/workqueue.h>
298 #define for_each_worker_pool(pool, gcwq) \
299 for ((pool) = &(gcwq)->pools[0]; \
300 (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
302 #define for_each_busy_worker(worker, i, pos, gcwq) \
303 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
304 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
306 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
309 if (cpu < nr_cpu_ids) {
311 cpu = cpumask_next(cpu, mask);
312 if (cpu < nr_cpu_ids)
316 return WORK_CPU_UNBOUND;
318 return WORK_CPU_NONE;
321 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
322 struct workqueue_struct *wq)
324 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
330 * An extra gcwq is defined for an invalid cpu number
331 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
332 * specific CPU. The following iterators are similar to
333 * for_each_*_cpu() iterators but also considers the unbound gcwq.
335 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
336 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
337 * for_each_cwq_cpu() : possible CPUs for bound workqueues,
338 * WORK_CPU_UNBOUND for unbound workqueues
340 #define for_each_gcwq_cpu(cpu) \
341 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
342 (cpu) < WORK_CPU_NONE; \
343 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
345 #define for_each_online_gcwq_cpu(cpu) \
346 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
347 (cpu) < WORK_CPU_NONE; \
348 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
350 #define for_each_cwq_cpu(cpu, wq) \
351 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
352 (cpu) < WORK_CPU_NONE; \
353 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
355 #ifdef CONFIG_DEBUG_OBJECTS_WORK
357 static struct debug_obj_descr work_debug_descr;
359 static void *work_debug_hint(void *addr)
361 return ((struct work_struct *) addr)->func;
365 * fixup_init is called when:
366 * - an active object is initialized
368 static int work_fixup_init(void *addr, enum debug_obj_state state)
370 struct work_struct *work = addr;
373 case ODEBUG_STATE_ACTIVE:
374 cancel_work_sync(work);
375 debug_object_init(work, &work_debug_descr);
383 * fixup_activate is called when:
384 * - an active object is activated
385 * - an unknown object is activated (might be a statically initialized object)
387 static int work_fixup_activate(void *addr, enum debug_obj_state state)
389 struct work_struct *work = addr;
393 case ODEBUG_STATE_NOTAVAILABLE:
395 * This is not really a fixup. The work struct was
396 * statically initialized. We just make sure that it
397 * is tracked in the object tracker.
399 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
400 debug_object_init(work, &work_debug_descr);
401 debug_object_activate(work, &work_debug_descr);
407 case ODEBUG_STATE_ACTIVE:
416 * fixup_free is called when:
417 * - an active object is freed
419 static int work_fixup_free(void *addr, enum debug_obj_state state)
421 struct work_struct *work = addr;
424 case ODEBUG_STATE_ACTIVE:
425 cancel_work_sync(work);
426 debug_object_free(work, &work_debug_descr);
433 static struct debug_obj_descr work_debug_descr = {
434 .name = "work_struct",
435 .debug_hint = work_debug_hint,
436 .fixup_init = work_fixup_init,
437 .fixup_activate = work_fixup_activate,
438 .fixup_free = work_fixup_free,
441 static inline void debug_work_activate(struct work_struct *work)
443 debug_object_activate(work, &work_debug_descr);
446 static inline void debug_work_deactivate(struct work_struct *work)
448 debug_object_deactivate(work, &work_debug_descr);
451 void __init_work(struct work_struct *work, int onstack)
454 debug_object_init_on_stack(work, &work_debug_descr);
456 debug_object_init(work, &work_debug_descr);
458 EXPORT_SYMBOL_GPL(__init_work);
460 void destroy_work_on_stack(struct work_struct *work)
462 debug_object_free(work, &work_debug_descr);
464 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
467 static inline void debug_work_activate(struct work_struct *work) { }
468 static inline void debug_work_deactivate(struct work_struct *work) { }
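/*
 * Illustrative sketch, not part of the original file: how an on-stack work
 * item pairs with the debugobjects hooks above.  The helper below is a
 * hypothetical caller, not kernel API.
 */
static void __maybe_unused example_on_stack_work(work_func_t fn)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, fn);	/* goes through __init_work(onstack=1) */
	schedule_work(&work);
	flush_work(&work);		/* must finish before leaving the frame */
	destroy_work_on_stack(&work);	/* lets debugobjects validate the free */
}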
471 /* Serializes the accesses to the list of workqueues. */
472 static DEFINE_SPINLOCK(workqueue_lock);
473 static LIST_HEAD(workqueues);
474 static bool workqueue_freezing; /* W: have wqs started freezing? */
477 * The almighty global cpu workqueues. nr_running is the only field
478 * which is expected to be used frequently by other cpus via
479 * try_to_wake_up(). Put it in a separate cacheline.
481 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
482 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
485 * Global cpu workqueue and nr_running counter for unbound gcwq. The
486 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
487 * workers have WORKER_UNBOUND set.
489 static struct global_cwq unbound_global_cwq;
490 static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
491 [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
494 static int worker_thread(void *__worker);
496 static int worker_pool_pri(struct worker_pool *pool)
498 return pool - pool->gcwq->pools;
501 static struct global_cwq *get_gcwq(unsigned int cpu)
503 if (cpu != WORK_CPU_UNBOUND)
504 return &per_cpu(global_cwq, cpu);
506 return &unbound_global_cwq;
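/*
 * Illustrative sketch, not part of the original file: walking every gcwq,
 * including the unbound one, with the for_each_gcwq_cpu() iterator defined
 * above.  Paths such as workqueue freezing follow the same pattern.
 */
static void __maybe_unused example_walk_gcwqs(void)
{
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);
		/* inspect or adjust gcwq state under gcwq->lock here */
		spin_unlock_irq(&gcwq->lock);
	}
}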
509 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
511 int cpu = pool->gcwq->cpu;
512 int idx = worker_pool_pri(pool);
514 if (cpu != WORK_CPU_UNBOUND)
515 return &per_cpu(pool_nr_running, cpu)[idx];
517 return &unbound_pool_nr_running[idx];
520 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
521 struct workqueue_struct *wq)
523 if (!(wq->flags & WQ_UNBOUND)) {
524 if (likely(cpu < nr_cpu_ids))
525 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
526 } else if (likely(cpu == WORK_CPU_UNBOUND))
527 return wq->cpu_wq.single;
531 static unsigned int work_color_to_flags(int color)
533 return color << WORK_STRUCT_COLOR_SHIFT;
536 static int get_work_color(struct work_struct *work)
538 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
539 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
542 static int work_next_color(int color)
544 return (color + 1) % WORK_NR_COLORS;
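/*
 * Worked example (illustrative, assuming the usual WORK_STRUCT_COLOR_BITS
 * of 4): WORK_NR_COLORS is 15, so work_color_to_flags(5) places color 5 in
 * the color bits, get_work_color() recovers 5 when the work retires, and
 * work_next_color() cycles 0, 1, ..., 14, 0 while color 15 is reserved as
 * WORK_NO_COLOR for works that don't participate in flushing.
 */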
548 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
549 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is
550 * cleared and the work data contains the cpu number it was last on.
552 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
553 * cwq, cpu or clear work->data. These functions should only be
554 * called while the work is owned - ie. while the PENDING bit is set.
556 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
557 * corresponding to a work. gcwq is available once the work has been
558 * queued anywhere after initialization. cwq is available only from
559 * queueing until execution starts.
561 static inline void set_work_data(struct work_struct *work, unsigned long data,
564 BUG_ON(!work_pending(work));
565 atomic_long_set(&work->data, data | flags | work_static(work));
568 static void set_work_cwq(struct work_struct *work,
569 struct cpu_workqueue_struct *cwq,
570 unsigned long extra_flags)
572 set_work_data(work, (unsigned long)cwq,
573 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
576 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
578 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
581 static void clear_work_data(struct work_struct *work)
583 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
586 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
588 unsigned long data = atomic_long_read(&work->data);
590 if (data & WORK_STRUCT_CWQ)
591 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
596 static struct global_cwq *get_work_gcwq(struct work_struct *work)
598 unsigned long data = atomic_long_read(&work->data);
601 if (data & WORK_STRUCT_CWQ)
602 return ((struct cpu_workqueue_struct *)
603 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
605 cpu = data >> WORK_STRUCT_FLAG_BITS;
606 if (cpu == WORK_CPU_NONE)
609 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
610 return get_gcwq(cpu);
614 * Policy functions. These define the policies on how the global worker
615 * pools are managed. Unless noted otherwise, these functions assume that
616 * they're being called with gcwq->lock held.
619 static bool __need_more_worker(struct worker_pool *pool)
621 return !atomic_read(get_pool_nr_running(pool));
625 * Need to wake up a worker? Called from anything but currently idle workers.
628 * Note that, because unbound workers never contribute to nr_running, this
629 * function will always return %true for unbound gcwq as long as the
630 * worklist isn't empty.
632 static bool need_more_worker(struct worker_pool *pool)
634 return !list_empty(&pool->worklist) && __need_more_worker(pool);
637 /* Can I start working? Called from busy but !running workers. */
638 static bool may_start_working(struct worker_pool *pool)
640 return pool->nr_idle;
643 /* Do I need to keep working? Called from currently running workers. */
644 static bool keep_working(struct worker_pool *pool)
646 atomic_t *nr_running = get_pool_nr_running(pool);
648 return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
651 /* Do we need a new worker? Called from manager. */
652 static bool need_to_create_worker(struct worker_pool *pool)
654 return need_more_worker(pool) && !may_start_working(pool);
657 /* Do I need to be the manager? */
658 static bool need_to_manage_workers(struct worker_pool *pool)
660 return need_to_create_worker(pool) ||
661 (pool->flags & POOL_MANAGE_WORKERS);
664 /* Do we have too many workers and should some go away? */
665 static bool too_many_workers(struct worker_pool *pool)
667 bool managing = mutex_is_locked(&pool->manager_mutex);
668 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
669 int nr_busy = pool->nr_workers - nr_idle;
671 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
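/*
 * Worked example with illustrative numbers: a pool with nr_workers == 16,
 * nr_idle == 6 and no manager has nr_busy == 10, and (6 - 2) *
 * MAX_IDLE_WORKERS_RATIO == 16 >= 10, so the idle timer starts retiring
 * workers; with nr_idle == 4, (4 - 2) * 4 == 8 < 12 and the pool is left
 * alone.
 */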
678 /* Return the first worker. Safe with preemption disabled */
679 static struct worker *first_worker(struct worker_pool *pool)
681 if (unlikely(list_empty(&pool->idle_list)))
684 return list_first_entry(&pool->idle_list, struct worker, entry);
688 * wake_up_worker - wake up an idle worker
689 * @pool: worker pool to wake worker from
691 * Wake up the first idle worker of @pool.
694 * spin_lock_irq(gcwq->lock).
696 static void wake_up_worker(struct worker_pool *pool)
698 struct worker *worker = first_worker(pool);
701 wake_up_process(worker->task);
705 * wq_worker_waking_up - a worker is waking up
706 * @task: task waking up
707 * @cpu: CPU @task is waking up to
709 * This function is called during try_to_wake_up() when a worker is being awoken.
713 * spin_lock_irq(rq->lock)
715 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
717 struct worker *worker = kthread_data(task);
719 if (!(worker->flags & WORKER_NOT_RUNNING))
720 atomic_inc(get_pool_nr_running(worker->pool));
724 * wq_worker_sleeping - a worker is going to sleep
725 * @task: task going to sleep
726 * @cpu: CPU in question, must be the current CPU number
728 * This function is called during schedule() when a busy worker is
729 * going to sleep. A worker on the same cpu can be woken up by
730 * returning a pointer to its task.
733 * spin_lock_irq(rq->lock)
736 * Worker task on @cpu to wake up, %NULL if none.
738 struct task_struct *wq_worker_sleeping(struct task_struct *task,
741 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
742 struct worker_pool *pool = worker->pool;
743 atomic_t *nr_running = get_pool_nr_running(pool);
745 if (worker->flags & WORKER_NOT_RUNNING)
748 /* this can only happen on the local cpu */
749 BUG_ON(cpu != raw_smp_processor_id());
752 * The counterpart of the following dec_and_test, implied mb,
753 * worklist not empty test sequence is in insert_work().
754 * Please read comment there.
756 * NOT_RUNNING is clear. This means that trustee is not in
757 * charge and we're running on the local cpu w/ rq lock held
758 * and preemption disabled, which in turn means that no one else
759 * could be manipulating idle_list, so dereferencing idle_list
760 * without gcwq lock is safe.
762 if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
763 to_wakeup = first_worker(pool);
764 return to_wakeup ? to_wakeup->task : NULL;
768 * worker_set_flags - set worker flags and adjust nr_running accordingly
770 * @flags: flags to set
771 * @wakeup: wakeup an idle worker if necessary
773 * Set @flags in @worker->flags and adjust nr_running accordingly. If
774 * nr_running becomes zero and @wakeup is %true, an idle worker is woken up.
778 * spin_lock_irq(gcwq->lock)
780 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
783 struct worker_pool *pool = worker->pool;
785 WARN_ON_ONCE(worker->task != current);
788 * If transitioning into NOT_RUNNING, adjust nr_running and
789 * wake up an idle worker as necessary if requested by @wakeup.
792 if ((flags & WORKER_NOT_RUNNING) &&
793 !(worker->flags & WORKER_NOT_RUNNING)) {
794 atomic_t *nr_running = get_pool_nr_running(pool);
797 if (atomic_dec_and_test(nr_running) &&
798 !list_empty(&pool->worklist))
799 wake_up_worker(pool);
801 atomic_dec(nr_running);
804 worker->flags |= flags;
808 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
810 * @flags: flags to clear
812 * Clear @flags in @worker->flags and adjust nr_running accordingly.
815 * spin_lock_irq(gcwq->lock)
817 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
819 struct worker_pool *pool = worker->pool;
820 unsigned int oflags = worker->flags;
822 WARN_ON_ONCE(worker->task != current);
824 worker->flags &= ~flags;
827 * If transitioning out of NOT_RUNNING, increment nr_running. Note
828 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
829 * of multiple flags, not a single flag.
831 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
832 if (!(worker->flags & WORKER_NOT_RUNNING))
833 atomic_inc(get_pool_nr_running(pool));
837 * busy_worker_head - return the busy hash head for a work
838 * @gcwq: gcwq of interest
839 * @work: work to be hashed
841 * Return hash head of @gcwq for @work.
844 * spin_lock_irq(gcwq->lock).
847 * Pointer to the hash head.
849 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
850 struct work_struct *work)
852 const int base_shift = ilog2(sizeof(struct work_struct));
853 unsigned long v = (unsigned long)work;
855 /* simple shift and fold hash, do we need something better? */
856 v >>= base_shift;
857 v += v >> BUSY_WORKER_HASH_ORDER;
858 v &= BUSY_WORKER_HASH_MASK;
860 return &gcwq->busy_hash[v];
864 * __find_worker_executing_work - find worker which is executing a work
865 * @gcwq: gcwq of interest
866 * @bwh: hash head as returned by busy_worker_head()
867 * @work: work to find worker for
869 * Find a worker which is executing @work on @gcwq. @bwh should be
870 * the hash head obtained by calling busy_worker_head() with the same work.
874 * spin_lock_irq(gcwq->lock).
877 * Pointer to worker which is executing @work if found, NULL otherwise.
880 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
881 struct hlist_head *bwh,
882 struct work_struct *work)
884 struct worker *worker;
885 struct hlist_node *tmp;
887 hlist_for_each_entry(worker, tmp, bwh, hentry)
888 if (worker->current_work == work)
889 return worker;
891 return NULL;
894 * find_worker_executing_work - find worker which is executing a work
895 * @gcwq: gcwq of interest
896 * @work: work to find worker for
898 * Find a worker which is executing @work on @gcwq. This function is
899 * identical to __find_worker_executing_work() except that this
900 * function calculates @bwh itself.
903 * spin_lock_irq(gcwq->lock).
906 * Pointer to worker which is executing @work if found, NULL otherwise.
909 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
910 struct work_struct *work)
912 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
917 * insert_work - insert a work into gcwq
918 * @cwq: cwq @work belongs to
919 * @work: work to insert
920 * @head: insertion point
921 * @extra_flags: extra WORK_STRUCT_* flags to set
923 * Insert @work which belongs to @cwq into @gcwq after @head.
924 * @extra_flags is or'd to work_struct flags.
927 * spin_lock_irq(gcwq->lock).
929 static void insert_work(struct cpu_workqueue_struct *cwq,
930 struct work_struct *work, struct list_head *head,
931 unsigned int extra_flags)
933 struct worker_pool *pool = cwq->pool;
935 /* we own @work, set data and link */
936 set_work_cwq(work, cwq, extra_flags);
939 * Ensure that we get the right work->data if we see the
940 * result of list_add() below, see try_to_grab_pending().
944 list_add_tail(&work->entry, head);
947 * Ensure either worker_sched_deactivated() sees the above
948 * list_add_tail() or we see zero nr_running to avoid workers
949 * lying around lazily while there are works to be processed.
953 if (__need_more_worker(pool))
954 wake_up_worker(pool);
958 * Test whether @work is being queued from another work executing on the
959 * same workqueue. This is rather expensive and should only be used from cold paths.
962 static bool is_chained_work(struct workqueue_struct *wq)
967 for_each_gcwq_cpu(cpu) {
968 struct global_cwq *gcwq = get_gcwq(cpu);
969 struct worker *worker;
970 struct hlist_node *pos;
973 spin_lock_irqsave(&gcwq->lock, flags);
974 for_each_busy_worker(worker, i, pos, gcwq) {
975 if (worker->task != current)
977 spin_unlock_irqrestore(&gcwq->lock, flags);
979 * I'm @worker, no locking necessary. See if @work
980 * is headed to the same workqueue.
982 return worker->current_cwq->wq == wq;
984 spin_unlock_irqrestore(&gcwq->lock, flags);
989 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
990 struct work_struct *work)
992 struct global_cwq *gcwq;
993 struct cpu_workqueue_struct *cwq;
994 struct list_head *worklist;
995 unsigned int work_flags;
998 debug_work_activate(work);
1000 /* if dying, only works from the same workqueue are allowed */
1001 if (unlikely(wq->flags & WQ_DRAINING) &&
1002 WARN_ON_ONCE(!is_chained_work(wq)))
1005 /* determine gcwq to use */
1006 if (!(wq->flags & WQ_UNBOUND)) {
1007 struct global_cwq *last_gcwq;
1009 if (unlikely(cpu == WORK_CPU_UNBOUND))
1010 cpu = raw_smp_processor_id();
1013 * It's multi cpu. If @wq is non-reentrant and @work
1014 * was previously on a different cpu, it might still
1015 * be running there, in which case the work needs to
1016 * be queued on that cpu to guarantee non-reentrance.
1018 gcwq = get_gcwq(cpu);
1019 if (wq->flags & WQ_NON_REENTRANT &&
1020 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1021 struct worker *worker;
1023 spin_lock_irqsave(&last_gcwq->lock, flags);
1025 worker = find_worker_executing_work(last_gcwq, work);
1027 if (worker && worker->current_cwq->wq == wq)
1030 /* meh... not running there, queue here */
1031 spin_unlock_irqrestore(&last_gcwq->lock, flags);
1032 spin_lock_irqsave(&gcwq->lock, flags);
1035 spin_lock_irqsave(&gcwq->lock, flags);
1037 gcwq = get_gcwq(WORK_CPU_UNBOUND);
1038 spin_lock_irqsave(&gcwq->lock, flags);
1041 /* gcwq determined, get cwq and queue */
1042 cwq = get_cwq(gcwq->cpu, wq);
1043 trace_workqueue_queue_work(cpu, cwq, work);
1045 if (WARN_ON(!list_empty(&work->entry))) {
1046 spin_unlock_irqrestore(&gcwq->lock, flags);
1050 cwq->nr_in_flight[cwq->work_color]++;
1051 work_flags = work_color_to_flags(cwq->work_color);
1053 if (likely(cwq->nr_active < cwq->max_active)) {
1054 trace_workqueue_activate_work(work);
1056 worklist = &cwq->pool->worklist;
1058 work_flags |= WORK_STRUCT_DELAYED;
1059 worklist = &cwq->delayed_works;
1062 insert_work(cwq, work, worklist, work_flags);
1064 spin_unlock_irqrestore(&gcwq->lock, flags);
1068 * queue_work - queue work on a workqueue
1069 * @wq: workqueue to use
1070 * @work: work to queue
1072 * Returns 0 if @work was already on a queue, non-zero otherwise.
1074 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1075 * it can be processed by another CPU.
1077 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1081 ret = queue_work_on(get_cpu(), wq, work);
1086 EXPORT_SYMBOL_GPL(queue_work);
1089 * queue_work_on - queue work on specific cpu
1090 * @cpu: CPU number to execute work on
1091 * @wq: workqueue to use
1092 * @work: work to queue
1094 * Returns 0 if @work was already on a queue, non-zero otherwise.
1096 * We queue the work to a specific CPU, the caller must ensure it can't go away.
1100 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1104 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1105 __queue_work(cpu, wq, work);
1110 EXPORT_SYMBOL_GPL(queue_work_on);
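/*
 * Illustrative sketch, not part of the original file: pinning a work item
 * to a particular CPU with queue_work_on(), e.g. to touch per-cpu data.
 * example_queue_on_cpu is a hypothetical helper.
 */
static void __maybe_unused example_queue_on_cpu(struct work_struct *work,
						int cpu)
{
	/* the caller must keep @cpu from going away, see the comment above */
	if (cpu_online(cpu))
		queue_work_on(cpu, system_wq, work);
}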
1112 static void delayed_work_timer_fn(unsigned long __data)
1114 struct delayed_work *dwork = (struct delayed_work *)__data;
1115 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1117 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1121 * queue_delayed_work - queue work on a workqueue after delay
1122 * @wq: workqueue to use
1123 * @dwork: delayable work to queue
1124 * @delay: number of jiffies to wait before queueing
1126 * Returns 0 if @work was already on a queue, non-zero otherwise.
1128 int queue_delayed_work(struct workqueue_struct *wq,
1129 struct delayed_work *dwork, unsigned long delay)
1132 return queue_work(wq, &dwork->work);
1134 return queue_delayed_work_on(-1, wq, dwork, delay);
1136 EXPORT_SYMBOL_GPL(queue_delayed_work);
1139 * queue_delayed_work_on - queue work on specific CPU after delay
1140 * @cpu: CPU number to execute work on
1141 * @wq: workqueue to use
1142 * @dwork: work to queue
1143 * @delay: number of jiffies to wait before queueing
1145 * Returns 0 if @work was already on a queue, non-zero otherwise.
1147 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1148 struct delayed_work *dwork, unsigned long delay)
1151 struct timer_list *timer = &dwork->timer;
1152 struct work_struct *work = &dwork->work;
1154 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1157 BUG_ON(timer_pending(timer));
1158 BUG_ON(!list_empty(&work->entry));
1160 timer_stats_timer_set_start_info(&dwork->timer);
1163 * This stores cwq for the moment, for the timer_fn.
1164 * Note that the work's gcwq is preserved to allow
1165 * reentrance detection for delayed works.
1167 if (!(wq->flags & WQ_UNBOUND)) {
1168 struct global_cwq *gcwq = get_work_gcwq(work);
1170 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1173 lcpu = raw_smp_processor_id();
1175 lcpu = WORK_CPU_UNBOUND;
1177 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1179 timer->expires = jiffies + delay;
1180 timer->data = (unsigned long)dwork;
1181 timer->function = delayed_work_timer_fn;
1183 if (unlikely(cpu >= 0))
1184 add_timer_on(timer, cpu);
1191 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
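/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * delayed work, the common polling pattern built on the API above.
 * example_poll_fn and example_poll_work are hypothetical names.
 */
static void example_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* do the periodic job, then rearm one second later */
	queue_delayed_work(system_wq, dwork, msecs_to_jiffies(1000));
}
static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);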
1194 * worker_enter_idle - enter idle state
1195 * @worker: worker which is entering idle state
1197 * @worker is entering idle state. Update stats and idle timer if necessary.
1201 * spin_lock_irq(gcwq->lock).
1203 static void worker_enter_idle(struct worker *worker)
1205 struct worker_pool *pool = worker->pool;
1206 struct global_cwq *gcwq = pool->gcwq;
1208 BUG_ON(worker->flags & WORKER_IDLE);
1209 BUG_ON(!list_empty(&worker->entry) &&
1210 (worker->hentry.next || worker->hentry.pprev));
1212 /* can't use worker_set_flags(), also called from start_worker() */
1213 worker->flags |= WORKER_IDLE;
1215 worker->last_active = jiffies;
1217 /* idle_list is LIFO */
1218 list_add(&worker->entry, &pool->idle_list);
1220 if (likely(gcwq->trustee_state != TRUSTEE_DONE)) {
1221 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1222 mod_timer(&pool->idle_timer,
1223 jiffies + IDLE_WORKER_TIMEOUT);
1225 wake_up_all(&gcwq->trustee_wait);
1228 * Sanity check nr_running. Because trustee releases gcwq->lock
1229 * between setting %WORKER_UNBOUND and zapping nr_running, the
1230 * warning may trigger spuriously. Check iff trustee is idle.
1232 WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
1233 pool->nr_workers == pool->nr_idle &&
1234 atomic_read(get_pool_nr_running(pool)));
1238 * worker_leave_idle - leave idle state
1239 * @worker: worker which is leaving idle state
1241 * @worker is leaving idle state. Update stats.
1244 * spin_lock_irq(gcwq->lock).
1246 static void worker_leave_idle(struct worker *worker)
1248 struct worker_pool *pool = worker->pool;
1250 BUG_ON(!(worker->flags & WORKER_IDLE));
1251 worker_clr_flags(worker, WORKER_IDLE);
1253 list_del_init(&worker->entry);
1257 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1260 * Works which are scheduled while the cpu is online must at least be
1261 * scheduled to a worker which is bound to the cpu so that if they are
1262 * flushed from cpu callbacks while cpu is going down, they are
1263 * guaranteed to execute on the cpu.
1265 * This function is to be used by rogue workers and rescuers to bind
1266 * themselves to the target cpu and may race with cpu going down or
1267 * coming online. kthread_bind() can't be used because it may put the
1268 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1269 * verbatim as it's best effort and blocking and gcwq may be
1270 * [dis]associated in the meantime.
1272 * This function tries set_cpus_allowed() and locks gcwq and verifies the
1273 * binding against %GCWQ_DISASSOCIATED which is set during
1274 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1275 * enters idle state or fetches works without dropping lock, it can
1276 * guarantee the scheduling requirement described in the first paragraph.
1279 * Might sleep. Called without any lock but returns with gcwq->lock held.
1283 * %true if the associated gcwq is online (@worker is successfully
1284 * bound), %false if offline.
1286 static bool worker_maybe_bind_and_lock(struct worker *worker)
1287 __acquires(&gcwq->lock)
1289 struct global_cwq *gcwq = worker->pool->gcwq;
1290 struct task_struct *task = worker->task;
1294 * The following call may fail, succeed or succeed
1295 * without actually migrating the task to the cpu if
1296 * it races with cpu hotunplug operation. Verify
1297 * against GCWQ_DISASSOCIATED.
1299 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1300 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1302 spin_lock_irq(&gcwq->lock);
1303 if (gcwq->flags & GCWQ_DISASSOCIATED)
1305 if (task_cpu(task) == gcwq->cpu &&
1306 cpumask_equal(&current->cpus_allowed,
1307 get_cpu_mask(gcwq->cpu)))
1309 spin_unlock_irq(&gcwq->lock);
1312 * We've raced with CPU hot[un]plug. Give it a breather
1313 * and retry migration. cond_resched() is required here;
1314 * otherwise, we might deadlock against cpu_stop trying to
1315 * bring down the CPU on non-preemptive kernel.
1322 struct idle_rebind {
1323 int cnt; /* # workers to be rebound */
1324 struct completion done; /* all workers rebound */
1328 * Rebind an idle @worker to its CPU. During CPU onlining, this has to
1329 * happen synchronously for idle workers. worker_thread() will test
1330 * %WORKER_REBIND before leaving idle and call this function.
1332 static void idle_worker_rebind(struct worker *worker)
1334 struct global_cwq *gcwq = worker->pool->gcwq;
1336 /* CPU must be online at this point */
1337 WARN_ON(!worker_maybe_bind_and_lock(worker));
1338 if (!--worker->idle_rebind->cnt)
1339 complete(&worker->idle_rebind->done);
1340 spin_unlock_irq(&worker->pool->gcwq->lock);
1342 /* we did our part, wait for rebind_workers() to finish up */
1343 wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
1347 * Function for @worker->rebind.work used to rebind unbound busy workers to
1348 * the associated cpu which is coming back online. This is scheduled by
1349 * cpu up but can race with other cpu hotplug operations and may be
1350 * executed twice without intervening cpu down.
1352 static void busy_worker_rebind_fn(struct work_struct *work)
1354 struct worker *worker = container_of(work, struct worker, rebind_work);
1355 struct global_cwq *gcwq = worker->pool->gcwq;
1357 if (worker_maybe_bind_and_lock(worker))
1358 worker_clr_flags(worker, WORKER_REBIND);
1360 spin_unlock_irq(&gcwq->lock);
1364 * rebind_workers - rebind all workers of a gcwq to the associated CPU
1365 * @gcwq: gcwq of interest
1367 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
1368 * is different for idle and busy ones.
1370 * The idle ones should be rebound synchronously and idle rebinding should
1371 * be complete before any worker starts executing work items with
1372 * concurrency management enabled; otherwise, scheduler may oops trying to
1373 * wake up non-local idle worker from wq_worker_sleeping().
1375 * This is achieved by repeatedly requesting rebinding until all idle
1376 * workers are known to have been rebound under @gcwq->lock and holding all
1377 * idle workers from becoming busy until idle rebinding is complete.
1379 * Once idle workers are rebound, busy workers can be rebound as they
1380 * finish executing their current work items. Queueing the rebind work at
1381 * the head of their scheduled lists is enough. Note that nr_running will
1382 * be properly bumped as busy workers rebind.
1384 * On return, all workers are guaranteed to either be bound or have rebind
1385 * work item scheduled.
1387 static void rebind_workers(struct global_cwq *gcwq)
1388 __releases(&gcwq->lock) __acquires(&gcwq->lock)
1390 struct idle_rebind idle_rebind;
1391 struct worker_pool *pool;
1392 struct worker *worker;
1393 struct hlist_node *pos;
1396 lockdep_assert_held(&gcwq->lock);
1398 for_each_worker_pool(pool, gcwq)
1399 lockdep_assert_held(&pool->manager_mutex);
1402 * Rebind idle workers. Interlocked both ways. We wait for
1403 * workers to rebind via @idle_rebind.done. Workers will wait for
1404 * us to finish up by watching %WORKER_REBIND.
1406 init_completion(&idle_rebind.done);
1408 idle_rebind.cnt = 1;
1409 INIT_COMPLETION(idle_rebind.done);
1411 /* set REBIND and kick idle ones, we'll wait for these later */
1412 for_each_worker_pool(pool, gcwq) {
1413 list_for_each_entry(worker, &pool->idle_list, entry) {
1414 if (worker->flags & WORKER_REBIND)
1417 /* morph UNBOUND to REBIND */
1418 worker->flags &= ~WORKER_UNBOUND;
1419 worker->flags |= WORKER_REBIND;
1422 worker->idle_rebind = &idle_rebind;
1424 /* worker_thread() will call idle_worker_rebind() */
1425 wake_up_process(worker->task);
1429 if (--idle_rebind.cnt) {
1430 spin_unlock_irq(&gcwq->lock);
1431 wait_for_completion(&idle_rebind.done);
1432 spin_lock_irq(&gcwq->lock);
1433 /* busy ones might have become idle while waiting, retry */
1438 * All idle workers are rebound and waiting for %WORKER_REBIND to
1439 * be cleared inside idle_worker_rebind(). Clear and release.
1440 * Clearing %WORKER_REBIND from this foreign context is safe
1441 * because these workers are still guaranteed to be idle.
1443 for_each_worker_pool(pool, gcwq)
1444 list_for_each_entry(worker, &pool->idle_list, entry)
1445 worker->flags &= ~WORKER_REBIND;
1447 wake_up_all(&gcwq->rebind_hold);
1449 /* rebind busy workers */
1450 for_each_busy_worker(worker, i, pos, gcwq) {
1451 struct work_struct *rebind_work = &worker->rebind_work;
1453 /* morph UNBOUND to REBIND */
1454 worker->flags &= ~WORKER_UNBOUND;
1455 worker->flags |= WORKER_REBIND;
1457 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1458 work_data_bits(rebind_work)))
1461 /* wq doesn't matter, use the default one */
1462 debug_work_activate(rebind_work);
1463 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
1464 worker->scheduled.next,
1465 work_color_to_flags(WORK_NO_COLOR));
1469 static struct worker *alloc_worker(void)
1471 struct worker *worker;
1473 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1475 INIT_LIST_HEAD(&worker->entry);
1476 INIT_LIST_HEAD(&worker->scheduled);
1477 INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
1478 /* on creation a worker is in !idle && prep state */
1479 worker->flags = WORKER_PREP;
1485 * create_worker - create a new workqueue worker
1486 * @pool: pool the new worker will belong to
1488 * Create a new worker which is bound to @pool. The returned worker
1489 * can be started by calling start_worker() or destroyed using destroy_worker().
1493 * Might sleep. Does GFP_KERNEL allocations.
1496 * Pointer to the newly created worker.
1498 static struct worker *create_worker(struct worker_pool *pool)
1500 struct global_cwq *gcwq = pool->gcwq;
1501 const char *pri = worker_pool_pri(pool) ? "H" : "";
1502 struct worker *worker = NULL;
1505 spin_lock_irq(&gcwq->lock);
1506 while (ida_get_new(&pool->worker_ida, &id)) {
1507 spin_unlock_irq(&gcwq->lock);
1508 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
1510 spin_lock_irq(&gcwq->lock);
1512 spin_unlock_irq(&gcwq->lock);
1514 worker = alloc_worker();
1518 worker->pool = pool;
1521 if (gcwq->cpu != WORK_CPU_UNBOUND)
1522 worker->task = kthread_create_on_node(worker_thread,
1523 worker, cpu_to_node(gcwq->cpu),
1524 "kworker/%u:%d%s", gcwq->cpu, id, pri);
1526 worker->task = kthread_create(worker_thread, worker,
1527 "kworker/u:%d%s", id, pri);
1528 if (IS_ERR(worker->task))
1531 if (worker_pool_pri(pool))
1532 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
1535 * Determine CPU binding of the new worker depending on
1536 * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the
1537 * flag remains stable across this function. See the comments
1538 * above the flag definition for details.
1540 * As an unbound worker may later become a regular one if CPU comes
1541 * online, make sure every worker has %PF_THREAD_BOUND set.
1543 if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
1544 kthread_bind(worker->task, gcwq->cpu);
1546 worker->task->flags |= PF_THREAD_BOUND;
1547 worker->flags |= WORKER_UNBOUND;
1553 spin_lock_irq(&gcwq->lock);
1554 ida_remove(&pool->worker_ida, id);
1555 spin_unlock_irq(&gcwq->lock);
1562 * start_worker - start a newly created worker
1563 * @worker: worker to start
1565 * Make the gcwq aware of @worker and start it.
1568 * spin_lock_irq(gcwq->lock).
1570 static void start_worker(struct worker *worker)
1572 worker->flags |= WORKER_STARTED;
1573 worker->pool->nr_workers++;
1574 worker_enter_idle(worker);
1575 wake_up_process(worker->task);
1579 * destroy_worker - destroy a workqueue worker
1580 * @worker: worker to be destroyed
1582 * Destroy @worker and adjust @gcwq stats accordingly.
1585 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1587 static void destroy_worker(struct worker *worker)
1589 struct worker_pool *pool = worker->pool;
1590 struct global_cwq *gcwq = pool->gcwq;
1591 int id = worker->id;
1593 /* sanity check frenzy */
1594 BUG_ON(worker->current_work);
1595 BUG_ON(!list_empty(&worker->scheduled));
1597 if (worker->flags & WORKER_STARTED)
1599 if (worker->flags & WORKER_IDLE)
1602 list_del_init(&worker->entry);
1603 worker->flags |= WORKER_DIE;
1605 spin_unlock_irq(&gcwq->lock);
1607 kthread_stop(worker->task);
1610 spin_lock_irq(&gcwq->lock);
1611 ida_remove(&pool->worker_ida, id);
1614 static void idle_worker_timeout(unsigned long __pool)
1616 struct worker_pool *pool = (void *)__pool;
1617 struct global_cwq *gcwq = pool->gcwq;
1619 spin_lock_irq(&gcwq->lock);
1621 if (too_many_workers(pool)) {
1622 struct worker *worker;
1623 unsigned long expires;
1625 /* idle_list is kept in LIFO order, check the last one */
1626 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1627 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1629 if (time_before(jiffies, expires))
1630 mod_timer(&pool->idle_timer, expires);
1632 /* it's been idle for too long, wake up manager */
1633 pool->flags |= POOL_MANAGE_WORKERS;
1634 wake_up_worker(pool);
1638 spin_unlock_irq(&gcwq->lock);
1641 static bool send_mayday(struct work_struct *work)
1643 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1644 struct workqueue_struct *wq = cwq->wq;
1647 if (!(wq->flags & WQ_RESCUER))
1650 /* mayday mayday mayday */
1651 cpu = cwq->pool->gcwq->cpu;
1652 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1653 if (cpu == WORK_CPU_UNBOUND)
1655 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1656 wake_up_process(wq->rescuer->task);
1660 static void gcwq_mayday_timeout(unsigned long __pool)
1662 struct worker_pool *pool = (void *)__pool;
1663 struct global_cwq *gcwq = pool->gcwq;
1664 struct work_struct *work;
1666 spin_lock_irq(&gcwq->lock);
1668 if (need_to_create_worker(pool)) {
1670 * We've been trying to create a new worker but
1671 * haven't been successful. We might be hitting an
1672 * allocation deadlock. Send distress signals to rescuers.
1675 list_for_each_entry(work, &pool->worklist, entry)
1679 spin_unlock_irq(&gcwq->lock);
1681 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1685 * maybe_create_worker - create a new worker if necessary
1686 * @pool: pool to create a new worker for
1688 * Create a new worker for @pool if necessary. @pool is guaranteed to
1689 * have at least one idle worker on return from this function. If
1690 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1691 * sent to all rescuers with works scheduled on @pool to resolve
1692 * possible allocation deadlock.
1694 * On return, need_to_create_worker() is guaranteed to be false and
1695 * may_start_working() true.
1698 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1699 * multiple times. Does GFP_KERNEL allocations. Called only from manager.
1703 * false if no action was taken and gcwq->lock stayed locked, true otherwise.
1706 static bool maybe_create_worker(struct worker_pool *pool)
1707 __releases(&gcwq->lock)
1708 __acquires(&gcwq->lock)
1710 struct global_cwq *gcwq = pool->gcwq;
1712 if (!need_to_create_worker(pool))
1715 spin_unlock_irq(&gcwq->lock);
1717 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1718 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1721 struct worker *worker;
1723 worker = create_worker(pool);
1725 del_timer_sync(&pool->mayday_timer);
1726 spin_lock_irq(&gcwq->lock);
1727 start_worker(worker);
1728 BUG_ON(need_to_create_worker(pool));
1732 if (!need_to_create_worker(pool))
1735 __set_current_state(TASK_INTERRUPTIBLE);
1736 schedule_timeout(CREATE_COOLDOWN);
1738 if (!need_to_create_worker(pool))
1742 del_timer_sync(&pool->mayday_timer);
1743 spin_lock_irq(&gcwq->lock);
1744 if (need_to_create_worker(pool))
1750 * maybe_destroy_worker - destroy workers which have been idle for a while
1751 * @pool: pool to destroy workers for
1753 * Destroy @pool workers which have been idle for longer than
1754 * IDLE_WORKER_TIMEOUT.
1757 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1758 * multiple times. Called only from manager.
1761 * false if no action was taken and gcwq->lock stayed locked, true otherwise.
1764 static bool maybe_destroy_workers(struct worker_pool *pool)
1768 while (too_many_workers(pool)) {
1769 struct worker *worker;
1770 unsigned long expires;
1772 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1773 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1775 if (time_before(jiffies, expires)) {
1776 mod_timer(&pool->idle_timer, expires);
1780 destroy_worker(worker);
1788 * manage_workers - manage worker pool
1791 * Assume the manager role and manage gcwq worker pool @worker belongs
1792 * to. At any given time, there can be only zero or one manager per
1793 * gcwq. The exclusion is handled automatically by this function.
1795 * The caller can safely start processing works on false return. On
1796 * true return, it's guaranteed that need_to_create_worker() is false
1797 * and may_start_working() is true.
1800 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1801 * multiple times. Does GFP_KERNEL allocations.
1804 * false if no action was taken and gcwq->lock stayed locked, true if
1805 * some action was taken.
1807 static bool manage_workers(struct worker *worker)
1809 struct worker_pool *pool = worker->pool;
1812 if (!mutex_trylock(&pool->manager_mutex))
1815 pool->flags &= ~POOL_MANAGE_WORKERS;
1818 * Destroy and then create so that may_start_working() is true
1821 ret |= maybe_destroy_workers(pool);
1822 ret |= maybe_create_worker(pool);
1824 mutex_unlock(&pool->manager_mutex);
1829 * move_linked_works - move linked works to a list
1830 * @work: start of series of works to be scheduled
1831 * @head: target list to append @work to
1832 * @nextp: out parameter for nested worklist walking
1834 * Schedule linked works starting from @work to @head. Work series to
1835 * be scheduled starts at @work and includes any consecutive work with
1836 * WORK_STRUCT_LINKED set in its predecessor.
1838 * If @nextp is not NULL, it's updated to point to the next work of
1839 * the last scheduled work. This allows move_linked_works() to be
1840 * nested inside outer list_for_each_entry_safe().
1843 * spin_lock_irq(gcwq->lock).
1845 static void move_linked_works(struct work_struct *work, struct list_head *head,
1846 struct work_struct **nextp)
1848 struct work_struct *n;
1851 * Linked worklist will always end before the end of the list,
1852 * use NULL for list head.
1854 list_for_each_entry_safe_from(work, n, NULL, entry) {
1855 list_move_tail(&work->entry, head);
1856 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1861 * If we're already inside safe list traversal and have moved
1862 * multiple works to the scheduled queue, the next position
1863 * needs to be updated.
1869 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1871 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1872 struct work_struct, entry);
1874 trace_workqueue_activate_work(work);
1875 move_linked_works(work, &cwq->pool->worklist, NULL);
1876 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1881 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1882 * @cwq: cwq of interest
1883 * @color: color of work which left the queue
1884 * @delayed: for a delayed work
1886 * A work either has completed or is removed from pending queue,
1887 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1890 * spin_lock_irq(gcwq->lock).
1892 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1895 /* ignore uncolored works */
1896 if (color == WORK_NO_COLOR)
1899 cwq->nr_in_flight[color]--;
1903 if (!list_empty(&cwq->delayed_works)) {
1904 /* one down, submit a delayed one */
1905 if (cwq->nr_active < cwq->max_active)
1906 cwq_activate_first_delayed(cwq);
1910 /* is flush in progress and are we at the flushing tip? */
1911 if (likely(cwq->flush_color != color))
1914 /* are there still in-flight works? */
1915 if (cwq->nr_in_flight[color])
1918 /* this cwq is done, clear flush_color */
1919 cwq->flush_color = -1;
1922 * If this was the last cwq, wake up the first flusher. It
1923 * will handle the rest.
1925 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1926 complete(&cwq->wq->first_flusher->done);
1930 * process_one_work - process single work
1932 * @work: work to process
1934 * Process @work. This function contains all the logic necessary to
1935 * process a single work including synchronization against and
1936 * interaction with other workers on the same cpu, queueing and
1937 * flushing. As long as context requirement is met, any worker can
1938 * call this function to process a work.
1941 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1943 static void process_one_work(struct worker *worker, struct work_struct *work)
1944 __releases(&gcwq->lock)
1945 __acquires(&gcwq->lock)
1947 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1948 struct worker_pool *pool = worker->pool;
1949 struct global_cwq *gcwq = pool->gcwq;
1950 struct hlist_head *bwh = busy_worker_head(gcwq, work);
1951 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1952 work_func_t f = work->func;
1954 struct worker *collision;
1955 #ifdef CONFIG_LOCKDEP
1957 * It is permissible to free the struct work_struct from
1958 * inside the function that is called from it, this we need to
1959 * take into account for lockdep too. To avoid bogus "held
1960 * lock freed" warnings as well as problems when looking into
1961 * work->lockdep_map, make a copy and use that here.
1963 struct lockdep_map lockdep_map;
1965 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
1967 WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
1968 raw_smp_processor_id() != gcwq->cpu);
1971 * A single work shouldn't be executed concurrently by
1972 * multiple workers on a single cpu. Check whether anyone is
1973 * already processing the work. If so, defer the work to the
1974 * currently executing one.
1976 collision = __find_worker_executing_work(gcwq, bwh, work);
1977 if (unlikely(collision)) {
1978 move_linked_works(work, &collision->scheduled, NULL);
1982 /* claim and process */
1983 debug_work_deactivate(work);
1984 hlist_add_head(&worker->hentry, bwh);
1985 worker->current_work = work;
1986 worker->current_cwq = cwq;
1987 work_color = get_work_color(work);
1989 /* record the current cpu number in the work data and dequeue */
1990 set_work_cpu(work, gcwq->cpu);
1991 list_del_init(&work->entry);
1994 * CPU intensive works don't participate in concurrency
1995 * management. They're the scheduler's responsibility.
1997 if (unlikely(cpu_intensive))
1998 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
2001 * Unbound gcwq isn't concurrency managed and work items should be
2002 * executed ASAP. Wake up another worker if necessary.
2004 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2005 wake_up_worker(pool);
2007 spin_unlock_irq(&gcwq->lock);
2009 work_clear_pending(work);
2010 lock_map_acquire_read(&cwq->wq->lockdep_map);
2011 lock_map_acquire(&lockdep_map);
2012 trace_workqueue_execute_start(work);
2015 * While we must be careful to not use "work" after this, the trace
2016 * point will only record its address.
2018 trace_workqueue_execute_end(work);
2019 lock_map_release(&lockdep_map);
2020 lock_map_release(&cwq->wq->lockdep_map);
2022 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2023 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
2025 current->comm, preempt_count(), task_pid_nr(current));
2026 printk(KERN_ERR " last function: ");
2027 print_symbol("%s\n", (unsigned long)f);
2028 debug_show_held_locks(current);
2032 spin_lock_irq(&gcwq->lock);
2034 /* clear cpu intensive status */
2035 if (unlikely(cpu_intensive))
2036 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2038 /* we're done with it, release */
2039 hlist_del_init(&worker->hentry);
2040 worker->current_work = NULL;
2041 worker->current_cwq = NULL;
2042 cwq_dec_nr_in_flight(cwq, work_color, false);
2046 * process_scheduled_works - process scheduled works
2049 * Process all scheduled works. Please note that the scheduled list
2050 * may change while processing a work, so this function repeatedly
2051 * fetches a work from the top and executes it.
2054 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2057 static void process_scheduled_works(struct worker *worker)
2059 while (!list_empty(&worker->scheduled)) {
2060 struct work_struct *work = list_first_entry(&worker->scheduled,
2061 struct work_struct, entry);
2062 process_one_work(worker, work);
2067 * worker_thread - the worker thread function
2070 * The gcwq worker thread function. There's a single dynamic pool of
2071 * these for each cpu.  These workers process all works regardless of
2072 * their specific target workqueue. The only exception is works which
2073 * belong to workqueues with a rescuer which will be explained in
2076 static int worker_thread(void *__worker)
2078 struct worker *worker = __worker;
2079 struct worker_pool *pool = worker->pool;
2080 struct global_cwq *gcwq = pool->gcwq;
2082 /* tell the scheduler that this is a workqueue worker */
2083 worker->task->flags |= PF_WQ_WORKER;
2085 spin_lock_irq(&gcwq->lock);
2088 * DIE can be set only while idle and REBIND set while busy has
2089 * @worker->rebind_work scheduled. Checking here is enough.
2091 if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
2092 spin_unlock_irq(&gcwq->lock);
2094 if (worker->flags & WORKER_DIE) {
2095 worker->task->flags &= ~PF_WQ_WORKER;
2099 idle_worker_rebind(worker);
2103 worker_leave_idle(worker);
2105 /* no more worker necessary? */
2106 if (!need_more_worker(pool))
2109 /* do we need to manage? */
2110 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2114 * ->scheduled list can only be filled while a worker is
2115 * preparing to process a work or actually processing it.
2116 * Make sure nobody diddled with it while I was sleeping.
2118 BUG_ON(!list_empty(&worker->scheduled));
2121 * When control reaches this point, we're guaranteed to have
2122 * at least one idle worker or that someone else has already
2123 * assumed the manager role.
2125 worker_clr_flags(worker, WORKER_PREP);
2128 struct work_struct *work =
2129 list_first_entry(&pool->worklist,
2130 struct work_struct, entry);
2132 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2133 /* optimization path, not strictly necessary */
2134 process_one_work(worker, work);
2135 if (unlikely(!list_empty(&worker->scheduled)))
2136 process_scheduled_works(worker);
2138 move_linked_works(work, &worker->scheduled, NULL);
2139 process_scheduled_works(worker);
2141 } while (keep_working(pool));
2143 worker_set_flags(worker, WORKER_PREP, false);
2145 if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
2149 * gcwq->lock is held and there's no work to process and no
2150 * need to manage, sleep. Workers are woken up only while
2151 * holding gcwq->lock or from local cpu, so setting the
2152 * current state before releasing gcwq->lock is enough to
2153 * prevent losing any event.
2155 worker_enter_idle(worker);
2156 __set_current_state(TASK_INTERRUPTIBLE);
2157 spin_unlock_irq(&gcwq->lock);
2163 * rescuer_thread - the rescuer thread function
2164 * @__wq: the associated workqueue
2166 * Workqueue rescuer thread function. There's one rescuer for each
2167 * workqueue which has WQ_RESCUER set.
2169 * Regular work processing on a gcwq may block trying to create a new
2170 * worker, which uses a GFP_KERNEL allocation and therefore has a slight
2171 * chance of developing into a deadlock if some works currently on the
2172 * same queue need to be processed to satisfy the GFP_KERNEL allocation.
2173 * This is the problem the rescuer solves.
2175 * When such a condition is possible, the gcwq summons rescuers of all
2176 * workqueues which have works queued on the gcwq and lets them process
2177 * those works so that forward progress can be guaranteed.
2179 * This should happen rarely.
2181 static int rescuer_thread(void *__wq)
2183 struct workqueue_struct *wq = __wq;
2184 struct worker *rescuer = wq->rescuer;
2185 struct list_head *scheduled = &rescuer->scheduled;
2186 bool is_unbound = wq->flags & WQ_UNBOUND;
2189 set_user_nice(current, RESCUER_NICE_LEVEL);
2191 set_current_state(TASK_INTERRUPTIBLE);
2193 if (kthread_should_stop())
2197 * See whether any cpu is asking for help. Unbounded
2198 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
2200 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2201 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2202 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2203 struct worker_pool *pool = cwq->pool;
2204 struct global_cwq *gcwq = pool->gcwq;
2205 struct work_struct *work, *n;
2207 __set_current_state(TASK_RUNNING);
2208 mayday_clear_cpu(cpu, wq->mayday_mask);
2210 /* migrate to the target cpu if possible */
2211 rescuer->pool = pool;
2212 worker_maybe_bind_and_lock(rescuer);
2215 * Slurp in all works issued via this workqueue and
2218 BUG_ON(!list_empty(&rescuer->scheduled));
2219 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2220 if (get_work_cwq(work) == cwq)
2221 move_linked_works(work, scheduled, &n);
2223 process_scheduled_works(rescuer);
2226 * Leave this gcwq. If keep_working() is %true, notify a
2227 * regular worker; otherwise, we end up with 0 concurrency
2228 * and stalling the execution.
2230 if (keep_working(pool))
2231 wake_up_worker(pool);
2233 spin_unlock_irq(&gcwq->lock);
2241 struct work_struct work;
2242 struct completion done;
2245 static void wq_barrier_func(struct work_struct *work)
2247 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2248 complete(&barr->done);
2252 * insert_wq_barrier - insert a barrier work
2253 * @cwq: cwq to insert barrier into
2254 * @barr: wq_barrier to insert
2255 * @target: target work to attach @barr to
2256 * @worker: worker currently executing @target, NULL if @target is not executing
2258 * @barr is linked to @target such that @barr is completed only after
2259 * @target finishes execution. Please note that the ordering
2260 * guarantee is observed only with respect to @target and on the local
2263 * Currently, a queued barrier can't be canceled. This is because
2264 * try_to_grab_pending() can't determine whether the work to be
2265 * grabbed is at the head of the queue and thus can't clear LINKED
2266 * flag of the previous work while there must be a valid next work
2267 * after a work with LINKED flag set.
2269 * Note that when @worker is non-NULL, @target may be modified
2270 * underneath us, so we can't reliably determine cwq from @target.
2273 * spin_lock_irq(gcwq->lock).
2275 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2276 struct wq_barrier *barr,
2277 struct work_struct *target, struct worker *worker)
2279 struct list_head *head;
2280 unsigned int linked = 0;
2283 * debugobject calls are safe here even with gcwq->lock locked
2284 * as we know for sure that this will not trigger any of the
2285 * checks and call back into the fixup functions where we
2288 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2289 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2290 init_completion(&barr->done);
2293 * If @target is currently being executed, schedule the
2294 * barrier to the worker; otherwise, put it after @target.
2297 head = worker->scheduled.next;
2299 unsigned long *bits = work_data_bits(target);
2301 head = target->entry.next;
2302 /* there can already be other linked works, inherit and set */
2303 linked = *bits & WORK_STRUCT_LINKED;
2304 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2307 debug_work_activate(&barr->work);
2308 insert_work(cwq, &barr->work, head,
2309 work_color_to_flags(WORK_NO_COLOR) | linked);
2313 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2314 * @wq: workqueue being flushed
2315 * @flush_color: new flush color, < 0 for no-op
2316 * @work_color: new work color, < 0 for no-op
2318 * Prepare cwqs for workqueue flushing.
2320 * If @flush_color is non-negative, flush_color on all cwqs should be
2321 * -1. If no cwq has in-flight commands at the specified color, all
2322 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2323 * has in flight commands, its cwq->flush_color is set to
2324 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2325 * wakeup logic is armed and %true is returned.
2327 * The caller should have initialized @wq->first_flusher prior to
2328 * calling this function with non-negative @flush_color. If
2329 * @flush_color is negative, no flush color update is done and %false
2332 * If @work_color is non-negative, all cwqs should have the same
2333 * work_color which is previous to @work_color and all will be
2334 * advanced to @work_color.
2337 * mutex_lock(wq->flush_mutex).
2340 * %true if @flush_color >= 0 and there's something to flush. %false
2343 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2344 int flush_color, int work_color)
2349 if (flush_color >= 0) {
2350 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2351 atomic_set(&wq->nr_cwqs_to_flush, 1);
2354 for_each_cwq_cpu(cpu, wq) {
2355 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2356 struct global_cwq *gcwq = cwq->pool->gcwq;
2358 spin_lock_irq(&gcwq->lock);
2360 if (flush_color >= 0) {
2361 BUG_ON(cwq->flush_color != -1);
2363 if (cwq->nr_in_flight[flush_color]) {
2364 cwq->flush_color = flush_color;
2365 atomic_inc(&wq->nr_cwqs_to_flush);
2370 if (work_color >= 0) {
2371 BUG_ON(work_color != work_next_color(cwq->work_color));
2372 cwq->work_color = work_color;
2375 spin_unlock_irq(&gcwq->lock);
2378 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2379 complete(&wq->first_flusher->done);
2385 * flush_workqueue - ensure that any scheduled work has run to completion.
2386 * @wq: workqueue to flush
2388 * Forces execution of the workqueue and blocks until its completion.
2389 * This is typically used in driver shutdown handlers.
2391 * We sleep until all works which were queued on entry have been handled,
2392 * but we are not livelocked by new incoming ones.
2394 void flush_workqueue(struct workqueue_struct *wq)
2396 struct wq_flusher this_flusher = {
2397 .list = LIST_HEAD_INIT(this_flusher.list),
2399 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2403 lock_map_acquire(&wq->lockdep_map);
2404 lock_map_release(&wq->lockdep_map);
2406 mutex_lock(&wq->flush_mutex);
2409 * Start-to-wait phase
2411 next_color = work_next_color(wq->work_color);
2413 if (next_color != wq->flush_color) {
2415 * Color space is not full. The current work_color
2416 * becomes our flush_color and work_color is advanced
2419 BUG_ON(!list_empty(&wq->flusher_overflow));
2420 this_flusher.flush_color = wq->work_color;
2421 wq->work_color = next_color;
2423 if (!wq->first_flusher) {
2424 /* no flush in progress, become the first flusher */
2425 BUG_ON(wq->flush_color != this_flusher.flush_color);
2427 wq->first_flusher = &this_flusher;
2429 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2431 /* nothing to flush, done */
2432 wq->flush_color = next_color;
2433 wq->first_flusher = NULL;
2438 BUG_ON(wq->flush_color == this_flusher.flush_color);
2439 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2440 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2444 * Oops, color space is full, wait on overflow queue.
2445 * The next flush completion will assign us
2446 * flush_color and transfer to flusher_queue.
2448 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2451 mutex_unlock(&wq->flush_mutex);
2453 wait_for_completion(&this_flusher.done);
2456 * Wake-up-and-cascade phase
2458 * First flushers are responsible for cascading flushes and
2459 * handling overflow. Non-first flushers can simply return.
2461 if (wq->first_flusher != &this_flusher)
2464 mutex_lock(&wq->flush_mutex);
2466 /* we might have raced, check again with mutex held */
2467 if (wq->first_flusher != &this_flusher)
2470 wq->first_flusher = NULL;
2472 BUG_ON(!list_empty(&this_flusher.list));
2473 BUG_ON(wq->flush_color != this_flusher.flush_color);
2476 struct wq_flusher *next, *tmp;
2478 /* complete all the flushers sharing the current flush color */
2479 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2480 if (next->flush_color != wq->flush_color)
2482 list_del_init(&next->list);
2483 complete(&next->done);
2486 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2487 wq->flush_color != work_next_color(wq->work_color));
2489 /* this flush_color is finished, advance by one */
2490 wq->flush_color = work_next_color(wq->flush_color);
2492 /* one color has been freed, handle overflow queue */
2493 if (!list_empty(&wq->flusher_overflow)) {
2495 * Assign the same color to all overflowed
2496 * flushers, advance work_color and append to
2497 * flusher_queue. This is the start-to-wait
2498 * phase for these overflowed flushers.
2500 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2501 tmp->flush_color = wq->work_color;
2503 wq->work_color = work_next_color(wq->work_color);
2505 list_splice_tail_init(&wq->flusher_overflow,
2506 &wq->flusher_queue);
2507 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2510 if (list_empty(&wq->flusher_queue)) {
2511 BUG_ON(wq->flush_color != wq->work_color);
2516 * Need to flush more colors. Make the next flusher
2517 * the new first flusher and arm cwqs.
2519 BUG_ON(wq->flush_color == wq->work_color);
2520 BUG_ON(wq->flush_color != next->flush_color);
2522 list_del_init(&next->list);
2523 wq->first_flusher = next;
2525 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2529 * Meh... this color is already done, clear first
2530 * flusher and repeat cascading.
2532 wq->first_flusher = NULL;
2536 mutex_unlock(&wq->flush_mutex);
2538 EXPORT_SYMBOL_GPL(flush_workqueue);
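/*
 * Illustrative usage sketch, not part of workqueue.c: a driver that owns a
 * workqueue can call flush_workqueue() in its shutdown path so that every
 * work item queued before the call has finished before resources go away.
 * "my_io_wq" and "my_shutdown" are hypothetical names.
 */
static struct workqueue_struct *my_io_wq;	/* from alloc_workqueue() */

static void my_shutdown(void)
{
	/* waits for works queued so far; new ones may still be queued */
	flush_workqueue(my_io_wq);
}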
2541 * drain_workqueue - drain a workqueue
2542 * @wq: workqueue to drain
2544 * Wait until the workqueue becomes empty. While draining is in progress,
2545 * only chain queueing is allowed. IOW, only currently pending or running
2546 * work items on @wq can queue further work items on it. @wq is flushed
2547 * repeatedly until it becomes empty.  The number of flushes is determined
2548 * by the depth of chaining and should be relatively short.  Whine if it
2551 void drain_workqueue(struct workqueue_struct *wq)
2553 unsigned int flush_cnt = 0;
2557 * __queue_work() needs to test whether there are drainers, is much
2558 * hotter than drain_workqueue() and already looks at @wq->flags.
2559 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2561 spin_lock(&workqueue_lock);
2562 if (!wq->nr_drainers++)
2563 wq->flags |= WQ_DRAINING;
2564 spin_unlock(&workqueue_lock);
2566 flush_workqueue(wq);
2568 for_each_cwq_cpu(cpu, wq) {
2569 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2572 spin_lock_irq(&cwq->pool->gcwq->lock);
2573 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2574 spin_unlock_irq(&cwq->pool->gcwq->lock);
2579 if (++flush_cnt == 10 ||
2580 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2581 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
2582 wq->name, flush_cnt);
2586 spin_lock(&workqueue_lock);
2587 if (!--wq->nr_drainers)
2588 wq->flags &= ~WQ_DRAINING;
2589 spin_unlock(&workqueue_lock);
2591 EXPORT_SYMBOL_GPL(drain_workqueue);
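/*
 * Illustrative usage sketch, not part of workqueue.c: a work item that
 * chains a bounded number of follow-ups onto its own workqueue.  Such chain
 * queueing is the only queueing allowed while drain_workqueue() is in
 * progress, so the drain below terminates once the chain runs out.  All
 * names are hypothetical.
 */
static struct workqueue_struct *my_chain_wq;
static int my_chain_left = 3;

static void my_chain_fn(struct work_struct *work)
{
	if (--my_chain_left > 0)
		queue_work(my_chain_wq, work);	/* chain queueing, still allowed */
}

static void my_chain_teardown(void)
{
	drain_workqueue(my_chain_wq);	/* returns once the chain has run out */
}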
2593 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2594 bool wait_executing)
2596 struct worker *worker = NULL;
2597 struct global_cwq *gcwq;
2598 struct cpu_workqueue_struct *cwq;
2601 gcwq = get_work_gcwq(work);
2605 spin_lock_irq(&gcwq->lock);
2606 if (!list_empty(&work->entry)) {
2608 * See the comment near try_to_grab_pending()->smp_rmb().
2609 * If it was re-queued to a different gcwq under us, we
2610 * are not going to wait.
2613 cwq = get_work_cwq(work);
2614 if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
2616 } else if (wait_executing) {
2617 worker = find_worker_executing_work(gcwq, work);
2620 cwq = worker->current_cwq;
2624 insert_wq_barrier(cwq, barr, work, worker);
2625 spin_unlock_irq(&gcwq->lock);
2628 * If @max_active is 1 or rescuer is in use, flushing another work
2629 * item on the same workqueue may lead to deadlock. Make sure the
2630 * flusher is not running on the same workqueue by verifying write
2633 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2634 lock_map_acquire(&cwq->wq->lockdep_map);
2636 lock_map_acquire_read(&cwq->wq->lockdep_map);
2637 lock_map_release(&cwq->wq->lockdep_map);
2641 spin_unlock_irq(&gcwq->lock);
2646 * flush_work - wait for a work to finish executing the last queueing instance
2647 * @work: the work to flush
2649 * Wait until @work has finished execution. This function considers
2650 * only the last queueing instance of @work. If @work has been
2651 * enqueued across different CPUs on a non-reentrant workqueue or on
2652 * multiple workqueues, @work might still be executing on return on
2653 * some of the CPUs from earlier queueing.
2655 * If @work was queued only on a non-reentrant, ordered or unbound
2656 * workqueue, @work is guaranteed to be idle on return if it hasn't
2657 * been requeued since flush started.
2660 * %true if flush_work() waited for the work to finish execution,
2661 * %false if it was already idle.
2663 bool flush_work(struct work_struct *work)
2665 struct wq_barrier barr;
2667 lock_map_acquire(&work->lockdep_map);
2668 lock_map_release(&work->lockdep_map);
2670 if (start_flush_work(work, &barr, true)) {
2671 wait_for_completion(&barr.done);
2672 destroy_work_on_stack(&barr.work);
2677 EXPORT_SYMBOL_GPL(flush_work);
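/*
 * Illustrative usage sketch, not part of workqueue.c: waiting for the last
 * queueing instance of an embedded work item before freeing its container.
 * This assumes nothing requeues the work afterwards; if it can requeue
 * itself, cancel_work_sync() is the safer choice.  "struct my_obj" is
 * hypothetical.
 */
struct my_obj {
	struct work_struct work;
	/* ... object data ... */
};

static void my_obj_release(struct my_obj *obj)
{
	flush_work(&obj->work);		/* last queueing has finished */
	kfree(obj);
}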
2679 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2681 struct wq_barrier barr;
2682 struct worker *worker;
2684 spin_lock_irq(&gcwq->lock);
2686 worker = find_worker_executing_work(gcwq, work);
2687 if (unlikely(worker))
2688 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2690 spin_unlock_irq(&gcwq->lock);
2692 if (unlikely(worker)) {
2693 wait_for_completion(&barr.done);
2694 destroy_work_on_stack(&barr.work);
2700 static bool wait_on_work(struct work_struct *work)
2707 lock_map_acquire(&work->lockdep_map);
2708 lock_map_release(&work->lockdep_map);
2710 for_each_gcwq_cpu(cpu)
2711 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2716 * flush_work_sync - wait until a work has finished execution
2717 * @work: the work to flush
2719 * Wait until @work has finished execution. On return, it's
2720 * guaranteed that all queueing instances of @work which happened
2721 * before this function was called have finished.  In other words, if
2722 * @work hasn't been requeued since this function was called, @work is
2723 * guaranteed to be idle on return.
2726 * %true if flush_work_sync() waited for the work to finish execution,
2727 * %false if it was already idle.
2729 bool flush_work_sync(struct work_struct *work)
2731 struct wq_barrier barr;
2732 bool pending, waited;
2734 /* we'll wait for executions separately, queue barr only if pending */
2735 pending = start_flush_work(work, &barr, false);
2737 /* wait for executions to finish */
2738 waited = wait_on_work(work);
2740 /* wait for the pending one */
2742 wait_for_completion(&barr.done);
2743 destroy_work_on_stack(&barr.work);
2746 return pending || waited;
2748 EXPORT_SYMBOL_GPL(flush_work_sync);
2751 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2752 * so this work can't be re-armed in any way.
2754 static int try_to_grab_pending(struct work_struct *work)
2756 struct global_cwq *gcwq;
2759 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2763 * The queueing is in progress, or it is already queued. Try to
2764 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2766 gcwq = get_work_gcwq(work);
2770 spin_lock_irq(&gcwq->lock);
2771 if (!list_empty(&work->entry)) {
2773 * This work is queued, but perhaps we locked the wrong gcwq.
2774 * In that case we must see the new value after rmb(), see
2775 * insert_work()->wmb().
2778 if (gcwq == get_work_gcwq(work)) {
2779 debug_work_deactivate(work);
2780 list_del_init(&work->entry);
2781 cwq_dec_nr_in_flight(get_work_cwq(work),
2782 get_work_color(work),
2783 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2787 spin_unlock_irq(&gcwq->lock);
2792 static bool __cancel_work_timer(struct work_struct *work,
2793 struct timer_list* timer)
2798 ret = (timer && likely(del_timer(timer)));
2800 ret = try_to_grab_pending(work);
2802 } while (unlikely(ret < 0));
2804 clear_work_data(work);
2809 * cancel_work_sync - cancel a work and wait for it to finish
2810 * @work: the work to cancel
2812 * Cancel @work and wait for its execution to finish. This function
2813 * can be used even if the work re-queues itself or migrates to
2814 * another workqueue. On return from this function, @work is
2815 * guaranteed to be not pending or executing on any CPU.
2817 * cancel_work_sync(&delayed_work->work) must not be used for
2818 * delayed_work's. Use cancel_delayed_work_sync() instead.
2820 * The caller must ensure that the workqueue on which @work was last
2821 * queued can't be destroyed before this function returns.
2824 * %true if @work was pending, %false otherwise.
2826 bool cancel_work_sync(struct work_struct *work)
2828 return __cancel_work_timer(work, NULL);
2830 EXPORT_SYMBOL_GPL(cancel_work_sync);
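/*
 * Illustrative usage sketch, not part of workqueue.c: cancelling a possibly
 * self-requeueing work item in a device remove path.  After
 * cancel_work_sync() returns, the work is neither pending nor executing on
 * any CPU, so the containing structure can be freed.  "struct my_dev" is
 * hypothetical.
 */
struct my_dev {
	struct work_struct reset_work;
};

static void my_dev_remove(struct my_dev *dev)
{
	cancel_work_sync(&dev->reset_work);	/* not pending, not running */
	kfree(dev);
}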
2833 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2834 * @dwork: the delayed work to flush
2836 * Delayed timer is cancelled and the pending work is queued for
2837 * immediate execution. Like flush_work(), this function only
2838 * considers the last queueing instance of @dwork.
2841 * %true if flush_work() waited for the work to finish execution,
2842 * %false if it was already idle.
2844 bool flush_delayed_work(struct delayed_work *dwork)
2846 if (del_timer_sync(&dwork->timer))
2847 __queue_work(raw_smp_processor_id(),
2848 get_work_cwq(&dwork->work)->wq, &dwork->work);
2849 return flush_work(&dwork->work);
2851 EXPORT_SYMBOL(flush_delayed_work);
2854 * flush_delayed_work_sync - wait for a dwork to finish
2855 * @dwork: the delayed work to flush
2857 * Delayed timer is cancelled and the pending work is queued for
2858 * execution immediately. Other than timer handling, its behavior
2859 * is identical to flush_work_sync().
2862 * %true if flush_work_sync() waited for the work to finish execution,
2863 * %false if it was already idle.
2865 bool flush_delayed_work_sync(struct delayed_work *dwork)
2867 if (del_timer_sync(&dwork->timer))
2868 __queue_work(raw_smp_processor_id(),
2869 get_work_cwq(&dwork->work)->wq, &dwork->work);
2870 return flush_work_sync(&dwork->work);
2872 EXPORT_SYMBOL(flush_delayed_work_sync);
2875 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2876 * @dwork: the delayed work cancel
2878 * This is cancel_work_sync() for delayed works.
2881 * %true if @dwork was pending, %false otherwise.
2883 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2885 return __cancel_work_timer(&dwork->work, &dwork->timer);
2887 EXPORT_SYMBOL(cancel_delayed_work_sync);
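/*
 * Illustrative usage sketch, not part of workqueue.c: stopping a periodic
 * poll implemented as a self-rearming delayed work.  cancel_delayed_work_sync()
 * takes care of both the pending timer and a concurrently executing
 * instance.  "my_poll_dwork" is hypothetical and is assumed to have been
 * set up with INIT_DELAYED_WORK() elsewhere.
 */
static struct delayed_work my_poll_dwork;

static void my_stop_polling(void)
{
	cancel_delayed_work_sync(&my_poll_dwork);
}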
2890 * schedule_work - put work task in global workqueue
2891 * @work: job to be done
2893 * Returns zero if @work was already on the kernel-global workqueue and
2894 * non-zero otherwise.
2896 * This puts a job in the kernel-global workqueue if it was not already
2897 * queued and leaves it in the same position on the kernel-global
2898 * workqueue otherwise.
2900 int schedule_work(struct work_struct *work)
2902 return queue_work(system_wq, work);
2904 EXPORT_SYMBOL(schedule_work);
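/*
 * Illustrative usage sketch, not part of workqueue.c: deferring non-atomic
 * processing from an interrupt handler to process context via the
 * kernel-global workqueue.  Assumes <linux/interrupt.h>; all names are
 * hypothetical.
 */
static void my_deferred_fn(struct work_struct *work)
{
	/* runs in process context and may sleep */
}
static DECLARE_WORK(my_deferred_work, my_deferred_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	schedule_work(&my_deferred_work);	/* no-op if already pending */
	return IRQ_HANDLED;
}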
2907 * schedule_work_on - put work task on a specific cpu
2908 * @cpu: cpu to put the work task on
2909 * @work: job to be done
2911 * This puts a job on a specific cpu
2913 int schedule_work_on(int cpu, struct work_struct *work)
2915 return queue_work_on(cpu, system_wq, work);
2917 EXPORT_SYMBOL(schedule_work_on);
2920 * schedule_delayed_work - put work task in global workqueue after delay
2921 * @dwork: job to be done
2922 * @delay: number of jiffies to wait or 0 for immediate execution
2924 * After waiting for a given time this puts a job in the kernel-global
2927 int schedule_delayed_work(struct delayed_work *dwork,
2928 unsigned long delay)
2930 return queue_delayed_work(system_wq, dwork, delay);
2932 EXPORT_SYMBOL(schedule_delayed_work);
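/*
 * Illustrative usage sketch, not part of workqueue.c: a self-rearming
 * delayed work that polls hardware once a second on the kernel-global
 * workqueue.  All names are hypothetical.
 */
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* ... poll the device ... */
	schedule_delayed_work(&my_poll_work, HZ);	/* rearm in one second */
}

static void my_start_polling(void)
{
	schedule_delayed_work(&my_poll_work, HZ);
}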
2935 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2937 * @dwork: job to be done
2938 * @delay: number of jiffies to wait
2940 * After waiting for a given time this puts a job in the kernel-global
2941 * workqueue on the specified CPU.
2943 int schedule_delayed_work_on(int cpu,
2944 struct delayed_work *dwork, unsigned long delay)
2946 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2948 EXPORT_SYMBOL(schedule_delayed_work_on);
2951 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2952 * @func: the function to call
2954 * schedule_on_each_cpu() executes @func on each online CPU using the
2955 * system workqueue and blocks until all CPUs have completed.
2956 * schedule_on_each_cpu() is very slow.
2959 * 0 on success, -errno on failure.
2961 int schedule_on_each_cpu(work_func_t func)
2964 struct work_struct __percpu *works;
2966 works = alloc_percpu(struct work_struct);
2972 for_each_online_cpu(cpu) {
2973 struct work_struct *work = per_cpu_ptr(works, cpu);
2975 INIT_WORK(work, func);
2976 schedule_work_on(cpu, work);
2979 for_each_online_cpu(cpu)
2980 flush_work(per_cpu_ptr(works, cpu));
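/*
 * Illustrative usage sketch, not part of workqueue.c: running a function on
 * every online CPU and waiting for all of them to finish, e.g. to drain a
 * hypothetical per-cpu cache.
 */
static void my_drain_cpu_cache(struct work_struct *unused)
{
	/* runs once on each online CPU, in process context */
}

static int my_drain_all_caches(void)
{
	return schedule_on_each_cpu(my_drain_cpu_cache);	/* 0 or -errno */
}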
2988 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2990 * Forces execution of the kernel-global workqueue and blocks until its
2993 * Think twice before calling this function! It's very easy to get into
2994 * trouble if you don't take great care. Either of the following situations
2995 * will lead to deadlock:
2997 * One of the work items currently on the workqueue needs to acquire
2998 * a lock held by your code or its caller.
3000 * Your code is running in the context of a work routine.
3002 * They will be detected by lockdep when they occur, but the first might not
3003 * occur very often. It depends on what work items are on the workqueue and
3004 * what locks they need, which you have no control over.
3006 * In most situations flushing the entire workqueue is overkill; you merely
3007 * need to know that a particular work item isn't queued and isn't running.
3008 * In such cases you should use cancel_delayed_work_sync() or
3009 * cancel_work_sync() instead.
3011 void flush_scheduled_work(void)
3013 flush_workqueue(system_wq);
3015 EXPORT_SYMBOL(flush_scheduled_work);
3018 * execute_in_process_context - reliably execute the routine with user context
3019 * @fn: the function to execute
3020 * @ew: guaranteed storage for the execute work structure (must
3021 * be available when the work executes)
3023 * Executes the function immediately if process context is available,
3024 * otherwise schedules the function for delayed execution.
3026 * Returns: 0 - function was executed
3027 * 1 - function was scheduled for execution
3029 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3031 if (!in_interrupt()) {
3036 INIT_WORK(&ew->work, fn);
3037 schedule_work(&ew->work);
3041 EXPORT_SYMBOL_GPL(execute_in_process_context);
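/*
 * Illustrative usage sketch, not part of workqueue.c: releasing an object
 * immediately when called from process context, or deferring the release to
 * the kernel-global workqueue when called from interrupt context.  The
 * execute_work storage lives in the (hypothetical) object itself.
 */
struct my_res {
	struct execute_work ew;
	/* ... resource data ... */
};

static void my_res_free(struct work_struct *work)
{
	struct my_res *res = container_of(work, struct my_res, ew.work);
	kfree(res);
}

static void my_res_put(struct my_res *res)
{
	execute_in_process_context(my_res_free, &res->ew);
}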
3043 int keventd_up(void)
3045 return system_wq != NULL;
3048 static int alloc_cwqs(struct workqueue_struct *wq)
3051 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
3052 * Make sure that the alignment isn't lower than that of
3053 * unsigned long long.
3055 const size_t size = sizeof(struct cpu_workqueue_struct);
3056 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
3057 __alignof__(unsigned long long));
3059 if (!(wq->flags & WQ_UNBOUND))
3060 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
3065 * Allocate enough room to align cwq and put an extra
3066 * pointer at the end pointing back to the originally
3067 * allocated pointer which will be used for free.
3069 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
3071 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
3072 *(void **)(wq->cpu_wq.single + 1) = ptr;
3076 /* just in case, make sure it's actually aligned */
3077 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
3078 return wq->cpu_wq.v ? 0 : -ENOMEM;
3081 static void free_cwqs(struct workqueue_struct *wq)
3083 if (!(wq->flags & WQ_UNBOUND))
3084 free_percpu(wq->cpu_wq.pcpu);
3085 else if (wq->cpu_wq.single) {
3086 /* the pointer to free is stored right after the cwq */
3087 kfree(*(void **)(wq->cpu_wq.single + 1));
3091 static int wq_clamp_max_active(int max_active, unsigned int flags,
3094 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3096 if (max_active < 1 || max_active > lim)
3097 printk(KERN_WARNING "workqueue: max_active %d requested for %s "
3098 "is out of range, clamping between %d and %d\n",
3099 max_active, name, 1, lim);
3101 return clamp_val(max_active, 1, lim);
3104 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3107 struct lock_class_key *key,
3108 const char *lock_name, ...)
3110 va_list args, args1;
3111 struct workqueue_struct *wq;
3115 /* determine namelen, allocate wq and format name */
3116 va_start(args, lock_name);
3117 va_copy(args1, args);
3118 namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3120 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
3124 vsnprintf(wq->name, namelen, fmt, args1);
3129 * Workqueues which may be used during memory reclaim should
3130 * have a rescuer to guarantee forward progress.
3132 if (flags & WQ_MEM_RECLAIM)
3133 flags |= WQ_RESCUER;
3135 max_active = max_active ?: WQ_DFL_ACTIVE;
3136 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3140 wq->saved_max_active = max_active;
3141 mutex_init(&wq->flush_mutex);
3142 atomic_set(&wq->nr_cwqs_to_flush, 0);
3143 INIT_LIST_HEAD(&wq->flusher_queue);
3144 INIT_LIST_HEAD(&wq->flusher_overflow);
3146 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3147 INIT_LIST_HEAD(&wq->list);
3149 if (alloc_cwqs(wq) < 0)
3152 for_each_cwq_cpu(cpu, wq) {
3153 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3154 struct global_cwq *gcwq = get_gcwq(cpu);
3155 int pool_idx = (bool)(flags & WQ_HIGHPRI);
3157 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3158 cwq->pool = &gcwq->pools[pool_idx];
3160 cwq->flush_color = -1;
3161 cwq->max_active = max_active;
3162 INIT_LIST_HEAD(&cwq->delayed_works);
3165 if (flags & WQ_RESCUER) {
3166 struct worker *rescuer;
3168 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3171 wq->rescuer = rescuer = alloc_worker();
3175 rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3177 if (IS_ERR(rescuer->task))
3180 rescuer->task->flags |= PF_THREAD_BOUND;
3181 wake_up_process(rescuer->task);
3185 * workqueue_lock protects global freeze state and workqueues
3186 * list. Grab it, set max_active accordingly and add the new
3187 * workqueue to workqueues list.
3189 spin_lock(&workqueue_lock);
3191 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3192 for_each_cwq_cpu(cpu, wq)
3193 get_cwq(cpu, wq)->max_active = 0;
3195 list_add(&wq->list, &workqueues);
3197 spin_unlock(&workqueue_lock);
3203 free_mayday_mask(wq->mayday_mask);
3209 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
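/*
 * Illustrative usage sketch, not part of workqueue.c: a driver on the
 * writeback path allocates its workqueue with WQ_MEM_RECLAIM so a rescuer
 * guarantees forward progress under memory pressure, and tears it down on
 * exit.  All names are hypothetical.
 */
static struct workqueue_struct *my_reclaim_wq;

static int __init my_wq_init(void)
{
	my_reclaim_wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM, 1);
	if (!my_reclaim_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_wq_exit(void)
{
	destroy_workqueue(my_reclaim_wq);	/* drains pending work first */
}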
3212 * destroy_workqueue - safely terminate a workqueue
3213 * @wq: target workqueue
3215 * Safely destroy a workqueue. All work currently pending will be done first.
3217 void destroy_workqueue(struct workqueue_struct *wq)
3221 /* drain it before proceeding with destruction */
3222 drain_workqueue(wq);
3225 * wq list is used to freeze wq, remove from list after
3226 * flushing is complete in case freeze races us.
3228 spin_lock(&workqueue_lock);
3229 list_del(&wq->list);
3230 spin_unlock(&workqueue_lock);
3233 for_each_cwq_cpu(cpu, wq) {
3234 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3237 for (i = 0; i < WORK_NR_COLORS; i++)
3238 BUG_ON(cwq->nr_in_flight[i]);
3239 BUG_ON(cwq->nr_active);
3240 BUG_ON(!list_empty(&cwq->delayed_works));
3243 if (wq->flags & WQ_RESCUER) {
3244 kthread_stop(wq->rescuer->task);
3245 free_mayday_mask(wq->mayday_mask);
3252 EXPORT_SYMBOL_GPL(destroy_workqueue);
3255 * workqueue_set_max_active - adjust max_active of a workqueue
3256 * @wq: target workqueue
3257 * @max_active: new max_active value.
3259 * Set max_active of @wq to @max_active.
3262 * Don't call from IRQ context.
3264 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3268 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3270 spin_lock(&workqueue_lock);
3272 wq->saved_max_active = max_active;
3274 for_each_cwq_cpu(cpu, wq) {
3275 struct global_cwq *gcwq = get_gcwq(cpu);
3277 spin_lock_irq(&gcwq->lock);
3279 if (!(wq->flags & WQ_FREEZABLE) ||
3280 !(gcwq->flags & GCWQ_FREEZING))
3281 get_cwq(gcwq->cpu, wq)->max_active = max_active;
3283 spin_unlock_irq(&gcwq->lock);
3286 spin_unlock(&workqueue_lock);
3288 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
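/*
 * Illustrative usage sketch, not part of workqueue.c: throttling an already
 * allocated workqueue at run time, e.g. in response to a sysfs knob.
 * "my_wq" is hypothetical.
 */
static void my_set_throttle(struct workqueue_struct *my_wq, bool slow)
{
	workqueue_set_max_active(my_wq, slow ? 1 : WQ_DFL_ACTIVE);
}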
3291 * workqueue_congested - test whether a workqueue is congested
3292 * @cpu: CPU in question
3293 * @wq: target workqueue
3295 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3296 * no synchronization around this function and the test result is
3297 * unreliable and only useful as advisory hints or for debugging.
3300 * %true if congested, %false otherwise.
3302 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3304 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3306 return !list_empty(&cwq->delayed_works);
3308 EXPORT_SYMBOL_GPL(workqueue_congested);
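/*
 * Illustrative usage sketch, not part of workqueue.c: using the congestion
 * hint to skip queueing optional, low-value work when the local CPU's cwq
 * is already backed up.  All names are hypothetical.
 */
static void my_queue_stats(struct workqueue_struct *my_stats_wq,
			   struct work_struct *stats_work)
{
	if (!workqueue_congested(raw_smp_processor_id(), my_stats_wq))
		queue_work(my_stats_wq, stats_work);
}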
3311 * work_cpu - return the last known associated cpu for @work
3312 * @work: the work of interest
3315 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3317 unsigned int work_cpu(struct work_struct *work)
3319 struct global_cwq *gcwq = get_work_gcwq(work);
3321 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3323 EXPORT_SYMBOL_GPL(work_cpu);
3326 * work_busy - test whether a work is currently pending or running
3327 * @work: the work to be tested
3329 * Test whether @work is currently pending or running. There is no
3330 * synchronization around this function and the test result is
3331 * unreliable and only useful as advisory hints or for debugging.
3332 * Especially for reentrant wqs, the pending state might hide the
3336 * OR'd bitmask of WORK_BUSY_* bits.
3338 unsigned int work_busy(struct work_struct *work)
3340 struct global_cwq *gcwq = get_work_gcwq(work);
3341 unsigned long flags;
3342 unsigned int ret = 0;
3347 spin_lock_irqsave(&gcwq->lock, flags);
3349 if (work_pending(work))
3350 ret |= WORK_BUSY_PENDING;
3351 if (find_worker_executing_work(gcwq, work))
3352 ret |= WORK_BUSY_RUNNING;
3354 spin_unlock_irqrestore(&gcwq->lock, flags);
3358 EXPORT_SYMBOL_GPL(work_busy);
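/*
 * Illustrative usage sketch, not part of workqueue.c: dumping the advisory
 * state of a work item from a debug path.  "my_work" is hypothetical.
 */
static void my_dump_work_state(struct work_struct *my_work)
{
	unsigned int busy = work_busy(my_work);

	pr_info("work %p:%s%s\n", my_work,
		busy & WORK_BUSY_PENDING ? " PENDING" : "",
		busy & WORK_BUSY_RUNNING ? " RUNNING" : "");
}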
3363 * There are two challenges in supporting CPU hotplug. Firstly, there
3364 * are a lot of assumptions on strong associations among work, cwq and
3365 * gcwq which make migrating pending and scheduled works very
3366 * difficult to implement without impacting hot paths. Secondly,
3367 * gcwqs serve a mix of short, long and very long running works, making
3368 * blocked draining impractical.
3370 * This is solved by allowing a gcwq to be detached from CPU, running it
3371 * with unbound workers and allowing it to be reattached later if the cpu
3372 * comes back online. A separate thread is created to govern a gcwq in
3373 * such state and is called the trustee of the gcwq.
3375 * Trustee states and their descriptions.
3377 * START Command state used on startup. On CPU_DOWN_PREPARE, a
3378 * new trustee is started with this state.
3380 * IN_CHARGE Once started, trustee will enter this state after
3381 * assuming the manager role and making all existing
3382 * workers rogue. DOWN_PREPARE waits for trustee to
3383 * enter this state. After reaching IN_CHARGE, trustee
3384 * tries to execute the pending worklist until it's empty
3385 * and the state is set to BUTCHER, or the state is set
3388 * BUTCHER Command state which is set by the cpu callback after
3389 * the cpu has gone down.  Once this state is set, the trustee
3390 * knows that there will be no new works on the worklist
3391 * and once the worklist is empty it can proceed to
3392 * killing idle workers.
3394 * RELEASE Command state which is set by the cpu callback if the
3395 * cpu down has been canceled or it has come online
3396 * again. After recognizing this state, trustee stops
3397 * trying to drain or butcher and clears ROGUE, rebinds
3398 * all remaining workers back to the cpu and releases
3401 * DONE Trustee will enter this state after BUTCHER or RELEASE
3404 *          trustee                 CPU                draining
3405 *          took over               down               complete
3406 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3408 *                        | CPU is back online  v   return workers |
3409 *                         ----------------> RELEASE --------------
3412 /* claim manager positions of all pools */
3413 static void gcwq_claim_management(struct global_cwq *gcwq)
3415 struct worker_pool *pool;
3417 for_each_worker_pool(pool, gcwq)
3418 mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
3421 /* release manager positions */
3422 static void gcwq_release_management(struct global_cwq *gcwq)
3424 struct worker_pool *pool;
3426 for_each_worker_pool(pool, gcwq)
3427 mutex_unlock(&pool->manager_mutex);
3431 * trustee_wait_event_timeout - timed event wait for trustee
3432 * @cond: condition to wait for
3433 * @timeout: timeout in jiffies
3435 * wait_event_timeout() for trustee to use. Handles locking and
3436 * checks for RELEASE request.
3439 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3440 * multiple times. To be used by trustee.
3443 * Positive indicating left time if @cond is satisfied, 0 if timed
3444 * out, -1 if canceled.
3446 #define trustee_wait_event_timeout(cond, timeout) ({ \
3447 long __ret = (timeout); \
3448 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3450 spin_unlock_irq(&gcwq->lock); \
3451 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
3452 (gcwq->trustee_state == TRUSTEE_RELEASE), \
3454 spin_lock_irq(&gcwq->lock); \
3456 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
3460 * trustee_wait_event - event wait for trustee
3461 * @cond: condition to wait for
3463 * wait_event() for trustee to use. Automatically handles locking and
3464 * checks for CANCEL request.
3467 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3468 * multiple times. To be used by trustee.
3471 * 0 if @cond is satisfied, -1 if canceled.
3473 #define trustee_wait_event(cond) ({ \
3475 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3476 __ret1 < 0 ? -1 : 0; \
3479 static int __cpuinit trustee_thread(void *__gcwq)
3481 struct global_cwq *gcwq = __gcwq;
3482 struct worker_pool *pool;
3483 struct worker *worker;
3484 struct work_struct *work;
3485 struct hlist_node *pos;
3488 BUG_ON(gcwq->cpu != smp_processor_id());
3490 gcwq_claim_management(gcwq);
3491 spin_lock_irq(&gcwq->lock);
3494 * We've claimed all manager positions. Make all workers unbound
3495 * and set DISASSOCIATED. Before this, all workers except for the
3496 * ones which are still executing works from before the last CPU
3497 * down must be on the cpu. After this, they may become diasporas.
3499 for_each_worker_pool(pool, gcwq)
3500 list_for_each_entry(worker, &pool->idle_list, entry)
3501 worker->flags |= WORKER_UNBOUND;
3503 for_each_busy_worker(worker, i, pos, gcwq)
3504 worker->flags |= WORKER_UNBOUND;
3506 gcwq->flags |= GCWQ_DISASSOCIATED;
3509 * Call schedule() so that we cross rq->lock and thus can guarantee
3510 * sched callbacks see the unbound flag. This is necessary as
3511 * scheduler callbacks may be invoked from other cpus.
3513 spin_unlock_irq(&gcwq->lock);
3515 spin_lock_irq(&gcwq->lock);
3518 * Sched callbacks are disabled now. Zap nr_running. After
3519 * this, nr_running stays zero and need_more_worker() and
3520 * keep_working() are always true as long as the worklist is
3523 for_each_worker_pool(pool, gcwq)
3524 atomic_set(get_pool_nr_running(pool), 0);
3526 spin_unlock_irq(&gcwq->lock);
3527 for_each_worker_pool(pool, gcwq)
3528 del_timer_sync(&pool->idle_timer);
3529 spin_lock_irq(&gcwq->lock);
3532 * We're now in charge. Notify and proceed to drain. We need
3533 * to keep the gcwq running during the whole CPU down
3534 * procedure as other cpu hotunplug callbacks may need to
3535 * flush currently running tasks.
3537 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3538 wake_up_all(&gcwq->trustee_wait);
3541 * The original cpu is in the process of dying and may go away
3542 * anytime now. When that happens, we and all workers would
3543 * be migrated to other cpus.  Try draining any remaining work.  We
3544 * want to get it over with ASAP - spam rescuers, wake up as
3545 * many idlers as necessary and create new ones till the
3546 * worklist is empty. Note that if the gcwq is frozen, there
3547 * may be frozen works in freezable cwqs. Don't declare
3548 * completion while frozen.
3553 for_each_worker_pool(pool, gcwq)
3554 busy |= pool->nr_workers != pool->nr_idle;
3556 if (!busy && !(gcwq->flags & GCWQ_FREEZING) &&
3557 gcwq->trustee_state != TRUSTEE_IN_CHARGE)
3560 for_each_worker_pool(pool, gcwq) {
3563 list_for_each_entry(work, &pool->worklist, entry) {
3568 list_for_each_entry(worker, &pool->idle_list, entry) {
3571 wake_up_process(worker->task);
3574 if (need_to_create_worker(pool)) {
3575 spin_unlock_irq(&gcwq->lock);
3576 worker = create_worker(pool);
3577 spin_lock_irq(&gcwq->lock);
3579 start_worker(worker);
3583 /* give a breather */
3584 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3588 gcwq_release_management(gcwq);
3590 /* notify completion */
3591 gcwq->trustee = NULL;
3592 gcwq->trustee_state = TRUSTEE_DONE;
3593 wake_up_all(&gcwq->trustee_wait);
3594 spin_unlock_irq(&gcwq->lock);
3599 * wait_trustee_state - wait for trustee to enter the specified state
3600 * @gcwq: gcwq the trustee of interest belongs to
3601 * @state: target state to wait for
3603 * Wait for the trustee to reach @state. DONE is already matched.
3606 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3607 * multiple times. To be used by cpu_callback.
3609 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3610 __releases(&gcwq->lock)
3611 __acquires(&gcwq->lock)
3613 if (!(gcwq->trustee_state == state ||
3614 gcwq->trustee_state == TRUSTEE_DONE)) {
3615 spin_unlock_irq(&gcwq->lock);
3616 __wait_event(gcwq->trustee_wait,
3617 gcwq->trustee_state == state ||
3618 gcwq->trustee_state == TRUSTEE_DONE);
3619 spin_lock_irq(&gcwq->lock);
3623 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3624 unsigned long action,
3627 unsigned int cpu = (unsigned long)hcpu;
3628 struct global_cwq *gcwq = get_gcwq(cpu);
3629 struct task_struct *new_trustee = NULL;
3630 struct worker_pool *pool;
3631 unsigned long flags;
3633 action &= ~CPU_TASKS_FROZEN;
3636 case CPU_DOWN_PREPARE:
3637 new_trustee = kthread_create(trustee_thread, gcwq,
3638 "workqueue_trustee/%d\n", cpu);
3639 if (IS_ERR(new_trustee))
3640 return notifier_from_errno(PTR_ERR(new_trustee));
3641 kthread_bind(new_trustee, cpu);
3644 case CPU_UP_PREPARE:
3645 for_each_worker_pool(pool, gcwq) {
3646 struct worker *worker;
3648 if (pool->nr_workers)
3651 worker = create_worker(pool);
3655 spin_lock_irq(&gcwq->lock);
3656 start_worker(worker);
3657 spin_unlock_irq(&gcwq->lock);
3661 /* some are called w/ irq disabled, don't disturb irq status */
3662 spin_lock_irqsave(&gcwq->lock, flags);
3665 case CPU_DOWN_PREPARE:
3666 /* initialize trustee and tell it to acquire the gcwq */
3667 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3668 gcwq->trustee = new_trustee;
3669 gcwq->trustee_state = TRUSTEE_START;
3670 wake_up_process(gcwq->trustee);
3671 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3675 gcwq->trustee_state = TRUSTEE_BUTCHER;
3678 case CPU_DOWN_FAILED:
3680 if (gcwq->trustee_state != TRUSTEE_DONE) {
3681 gcwq->trustee_state = TRUSTEE_RELEASE;
3682 wake_up_process(gcwq->trustee);
3683 wait_trustee_state(gcwq, TRUSTEE_DONE);
3686 spin_unlock_irq(&gcwq->lock);
3687 gcwq_claim_management(gcwq);
3688 spin_lock_irq(&gcwq->lock);
3690 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3692 rebind_workers(gcwq);
3694 gcwq_release_management(gcwq);
3698 spin_unlock_irqrestore(&gcwq->lock, flags);
3700 return notifier_from_errno(0);
3704 * Workqueues should be brought up before normal priority CPU notifiers.
3705 * This will be registered as a high priority CPU notifier.
3707 static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3708 unsigned long action,
3711 switch (action & ~CPU_TASKS_FROZEN) {
3712 case CPU_UP_PREPARE:
3713 case CPU_DOWN_FAILED:
3715 return workqueue_cpu_callback(nfb, action, hcpu);
3721 * Workqueues should be brought down after normal priority CPU notifiers.
3722 * This will be registered as a low priority CPU notifier.
3724 static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3725 unsigned long action,
3728 switch (action & ~CPU_TASKS_FROZEN) {
3729 case CPU_DOWN_PREPARE:
3731 return workqueue_cpu_callback(nfb, action, hcpu);
3738 struct work_for_cpu {
3739 struct completion completion;
3745 static int do_work_for_cpu(void *_wfc)
3747 struct work_for_cpu *wfc = _wfc;
3748 wfc->ret = wfc->fn(wfc->arg);
3749 complete(&wfc->completion);
3754 * work_on_cpu - run a function in user context on a particular cpu
3755 * @cpu: the cpu to run on
3756 * @fn: the function to run
3757 * @arg: the function arg
3759 * This will return the value @fn returns.
3760 * It is up to the caller to ensure that the cpu doesn't go offline.
3761 * The caller must not hold any locks which would prevent @fn from completing.
3763 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3765 struct task_struct *sub_thread;
3766 struct work_for_cpu wfc = {
3767 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3772 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3773 if (IS_ERR(sub_thread))
3774 return PTR_ERR(sub_thread);
3775 kthread_bind(sub_thread, cpu);
3776 wake_up_process(sub_thread);
3777 wait_for_completion(&wfc.completion);
3780 EXPORT_SYMBOL_GPL(work_on_cpu);
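/*
 * Illustrative usage sketch, not part of workqueue.c: running a setup
 * routine on a specific CPU while the caller keeps that CPU online with
 * get_online_cpus().  All names are hypothetical.
 */
static long my_setup_fn(void *arg)
{
	/* runs in process context on the requested CPU */
	return 0;
}

static long my_setup_on_cpu(unsigned int cpu)
{
	long ret = -ENODEV;

	get_online_cpus();
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, my_setup_fn, NULL);
	put_online_cpus();
	return ret;
}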
3781 #endif /* CONFIG_SMP */
3783 #ifdef CONFIG_FREEZER
3786 * freeze_workqueues_begin - begin freezing workqueues
3788 * Start freezing workqueues. After this function returns, all freezable
3789 * workqueues will queue new works to their frozen_works list instead of
3793 * Grabs and releases workqueue_lock and gcwq->lock's.
3795 void freeze_workqueues_begin(void)
3799 spin_lock(&workqueue_lock);
3801 BUG_ON(workqueue_freezing);
3802 workqueue_freezing = true;
3804 for_each_gcwq_cpu(cpu) {
3805 struct global_cwq *gcwq = get_gcwq(cpu);
3806 struct workqueue_struct *wq;
3808 spin_lock_irq(&gcwq->lock);
3810 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3811 gcwq->flags |= GCWQ_FREEZING;
3813 list_for_each_entry(wq, &workqueues, list) {
3814 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3816 if (cwq && wq->flags & WQ_FREEZABLE)
3817 cwq->max_active = 0;
3820 spin_unlock_irq(&gcwq->lock);
3823 spin_unlock(&workqueue_lock);
3827 * freeze_workqueues_busy - are freezable workqueues still busy?
3829 * Check whether freezing is complete. This function must be called
3830 * between freeze_workqueues_begin() and thaw_workqueues().
3833 * Grabs and releases workqueue_lock.
3836 * %true if some freezable workqueues are still busy. %false if freezing
3839 bool freeze_workqueues_busy(void)
3844 spin_lock(&workqueue_lock);
3846 BUG_ON(!workqueue_freezing);
3848 for_each_gcwq_cpu(cpu) {
3849 struct workqueue_struct *wq;
3851 * nr_active is monotonically decreasing. It's safe
3852 * to peek without lock.
3854 list_for_each_entry(wq, &workqueues, list) {
3855 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3857 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3860 BUG_ON(cwq->nr_active < 0);
3861 if (cwq->nr_active) {
3868 spin_unlock(&workqueue_lock);
3873 * thaw_workqueues - thaw workqueues
3875 * Thaw workqueues. Normal queueing is restored and all collected
3876 * frozen works are transferred to their respective gcwq worklists.
3879 * Grabs and releases workqueue_lock and gcwq->lock's.
3881 void thaw_workqueues(void)
3885 spin_lock(&workqueue_lock);
3887 if (!workqueue_freezing)
3890 for_each_gcwq_cpu(cpu) {
3891 struct global_cwq *gcwq = get_gcwq(cpu);
3892 struct worker_pool *pool;
3893 struct workqueue_struct *wq;
3895 spin_lock_irq(&gcwq->lock);
3897 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3898 gcwq->flags &= ~GCWQ_FREEZING;
3900 list_for_each_entry(wq, &workqueues, list) {
3901 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3903 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3906 /* restore max_active and repopulate worklist */
3907 cwq->max_active = wq->saved_max_active;
3909 while (!list_empty(&cwq->delayed_works) &&
3910 cwq->nr_active < cwq->max_active)
3911 cwq_activate_first_delayed(cwq);
3914 for_each_worker_pool(pool, gcwq)
3915 wake_up_worker(pool);
3917 spin_unlock_irq(&gcwq->lock);
3920 workqueue_freezing = false;
3922 spin_unlock(&workqueue_lock);
3924 #endif /* CONFIG_FREEZER */
3926 static int __init init_workqueues(void)
3931 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3932 cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3934 /* initialize gcwqs */
3935 for_each_gcwq_cpu(cpu) {
3936 struct global_cwq *gcwq = get_gcwq(cpu);
3937 struct worker_pool *pool;
3939 spin_lock_init(&gcwq->lock);
3941 gcwq->flags |= GCWQ_DISASSOCIATED;
3943 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3944 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3946 for_each_worker_pool(pool, gcwq) {
3948 INIT_LIST_HEAD(&pool->worklist);
3949 INIT_LIST_HEAD(&pool->idle_list);
3951 init_timer_deferrable(&pool->idle_timer);
3952 pool->idle_timer.function = idle_worker_timeout;
3953 pool->idle_timer.data = (unsigned long)pool;
3955 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
3956 (unsigned long)pool);
3958 mutex_init(&pool->manager_mutex);
3959 ida_init(&pool->worker_ida);
3962 init_waitqueue_head(&gcwq->rebind_hold);
3964 gcwq->trustee_state = TRUSTEE_DONE;
3965 init_waitqueue_head(&gcwq->trustee_wait);
3968 /* create the initial worker */
3969 for_each_online_gcwq_cpu(cpu) {
3970 struct global_cwq *gcwq = get_gcwq(cpu);
3971 struct worker_pool *pool;
3973 if (cpu != WORK_CPU_UNBOUND)
3974 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3976 for_each_worker_pool(pool, gcwq) {
3977 struct worker *worker;
3979 worker = create_worker(pool);
3981 spin_lock_irq(&gcwq->lock);
3982 start_worker(worker);
3983 spin_unlock_irq(&gcwq->lock);
3987 system_wq = alloc_workqueue("events", 0, 0);
3988 system_long_wq = alloc_workqueue("events_long", 0, 0);
3989 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3990 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3991 WQ_UNBOUND_MAX_ACTIVE);
3992 system_freezable_wq = alloc_workqueue("events_freezable",
3994 system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
3995 WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
3996 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3997 !system_unbound_wq || !system_freezable_wq ||
3998 !system_nrt_freezable_wq);
4001 early_initcall(init_workqueues);