/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002	Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010	SUSE Linux Products GmbH
 * Copyright (C) 2010	Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>

#include "workqueue_internal.h"

enum {
        /*
         * worker_pool flags
         *
         * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
         * be executing on any CPU.  The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED should be flipped only while holding
         * attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
        POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */

        /* worker flags */
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */

        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
                                  WORKER_UNBOUND | WORKER_REBOUND,

        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */

        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */

        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */

        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
                                                /* call for help after 10ms
                                                   (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */

        /*
         * Rescue workers are used only on emergencies and shared by
         * all cpus.  Give MIN_NICE.
         */
        RESCUER_NICE_LEVEL      = MIN_NICE,
        HIGHPRI_NICE_LEVEL      = MIN_NICE,

        WQ_NAME_LEN             = 24,
};
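
/*
 * For example, with HZ=1000 the initial mayday timeout above works out to
 * HZ / 100 = 10 ticks (10ms); with HZ=100 it would be a single tick, so it
 * is clamped to the two-tick minimum (20ms).
 */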

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: pool->attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      sched-RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
        spinlock_t              lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
        unsigned int            flags;          /* X: flags */

        unsigned long           watchdog_ts;    /* L: watchdog timestamp */

        struct list_head        worklist;       /* L: list of pending works */
        int                     nr_workers;     /* L: total number of workers */

        /* nr_idle includes the ones off idle_list for rebinding */
        int                     nr_idle;        /* L: currently idle ones */

        struct list_head        idle_list;      /* X: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */

        /* a worker is either on busy_hash or idle_list, or the manager */
        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                                /* L: hash of busy workers */

        /* see manage_workers() for details on the two manager mutexes */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
        struct completion       *detach_completion; /* all workers detached */

        struct ida              worker_ida;     /* worker IDs for task name */

        struct workqueue_attrs  *attrs;         /* I: worker attributes */
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
        int                     refcnt;         /* PL: refcnt for unbound pools */

        /*
         * The current concurrency level.  As it's likely to be accessed
         * from other CPUs during try_to_wake_up(), put it in a separate
         * cacheline.
         */
        atomic_t                nr_running ____cacheline_aligned_in_smp;

        /*
         * Destruction of pool is sched-RCU protected to allow dereferences
         * from get_work_pool().
         */
        struct rcu_head         rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
        int                     flush_color;    /* L: flushing color */
        int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
        int                     nr_active;      /* L: nr of active works */
        int                     max_active;     /* L: max active works */
        struct list_head        delayed_works;  /* L: delayed works */
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        struct list_head        mayday_node;    /* MD: node on wq->maydays */

        /*
         * Release of unbound pwq is punted to system_wq.  See put_pwq()
         * and pwq_unbound_release_workfn() for details.  pool_workqueue
         * itself is also sched-RCU protected so that the first pwq can be
         * determined without grabbing wq->mutex.
         */
        struct work_struct      unbound_release_work;
        struct rcu_head         rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
        struct list_head        list;           /* WQ: list of flushers */
        int                     flush_color;    /* WQ: flush color waiting for */
        struct completion       done;           /* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
        struct list_head        pwqs;           /* WR: all pwqs of this wq */
        struct list_head        list;           /* PR: list of all workqueues */

        struct mutex            mutex;          /* protects this wq */
        int                     work_color;     /* WQ: current work color */
        int                     flush_color;    /* WQ: current flush color */
        atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* WQ: first flusher */
        struct list_head        flusher_queue;  /* WQ: flush waiters */
        struct list_head        flusher_overflow; /* WQ: flush overflow list */

        struct list_head        maydays;        /* MD: pwqs requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */

        int                     nr_drainers;    /* WQ: drain in progress */
        int                     saved_max_active; /* WQ: saved pwq max_active */

        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
        struct wq_device        *wq_dev;        /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
#endif
        char                    name[WQ_NAME_LEN]; /* I: workqueue name */

        /*
         * Destruction of workqueue_struct is sched-RCU protected to allow
         * walking the workqueues list without grabbing wq_pool_mutex.
         * This is used to dump all workqueues from sysrq.
         */
        struct rcu_head         rcu;

        /* hot fields used during command issue, aligned to cacheline */
        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
                                        /* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;                  /* can kworkers be created yet? */

static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */

static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
static bool workqueue_freezing;         /* PL: have wqs started freezing? */

/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "sched RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq)                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
                         !lockdep_is_held(&wq->mutex),                  \
                         "sched RCU or wq->mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
                         !lockdep_is_held(&wq->mutex) &&                \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "sched RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)                             \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or sched RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)                                         \
        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
                else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with @pool->attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)                              \
        list_for_each_entry((worker), &(pool)->workers, node)           \
                if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
                else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or sched RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)                                           \
        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
                else

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
        return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
        struct work_struct *work = addr;

        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}

static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .is_static_object = work_is_static_object,
        .fixup_init     = work_fixup_init,
        .fixup_free     = work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
        if (onstack)
                debug_object_init_on_stack(work, &work_debug_descr);
        else
                debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
        destroy_timer_on_stack(&work->timer);
        debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
        int ret;

        lockdep_assert_held(&wq_pool_mutex);

        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
                        GFP_KERNEL);
        if (ret >= 0) {
                pool->id = ret;
                return 0;
        }
        return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
{
        assert_rcu_or_wq_mutex_or_pool_mutex(wq);

        /*
         * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
         * delayed item is pending.  The plan is to keep CPU -> NODE
         * mapping valid and stable across CPU on/offlines.  Once that
         * happens, this workaround can be removed.
         */
        if (unlikely(node == NUMA_NO_NODE))
                return wq->dfl_pwq;

        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
        return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
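/*
 * A rough sketch of the encoding described above (exact bit widths depend
 * on the configuration):
 *
 *   queued:    [ pwq pointer (pwq is suitably aligned) | WORK_STRUCT_* flags ]
 *              get_work_pwq() masks with WORK_STRUCT_WQ_DATA_MASK.
 *
 *   off queue: [ pool ID << WORK_OFFQ_POOL_SHIFT | WORK_OFFQ_* flags ]
 *              get_work_pool_id() shifts right by WORK_OFFQ_POOL_SHIFT.
 */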
static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
{
        WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
{
        set_work_data(work, (unsigned long)pwq,
                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
                                           int pool_id)
{
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
                      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
                                            int pool_id)
{
        /*
         * The following wmb is paired with the implied mb in
         * test_and_set_bit(PENDING) and ensures all updates to @work made
         * here are visible to and precede any updates by the next PENDING
         * owner.
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
        /*
         * The following mb guarantees that previous clear of a PENDING bit
         * will not be reordered with any speculative LOADS or STORES from
         * work->current_func, which is executed afterwards.  This possible
         * reordering can lead to a missed execution on attempt to queue
         * the same @work.  E.g. consider this case:
         *
         *   CPU#0                         CPU#1
         *   ----------------------------  --------------------------------
         *
         * 1  STORE event_indicated
         * 2  queue_work_on() {
         * 3    test_and_set_bit(PENDING)
         * 4 }                             set_..._and_clear_pending() {
         * 5                                 set_work_data() # clear bit
         * 6                                 smp_mb()
         * 7                               work->current_func() {
         * 8                                  LOAD event_indicated
         *                                 }
         *
         * Without an explicit full barrier speculative LOAD on line 8 can
         * be executed before CPU#0 does STORE on line 1.  If that happens,
         * CPU#0 observes the PENDING bit is still set and new execution of
         * a @work is not queued in the hope that CPU#1 will eventually
         * finish the queued @work.  Meanwhile CPU#1 does not see
         * event_indicated is set, because speculative LOAD was executed
         * before actual STORE.
         */
        smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
        smp_wmb();      /* see set_work_pool_and_clear_pending() */
        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        if (data & WORK_STRUCT_PWQ)
                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
        else
                return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under sched-RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or with preemption disabled.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);
        int pool_id;

        assert_rcu_or_pool_mutex();

        if (data & WORK_STRUCT_PWQ)
                return ((struct pool_workqueue *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;

        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
        if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;

        return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        if (data & WORK_STRUCT_PWQ)
                return ((struct pool_workqueue *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

        return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
        unsigned long pool_id = get_work_pool_id(work);

        pool_id <<= WORK_OFFQ_POOL_SHIFT;
        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
        return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
        return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) &&
                atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
        return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
        bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;

        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

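/*
 * For example, with MAX_IDLE_WORKERS_RATIO == 4 a pool with 16 busy workers
 * keeps up to 5 idle ones; once nr_idle reaches 2 + 16 / 4 = 6,
 * too_many_workers() above starts returning %true and the idle timer may
 * begin trimming the excess.
 */
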
/*
 * Wake up functions.
 */

/* Return the first idle worker.  Safe with preemption disabled */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
        if (unlikely(list_empty(&pool->idle_list)))
                return NULL;

        return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
        struct worker *worker = first_idle_worker(pool);

        if (likely(worker))
                wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, int cpu)
{
        struct worker *worker = kthread_data(task);

        if (!(worker->flags & WORKER_NOT_RUNNING)) {
                WARN_ON_ONCE(worker->pool->cpu != cpu);
                atomic_inc(&worker->pool->nr_running);
        }
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * Return:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task)
{
        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
        struct worker_pool *pool;

        /*
         * Rescuers, which may not have all the fields set up like normal
         * workers, also reach here, let's not access anything before
         * checking NOT_RUNNING.
         */
        if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;

        pool = worker->pool;

        /* this can only happen on the local cpu */
        if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
                return NULL;

        /*
         * The counterpart of the following dec_and_test, implied mb,
         * worklist not empty test sequence is in insert_work().
         * Please read comment there.
         *
         * NOT_RUNNING is clear.  This means that we're bound to and
         * running on the local cpu w/ rq lock held and preemption
         * disabled, which in turn means that no one else could be
         * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
        if (atomic_dec_and_test(&pool->nr_running) &&
            !list_empty(&pool->worklist))
                to_wakeup = first_idle_worker(pool);
        return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;

        WARN_ON_ONCE(worker->task != current);

        /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
                atomic_dec(&pool->nr_running);
        }

        worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;
        unsigned int oflags = worker->flags;

        WARN_ON_ONCE(worker->task != current);

        worker->flags &= ~flags;

        /*
         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
         * of multiple flags, not a single flag.
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        atomic_inc(&pool->nr_running);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                 struct work_struct *work)
{
        struct worker *worker;

        hash_for_each_possible(pool->busy_hash, worker, hentry,
                               (unsigned long)work)
                if (worker->current_work == work &&
                    worker->current_func == work->func)
                        return worker;

        return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
{
        struct work_struct *n;

        /*
         * Linked worklist will always end before the end of the list,
         * use NULL for list head.
         */
        list_for_each_entry_safe_from(work, n, NULL, entry) {
                list_move_tail(&work->entry, head);
                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
                        break;
        }

        /*
         * If we're already inside safe list traversal and have moved
         * multiple works to the scheduled queue, the next position
         * needs to be updated.
         */
        if (nextp)
                *nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
        lockdep_assert_held(&pwq->pool->lock);
        WARN_ON_ONCE(pwq->refcnt <= 0);
        pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
        lockdep_assert_held(&pwq->pool->lock);
        if (likely(--pwq->refcnt))
                return;
        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
                return;
        /*
         * @pwq can't be released under pool->lock, bounce to
         * pwq_unbound_release_workfn().  This never recurses on the same
         * pool->lock as this path is taken only for unbound workqueues and
         * the release work item is scheduled on a per-cpu workqueue.  To
         * avoid lockdep warning, unbound pool->locks are given lockdep
         * subclass of 1 in get_unbound_pool().
         */
        schedule_work(&pwq->unbound_release_work);
}

/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
        if (pwq) {
                /*
                 * As both pwqs and pools are sched-RCU protected, the
                 * following lock operations are safe.
                 */
                spin_lock_irq(&pwq->pool->lock);
                put_pwq(pwq);
                spin_unlock_irq(&pwq->pool->lock);
        }
}

static void pwq_activate_delayed_work(struct work_struct *work)
{
        struct pool_workqueue *pwq = get_work_pwq(work);

        trace_workqueue_activate_work(work);
        if (list_empty(&pwq->pool->worklist))
                pwq->pool->watchdog_ts = jiffies;
        move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        pwq->nr_active++;
}

static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
        struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                    struct work_struct, entry);

        pwq_activate_delayed_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
        /* uncolored work items don't participate in flushing or nr_active */
        if (color == WORK_NO_COLOR)
                goto out_put;

        pwq->nr_in_flight[color]--;

        pwq->nr_active--;
        if (!list_empty(&pwq->delayed_works)) {
                /* one down, submit a delayed one */
                if (pwq->nr_active < pwq->max_active)
                        pwq_activate_first_delayed(pwq);
        }

        /* is flush in progress and are we at the flushing tip? */
        if (likely(pwq->flush_color != color))
                goto out_put;

        /* are there still in-flight works? */
        if (pwq->nr_in_flight[color])
                goto out_put;

        /* this pwq is done, clear flush_color */
        pwq->flush_color = -1;

        /*
         * If this was the last pwq, wake up the first flusher.  It
         * will handle the rest.
         */
        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
                complete(&pwq->wq->first_flusher->done);
out_put:
        put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
{
        struct worker_pool *pool;
        struct pool_workqueue *pwq;

        local_irq_save(*flags);

        /* try to steal the timer if it exists */
        if (is_dwork) {
                struct delayed_work *dwork = to_delayed_work(work);

                /*
                 * dwork->timer is irqsafe.  If del_timer() fails, it's
                 * guaranteed that the timer is not queued anywhere and not
                 * running on the local CPU.
                 */
                if (likely(del_timer(&dwork->timer)))
                        return 1;
        }

        /* try to claim PENDING the normal way */
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
        pool = get_work_pool(work);
        if (!pool)
                goto fail;

        spin_lock(&pool->lock);
        /*
         * work->data is guaranteed to point to pwq only while the work
         * item is queued on pwq->wq, and both updating work->data to point
         * to pwq on queueing and to pool on dequeueing are done under
         * pwq->pool->lock.  This in turn guarantees that, if work->data
         * points to pwq which is associated with a locked pool, the work
         * item is currently queued on that pool.
         */
        pwq = get_work_pwq(work);
        if (pwq && pwq->pool == pool) {
                debug_work_deactivate(work);

                /*
                 * A delayed work item cannot be grabbed directly because
                 * it might have linked NO_COLOR work items which, if left
                 * on the delayed_list, will confuse pwq->nr_active
                 * management later on and cause stall.  Make sure the work
                 * item is activated before grabbing.
                 */
                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
                        pwq_activate_delayed_work(work);

                list_del_init(&work->entry);
                pwq_dec_nr_in_flight(pwq, get_work_color(work));

                /* work->data points to pwq iff queued, point to pool */
                set_work_pool_and_keep_pending(work, pool->id);

                spin_unlock(&pool->lock);
                return 1;
        }
        spin_unlock(&pool->lock);
fail:
        local_irq_restore(*flags);
        if (work_is_canceling(work))
                return -ENOENT;
        cpu_relax();
        return -EAGAIN;
}

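/*
 * A minimal sketch of the usual calling pattern (the modify/cancel paths in
 * this file follow this shape): keep retrying while PENDING is transiently
 * unavailable, then act with IRQs still disabled and restore them afterwards.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(&dwork->work, true, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (likely(ret >= 0)) {
 *		__queue_delayed_work(cpu, wq, dwork, delay);
 *		local_irq_restore(flags);
 *	}
 */
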
/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
                        struct list_head *head, unsigned int extra_flags)
{
        struct worker_pool *pool = pwq->pool;

        /* we own @work, set data and link */
        set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
        get_pwq(pwq);

        /*
         * Ensure either wq_worker_sleeping() sees the above
         * list_add_tail() or we see zero nr_running to avoid workers lying
         * around lazily while there are works to be processed.
         */
        smp_mb();

        if (__need_more_worker(pool))
                wake_up_worker(pool);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
        struct worker *worker;

        worker = current_wq_worker();
        /*
         * Return %true iff I'm a worker executing a work item on @wq.  If
         * I'm @worker, it's safe to dereference it without locking.
         */
        return worker && worker->current_pwq->wq == wq;
}

/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
        static bool printed_dbg_warning;
        int new_cpu;

        if (likely(!wq_debug_force_rr_cpu)) {
                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
                        return cpu;
        } else if (!printed_dbg_warning) {
                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
                printed_dbg_warning = true;
        }

        if (cpumask_empty(wq_unbound_cpumask))
                return cpu;

        new_cpu = __this_cpu_read(wq_rr_cpu_last);
        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
        if (unlikely(new_cpu >= nr_cpu_ids)) {
                new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
                if (unlikely(new_cpu >= nr_cpu_ids))
                        return cpu;
        }
        __this_cpu_write(wq_rr_cpu_last, new_cpu);

        return new_cpu;
}

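/*
 * For example, if wq_unbound_cpumask covers CPUs 0-3 and an unbound work
 * item is queued from CPU 5, the local CPU is not allowed, so the item goes
 * to the next allowed online CPU after the per-CPU wq_rr_cpu_last cursor and
 * the cursor is advanced; successive submissions from CPU 5 round-robin over
 * CPUs 0-3.
 */
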
static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
{
        struct pool_workqueue *pwq;
        struct worker_pool *last_pool;
        struct list_head *worklist;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;

        /*
         * While a work item is PENDING && off queue, a task trying to
         * steal the PENDING will busy-loop waiting for it to either get
         * queued or lose PENDING.  Grabbing PENDING and queueing should
         * happen with IRQ disabled.
         */
        lockdep_assert_irqs_disabled();

        debug_work_activate(work);

        /* if draining, only works from the same workqueue are allowed */
        if (unlikely(wq->flags & __WQ_DRAINING) &&
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
retry:
        if (req_cpu == WORK_CPU_UNBOUND)
                cpu = wq_select_unbound_cpu(raw_smp_processor_id());

        /* pwq which will be used unless @work is executing elsewhere */
        if (!(wq->flags & WQ_UNBOUND))
                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
        else
                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));

        /*
         * If @work was previously on a different pool, it might still be
         * running there, in which case the work needs to be queued on that
         * pool to guarantee non-reentrancy.
         */
        last_pool = get_work_pool(work);
        if (last_pool && last_pool != pwq->pool) {
                struct worker *worker;

                spin_lock(&last_pool->lock);

                worker = find_worker_executing_work(last_pool, work);

                if (worker && worker->current_pwq->wq == wq) {
                        pwq = worker->current_pwq;
                } else {
                        /* meh... not running there, queue here */
                        spin_unlock(&last_pool->lock);
                        spin_lock(&pwq->pool->lock);
                }
        } else {
                spin_lock(&pwq->pool->lock);
        }

        /*
         * pwq is determined and locked.  For unbound pools, we could have
         * raced with pwq release and it could already be dead.  If its
         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
         * without another pwq replacing it in the numa_pwq_tbl or while
         * work items are executing on it, so the retrying is guaranteed to
         * make forward-progress.
         */
        if (unlikely(!pwq->refcnt)) {
                if (wq->flags & WQ_UNBOUND) {
                        spin_unlock(&pwq->pool->lock);
                        cpu_relax();
                        goto retry;
                }
                /* oops */
                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
                          wq->name, cpu);
        }

        /* pwq determined, queue */
        trace_workqueue_queue_work(req_cpu, pwq, work);

        if (WARN_ON(!list_empty(&work->entry))) {
                spin_unlock(&pwq->pool->lock);
                return;
        }

        pwq->nr_in_flight[pwq->work_color]++;
        work_flags = work_color_to_flags(pwq->work_color);

        if (likely(pwq->nr_active < pwq->max_active)) {
                trace_workqueue_activate_work(work);
                pwq->nr_active++;
                worklist = &pwq->pool->worklist;
                if (list_empty(worklist))
                        pwq->pool->watchdog_ts = jiffies;
        } else {
                work_flags |= WORK_STRUCT_DELAYED;
                worklist = &pwq->delayed_works;
        }

        insert_work(pwq, work, worklist, work_flags);

        spin_unlock(&pwq->pool->lock);
}

0fcb78c2 1467/**
c1a220e7
ZR
1468 * queue_work_on - queue work on specific cpu
1469 * @cpu: CPU number to execute work on
0fcb78c2
REB
1470 * @wq: workqueue to use
1471 * @work: work to queue
1472 *
c1a220e7
ZR
1473 * We queue the work to a specific CPU, the caller must ensure it
1474 * can't go away.
d185af30
YB
1475 *
1476 * Return: %false if @work was already on a queue, %true otherwise.
1da177e4 1477 */
d4283e93
TH
1478bool queue_work_on(int cpu, struct workqueue_struct *wq,
1479 struct work_struct *work)
1da177e4 1480{
d4283e93 1481 bool ret = false;
8930caba 1482 unsigned long flags;
ef1ca236 1483
8930caba 1484 local_irq_save(flags);
c1a220e7 1485
22df02bb 1486 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
4690c4ab 1487 __queue_work(cpu, wq, work);
d4283e93 1488 ret = true;
c1a220e7 1489 }
ef1ca236 1490
8930caba 1491 local_irq_restore(flags);
1da177e4
LT
1492 return ret;
1493}
ad7b1f84 1494EXPORT_SYMBOL(queue_work_on);
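
/*
 * Usage sketch (illustrative only; my_work_fn, my_work and kick_cpu1 are
 * hypothetical names): queue a work item on a specific CPU. The caller pins
 * CPU hotplug with get_online_cpus() so the target CPU can't go away, as
 * required above.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("ran on cpu%d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	static void kick_cpu1(void)
 *	{
 *		get_online_cpus();
 *		if (cpu_online(1))
 *			queue_work_on(1, system_wq, &my_work);
 *		put_online_cpus();
 *	}
 */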
1da177e4 1495
8c20feb6 1496void delayed_work_timer_fn(struct timer_list *t)
1da177e4 1497{
8c20feb6 1498 struct delayed_work *dwork = from_timer(dwork, t, timer);
1da177e4 1499
e0aecdd8 1500 /* should have been called from irqsafe timer with irq already off */
60c057bc 1501 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1da177e4 1502}
1438ade5 1503EXPORT_SYMBOL(delayed_work_timer_fn);
1da177e4 1504
7beb2edf
TH
1505static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1506 struct delayed_work *dwork, unsigned long delay)
1da177e4 1507{
7beb2edf
TH
1508 struct timer_list *timer = &dwork->timer;
1509 struct work_struct *work = &dwork->work;
7beb2edf 1510
637fdbae 1511 WARN_ON_ONCE(!wq);
8c20feb6 1512 WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
fc4b514f
TH
1513 WARN_ON_ONCE(timer_pending(timer));
1514 WARN_ON_ONCE(!list_empty(&work->entry));
7beb2edf 1515
8852aac2
TH
1516 /*
1517 * If @delay is 0, queue @dwork->work immediately. This is for
1518 * both optimization and correctness. The earliest @timer can
1519 * expire is on the closest next tick, and delayed_work users depend
1520 * on there being no such delay when @delay is 0.
1521 */
1522 if (!delay) {
1523 __queue_work(cpu, wq, &dwork->work);
1524 return;
1525 }
1526
60c057bc 1527 dwork->wq = wq;
1265057f 1528 dwork->cpu = cpu;
7beb2edf
TH
1529 timer->expires = jiffies + delay;
1530
041bd12e
TH
1531 if (unlikely(cpu != WORK_CPU_UNBOUND))
1532 add_timer_on(timer, cpu);
1533 else
1534 add_timer(timer);
1da177e4
LT
1535}
1536
0fcb78c2
REB
1537/**
1538 * queue_delayed_work_on - queue work on specific CPU after delay
1539 * @cpu: CPU number to execute work on
1540 * @wq: workqueue to use
af9997e4 1541 * @dwork: work to queue
0fcb78c2
REB
1542 * @delay: number of jiffies to wait before queueing
1543 *
d185af30 1544 * Return: %false if @work was already on a queue, %true otherwise. If
715f1300
TH
1545 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1546 * execution.
0fcb78c2 1547 */
d4283e93
TH
1548bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1549 struct delayed_work *dwork, unsigned long delay)
7a6bc1cd 1550{
52bad64d 1551 struct work_struct *work = &dwork->work;
d4283e93 1552 bool ret = false;
8930caba 1553 unsigned long flags;
7a6bc1cd 1554
8930caba
TH
1555 /* read the comment in __queue_work() */
1556 local_irq_save(flags);
7a6bc1cd 1557
22df02bb 1558 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
7beb2edf 1559 __queue_delayed_work(cpu, wq, dwork, delay);
d4283e93 1560 ret = true;
7a6bc1cd 1561 }
8a3e77cc 1562
8930caba 1563 local_irq_restore(flags);
7a6bc1cd
VP
1564 return ret;
1565}
ad7b1f84 1566EXPORT_SYMBOL(queue_delayed_work_on);
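
/*
 * Usage sketch (illustrative only; my_timeout_fn and my_dwork are
 * hypothetical): arm a delayed work to run on the local CPU in 100ms.
 *
 *	static void my_timeout_fn(struct work_struct *work)
 *	{
 *		pr_info("timeout fired\n");
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work_on(raw_smp_processor_id(), system_wq, &my_dwork,
 *			      msecs_to_jiffies(100));
 */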
c7fc77f7 1567
8376fe22
TH
1568/**
1569 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1570 * @cpu: CPU number to execute work on
1571 * @wq: workqueue to use
1572 * @dwork: work to queue
1573 * @delay: number of jiffies to wait before queueing
1574 *
1575 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1576 * modify @dwork's timer so that it expires after @delay. If @delay is
1577 * zero, @work is guaranteed to be scheduled immediately regardless of its
1578 * current state.
1579 *
d185af30 1580 * Return: %false if @dwork was idle and queued, %true if @dwork was
8376fe22
TH
1581 * pending and its timer was modified.
1582 *
e0aecdd8 1583 * This function is safe to call from any context including IRQ handler.
8376fe22
TH
1584 * See try_to_grab_pending() for details.
1585 */
1586bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1587 struct delayed_work *dwork, unsigned long delay)
1588{
1589 unsigned long flags;
1590 int ret;
c7fc77f7 1591
8376fe22
TH
1592 do {
1593 ret = try_to_grab_pending(&dwork->work, true, &flags);
1594 } while (unlikely(ret == -EAGAIN));
63bc0362 1595
8376fe22
TH
1596 if (likely(ret >= 0)) {
1597 __queue_delayed_work(cpu, wq, dwork, delay);
1598 local_irq_restore(flags);
7a6bc1cd 1599 }
8376fe22
TH
1600
1601 /* -ENOENT from try_to_grab_pending() becomes %true */
7a6bc1cd
VP
1602 return ret;
1603}
8376fe22
TH
1604EXPORT_SYMBOL_GPL(mod_delayed_work_on);
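
/*
 * Usage sketch (illustrative only; flush_fn, flush_dwork and note_activity
 * are hypothetical): mod_delayed_work_on() as a "push the deadline back"
 * primitive. Each call restarts the timeout, so flush_fn runs only after a
 * full second of quiet; safe from IRQ context as noted above.
 *
 *	static void flush_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);
 *
 *	void note_activity(void)
 *	{
 *		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
 *				    &flush_dwork, HZ);
 *	}
 */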
1605
c8e55f36
TH
1606/**
1607 * worker_enter_idle - enter idle state
1608 * @worker: worker which is entering idle state
1609 *
1610 * @worker is entering idle state. Update stats and idle timer if
1611 * necessary.
1612 *
1613 * LOCKING:
d565ed63 1614 * spin_lock_irq(pool->lock).
c8e55f36
TH
1615 */
1616static void worker_enter_idle(struct worker *worker)
1da177e4 1617{
bd7bdd43 1618 struct worker_pool *pool = worker->pool;
c8e55f36 1619
6183c009
TH
1620 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1621 WARN_ON_ONCE(!list_empty(&worker->entry) &&
1622 (worker->hentry.next || worker->hentry.pprev)))
1623 return;
c8e55f36 1624
051e1850 1625 /* can't use worker_set_flags(), also called from create_worker() */
cb444766 1626 worker->flags |= WORKER_IDLE;
bd7bdd43 1627 pool->nr_idle++;
e22bee78 1628 worker->last_active = jiffies;
c8e55f36
TH
1629
1630 /* idle_list is LIFO */
bd7bdd43 1631 list_add(&worker->entry, &pool->idle_list);
db7bccf4 1632
628c78e7
TH
1633 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1634 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
cb444766 1635
544ecf31 1636 /*
706026c2 1637 * Sanity check nr_running. Because wq_unbind_fn() releases
d565ed63 1638 * pool->lock between setting %WORKER_UNBOUND and zapping
628c78e7
TH
1639 * nr_running, the warning may trigger spuriously. Check iff
1640 * unbind is not in progress.
544ecf31 1641 */
24647570 1642 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
bd7bdd43 1643 pool->nr_workers == pool->nr_idle &&
e19e397a 1644 atomic_read(&pool->nr_running));
c8e55f36
TH
1645}
1646
1647/**
1648 * worker_leave_idle - leave idle state
1649 * @worker: worker which is leaving idle state
1650 *
1651 * @worker is leaving idle state. Update stats.
1652 *
1653 * LOCKING:
d565ed63 1654 * spin_lock_irq(pool->lock).
c8e55f36
TH
1655 */
1656static void worker_leave_idle(struct worker *worker)
1657{
bd7bdd43 1658 struct worker_pool *pool = worker->pool;
c8e55f36 1659
6183c009
TH
1660 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1661 return;
d302f017 1662 worker_clr_flags(worker, WORKER_IDLE);
bd7bdd43 1663 pool->nr_idle--;
c8e55f36
TH
1664 list_del_init(&worker->entry);
1665}
1666
f7537df5 1667static struct worker *alloc_worker(int node)
c34056a3
TH
1668{
1669 struct worker *worker;
1670
f7537df5 1671 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
c8e55f36
TH
1672 if (worker) {
1673 INIT_LIST_HEAD(&worker->entry);
affee4b2 1674 INIT_LIST_HEAD(&worker->scheduled);
da028469 1675 INIT_LIST_HEAD(&worker->node);
e22bee78
TH
1676 /* on creation a worker is in !idle && prep state */
1677 worker->flags = WORKER_PREP;
c8e55f36 1678 }
c34056a3
TH
1679 return worker;
1680}
1681
4736cbf7
LJ
1682/**
1683 * worker_attach_to_pool() - attach a worker to a pool
1684 * @worker: worker to be attached
1685 * @pool: the target pool
1686 *
1687 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
1688 * cpu-binding of @worker are kept coordinated with the pool across
1689 * cpu-[un]hotplugs.
1690 */
1691static void worker_attach_to_pool(struct worker *worker,
1692 struct worker_pool *pool)
1693{
1694 mutex_lock(&pool->attach_mutex);
1695
1696 /*
1697 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1698 * online CPUs. It'll be re-applied when any of the CPUs come up.
1699 */
1700 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1701
1702 /*
1703 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1704 * stable across this function. See the comments above the
1705 * flag definition for details.
1706 */
1707 if (pool->flags & POOL_DISASSOCIATED)
1708 worker->flags |= WORKER_UNBOUND;
1709
1710 list_add_tail(&worker->node, &pool->workers);
1711
1712 mutex_unlock(&pool->attach_mutex);
1713}
1714
60f5a4bc
LJ
1715/**
1716 * worker_detach_from_pool() - detach a worker from its pool
1717 * @worker: worker which is attached to its pool
1718 * @pool: the pool @worker is attached to
1719 *
4736cbf7
LJ
1720 * Undo the attaching which had been done in worker_attach_to_pool(). The
1721 * caller worker shouldn't access the pool after detaching unless it holds
1722 * another reference to the pool.
60f5a4bc
LJ
1723 */
1724static void worker_detach_from_pool(struct worker *worker,
1725 struct worker_pool *pool)
1726{
1727 struct completion *detach_completion = NULL;
1728
92f9c5c4 1729 mutex_lock(&pool->attach_mutex);
da028469
LJ
1730 list_del(&worker->node);
1731 if (list_empty(&pool->workers))
60f5a4bc 1732 detach_completion = pool->detach_completion;
92f9c5c4 1733 mutex_unlock(&pool->attach_mutex);
60f5a4bc 1734
b62c0751
LJ
1735 /* clear leftover flags without pool->lock after it is detached */
1736 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1737
60f5a4bc
LJ
1738 if (detach_completion)
1739 complete(detach_completion);
1740}
1741
c34056a3
TH
1742/**
1743 * create_worker - create a new workqueue worker
63d95a91 1744 * @pool: pool the new worker will belong to
c34056a3 1745 *
051e1850 1746 * Create and start a new worker which is attached to @pool.
c34056a3
TH
1747 *
1748 * CONTEXT:
1749 * Might sleep. Does GFP_KERNEL allocations.
1750 *
d185af30 1751 * Return:
c34056a3
TH
1752 * Pointer to the newly created worker.
1753 */
bc2ae0f5 1754static struct worker *create_worker(struct worker_pool *pool)
c34056a3 1755{
c34056a3 1756 struct worker *worker = NULL;
f3421797 1757 int id = -1;
e3c916a4 1758 char id_buf[16];
c34056a3 1759
7cda9aae
LJ
1760 /* ID is needed to determine kthread name */
1761 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
822d8405
TH
1762 if (id < 0)
1763 goto fail;
c34056a3 1764
f7537df5 1765 worker = alloc_worker(pool->node);
c34056a3
TH
1766 if (!worker)
1767 goto fail;
1768
bd7bdd43 1769 worker->pool = pool;
c34056a3
TH
1770 worker->id = id;
1771
29c91e99 1772 if (pool->cpu >= 0)
e3c916a4
TH
1773 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1774 pool->attrs->nice < 0 ? "H" : "");
f3421797 1775 else
e3c916a4
TH
1776 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1777
f3f90ad4 1778 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
e3c916a4 1779 "kworker/%s", id_buf);
c34056a3
TH
1780 if (IS_ERR(worker->task))
1781 goto fail;
1782
91151228 1783 set_user_nice(worker->task, pool->attrs->nice);
25834c73 1784 kthread_bind_mask(worker->task, pool->attrs->cpumask);
91151228 1785
da028469 1786 /* successful, attach the worker to the pool */
4736cbf7 1787 worker_attach_to_pool(worker, pool);
822d8405 1788
051e1850
LJ
1789 /* start the newly created worker */
1790 spin_lock_irq(&pool->lock);
1791 worker->pool->nr_workers++;
1792 worker_enter_idle(worker);
1793 wake_up_process(worker->task);
1794 spin_unlock_irq(&pool->lock);
1795
c34056a3 1796 return worker;
822d8405 1797
c34056a3 1798fail:
9625ab17 1799 if (id >= 0)
7cda9aae 1800 ida_simple_remove(&pool->worker_ida, id);
c34056a3
TH
1801 kfree(worker);
1802 return NULL;
1803}
1804
c34056a3
TH
1805/**
1806 * destroy_worker - destroy a workqueue worker
1807 * @worker: worker to be destroyed
1808 *
73eb7fe7
LJ
1809 * Destroy @worker and adjust @pool stats accordingly. The worker should
1810 * be idle.
c8e55f36
TH
1811 *
1812 * CONTEXT:
60f5a4bc 1813 * spin_lock_irq(pool->lock).
c34056a3
TH
1814 */
1815static void destroy_worker(struct worker *worker)
1816{
bd7bdd43 1817 struct worker_pool *pool = worker->pool;
c34056a3 1818
cd549687
TH
1819 lockdep_assert_held(&pool->lock);
1820
c34056a3 1821 /* sanity check frenzy */
6183c009 1822 if (WARN_ON(worker->current_work) ||
73eb7fe7
LJ
1823 WARN_ON(!list_empty(&worker->scheduled)) ||
1824 WARN_ON(!(worker->flags & WORKER_IDLE)))
6183c009 1825 return;
c34056a3 1826
73eb7fe7
LJ
1827 pool->nr_workers--;
1828 pool->nr_idle--;
5bdfff96 1829
c8e55f36 1830 list_del_init(&worker->entry);
cb444766 1831 worker->flags |= WORKER_DIE;
60f5a4bc 1832 wake_up_process(worker->task);
c34056a3
TH
1833}
1834
32a6c723 1835static void idle_worker_timeout(struct timer_list *t)
e22bee78 1836{
32a6c723 1837 struct worker_pool *pool = from_timer(pool, t, idle_timer);
e22bee78 1838
d565ed63 1839 spin_lock_irq(&pool->lock);
e22bee78 1840
3347fc9f 1841 while (too_many_workers(pool)) {
e22bee78
TH
1842 struct worker *worker;
1843 unsigned long expires;
1844
1845 /* idle_list is kept in LIFO order, check the last one */
63d95a91 1846 worker = list_entry(pool->idle_list.prev, struct worker, entry);
e22bee78
TH
1847 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1848
3347fc9f 1849 if (time_before(jiffies, expires)) {
63d95a91 1850 mod_timer(&pool->idle_timer, expires);
3347fc9f 1851 break;
d5abe669 1852 }
3347fc9f
LJ
1853
1854 destroy_worker(worker);
e22bee78
TH
1855 }
1856
d565ed63 1857 spin_unlock_irq(&pool->lock);
e22bee78 1858}
d5abe669 1859
493a1724 1860static void send_mayday(struct work_struct *work)
e22bee78 1861{
112202d9
TH
1862 struct pool_workqueue *pwq = get_work_pwq(work);
1863 struct workqueue_struct *wq = pwq->wq;
493a1724 1864
2e109a28 1865 lockdep_assert_held(&wq_mayday_lock);
e22bee78 1866
493008a8 1867 if (!wq->rescuer)
493a1724 1868 return;
e22bee78
TH
1869
1870 /* mayday mayday mayday */
493a1724 1871 if (list_empty(&pwq->mayday_node)) {
77668c8b
LJ
1872 /*
1873 * If @pwq is for an unbound wq, its base ref may be put at
1874 * any time due to an attribute change. Pin @pwq until the
1875 * rescuer is done with it.
1876 */
1877 get_pwq(pwq);
493a1724 1878 list_add_tail(&pwq->mayday_node, &wq->maydays);
e22bee78 1879 wake_up_process(wq->rescuer->task);
493a1724 1880 }
e22bee78
TH
1881}
1882
32a6c723 1883static void pool_mayday_timeout(struct timer_list *t)
e22bee78 1884{
32a6c723 1885 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
e22bee78
TH
1886 struct work_struct *work;
1887
b2d82909
TH
1888 spin_lock_irq(&pool->lock);
1889 spin_lock(&wq_mayday_lock); /* for wq->maydays */
e22bee78 1890
63d95a91 1891 if (need_to_create_worker(pool)) {
e22bee78
TH
1892 /*
1893 * We've been trying to create a new worker but
1894 * haven't been successful. We might be hitting an
1895 * allocation deadlock. Send distress signals to
1896 * rescuers.
1897 */
63d95a91 1898 list_for_each_entry(work, &pool->worklist, entry)
e22bee78 1899 send_mayday(work);
1da177e4 1900 }
e22bee78 1901
b2d82909
TH
1902 spin_unlock(&wq_mayday_lock);
1903 spin_unlock_irq(&pool->lock);
e22bee78 1904
63d95a91 1905 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1da177e4
LT
1906}
1907
e22bee78
TH
1908/**
1909 * maybe_create_worker - create a new worker if necessary
63d95a91 1910 * @pool: pool to create a new worker for
e22bee78 1911 *
63d95a91 1912 * Create a new worker for @pool if necessary. @pool is guaranteed to
e22bee78
TH
1913 * have at least one idle worker on return from this function. If
1914 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
63d95a91 1915 * sent to all rescuers with works scheduled on @pool to resolve
e22bee78
TH
1916 * possible allocation deadlock.
1917 *
c5aa87bb
TH
1918 * On return, need_to_create_worker() is guaranteed to be %false and
1919 * may_start_working() %true.
e22bee78
TH
1920 *
1921 * LOCKING:
d565ed63 1922 * spin_lock_irq(pool->lock) which may be released and regrabbed
e22bee78
TH
1923 * multiple times. Does GFP_KERNEL allocations. Called only from
1924 * manager.
e22bee78 1925 */
29187a9e 1926static void maybe_create_worker(struct worker_pool *pool)
d565ed63
TH
1927__releases(&pool->lock)
1928__acquires(&pool->lock)
1da177e4 1929{
e22bee78 1930restart:
d565ed63 1931 spin_unlock_irq(&pool->lock);
9f9c2364 1932
e22bee78 1933 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
63d95a91 1934 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
e22bee78
TH
1935
1936 while (true) {
051e1850 1937 if (create_worker(pool) || !need_to_create_worker(pool))
e22bee78 1938 break;
1da177e4 1939
e212f361 1940 schedule_timeout_interruptible(CREATE_COOLDOWN);
9f9c2364 1941
63d95a91 1942 if (!need_to_create_worker(pool))
e22bee78
TH
1943 break;
1944 }
1945
63d95a91 1946 del_timer_sync(&pool->mayday_timer);
d565ed63 1947 spin_lock_irq(&pool->lock);
051e1850
LJ
1948 /*
1949 * This is necessary even after a new worker was just successfully
1950 * created as @pool->lock was dropped and the new worker might have
1951 * already become busy.
1952 */
63d95a91 1953 if (need_to_create_worker(pool))
e22bee78 1954 goto restart;
e22bee78
TH
1955}
1956
73f53c4a 1957/**
e22bee78
TH
1958 * manage_workers - manage worker pool
1959 * @worker: self
73f53c4a 1960 *
706026c2 1961 * Assume the manager role and manage the worker pool @worker belongs
e22bee78 1962 * to. At any given time, there can be only zero or one manager per
706026c2 1963 * pool. The exclusion is handled automatically by this function.
e22bee78
TH
1964 *
1965 * The caller can safely start processing works on false return. On
1966 * true return, it's guaranteed that need_to_create_worker() is false
1967 * and may_start_working() is true.
73f53c4a
TH
1968 *
1969 * CONTEXT:
d565ed63 1970 * spin_lock_irq(pool->lock) which may be released and regrabbed
e22bee78
TH
1971 * multiple times. Does GFP_KERNEL allocations.
1972 *
d185af30 1973 * Return:
29187a9e
TH
1974 * %false if the pool doesn't need management and the caller can safely
1975 * start processing works, %true if management function was performed and
1976 * the conditions that the caller verified before calling the function may
1977 * no longer be true.
73f53c4a 1978 */
e22bee78 1979static bool manage_workers(struct worker *worker)
73f53c4a 1980{
63d95a91 1981 struct worker_pool *pool = worker->pool;
73f53c4a 1982
692b4825 1983 if (pool->flags & POOL_MANAGER_ACTIVE)
29187a9e 1984 return false;
692b4825
TH
1985
1986 pool->flags |= POOL_MANAGER_ACTIVE;
2607d7a6 1987 pool->manager = worker;
1e19ffc6 1988
29187a9e 1989 maybe_create_worker(pool);
e22bee78 1990
2607d7a6 1991 pool->manager = NULL;
692b4825
TH
1992 pool->flags &= ~POOL_MANAGER_ACTIVE;
1993 wake_up(&wq_manager_wait);
29187a9e 1994 return true;
73f53c4a
TH
1995}
1996
a62428c0
TH
1997/**
1998 * process_one_work - process single work
c34056a3 1999 * @worker: self
a62428c0
TH
2000 * @work: work to process
2001 *
2002 * Process @work. This function contains all the logic necessary to
2003 * process a single work item, including synchronization against and
2004 * interaction with other workers on the same cpu, queueing and
2005 * flushing. As long as the context requirement is met, any worker can
2006 * call this function to process a work item.
2007 *
2008 * CONTEXT:
d565ed63 2009 * spin_lock_irq(pool->lock) which is released and regrabbed.
a62428c0 2010 */
c34056a3 2011static void process_one_work(struct worker *worker, struct work_struct *work)
d565ed63
TH
2012__releases(&pool->lock)
2013__acquires(&pool->lock)
a62428c0 2014{
112202d9 2015 struct pool_workqueue *pwq = get_work_pwq(work);
bd7bdd43 2016 struct worker_pool *pool = worker->pool;
112202d9 2017 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
73f53c4a 2018 int work_color;
7e11629d 2019 struct worker *collision;
a62428c0
TH
2020#ifdef CONFIG_LOCKDEP
2021 /*
2022 * It is permissible to free the struct work_struct from
2023 * inside the function that is called from it; we need to take
2024 * that into account for lockdep too. To avoid bogus "held
2025 * lock freed" warnings as well as problems when looking into
2026 * work->lockdep_map, make a copy and use that here.
2027 */
4d82a1de
PZ
2028 struct lockdep_map lockdep_map;
2029
2030 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
a62428c0 2031#endif
807407c0 2032 /* ensure we're on the correct CPU */
85327af6 2033 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
ec22ca5e 2034 raw_smp_processor_id() != pool->cpu);
25511a47 2035
7e11629d
TH
2036 /*
2037 * A single work shouldn't be executed concurrently by
2038 * multiple workers on a single cpu. Check whether anyone is
2039 * already processing the work. If so, defer the work to the
2040 * currently executing one.
2041 */
c9e7cf27 2042 collision = find_worker_executing_work(pool, work);
7e11629d
TH
2043 if (unlikely(collision)) {
2044 move_linked_works(work, &collision->scheduled, NULL);
2045 return;
2046 }
2047
8930caba 2048 /* claim and dequeue */
a62428c0 2049 debug_work_deactivate(work);
c9e7cf27 2050 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
c34056a3 2051 worker->current_work = work;
a2c1c57b 2052 worker->current_func = work->func;
112202d9 2053 worker->current_pwq = pwq;
73f53c4a 2054 work_color = get_work_color(work);
7a22ad75 2055
a62428c0
TH
2056 list_del_init(&work->entry);
2057
fb0e7beb 2058 /*
228f1d00
LJ
2059 * CPU intensive works don't participate in concurrency management.
2060 * They're the scheduler's responsibility. This takes @worker out
2061 * of concurrency management and the next code block will chain
2062 * execution of the pending work items.
fb0e7beb
TH
2063 */
2064 if (unlikely(cpu_intensive))
228f1d00 2065 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
fb0e7beb 2066
974271c4 2067 /*
a489a03e
LJ
2068 * Wake up another worker if necessary. The condition is always
2069 * false for normal per-cpu workers since nr_running would always
2070 * be >= 1 at this point. This is used to chain execution of the
2071 * pending work items for WORKER_NOT_RUNNING workers such as the
228f1d00 2072 * UNBOUND and CPU_INTENSIVE ones.
974271c4 2073 */
a489a03e 2074 if (need_more_worker(pool))
63d95a91 2075 wake_up_worker(pool);
974271c4 2076
8930caba 2077 /*
7c3eed5c 2078 * Record the last pool and clear PENDING which should be the last
d565ed63 2079 * update to @work. Also, do this inside @pool->lock so that
23657bb1
TH
2080 * PENDING and queued state changes happen together while IRQ is
2081 * disabled.
8930caba 2082 */
7c3eed5c 2083 set_work_pool_and_clear_pending(work, pool->id);
a62428c0 2084
d565ed63 2085 spin_unlock_irq(&pool->lock);
a62428c0 2086
a1d14934 2087 lock_map_acquire(&pwq->wq->lockdep_map);
a62428c0 2088 lock_map_acquire(&lockdep_map);
e6f3faa7 2089 /*
f52be570
PZ
2090 * Strictly speaking we should mark the invariant state without holding
2091 * any locks, that is, before these two lock_map_acquire()'s.
e6f3faa7
PZ
2092 *
2093 * However, that would result in:
2094 *
2095 * A(W1)
2096 * WFC(C)
2097 * A(W1)
2098 * C(C)
2099 *
2100 * Which would create W1->C->W1 dependencies, even though there is no
2101 * actual deadlock possible. There are two solutions, using a
2102 * read-recursive acquire on the work(queue) 'locks', but this will then
f52be570 2103 * hit the lockdep limitation on recursive locks, or simply discard
e6f3faa7
PZ
2104 * these locks.
2105 *
2106 * AFAICT there is no possible deadlock scenario between the
2107 * flush_work() and complete() primitives (except for single-threaded
2108 * workqueues), so hiding them isn't a problem.
2109 */
f52be570 2110 lockdep_invariant_state(true);
e36c886a 2111 trace_workqueue_execute_start(work);
a2c1c57b 2112 worker->current_func(work);
e36c886a
AV
2113 /*
2114 * While we must be careful to not use "work" after this, the trace
2115 * point will only record its address.
2116 */
2117 trace_workqueue_execute_end(work);
a62428c0 2118 lock_map_release(&lockdep_map);
112202d9 2119 lock_map_release(&pwq->wq->lockdep_map);
a62428c0
TH
2120
2121 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
044c782c
VI
2122 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2123 " last function: %pf\n",
a2c1c57b
TH
2124 current->comm, preempt_count(), task_pid_nr(current),
2125 worker->current_func);
a62428c0
TH
2126 debug_show_held_locks(current);
2127 dump_stack();
2128 }
2129
b22ce278
TH
2130 /*
2131 * The following prevents a kworker from hogging CPU on !PREEMPT
2132 * kernels, where a requeueing work item waiting for something to
2133 * happen could deadlock with stop_machine as such a work item could
2134 * indefinitely requeue itself while all other CPUs are trapped in
789cbbec
JL
2135 * stop_machine. At the same time, report a quiescent RCU state so
2136 * the same condition doesn't freeze RCU.
b22ce278 2137 */
3e28e377 2138 cond_resched_rcu_qs();
b22ce278 2139
d565ed63 2140 spin_lock_irq(&pool->lock);
a62428c0 2141
fb0e7beb
TH
2142 /* clear cpu intensive status */
2143 if (unlikely(cpu_intensive))
2144 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2145
a62428c0 2146 /* we're done with it, release */
42f8570f 2147 hash_del(&worker->hentry);
c34056a3 2148 worker->current_work = NULL;
a2c1c57b 2149 worker->current_func = NULL;
112202d9 2150 worker->current_pwq = NULL;
3d1cb205 2151 worker->desc_valid = false;
112202d9 2152 pwq_dec_nr_in_flight(pwq, work_color);
a62428c0
TH
2153}
2154
affee4b2
TH
2155/**
2156 * process_scheduled_works - process scheduled works
2157 * @worker: self
2158 *
2159 * Process all scheduled works. Please note that the scheduled list
2160 * may change while processing a work, so this function repeatedly
2161 * fetches a work from the top and executes it.
2162 *
2163 * CONTEXT:
d565ed63 2164 * spin_lock_irq(pool->lock) which may be released and regrabbed
affee4b2
TH
2165 * multiple times.
2166 */
2167static void process_scheduled_works(struct worker *worker)
1da177e4 2168{
affee4b2
TH
2169 while (!list_empty(&worker->scheduled)) {
2170 struct work_struct *work = list_first_entry(&worker->scheduled,
1da177e4 2171 struct work_struct, entry);
c34056a3 2172 process_one_work(worker, work);
1da177e4 2173 }
1da177e4
LT
2174}
2175
4690c4ab
TH
2176/**
2177 * worker_thread - the worker thread function
c34056a3 2178 * @__worker: self
4690c4ab 2179 *
c5aa87bb
TH
2180 * The worker thread function. All workers belong to a worker_pool -
2181 * either a per-cpu one or dynamic unbound one. These workers process all
2182 * work items regardless of their specific target workqueue. The only
2183 * exception is work items which belong to workqueues with a rescuer which
2184 * will be explained in rescuer_thread().
d185af30
YB
2185 *
2186 * Return: 0
4690c4ab 2187 */
c34056a3 2188static int worker_thread(void *__worker)
1da177e4 2189{
c34056a3 2190 struct worker *worker = __worker;
bd7bdd43 2191 struct worker_pool *pool = worker->pool;
1da177e4 2192
e22bee78
TH
2193 /* tell the scheduler that this is a workqueue worker */
2194 worker->task->flags |= PF_WQ_WORKER;
c8e55f36 2195woke_up:
d565ed63 2196 spin_lock_irq(&pool->lock);
1da177e4 2197
a9ab775b
TH
2198 /* am I supposed to die? */
2199 if (unlikely(worker->flags & WORKER_DIE)) {
d565ed63 2200 spin_unlock_irq(&pool->lock);
a9ab775b
TH
2201 WARN_ON_ONCE(!list_empty(&worker->entry));
2202 worker->task->flags &= ~PF_WQ_WORKER;
60f5a4bc
LJ
2203
2204 set_task_comm(worker->task, "kworker/dying");
7cda9aae 2205 ida_simple_remove(&pool->worker_ida, worker->id);
60f5a4bc
LJ
2206 worker_detach_from_pool(worker, pool);
2207 kfree(worker);
a9ab775b 2208 return 0;
c8e55f36 2209 }
affee4b2 2210
c8e55f36 2211 worker_leave_idle(worker);
db7bccf4 2212recheck:
e22bee78 2213 /* no more worker necessary? */
63d95a91 2214 if (!need_more_worker(pool))
e22bee78
TH
2215 goto sleep;
2216
2217 /* do we need to manage? */
63d95a91 2218 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
e22bee78
TH
2219 goto recheck;
2220
c8e55f36
TH
2221 /*
2222 * ->scheduled list can only be filled while a worker is
2223 * preparing to process a work or actually processing it.
2224 * Make sure nobody diddled with it while I was sleeping.
2225 */
6183c009 2226 WARN_ON_ONCE(!list_empty(&worker->scheduled));
c8e55f36 2227
e22bee78 2228 /*
a9ab775b
TH
2229 * Finish PREP stage. We're guaranteed to have at least one idle
2230 * worker or that someone else has already assumed the manager
2231 * role. This is where @worker starts participating in concurrency
2232 * management if applicable and concurrency management is restored
2233 * after being rebound. See rebind_workers() for details.
e22bee78 2234 */
a9ab775b 2235 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
e22bee78
TH
2236
2237 do {
c8e55f36 2238 struct work_struct *work =
bd7bdd43 2239 list_first_entry(&pool->worklist,
c8e55f36
TH
2240 struct work_struct, entry);
2241
82607adc
TH
2242 pool->watchdog_ts = jiffies;
2243
c8e55f36
TH
2244 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2245 /* optimization path, not strictly necessary */
2246 process_one_work(worker, work);
2247 if (unlikely(!list_empty(&worker->scheduled)))
affee4b2 2248 process_scheduled_works(worker);
c8e55f36
TH
2249 } else {
2250 move_linked_works(work, &worker->scheduled, NULL);
2251 process_scheduled_works(worker);
affee4b2 2252 }
63d95a91 2253 } while (keep_working(pool));
e22bee78 2254
228f1d00 2255 worker_set_flags(worker, WORKER_PREP);
d313dd85 2256sleep:
c8e55f36 2257 /*
d565ed63
TH
2258 * pool->lock is held and there's no work to process and no need to
2259 * manage, sleep. Workers are woken up only while holding
2260 * pool->lock or from local cpu, so setting the current state
2261 * before releasing pool->lock is enough to prevent losing any
2262 * event.
c8e55f36
TH
2263 */
2264 worker_enter_idle(worker);
c5a94a61 2265 __set_current_state(TASK_IDLE);
d565ed63 2266 spin_unlock_irq(&pool->lock);
c8e55f36
TH
2267 schedule();
2268 goto woke_up;
1da177e4
LT
2269}
2270
e22bee78
TH
2271/**
2272 * rescuer_thread - the rescuer thread function
111c225a 2273 * @__rescuer: self
e22bee78
TH
2274 *
2275 * Workqueue rescuer thread function. There's one rescuer for each
493008a8 2276 * workqueue which has WQ_MEM_RECLAIM set.
e22bee78 2277 *
706026c2 2278 * Regular work processing on a pool may block trying to create a new
e22bee78
TH
2279 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2280 * developing into a deadlock if some works currently on the same queue
2281 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2282 * the problem the rescuer solves.
2283 *
706026c2
TH
2284 * When such condition is possible, the pool summons rescuers of all
2285 * workqueues which have works queued on the pool and let them process
e22bee78
TH
2286 * those works so that forward progress can be guaranteed.
2287 *
2288 * This should happen rarely.
d185af30
YB
2289 *
2290 * Return: 0
e22bee78 2291 */
111c225a 2292static int rescuer_thread(void *__rescuer)
e22bee78 2293{
111c225a
TH
2294 struct worker *rescuer = __rescuer;
2295 struct workqueue_struct *wq = rescuer->rescue_wq;
e22bee78 2296 struct list_head *scheduled = &rescuer->scheduled;
4d595b86 2297 bool should_stop;
e22bee78
TH
2298
2299 set_user_nice(current, RESCUER_NICE_LEVEL);
111c225a
TH
2300
2301 /*
2302 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2303 * doesn't participate in concurrency management.
2304 */
2305 rescuer->task->flags |= PF_WQ_WORKER;
e22bee78 2306repeat:
c5a94a61 2307 set_current_state(TASK_IDLE);
e22bee78 2308
4d595b86
LJ
2309 /*
2310 * By the time the rescuer is requested to stop, the workqueue
2311 * shouldn't have any work pending, but @wq->maydays may still have
2312 * pwq(s) queued. This can happen when non-rescuer workers consume
2313 * all the work items before the rescuer gets to them. Go through
2314 * @wq->maydays processing before acting on should_stop so that the
2315 * list is always empty on exit.
2316 */
2317 should_stop = kthread_should_stop();
e22bee78 2318
493a1724 2319 /* see whether any pwq is asking for help */
2e109a28 2320 spin_lock_irq(&wq_mayday_lock);
493a1724
TH
2321
2322 while (!list_empty(&wq->maydays)) {
2323 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2324 struct pool_workqueue, mayday_node);
112202d9 2325 struct worker_pool *pool = pwq->pool;
e22bee78 2326 struct work_struct *work, *n;
82607adc 2327 bool first = true;
e22bee78
TH
2328
2329 __set_current_state(TASK_RUNNING);
493a1724
TH
2330 list_del_init(&pwq->mayday_node);
2331
2e109a28 2332 spin_unlock_irq(&wq_mayday_lock);
e22bee78 2333
51697d39
LJ
2334 worker_attach_to_pool(rescuer, pool);
2335
2336 spin_lock_irq(&pool->lock);
b3104104 2337 rescuer->pool = pool;
e22bee78
TH
2338
2339 /*
2340 * Slurp in all works issued via this workqueue and
2341 * process'em.
2342 */
0479c8c5 2343 WARN_ON_ONCE(!list_empty(scheduled));
82607adc
TH
2344 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2345 if (get_work_pwq(work) == pwq) {
2346 if (first)
2347 pool->watchdog_ts = jiffies;
e22bee78 2348 move_linked_works(work, scheduled, &n);
82607adc
TH
2349 }
2350 first = false;
2351 }
e22bee78 2352
008847f6
N
2353 if (!list_empty(scheduled)) {
2354 process_scheduled_works(rescuer);
2355
2356 /*
2357 * The above execution of rescued work items could
2358 * have created more to rescue through
2359 * pwq_activate_first_delayed() or chained
2360 * queueing. Let's put @pwq back on mayday list so
2361 * that such back-to-back work items, which may be
2362 * being used to relieve memory pressure, don't
2363 * incur MAYDAY_INTERVAL delay in between.
2364 */
2365 if (need_to_create_worker(pool)) {
2366 spin_lock(&wq_mayday_lock);
2367 get_pwq(pwq);
2368 list_move_tail(&pwq->mayday_node, &wq->maydays);
2369 spin_unlock(&wq_mayday_lock);
2370 }
2371 }
7576958a 2372
77668c8b
LJ
2373 /*
2374 * Put the reference grabbed by send_mayday(). @pool won't
13b1d625 2375 * go away while we're still attached to it.
77668c8b
LJ
2376 */
2377 put_pwq(pwq);
2378
7576958a 2379 /*
d8ca83e6 2380 * Leave this pool. If need_more_worker() is %true, notify a
7576958a
TH
2381 * regular worker; otherwise, we end up with 0 concurrency
2382 * and stalling the execution.
2383 */
d8ca83e6 2384 if (need_more_worker(pool))
63d95a91 2385 wake_up_worker(pool);
7576958a 2386
b3104104 2387 rescuer->pool = NULL;
13b1d625
LJ
2388 spin_unlock_irq(&pool->lock);
2389
2390 worker_detach_from_pool(rescuer, pool);
2391
2392 spin_lock_irq(&wq_mayday_lock);
e22bee78
TH
2393 }
2394
2e109a28 2395 spin_unlock_irq(&wq_mayday_lock);
493a1724 2396
4d595b86
LJ
2397 if (should_stop) {
2398 __set_current_state(TASK_RUNNING);
2399 rescuer->task->flags &= ~PF_WQ_WORKER;
2400 return 0;
2401 }
2402
111c225a
TH
2403 /* rescuers should never participate in concurrency management */
2404 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
e22bee78
TH
2405 schedule();
2406 goto repeat;
1da177e4
LT
2407}
2408
fca839c0
TH
2409/**
2410 * check_flush_dependency - check for flush dependency sanity
2411 * @target_wq: workqueue being flushed
2412 * @target_work: work item being flushed (NULL for workqueue flushes)
2413 *
2414 * %current is trying to flush the whole @target_wq or @target_work on it.
2415 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2416 * reclaiming memory or running on a workqueue which doesn't have
2417 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2418 * a deadlock.
2419 */
2420static void check_flush_dependency(struct workqueue_struct *target_wq,
2421 struct work_struct *target_work)
2422{
2423 work_func_t target_func = target_work ? target_work->func : NULL;
2424 struct worker *worker;
2425
2426 if (target_wq->flags & WQ_MEM_RECLAIM)
2427 return;
2428
2429 worker = current_wq_worker();
2430
2431 WARN_ONCE(current->flags & PF_MEMALLOC,
2432 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2433 current->pid, current->comm, target_wq->name, target_func);
23d11a58
TH
2434 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2435 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
fca839c0
TH
2436 "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2437 worker->current_pwq->wq->name, worker->current_func,
2438 target_wq->name, target_func);
2439}
2440
fc2e4d70
ON
2441struct wq_barrier {
2442 struct work_struct work;
2443 struct completion done;
2607d7a6 2444 struct task_struct *task; /* purely informational */
fc2e4d70
ON
2445};
2446
2447static void wq_barrier_func(struct work_struct *work)
2448{
2449 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2450 complete(&barr->done);
2451}
2452
4690c4ab
TH
2453/**
2454 * insert_wq_barrier - insert a barrier work
112202d9 2455 * @pwq: pwq to insert barrier into
4690c4ab 2456 * @barr: wq_barrier to insert
affee4b2
TH
2457 * @target: target work to attach @barr to
2458 * @worker: worker currently executing @target, NULL if @target is not executing
4690c4ab 2459 *
affee4b2
TH
2460 * @barr is linked to @target such that @barr is completed only after
2461 * @target finishes execution. Please note that the ordering
2462 * guarantee is observed only with respect to @target and on the local
2463 * cpu.
2464 *
2465 * Currently, a queued barrier can't be canceled. This is because
2466 * try_to_grab_pending() can't determine whether the work to be
2467 * grabbed is at the head of the queue and thus can't clear the LINKED
2468 * flag of the previous work, while there must be a valid next work
2469 * after a work with the LINKED flag set.
2470 *
2471 * Note that when @worker is non-NULL, @target may be modified
112202d9 2472 * underneath us, so we can't reliably determine pwq from @target.
4690c4ab
TH
2473 *
2474 * CONTEXT:
d565ed63 2475 * spin_lock_irq(pool->lock).
4690c4ab 2476 */
112202d9 2477static void insert_wq_barrier(struct pool_workqueue *pwq,
affee4b2
TH
2478 struct wq_barrier *barr,
2479 struct work_struct *target, struct worker *worker)
fc2e4d70 2480{
affee4b2
TH
2481 struct list_head *head;
2482 unsigned int linked = 0;
2483
dc186ad7 2484 /*
d565ed63 2485 * debugobject calls are safe here even with pool->lock locked
dc186ad7
TG
2486 * as we know for sure that this will not trigger any of the
2487 * checks and call back into the fixup functions where we
2488 * might deadlock.
2489 */
ca1cab37 2490 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
22df02bb 2491 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
52fa5bc5 2492
fd1a5b04
BP
2493 init_completion_map(&barr->done, &target->lockdep_map);
2494
2607d7a6 2495 barr->task = current;
83c22520 2496
affee4b2
TH
2497 /*
2498 * If @target is currently being executed, schedule the
2499 * barrier to the worker; otherwise, put it after @target.
2500 */
2501 if (worker)
2502 head = worker->scheduled.next;
2503 else {
2504 unsigned long *bits = work_data_bits(target);
2505
2506 head = target->entry.next;
2507 /* there can already be other linked works, inherit and set */
2508 linked = *bits & WORK_STRUCT_LINKED;
2509 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2510 }
2511
dc186ad7 2512 debug_work_activate(&barr->work);
112202d9 2513 insert_work(pwq, &barr->work, head,
affee4b2 2514 work_color_to_flags(WORK_NO_COLOR) | linked);
fc2e4d70
ON
2515}
2516
73f53c4a 2517/**
112202d9 2518 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
73f53c4a
TH
2519 * @wq: workqueue being flushed
2520 * @flush_color: new flush color, < 0 for no-op
2521 * @work_color: new work color, < 0 for no-op
2522 *
112202d9 2523 * Prepare pwqs for workqueue flushing.
73f53c4a 2524 *
112202d9
TH
2525 * If @flush_color is non-negative, flush_color on all pwqs should be
2526 * -1. If no pwq has in-flight commands at the specified color, all
2527 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
2528 * has in flight commands, its pwq->flush_color is set to
2529 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
73f53c4a
TH
2530 * wakeup logic is armed and %true is returned.
2531 *
2532 * The caller should have initialized @wq->first_flusher prior to
2533 * calling this function with non-negative @flush_color. If
2534 * @flush_color is negative, no flush color update is done and %false
2535 * is returned.
2536 *
112202d9 2537 * If @work_color is non-negative, all pwqs should have the same
73f53c4a
TH
2538 * work_color which is previous to @work_color and all will be
2539 * advanced to @work_color.
2540 *
2541 * CONTEXT:
3c25a55d 2542 * mutex_lock(wq->mutex).
73f53c4a 2543 *
d185af30 2544 * Return:
73f53c4a
TH
2545 * %true if @flush_color >= 0 and there's something to flush. %false
2546 * otherwise.
2547 */
112202d9 2548static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
73f53c4a 2549 int flush_color, int work_color)
1da177e4 2550{
73f53c4a 2551 bool wait = false;
49e3cf44 2552 struct pool_workqueue *pwq;
1da177e4 2553
73f53c4a 2554 if (flush_color >= 0) {
6183c009 2555 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
112202d9 2556 atomic_set(&wq->nr_pwqs_to_flush, 1);
1da177e4 2557 }
2355b70f 2558
49e3cf44 2559 for_each_pwq(pwq, wq) {
112202d9 2560 struct worker_pool *pool = pwq->pool;
fc2e4d70 2561
b09f4fd3 2562 spin_lock_irq(&pool->lock);
83c22520 2563
73f53c4a 2564 if (flush_color >= 0) {
6183c009 2565 WARN_ON_ONCE(pwq->flush_color != -1);
fc2e4d70 2566
112202d9
TH
2567 if (pwq->nr_in_flight[flush_color]) {
2568 pwq->flush_color = flush_color;
2569 atomic_inc(&wq->nr_pwqs_to_flush);
73f53c4a
TH
2570 wait = true;
2571 }
2572 }
1da177e4 2573
73f53c4a 2574 if (work_color >= 0) {
6183c009 2575 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
112202d9 2576 pwq->work_color = work_color;
73f53c4a 2577 }
1da177e4 2578
b09f4fd3 2579 spin_unlock_irq(&pool->lock);
1da177e4 2580 }
2355b70f 2581
112202d9 2582 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
73f53c4a 2583 complete(&wq->first_flusher->done);
14441960 2584
73f53c4a 2585 return wait;
1da177e4
LT
2586}
2587
0fcb78c2 2588/**
1da177e4 2589 * flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c2 2590 * @wq: workqueue to flush
1da177e4 2591 *
c5aa87bb
TH
2592 * This function sleeps until all work items which were queued on entry
2593 * have finished execution, but it is not livelocked by new incoming ones.
1da177e4 2594 */
7ad5b3a5 2595void flush_workqueue(struct workqueue_struct *wq)
1da177e4 2596{
73f53c4a
TH
2597 struct wq_flusher this_flusher = {
2598 .list = LIST_HEAD_INIT(this_flusher.list),
2599 .flush_color = -1,
fd1a5b04 2600 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
73f53c4a
TH
2601 };
2602 int next_color;
1da177e4 2603
3347fa09
TH
2604 if (WARN_ON(!wq_online))
2605 return;
2606
3c25a55d 2607 mutex_lock(&wq->mutex);
73f53c4a
TH
2608
2609 /*
2610 * Start-to-wait phase
2611 */
2612 next_color = work_next_color(wq->work_color);
2613
2614 if (next_color != wq->flush_color) {
2615 /*
2616 * Color space is not full. The current work_color
2617 * becomes our flush_color and work_color is advanced
2618 * by one.
2619 */
6183c009 2620 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
73f53c4a
TH
2621 this_flusher.flush_color = wq->work_color;
2622 wq->work_color = next_color;
2623
2624 if (!wq->first_flusher) {
2625 /* no flush in progress, become the first flusher */
6183c009 2626 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
73f53c4a
TH
2627
2628 wq->first_flusher = &this_flusher;
2629
112202d9 2630 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
73f53c4a
TH
2631 wq->work_color)) {
2632 /* nothing to flush, done */
2633 wq->flush_color = next_color;
2634 wq->first_flusher = NULL;
2635 goto out_unlock;
2636 }
2637 } else {
2638 /* wait in queue */
6183c009 2639 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
73f53c4a 2640 list_add_tail(&this_flusher.list, &wq->flusher_queue);
112202d9 2641 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
73f53c4a
TH
2642 }
2643 } else {
2644 /*
2645 * Oops, color space is full, wait on overflow queue.
2646 * The next flush completion will assign us
2647 * flush_color and transfer to flusher_queue.
2648 */
2649 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2650 }
2651
fca839c0
TH
2652 check_flush_dependency(wq, NULL);
2653
3c25a55d 2654 mutex_unlock(&wq->mutex);
73f53c4a
TH
2655
2656 wait_for_completion(&this_flusher.done);
2657
2658 /*
2659 * Wake-up-and-cascade phase
2660 *
2661 * First flushers are responsible for cascading flushes and
2662 * handling overflow. Non-first flushers can simply return.
2663 */
2664 if (wq->first_flusher != &this_flusher)
2665 return;
2666
3c25a55d 2667 mutex_lock(&wq->mutex);
73f53c4a 2668
4ce48b37
TH
2669 /* we might have raced, check again with mutex held */
2670 if (wq->first_flusher != &this_flusher)
2671 goto out_unlock;
2672
73f53c4a
TH
2673 wq->first_flusher = NULL;
2674
6183c009
TH
2675 WARN_ON_ONCE(!list_empty(&this_flusher.list));
2676 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
73f53c4a
TH
2677
2678 while (true) {
2679 struct wq_flusher *next, *tmp;
2680
2681 /* complete all the flushers sharing the current flush color */
2682 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2683 if (next->flush_color != wq->flush_color)
2684 break;
2685 list_del_init(&next->list);
2686 complete(&next->done);
2687 }
2688
6183c009
TH
2689 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2690 wq->flush_color != work_next_color(wq->work_color));
73f53c4a
TH
2691
2692 /* this flush_color is finished, advance by one */
2693 wq->flush_color = work_next_color(wq->flush_color);
2694
2695 /* one color has been freed, handle overflow queue */
2696 if (!list_empty(&wq->flusher_overflow)) {
2697 /*
2698 * Assign the same color to all overflowed
2699 * flushers, advance work_color and append to
2700 * flusher_queue. This is the start-to-wait
2701 * phase for these overflowed flushers.
2702 */
2703 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2704 tmp->flush_color = wq->work_color;
2705
2706 wq->work_color = work_next_color(wq->work_color);
2707
2708 list_splice_tail_init(&wq->flusher_overflow,
2709 &wq->flusher_queue);
112202d9 2710 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
73f53c4a
TH
2711 }
2712
2713 if (list_empty(&wq->flusher_queue)) {
6183c009 2714 WARN_ON_ONCE(wq->flush_color != wq->work_color);
73f53c4a
TH
2715 break;
2716 }
2717
2718 /*
2719 * Need to flush more colors. Make the next flusher
112202d9 2720 * the new first flusher and arm pwqs.
73f53c4a 2721 */
6183c009
TH
2722 WARN_ON_ONCE(wq->flush_color == wq->work_color);
2723 WARN_ON_ONCE(wq->flush_color != next->flush_color);
73f53c4a
TH
2724
2725 list_del_init(&next->list);
2726 wq->first_flusher = next;
2727
112202d9 2728 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
73f53c4a
TH
2729 break;
2730
2731 /*
2732 * Meh... this color is already done, clear first
2733 * flusher and repeat cascading.
2734 */
2735 wq->first_flusher = NULL;
2736 }
2737
2738out_unlock:
3c25a55d 2739 mutex_unlock(&wq->mutex);
1da177e4 2740}
1dadafa8 2741EXPORT_SYMBOL(flush_workqueue);
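
/*
 * Usage sketch (illustrative only; my_wq and my_work are hypothetical and
 * assumed to come from alloc_workqueue()/INIT_WORK()): flush a dedicated
 * workqueue so that everything queued before this point has finished.
 *
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);
 *	pr_debug("all work queued above has completed\n");
 */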
1da177e4 2742
9c5a2ba7
TH
2743/**
2744 * drain_workqueue - drain a workqueue
2745 * @wq: workqueue to drain
2746 *
2747 * Wait until the workqueue becomes empty. While draining is in progress,
2748 * only chain queueing is allowed. IOW, only currently pending or running
2749 * work items on @wq can queue further work items on it. @wq is flushed
b749b1b6 2750 * repeatedly until it becomes empty. The number of flushes is determined
9c5a2ba7
TH
2751 * by the depth of chaining and should be relatively short. Whine if it
2752 * takes too long.
2753 */
2754void drain_workqueue(struct workqueue_struct *wq)
2755{
2756 unsigned int flush_cnt = 0;
49e3cf44 2757 struct pool_workqueue *pwq;
9c5a2ba7
TH
2758
2759 /*
2760 * __queue_work() needs to test whether there are drainers, is much
2761 * hotter than drain_workqueue() and already looks at @wq->flags.
618b01eb 2762 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
9c5a2ba7 2763 */
87fc741e 2764 mutex_lock(&wq->mutex);
9c5a2ba7 2765 if (!wq->nr_drainers++)
618b01eb 2766 wq->flags |= __WQ_DRAINING;
87fc741e 2767 mutex_unlock(&wq->mutex);
9c5a2ba7
TH
2768reflush:
2769 flush_workqueue(wq);
2770
b09f4fd3 2771 mutex_lock(&wq->mutex);
76af4d93 2772
49e3cf44 2773 for_each_pwq(pwq, wq) {
fa2563e4 2774 bool drained;
9c5a2ba7 2775
b09f4fd3 2776 spin_lock_irq(&pwq->pool->lock);
112202d9 2777 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
b09f4fd3 2778 spin_unlock_irq(&pwq->pool->lock);
fa2563e4
TT
2779
2780 if (drained)
9c5a2ba7
TH
2781 continue;
2782
2783 if (++flush_cnt == 10 ||
2784 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
c5aa87bb 2785 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
044c782c 2786 wq->name, flush_cnt);
76af4d93 2787
b09f4fd3 2788 mutex_unlock(&wq->mutex);
9c5a2ba7
TH
2789 goto reflush;
2790 }
2791
9c5a2ba7 2792 if (!--wq->nr_drainers)
618b01eb 2793 wq->flags &= ~__WQ_DRAINING;
87fc741e 2794 mutex_unlock(&wq->mutex);
9c5a2ba7
TH
2795}
2796EXPORT_SYMBOL_GPL(drain_workqueue);
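
/*
 * Usage sketch (illustrative only; my_wq and my_requeueing_work are
 * hypothetical): drain a queue whose work items chain-queue onto it, e.g.
 * when the queue must become empty but stay usable afterwards.
 * destroy_workqueue() already drains internally, so an explicit call like
 * this is only needed when the workqueue is kept alive.
 *
 *	drain_workqueue(my_wq);
 *	WARN_ON(work_pending(&my_requeueing_work));
 */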
2797
606a5020 2798static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
db700897 2799{
affee4b2 2800 struct worker *worker = NULL;
c9e7cf27 2801 struct worker_pool *pool;
112202d9 2802 struct pool_workqueue *pwq;
db700897
ON
2803
2804 might_sleep();
fa1b54e6
TH
2805
2806 local_irq_disable();
c9e7cf27 2807 pool = get_work_pool(work);
fa1b54e6
TH
2808 if (!pool) {
2809 local_irq_enable();
baf59022 2810 return false;
fa1b54e6 2811 }
db700897 2812
fa1b54e6 2813 spin_lock(&pool->lock);
0b3dae68 2814 /* see the comment in try_to_grab_pending() with the same code */
112202d9
TH
2815 pwq = get_work_pwq(work);
2816 if (pwq) {
2817 if (unlikely(pwq->pool != pool))
4690c4ab 2818 goto already_gone;
606a5020 2819 } else {
c9e7cf27 2820 worker = find_worker_executing_work(pool, work);
affee4b2 2821 if (!worker)
4690c4ab 2822 goto already_gone;
112202d9 2823 pwq = worker->current_pwq;
606a5020 2824 }
db700897 2825
fca839c0
TH
2826 check_flush_dependency(pwq->wq, work);
2827
112202d9 2828 insert_wq_barrier(pwq, barr, work, worker);
d565ed63 2829 spin_unlock_irq(&pool->lock);
7a22ad75 2830
e159489b 2831 /*
a1d14934
PZ
2832 * Force a lock recursion deadlock when using flush_work() inside a
2833 * single-threaded or rescuer equipped workqueue.
2834 *
2835 * For single threaded workqueues the deadlock happens when the work
2836 * is after the work issuing the flush_work(). For rescuer equipped
2837 * workqueues the deadlock happens when the rescuer stalls, blocking
2838 * forward progress.
e159489b 2839 */
a1d14934 2840 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
112202d9 2841 lock_map_acquire(&pwq->wq->lockdep_map);
a1d14934
PZ
2842 lock_map_release(&pwq->wq->lockdep_map);
2843 }
e159489b 2844
401a8d04 2845 return true;
4690c4ab 2846already_gone:
d565ed63 2847 spin_unlock_irq(&pool->lock);
401a8d04 2848 return false;
db700897 2849}
baf59022
TH
2850
2851/**
2852 * flush_work - wait for a work to finish executing the last queueing instance
2853 * @work: the work to flush
2854 *
606a5020
TH
2855 * Wait until @work has finished execution. @work is guaranteed to be idle
2856 * on return if it hasn't been requeued since flush started.
baf59022 2857 *
d185af30 2858 * Return:
baf59022
TH
2859 * %true if flush_work() waited for the work to finish execution,
2860 * %false if it was already idle.
2861 */
2862bool flush_work(struct work_struct *work)
2863{
12997d1a
BH
2864 struct wq_barrier barr;
2865
3347fa09
TH
2866 if (WARN_ON(!wq_online))
2867 return false;
2868
12997d1a
BH
2869 if (start_flush_work(work, &barr)) {
2870 wait_for_completion(&barr.done);
2871 destroy_work_on_stack(&barr.work);
2872 return true;
2873 } else {
2874 return false;
2875 }
6e84d644 2876}
606a5020 2877EXPORT_SYMBOL_GPL(flush_work);
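
/*
 * Usage sketch (illustrative only; struct my_dev is hypothetical): wait for
 * the last queueing of an embedded work item before consuming its result.
 *
 *	struct my_dev {
 *		struct work_struct update_work;
 *		int result;
 *	};
 *
 *	static int my_dev_read_result(struct my_dev *dev)
 *	{
 *		flush_work(&dev->update_work);
 *		return dev->result;
 *	}
 */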
6e84d644 2878
8603e1b3 2879struct cwt_wait {
ac6424b9 2880 wait_queue_entry_t wait;
8603e1b3
TH
2881 struct work_struct *work;
2882};
2883
ac6424b9 2884static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
8603e1b3
TH
2885{
2886 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2887
2888 if (cwait->work != key)
2889 return 0;
2890 return autoremove_wake_function(wait, mode, sync, key);
2891}
2892
36e227d2 2893static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
1f1f642e 2894{
8603e1b3 2895 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
bbb68dfa 2896 unsigned long flags;
1f1f642e
ON
2897 int ret;
2898
2899 do {
bbb68dfa
TH
2900 ret = try_to_grab_pending(work, is_dwork, &flags);
2901 /*
8603e1b3
TH
2902 * If someone else is already canceling, wait for it to
2903 * finish. flush_work() doesn't work for PREEMPT_NONE
2904 * because we may get scheduled between @work's completion
2905 * and the other canceling task resuming and clearing
2906 * CANCELING - flush_work() will return false immediately
2907 * as @work is no longer busy, try_to_grab_pending() will
2908 * return -ENOENT as @work is still being canceled and the
2909 * other canceling task won't be able to clear CANCELING as
2910 * we're hogging the CPU.
2911 *
2912 * Let's wait for completion using a waitqueue. As this
2913 * may lead to the thundering herd problem, use a custom
2914 * wake function which matches @work along with exclusive
2915 * wait and wakeup.
bbb68dfa 2916 */
8603e1b3
TH
2917 if (unlikely(ret == -ENOENT)) {
2918 struct cwt_wait cwait;
2919
2920 init_wait(&cwait.wait);
2921 cwait.wait.func = cwt_wakefn;
2922 cwait.work = work;
2923
2924 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2925 TASK_UNINTERRUPTIBLE);
2926 if (work_is_canceling(work))
2927 schedule();
2928 finish_wait(&cancel_waitq, &cwait.wait);
2929 }
1f1f642e
ON
2930 } while (unlikely(ret < 0));
2931
bbb68dfa
TH
2932 /* tell other tasks trying to grab @work to back off */
2933 mark_work_canceling(work);
2934 local_irq_restore(flags);
2935
3347fa09
TH
2936 /*
2937 * This allows canceling during early boot. We know that @work
2938 * isn't executing.
2939 */
2940 if (wq_online)
2941 flush_work(work);
2942
7a22ad75 2943 clear_work_data(work);
8603e1b3
TH
2944
2945 /*
2946 * Paired with prepare_to_wait() above so that either
2947 * waitqueue_active() is visible here or !work_is_canceling() is
2948 * visible there.
2949 */
2950 smp_mb();
2951 if (waitqueue_active(&cancel_waitq))
2952 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2953
1f1f642e
ON
2954 return ret;
2955}
2956
6e84d644 2957/**
401a8d04
TH
2958 * cancel_work_sync - cancel a work and wait for it to finish
2959 * @work: the work to cancel
6e84d644 2960 *
401a8d04
TH
2961 * Cancel @work and wait for its execution to finish. This function
2962 * can be used even if the work re-queues itself or migrates to
2963 * another workqueue. On return from this function, @work is
2964 * guaranteed to be not pending or executing on any CPU.
1f1f642e 2965 *
401a8d04
TH
2966 * cancel_work_sync(&delayed_work->work) must not be used for
2967 * delayed_work's. Use cancel_delayed_work_sync() instead.
6e84d644 2968 *
401a8d04 2969 * The caller must ensure that the workqueue on which @work was last
6e84d644 2970 * queued can't be destroyed before this function returns.
401a8d04 2971 *
d185af30 2972 * Return:
401a8d04 2973 * %true if @work was pending, %false otherwise.
6e84d644 2974 */
401a8d04 2975bool cancel_work_sync(struct work_struct *work)
6e84d644 2976{
36e227d2 2977 return __cancel_work_timer(work, false);
b89deed3 2978}
28e53bdd 2979EXPORT_SYMBOL_GPL(cancel_work_sync);
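
/*
 * Usage sketch (illustrative only; my_dev_remove and its fields are
 * hypothetical): typical teardown ordering - block new submissions first,
 * then cancel_work_sync() guarantees the callback is neither pending nor
 * running before the structure is freed.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		dev->shutting_down = true;
 *		cancel_work_sync(&dev->update_work);
 *		kfree(dev);
 *	}
 */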
b89deed3 2980
6e84d644 2981/**
401a8d04
TH
2982 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2983 * @dwork: the delayed work to flush
6e84d644 2984 *
401a8d04
TH
2985 * Delayed timer is cancelled and the pending work is queued for
2986 * immediate execution. Like flush_work(), this function only
2987 * considers the last queueing instance of @dwork.
1f1f642e 2988 *
d185af30 2989 * Return:
401a8d04
TH
2990 * %true if flush_work() waited for the work to finish execution,
2991 * %false if it was already idle.
6e84d644 2992 */
401a8d04
TH
2993bool flush_delayed_work(struct delayed_work *dwork)
2994{
8930caba 2995 local_irq_disable();
401a8d04 2996 if (del_timer_sync(&dwork->timer))
60c057bc 2997 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
8930caba 2998 local_irq_enable();
401a8d04
TH
2999 return flush_work(&dwork->work);
3000}
3001EXPORT_SYMBOL(flush_delayed_work);
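
/*
 * Usage sketch (illustrative only; poll_dwork and my_dev_suspend are
 * hypothetical): on suspend, run a pending delayed work immediately and
 * wait for it instead of letting the rest of its timer expire.
 *
 *	static int my_dev_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		flush_delayed_work(&md->poll_dwork);
 *		return 0;
 *	}
 */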
3002
f72b8792
JA
3003static bool __cancel_work(struct work_struct *work, bool is_dwork)
3004{
3005 unsigned long flags;
3006 int ret;
3007
3008 do {
3009 ret = try_to_grab_pending(work, is_dwork, &flags);
3010 } while (unlikely(ret == -EAGAIN));
3011
3012 if (unlikely(ret < 0))
3013 return false;
3014
3015 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3016 local_irq_restore(flags);
3017 return ret;
3018}
3019
3020/*
3021 * See cancel_delayed_work()
3022 */
3023bool cancel_work(struct work_struct *work)
3024{
3025 return __cancel_work(work, false);
3026}
3027
09383498 3028/**
57b30ae7
TH
3029 * cancel_delayed_work - cancel a delayed work
3030 * @dwork: delayed_work to cancel
09383498 3031 *
d185af30
YB
3032 * Kill off a pending delayed_work.
3033 *
3034 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3035 * pending.
3036 *
3037 * Note:
3038 * The work callback function may still be running on return, unless
3039 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3040 * use cancel_delayed_work_sync() to wait on it.
09383498 3041 *
57b30ae7 3042 * This function is safe to call from any context, including IRQ handlers.
09383498 3043 */
57b30ae7 3044bool cancel_delayed_work(struct delayed_work *dwork)
09383498 3045{
f72b8792 3046 return __cancel_work(&dwork->work, true);
09383498 3047}
57b30ae7 3048EXPORT_SYMBOL(cancel_delayed_work);
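
/*
 * Illustrative sketch: disarming a software watchdog from a hard interrupt
 * path, which is allowed because cancel_delayed_work() never sleeps.  The
 * "example_wdog" name is hypothetical.
 */
static struct delayed_work example_wdog;

static void __maybe_unused example_irq_path(void)
{
	/*
	 * Safe even in hard interrupt context; the callback may still be
	 * running, which is acceptable for this use case.
	 */
	cancel_delayed_work(&example_wdog);
}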
09383498 3049
401a8d04
TH
3050/**
3051 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 3052 * @dwork: the delayed work to cancel
3053 *
3054 * This is cancel_work_sync() for delayed works.
3055 *
d185af30 3056 * Return:
401a8d04
TH
3057 * %true if @dwork was pending, %false otherwise.
3058 */
3059bool cancel_delayed_work_sync(struct delayed_work *dwork)
6e84d644 3060{
36e227d2 3061 return __cancel_work_timer(&dwork->work, true);
6e84d644 3062}
f5a421a4 3063EXPORT_SYMBOL(cancel_delayed_work_sync);
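
/*
 * Illustrative sketch: the sync variant is what teardown paths normally
 * want, since it also waits for a callback that is already running.  The
 * "example_poll_dwork" name is hypothetical.
 */
static struct delayed_work example_poll_dwork;

static void __maybe_unused example_driver_remove(void)
{
	/* no further polling after this returns, even if it had re-armed */
	cancel_delayed_work_sync(&example_poll_dwork);
}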
1da177e4 3064
b6136773 3065/**
31ddd871 3066 * schedule_on_each_cpu - execute a function synchronously on each online CPU
b6136773 3067 * @func: the function to call
b6136773 3068 *
31ddd871
TH
3069 * schedule_on_each_cpu() executes @func on each online CPU using the
3070 * system workqueue and blocks until all CPUs have completed.
b6136773 3071 * schedule_on_each_cpu() is very slow.
31ddd871 3072 *
d185af30 3073 * Return:
31ddd871 3074 * 0 on success, -errno on failure.
b6136773 3075 */
65f27f38 3076int schedule_on_each_cpu(work_func_t func)
15316ba8
CL
3077{
3078 int cpu;
38f51568 3079 struct work_struct __percpu *works;
15316ba8 3080
b6136773
AM
3081 works = alloc_percpu(struct work_struct);
3082 if (!works)
15316ba8 3083 return -ENOMEM;
b6136773 3084
93981800
TH
3085 get_online_cpus();
3086
15316ba8 3087 for_each_online_cpu(cpu) {
9bfb1839
IM
3088 struct work_struct *work = per_cpu_ptr(works, cpu);
3089
3090 INIT_WORK(work, func);
b71ab8c2 3091 schedule_work_on(cpu, work);
65a64464 3092 }
93981800
TH
3093
3094 for_each_online_cpu(cpu)
3095 flush_work(per_cpu_ptr(works, cpu));
3096
95402b38 3097 put_online_cpus();
b6136773 3098 free_percpu(works);
15316ba8
CL
3099 return 0;
3100}
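
/*
 * Illustrative sketch: draining a hypothetical per-cpu cache on every
 * online CPU and waiting for all of them to finish.  The example_*
 * functions are assumptions, not part of this file.
 */
static void example_drain_local_cache(struct work_struct *work)
{
	/* runs via the system workqueue on the CPU it was queued on */
}

static int __maybe_unused example_drain_all_caches(void)
{
	/* may sleep; returns -ENOMEM if the per-cpu works can't be allocated */
	return schedule_on_each_cpu(example_drain_local_cache);
}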
3101
1fa44eca
JB
3102/**
3103 * execute_in_process_context - reliably execute the routine with user context
3104 * @fn: the function to execute
1fa44eca
JB
3105 * @ew: guaranteed storage for the execute work structure (must
3106 * be available when the work executes)
3107 *
3108 * Executes the function immediately if process context is available,
3109 * otherwise schedules the function for delayed execution.
3110 *
d185af30 3111 * Return: 0 - function was executed
1fa44eca
JB
3112 * 1 - function was scheduled for execution
3113 */
65f27f38 3114int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1fa44eca
JB
3115{
3116 if (!in_interrupt()) {
65f27f38 3117 fn(&ew->work);
1fa44eca
JB
3118 return 0;
3119 }
3120
65f27f38 3121 INIT_WORK(&ew->work, fn);
1fa44eca
JB
3122 schedule_work(&ew->work);
3123
3124 return 1;
3125}
3126EXPORT_SYMBOL_GPL(execute_in_process_context);
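
/*
 * Illustrative sketch: dropping the last reference to an object from a
 * context that may be atomic.  The embedded execute_work provides the
 * guaranteed storage required above.  "example_conn" is hypothetical.
 */
struct example_conn {
	struct execute_work final_put;
};

static void example_conn_release(struct work_struct *work)
{
	/* actual cleanup; runs immediately or later from the system workqueue */
}

static void __maybe_unused example_conn_drop(struct example_conn *conn)
{
	execute_in_process_context(example_conn_release, &conn->final_put);
}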
3127
6ba94429
FW
3128/**
3129 * free_workqueue_attrs - free a workqueue_attrs
3130 * @attrs: workqueue_attrs to free
226223ab 3131 *
6ba94429 3132 * Undo alloc_workqueue_attrs().
226223ab 3133 */
6ba94429 3134void free_workqueue_attrs(struct workqueue_attrs *attrs)
226223ab 3135{
6ba94429
FW
3136 if (attrs) {
3137 free_cpumask_var(attrs->cpumask);
3138 kfree(attrs);
3139 }
226223ab
TH
3140}
3141
6ba94429
FW
3142/**
3143 * alloc_workqueue_attrs - allocate a workqueue_attrs
3144 * @gfp_mask: allocation mask to use
3145 *
3146 * Allocate a new workqueue_attrs, initialize with default settings and
3147 * return it.
3148 *
3149 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3150 */
3151struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
226223ab 3152{
6ba94429 3153 struct workqueue_attrs *attrs;
226223ab 3154
6ba94429
FW
3155 attrs = kzalloc(sizeof(*attrs), gfp_mask);
3156 if (!attrs)
3157 goto fail;
3158 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3159 goto fail;
3160
3161 cpumask_copy(attrs->cpumask, cpu_possible_mask);
3162 return attrs;
3163fail:
3164 free_workqueue_attrs(attrs);
3165 return NULL;
226223ab
TH
3166}
3167
6ba94429
FW
3168static void copy_workqueue_attrs(struct workqueue_attrs *to,
3169 const struct workqueue_attrs *from)
226223ab 3170{
6ba94429
FW
3171 to->nice = from->nice;
3172 cpumask_copy(to->cpumask, from->cpumask);
3173 /*
3174 * Unlike hash and equality test, this function doesn't ignore
3175 * ->no_numa as it is used for both pool and wq attrs. Instead,
3176 * get_unbound_pool() explicitly clears ->no_numa after copying.
3177 */
3178 to->no_numa = from->no_numa;
226223ab
TH
3179}
3180
6ba94429
FW
3181/* hash value of the content of @attr */
3182static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
226223ab 3183{
6ba94429 3184 u32 hash = 0;
226223ab 3185
6ba94429
FW
3186 hash = jhash_1word(attrs->nice, hash);
3187 hash = jhash(cpumask_bits(attrs->cpumask),
3188 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3189 return hash;
226223ab 3190}
226223ab 3191
6ba94429
FW
3192/* content equality test */
3193static bool wqattrs_equal(const struct workqueue_attrs *a,
3194 const struct workqueue_attrs *b)
226223ab 3195{
6ba94429
FW
3196 if (a->nice != b->nice)
3197 return false;
3198 if (!cpumask_equal(a->cpumask, b->cpumask))
3199 return false;
3200 return true;
226223ab
TH
3201}
3202
6ba94429
FW
3203/**
3204 * init_worker_pool - initialize a newly zalloc'd worker_pool
3205 * @pool: worker_pool to initialize
3206 *
402dd89d 3207 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
6ba94429
FW
3208 *
3209 * Return: 0 on success, -errno on failure. Even on failure, all fields
3210 * inside @pool proper are initialized and put_unbound_pool() can be called
3211 * on @pool safely to release it.
3212 */
3213static int init_worker_pool(struct worker_pool *pool)
226223ab 3214{
6ba94429
FW
3215 spin_lock_init(&pool->lock);
3216 pool->id = -1;
3217 pool->cpu = -1;
3218 pool->node = NUMA_NO_NODE;
3219 pool->flags |= POOL_DISASSOCIATED;
82607adc 3220 pool->watchdog_ts = jiffies;
6ba94429
FW
3221 INIT_LIST_HEAD(&pool->worklist);
3222 INIT_LIST_HEAD(&pool->idle_list);
3223 hash_init(pool->busy_hash);
226223ab 3224
32a6c723 3225 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
226223ab 3226
32a6c723 3227 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
226223ab 3228
6ba94429
FW
3229 mutex_init(&pool->attach_mutex);
3230 INIT_LIST_HEAD(&pool->workers);
226223ab 3231
6ba94429
FW
3232 ida_init(&pool->worker_ida);
3233 INIT_HLIST_NODE(&pool->hash_node);
3234 pool->refcnt = 1;
226223ab 3235
6ba94429
FW
3236 /* shouldn't fail above this point */
3237 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3238 if (!pool->attrs)
3239 return -ENOMEM;
3240 return 0;
226223ab
TH
3241}
3242
6ba94429 3243static void rcu_free_wq(struct rcu_head *rcu)
226223ab 3244{
6ba94429
FW
3245 struct workqueue_struct *wq =
3246 container_of(rcu, struct workqueue_struct, rcu);
226223ab 3247
6ba94429
FW
3248 if (!(wq->flags & WQ_UNBOUND))
3249 free_percpu(wq->cpu_pwqs);
226223ab 3250 else
6ba94429 3251 free_workqueue_attrs(wq->unbound_attrs);
226223ab 3252
6ba94429
FW
3253 kfree(wq->rescuer);
3254 kfree(wq);
226223ab
TH
3255}
3256
6ba94429 3257static void rcu_free_pool(struct rcu_head *rcu)
226223ab 3258{
6ba94429 3259 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
226223ab 3260
6ba94429
FW
3261 ida_destroy(&pool->worker_ida);
3262 free_workqueue_attrs(pool->attrs);
3263 kfree(pool);
226223ab
TH
3264}
3265
6ba94429
FW
3266/**
3267 * put_unbound_pool - put a worker_pool
3268 * @pool: worker_pool to put
3269 *
 3270 * Put @pool. If its refcnt reaches zero, it gets destroyed in a sched-RCU
3271 * safe manner. get_unbound_pool() calls this function on its failure path
3272 * and this function should be able to release pools which went through,
3273 * successfully or not, init_worker_pool().
3274 *
3275 * Should be called with wq_pool_mutex held.
3276 */
3277static void put_unbound_pool(struct worker_pool *pool)
226223ab 3278{
6ba94429
FW
3279 DECLARE_COMPLETION_ONSTACK(detach_completion);
3280 struct worker *worker;
226223ab 3281
6ba94429 3282 lockdep_assert_held(&wq_pool_mutex);
226223ab 3283
6ba94429
FW
3284 if (--pool->refcnt)
3285 return;
226223ab 3286
6ba94429
FW
3287 /* sanity checks */
3288 if (WARN_ON(!(pool->cpu < 0)) ||
3289 WARN_ON(!list_empty(&pool->worklist)))
3290 return;
226223ab 3291
6ba94429
FW
3292 /* release id and unhash */
3293 if (pool->id >= 0)
3294 idr_remove(&worker_pool_idr, pool->id);
3295 hash_del(&pool->hash_node);
d55262c4 3296
6ba94429 3297 /*
692b4825
TH
3298 * Become the manager and destroy all workers. This prevents
3299 * @pool's workers from blocking on attach_mutex. We're the last
3300 * manager and @pool gets freed with the flag set.
6ba94429 3301 */
6ba94429 3302 spin_lock_irq(&pool->lock);
692b4825
TH
3303 wait_event_lock_irq(wq_manager_wait,
3304 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3305 pool->flags |= POOL_MANAGER_ACTIVE;
3306
6ba94429
FW
3307 while ((worker = first_idle_worker(pool)))
3308 destroy_worker(worker);
3309 WARN_ON(pool->nr_workers || pool->nr_idle);
3310 spin_unlock_irq(&pool->lock);
d55262c4 3311
6ba94429
FW
3312 mutex_lock(&pool->attach_mutex);
3313 if (!list_empty(&pool->workers))
3314 pool->detach_completion = &detach_completion;
3315 mutex_unlock(&pool->attach_mutex);
226223ab 3316
6ba94429
FW
3317 if (pool->detach_completion)
3318 wait_for_completion(pool->detach_completion);
226223ab 3319
6ba94429
FW
3320 /* shut down the timers */
3321 del_timer_sync(&pool->idle_timer);
3322 del_timer_sync(&pool->mayday_timer);
226223ab 3323
6ba94429
FW
3324 /* sched-RCU protected to allow dereferences from get_work_pool() */
3325 call_rcu_sched(&pool->rcu, rcu_free_pool);
226223ab
TH
3326}
3327
3328/**
6ba94429
FW
3329 * get_unbound_pool - get a worker_pool with the specified attributes
3330 * @attrs: the attributes of the worker_pool to get
226223ab 3331 *
6ba94429
FW
3332 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3333 * reference count and return it. If there already is a matching
3334 * worker_pool, it will be used; otherwise, this function attempts to
3335 * create a new one.
226223ab 3336 *
6ba94429 3337 * Should be called with wq_pool_mutex held.
226223ab 3338 *
6ba94429
FW
3339 * Return: On success, a worker_pool with the same attributes as @attrs.
3340 * On failure, %NULL.
226223ab 3341 */
6ba94429 3342static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
226223ab 3343{
6ba94429
FW
3344 u32 hash = wqattrs_hash(attrs);
3345 struct worker_pool *pool;
3346 int node;
e2273584 3347 int target_node = NUMA_NO_NODE;
226223ab 3348
6ba94429 3349 lockdep_assert_held(&wq_pool_mutex);
226223ab 3350
6ba94429
FW
3351 /* do we already have a matching pool? */
3352 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3353 if (wqattrs_equal(pool->attrs, attrs)) {
3354 pool->refcnt++;
3355 return pool;
3356 }
3357 }
226223ab 3358
e2273584
XP
3359 /* if cpumask is contained inside a NUMA node, we belong to that node */
3360 if (wq_numa_enabled) {
3361 for_each_node(node) {
3362 if (cpumask_subset(attrs->cpumask,
3363 wq_numa_possible_cpumask[node])) {
3364 target_node = node;
3365 break;
3366 }
3367 }
3368 }
3369
6ba94429 3370 /* nope, create a new one */
e2273584 3371 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
6ba94429
FW
3372 if (!pool || init_worker_pool(pool) < 0)
3373 goto fail;
3374
3375 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
3376 copy_workqueue_attrs(pool->attrs, attrs);
e2273584 3377 pool->node = target_node;
226223ab
TH
3378
3379 /*
6ba94429
FW
3380 * no_numa isn't a worker_pool attribute, always clear it. See
3381 * 'struct workqueue_attrs' comments for detail.
226223ab 3382 */
6ba94429 3383 pool->attrs->no_numa = false;
226223ab 3384
6ba94429
FW
3385 if (worker_pool_assign_id(pool) < 0)
3386 goto fail;
226223ab 3387
6ba94429 3388 /* create and start the initial worker */
3347fa09 3389 if (wq_online && !create_worker(pool))
6ba94429 3390 goto fail;
226223ab 3391
6ba94429
FW
3392 /* install */
3393 hash_add(unbound_pool_hash, &pool->hash_node, hash);
226223ab 3394
6ba94429
FW
3395 return pool;
3396fail:
3397 if (pool)
3398 put_unbound_pool(pool);
3399 return NULL;
226223ab 3400}
226223ab 3401
6ba94429 3402static void rcu_free_pwq(struct rcu_head *rcu)
7a4e344c 3403{
6ba94429
FW
3404 kmem_cache_free(pwq_cache,
3405 container_of(rcu, struct pool_workqueue, rcu));
7a4e344c
TH
3406}
3407
6ba94429
FW
3408/*
3409 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3410 * and needs to be destroyed.
7a4e344c 3411 */
6ba94429 3412static void pwq_unbound_release_workfn(struct work_struct *work)
7a4e344c 3413{
6ba94429
FW
3414 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3415 unbound_release_work);
3416 struct workqueue_struct *wq = pwq->wq;
3417 struct worker_pool *pool = pwq->pool;
3418 bool is_last;
7a4e344c 3419
6ba94429
FW
3420 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3421 return;
7a4e344c 3422
6ba94429
FW
3423 mutex_lock(&wq->mutex);
3424 list_del_rcu(&pwq->pwqs_node);
3425 is_last = list_empty(&wq->pwqs);
3426 mutex_unlock(&wq->mutex);
3427
3428 mutex_lock(&wq_pool_mutex);
3429 put_unbound_pool(pool);
3430 mutex_unlock(&wq_pool_mutex);
3431
3432 call_rcu_sched(&pwq->rcu, rcu_free_pwq);
7a4e344c 3433
2865a8fb 3434 /*
6ba94429
FW
3435 * If we're the last pwq going away, @wq is already dead and no one
3436 * is gonna access it anymore. Schedule RCU free.
2865a8fb 3437 */
6ba94429
FW
3438 if (is_last)
3439 call_rcu_sched(&wq->rcu, rcu_free_wq);
29c91e99
TH
3440}
3441
7a4e344c 3442/**
6ba94429
FW
3443 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3444 * @pwq: target pool_workqueue
d185af30 3445 *
6ba94429
FW
3446 * If @pwq isn't freezing, set @pwq->max_active to the associated
3447 * workqueue's saved_max_active and activate delayed work items
3448 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
7a4e344c 3449 */
6ba94429 3450static void pwq_adjust_max_active(struct pool_workqueue *pwq)
4e1a1f9a 3451{
6ba94429
FW
3452 struct workqueue_struct *wq = pwq->wq;
3453 bool freezable = wq->flags & WQ_FREEZABLE;
3347fa09 3454 unsigned long flags;
4e1a1f9a 3455
6ba94429
FW
3456 /* for @wq->saved_max_active */
3457 lockdep_assert_held(&wq->mutex);
4e1a1f9a 3458
6ba94429
FW
3459 /* fast exit for non-freezable wqs */
3460 if (!freezable && pwq->max_active == wq->saved_max_active)
3461 return;
7a4e344c 3462
3347fa09
TH
3463 /* this function can be called during early boot w/ irq disabled */
3464 spin_lock_irqsave(&pwq->pool->lock, flags);
29c91e99 3465
6ba94429
FW
3466 /*
3467 * During [un]freezing, the caller is responsible for ensuring that
3468 * this function is called at least once after @workqueue_freezing
3469 * is updated and visible.
3470 */
3471 if (!freezable || !workqueue_freezing) {
3472 pwq->max_active = wq->saved_max_active;
4e1a1f9a 3473
6ba94429
FW
3474 while (!list_empty(&pwq->delayed_works) &&
3475 pwq->nr_active < pwq->max_active)
3476 pwq_activate_first_delayed(pwq);
e2dca7ad 3477
6ba94429
FW
3478 /*
3479 * Need to kick a worker after thawed or an unbound wq's
3480 * max_active is bumped. It's a slow path. Do it always.
3481 */
3482 wake_up_worker(pwq->pool);
3483 } else {
3484 pwq->max_active = 0;
3485 }
e2dca7ad 3486
3347fa09 3487 spin_unlock_irqrestore(&pwq->pool->lock, flags);
e2dca7ad
TH
3488}
3489
6ba94429
FW
3490/* initialize newly alloced @pwq which is associated with @wq and @pool */
3491static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3492 struct worker_pool *pool)
29c91e99 3493{
6ba94429 3494 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
29c91e99 3495
6ba94429
FW
3496 memset(pwq, 0, sizeof(*pwq));
3497
3498 pwq->pool = pool;
3499 pwq->wq = wq;
3500 pwq->flush_color = -1;
3501 pwq->refcnt = 1;
3502 INIT_LIST_HEAD(&pwq->delayed_works);
3503 INIT_LIST_HEAD(&pwq->pwqs_node);
3504 INIT_LIST_HEAD(&pwq->mayday_node);
3505 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
29c91e99
TH
3506}
3507
6ba94429
FW
3508/* sync @pwq with the current state of its associated wq and link it */
3509static void link_pwq(struct pool_workqueue *pwq)
29c91e99 3510{
6ba94429 3511 struct workqueue_struct *wq = pwq->wq;
29c91e99 3512
6ba94429 3513 lockdep_assert_held(&wq->mutex);
a892cacc 3514
6ba94429
FW
3515 /* may be called multiple times, ignore if already linked */
3516 if (!list_empty(&pwq->pwqs_node))
29c91e99 3517 return;
29c91e99 3518
6ba94429
FW
3519 /* set the matching work_color */
3520 pwq->work_color = wq->work_color;
29c91e99 3521
6ba94429
FW
3522 /* sync max_active to the current setting */
3523 pwq_adjust_max_active(pwq);
29c91e99 3524
6ba94429
FW
3525 /* link in @pwq */
3526 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3527}
29c91e99 3528
6ba94429
FW
3529/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3530static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3531 const struct workqueue_attrs *attrs)
3532{
3533 struct worker_pool *pool;
3534 struct pool_workqueue *pwq;
60f5a4bc 3535
6ba94429 3536 lockdep_assert_held(&wq_pool_mutex);
60f5a4bc 3537
6ba94429
FW
3538 pool = get_unbound_pool(attrs);
3539 if (!pool)
3540 return NULL;
60f5a4bc 3541
6ba94429
FW
3542 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3543 if (!pwq) {
3544 put_unbound_pool(pool);
3545 return NULL;
3546 }
29c91e99 3547
6ba94429
FW
3548 init_pwq(pwq, wq, pool);
3549 return pwq;
3550}
29c91e99 3551
29c91e99 3552/**
30186c6f 3553 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
042f7df1 3554 * @attrs: the wq_attrs of the default pwq of the target workqueue
6ba94429
FW
3555 * @node: the target NUMA node
3556 * @cpu_going_down: if >= 0, the CPU to consider as offline
3557 * @cpumask: outarg, the resulting cpumask
29c91e99 3558 *
6ba94429
FW
3559 * Calculate the cpumask a workqueue with @attrs should use on @node. If
3560 * @cpu_going_down is >= 0, that cpu is considered offline during
3561 * calculation. The result is stored in @cpumask.
a892cacc 3562 *
6ba94429
FW
3563 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
3564 * enabled and @node has online CPUs requested by @attrs, the returned
3565 * cpumask is the intersection of the possible CPUs of @node and
3566 * @attrs->cpumask.
d185af30 3567 *
6ba94429
FW
3568 * The caller is responsible for ensuring that the cpumask of @node stays
3569 * stable.
3570 *
3571 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3572 * %false if equal.
29c91e99 3573 */
6ba94429
FW
3574static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3575 int cpu_going_down, cpumask_t *cpumask)
29c91e99 3576{
6ba94429
FW
3577 if (!wq_numa_enabled || attrs->no_numa)
3578 goto use_dfl;
29c91e99 3579
6ba94429
FW
3580 /* does @node have any online CPUs @attrs wants? */
3581 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3582 if (cpu_going_down >= 0)
3583 cpumask_clear_cpu(cpu_going_down, cpumask);
29c91e99 3584
6ba94429
FW
3585 if (cpumask_empty(cpumask))
3586 goto use_dfl;
4c16bd32
TH
3587
3588 /* yeap, return possible CPUs in @node that @attrs wants */
3589 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
1ad0f0a7
MB
3590
3591 if (cpumask_empty(cpumask)) {
3592 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3593 "possible intersect\n");
3594 return false;
3595 }
3596
4c16bd32
TH
3597 return !cpumask_equal(cpumask, attrs->cpumask);
3598
3599use_dfl:
3600 cpumask_copy(cpumask, attrs->cpumask);
3601 return false;
3602}
3603
1befcf30
TH
3604/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3605static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3606 int node,
3607 struct pool_workqueue *pwq)
3608{
3609 struct pool_workqueue *old_pwq;
3610
5b95e1af 3611 lockdep_assert_held(&wq_pool_mutex);
1befcf30
TH
3612 lockdep_assert_held(&wq->mutex);
3613
3614 /* link_pwq() can handle duplicate calls */
3615 link_pwq(pwq);
3616
3617 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3618 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3619 return old_pwq;
3620}
3621
2d5f0764
LJ
3622/* context to store the prepared attrs & pwqs before applying */
3623struct apply_wqattrs_ctx {
3624 struct workqueue_struct *wq; /* target workqueue */
3625 struct workqueue_attrs *attrs; /* attrs to apply */
042f7df1 3626 struct list_head list; /* queued for batching commit */
2d5f0764
LJ
3627 struct pool_workqueue *dfl_pwq;
3628 struct pool_workqueue *pwq_tbl[];
3629};
3630
3631/* free the resources after success or abort */
3632static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3633{
3634 if (ctx) {
3635 int node;
3636
3637 for_each_node(node)
3638 put_pwq_unlocked(ctx->pwq_tbl[node]);
3639 put_pwq_unlocked(ctx->dfl_pwq);
3640
3641 free_workqueue_attrs(ctx->attrs);
3642
3643 kfree(ctx);
3644 }
3645}
3646
3647/* allocate the attrs and pwqs for later installation */
3648static struct apply_wqattrs_ctx *
3649apply_wqattrs_prepare(struct workqueue_struct *wq,
3650 const struct workqueue_attrs *attrs)
9e8cd2f5 3651{
2d5f0764 3652 struct apply_wqattrs_ctx *ctx;
4c16bd32 3653 struct workqueue_attrs *new_attrs, *tmp_attrs;
2d5f0764 3654 int node;
9e8cd2f5 3655
2d5f0764 3656 lockdep_assert_held(&wq_pool_mutex);
9e8cd2f5 3657
2d5f0764
LJ
3658 ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
3659 GFP_KERNEL);
8719dcea 3660
13e2e556 3661 new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
4c16bd32 3662 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
2d5f0764
LJ
3663 if (!ctx || !new_attrs || !tmp_attrs)
3664 goto out_free;
13e2e556 3665
042f7df1
LJ
3666 /*
3667 * Calculate the attrs of the default pwq.
3668 * If the user configured cpumask doesn't overlap with the
3669 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3670 */
13e2e556 3671 copy_workqueue_attrs(new_attrs, attrs);
b05a7928 3672 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
042f7df1
LJ
3673 if (unlikely(cpumask_empty(new_attrs->cpumask)))
3674 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
13e2e556 3675
4c16bd32
TH
3676 /*
3677 * We may create multiple pwqs with differing cpumasks. Make a
3678 * copy of @new_attrs which will be modified and used to obtain
3679 * pools.
3680 */
3681 copy_workqueue_attrs(tmp_attrs, new_attrs);
3682
4c16bd32
TH
3683 /*
3684 * If something goes wrong during CPU up/down, we'll fall back to
3685 * the default pwq covering whole @attrs->cpumask. Always create
3686 * it even if we don't use it immediately.
3687 */
2d5f0764
LJ
3688 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3689 if (!ctx->dfl_pwq)
3690 goto out_free;
4c16bd32
TH
3691
3692 for_each_node(node) {
042f7df1 3693 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
2d5f0764
LJ
3694 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3695 if (!ctx->pwq_tbl[node])
3696 goto out_free;
4c16bd32 3697 } else {
2d5f0764
LJ
3698 ctx->dfl_pwq->refcnt++;
3699 ctx->pwq_tbl[node] = ctx->dfl_pwq;
4c16bd32
TH
3700 }
3701 }
3702
042f7df1
LJ
3703 /* save the user configured attrs and sanitize it. */
3704 copy_workqueue_attrs(new_attrs, attrs);
3705 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
2d5f0764 3706 ctx->attrs = new_attrs;
042f7df1 3707
2d5f0764
LJ
3708 ctx->wq = wq;
3709 free_workqueue_attrs(tmp_attrs);
3710 return ctx;
3711
3712out_free:
3713 free_workqueue_attrs(tmp_attrs);
3714 free_workqueue_attrs(new_attrs);
3715 apply_wqattrs_cleanup(ctx);
3716 return NULL;
3717}
3718
3719/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3720static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3721{
3722 int node;
9e8cd2f5 3723
4c16bd32 3724 /* all pwqs have been created successfully, let's install'em */
2d5f0764 3725 mutex_lock(&ctx->wq->mutex);
a892cacc 3726
2d5f0764 3727 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4c16bd32
TH
3728
3729 /* save the previous pwq and install the new one */
f147f29e 3730 for_each_node(node)
2d5f0764
LJ
3731 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3732 ctx->pwq_tbl[node]);
4c16bd32
TH
3733
3734 /* @dfl_pwq might not have been used, ensure it's linked */
2d5f0764
LJ
3735 link_pwq(ctx->dfl_pwq);
3736 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
f147f29e 3737
2d5f0764
LJ
3738 mutex_unlock(&ctx->wq->mutex);
3739}
9e8cd2f5 3740
a0111cf6
LJ
3741static void apply_wqattrs_lock(void)
3742{
3743 /* CPUs should stay stable across pwq creations and installations */
3744 get_online_cpus();
3745 mutex_lock(&wq_pool_mutex);
3746}
3747
3748static void apply_wqattrs_unlock(void)
3749{
3750 mutex_unlock(&wq_pool_mutex);
3751 put_online_cpus();
3752}
3753
3754static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3755 const struct workqueue_attrs *attrs)
2d5f0764
LJ
3756{
3757 struct apply_wqattrs_ctx *ctx;
4c16bd32 3758
2d5f0764
LJ
3759 /* only unbound workqueues can change attributes */
3760 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3761 return -EINVAL;
13e2e556 3762
2d5f0764 3763 /* creating multiple pwqs breaks ordering guarantee */
0a94efb5
TH
3764 if (!list_empty(&wq->pwqs)) {
3765 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3766 return -EINVAL;
3767
3768 wq->flags &= ~__WQ_ORDERED;
3769 }
2d5f0764 3770
2d5f0764 3771 ctx = apply_wqattrs_prepare(wq, attrs);
6201171e 3772 if (!ctx)
3773 return -ENOMEM;
2d5f0764
LJ
3774
3775 /* the ctx has been prepared successfully, let's commit it */
6201171e 3776 apply_wqattrs_commit(ctx);
2d5f0764
LJ
3777 apply_wqattrs_cleanup(ctx);
3778
6201171e 3779 return 0;
9e8cd2f5
TH
3780}
3781
a0111cf6
LJ
3782/**
3783 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3784 * @wq: the target workqueue
3785 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3786 *
3787 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3788 * machines, this function maps a separate pwq to each NUMA node with
 3789 * possible CPUs in @attrs->cpumask so that work items are affine to the
 3790 * NUMA node they were issued on. Older pwqs are released as in-flight work
3791 * items finish. Note that a work item which repeatedly requeues itself
3792 * back-to-back will stay on its current pwq.
3793 *
3794 * Performs GFP_KERNEL allocations.
3795 *
3796 * Return: 0 on success and -errno on failure.
3797 */
3798int apply_workqueue_attrs(struct workqueue_struct *wq,
3799 const struct workqueue_attrs *attrs)
3800{
3801 int ret;
3802
3803 apply_wqattrs_lock();
3804 ret = apply_workqueue_attrs_locked(wq, attrs);
3805 apply_wqattrs_unlock();
3806
3807 return ret;
3808}
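
/*
 * Illustrative sketch: restricting an unbound workqueue to a caller
 * supplied cpumask and raising its priority.  "example_wq" and
 * "housekeeping" are hypothetical; the workqueue must be WQ_UNBOUND.
 */
static int __maybe_unused example_confine_wq(struct workqueue_struct *example_wq,
					     const struct cpumask *housekeeping)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;
	cpumask_copy(attrs->cpumask, housekeeping);

	/* performs GFP_KERNEL allocations; must not be called atomically */
	ret = apply_workqueue_attrs(example_wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}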
3809
4c16bd32
TH
3810/**
3811 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3812 * @wq: the target workqueue
3813 * @cpu: the CPU coming up or going down
3814 * @online: whether @cpu is coming up or going down
3815 *
3816 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3817 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
3818 * @wq accordingly.
3819 *
3820 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3821 * falls back to @wq->dfl_pwq which may not be optimal but is always
3822 * correct.
3823 *
3824 * Note that when the last allowed CPU of a NUMA node goes offline for a
3825 * workqueue with a cpumask spanning multiple nodes, the workers which were
3826 * already executing the work items for the workqueue will lose their CPU
3827 * affinity and may execute on any CPU. This is similar to how per-cpu
3828 * workqueues behave on CPU_DOWN. If a workqueue user wants strict
3829 * affinity, it's the user's responsibility to flush the work item from
3830 * CPU_DOWN_PREPARE.
3831 */
3832static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3833 bool online)
3834{
3835 int node = cpu_to_node(cpu);
3836 int cpu_off = online ? -1 : cpu;
3837 struct pool_workqueue *old_pwq = NULL, *pwq;
3838 struct workqueue_attrs *target_attrs;
3839 cpumask_t *cpumask;
3840
3841 lockdep_assert_held(&wq_pool_mutex);
3842
f7142ed4
LJ
3843 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
3844 wq->unbound_attrs->no_numa)
4c16bd32
TH
3845 return;
3846
3847 /*
3848 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3849 * Let's use a preallocated one. The following buf is protected by
3850 * CPU hotplug exclusion.
3851 */
3852 target_attrs = wq_update_unbound_numa_attrs_buf;
3853 cpumask = target_attrs->cpumask;
3854
4c16bd32
TH
3855 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3856 pwq = unbound_pwq_by_node(wq, node);
3857
3858 /*
3859 * Let's determine what needs to be done. If the target cpumask is
042f7df1
LJ
3860 * different from the default pwq's, we need to compare it to @pwq's
3861 * and create a new one if they don't match. If the target cpumask
3862 * equals the default pwq's, the default pwq should be used.
4c16bd32 3863 */
042f7df1 3864 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4c16bd32 3865 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
f7142ed4 3866 return;
4c16bd32 3867 } else {
534a3fbb 3868 goto use_dfl_pwq;
4c16bd32
TH
3869 }
3870
4c16bd32
TH
3871 /* create a new pwq */
3872 pwq = alloc_unbound_pwq(wq, target_attrs);
3873 if (!pwq) {
2d916033
FF
3874 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3875 wq->name);
77f300b1 3876 goto use_dfl_pwq;
4c16bd32
TH
3877 }
3878
f7142ed4 3879 /* Install the new pwq. */
4c16bd32
TH
3880 mutex_lock(&wq->mutex);
3881 old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3882 goto out_unlock;
3883
3884use_dfl_pwq:
f7142ed4 3885 mutex_lock(&wq->mutex);
4c16bd32
TH
3886 spin_lock_irq(&wq->dfl_pwq->pool->lock);
3887 get_pwq(wq->dfl_pwq);
3888 spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3889 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3890out_unlock:
3891 mutex_unlock(&wq->mutex);
3892 put_pwq_unlocked(old_pwq);
3893}
3894
30cdf249 3895static int alloc_and_link_pwqs(struct workqueue_struct *wq)
0f900049 3896{
49e3cf44 3897 bool highpri = wq->flags & WQ_HIGHPRI;
8a2b7538 3898 int cpu, ret;
30cdf249
TH
3899
3900 if (!(wq->flags & WQ_UNBOUND)) {
420c0ddb
TH
3901 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3902 if (!wq->cpu_pwqs)
30cdf249
TH
3903 return -ENOMEM;
3904
3905 for_each_possible_cpu(cpu) {
7fb98ea7
TH
3906 struct pool_workqueue *pwq =
3907 per_cpu_ptr(wq->cpu_pwqs, cpu);
7a62c2c8 3908 struct worker_pool *cpu_pools =
f02ae73a 3909 per_cpu(cpu_worker_pools, cpu);
f3421797 3910
f147f29e
TH
3911 init_pwq(pwq, wq, &cpu_pools[highpri]);
3912
3913 mutex_lock(&wq->mutex);
1befcf30 3914 link_pwq(pwq);
f147f29e 3915 mutex_unlock(&wq->mutex);
30cdf249 3916 }
9e8cd2f5 3917 return 0;
8a2b7538
TH
3918 } else if (wq->flags & __WQ_ORDERED) {
3919 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3920 /* there should only be single pwq for ordering guarantee */
3921 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3922 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3923 "ordering guarantee broken for workqueue %s\n", wq->name);
3924 return ret;
30cdf249 3925 } else {
9e8cd2f5 3926 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
30cdf249 3927 }
0f900049
TH
3928}
3929
f3421797
TH
3930static int wq_clamp_max_active(int max_active, unsigned int flags,
3931 const char *name)
b71ab8c2 3932{
f3421797
TH
3933 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3934
3935 if (max_active < 1 || max_active > lim)
044c782c
VI
3936 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3937 max_active, name, 1, lim);
b71ab8c2 3938
f3421797 3939 return clamp_val(max_active, 1, lim);
b71ab8c2
TH
3940}
3941
b196be89 3942struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
d320c038
TH
3943 unsigned int flags,
3944 int max_active,
3945 struct lock_class_key *key,
b196be89 3946 const char *lock_name, ...)
1da177e4 3947{
df2d5ae4 3948 size_t tbl_size = 0;
ecf6881f 3949 va_list args;
1da177e4 3950 struct workqueue_struct *wq;
49e3cf44 3951 struct pool_workqueue *pwq;
b196be89 3952
5c0338c6
TH
3953 /*
3954 * Unbound && max_active == 1 used to imply ordered, which is no
3955 * longer the case on NUMA machines due to per-node pools. While
3956 * alloc_ordered_workqueue() is the right way to create an ordered
3957 * workqueue, keep the previous behavior to avoid subtle breakages
3958 * on NUMA.
3959 */
3960 if ((flags & WQ_UNBOUND) && max_active == 1)
3961 flags |= __WQ_ORDERED;
3962
cee22a15
VK
3963 /* see the comment above the definition of WQ_POWER_EFFICIENT */
3964 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3965 flags |= WQ_UNBOUND;
3966
ecf6881f 3967 /* allocate wq and format name */
df2d5ae4 3968 if (flags & WQ_UNBOUND)
ddcb57e2 3969 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
df2d5ae4
TH
3970
3971 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
b196be89 3972 if (!wq)
d2c1d404 3973 return NULL;
b196be89 3974
6029a918
TH
3975 if (flags & WQ_UNBOUND) {
3976 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3977 if (!wq->unbound_attrs)
3978 goto err_free_wq;
3979 }
3980
ecf6881f
TH
3981 va_start(args, lock_name);
3982 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
b196be89 3983 va_end(args);
1da177e4 3984
d320c038 3985 max_active = max_active ?: WQ_DFL_ACTIVE;
b196be89 3986 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3af24433 3987
b196be89 3988 /* init wq */
97e37d7b 3989 wq->flags = flags;
a0a1a5fd 3990 wq->saved_max_active = max_active;
3c25a55d 3991 mutex_init(&wq->mutex);
112202d9 3992 atomic_set(&wq->nr_pwqs_to_flush, 0);
30cdf249 3993 INIT_LIST_HEAD(&wq->pwqs);
73f53c4a
TH
3994 INIT_LIST_HEAD(&wq->flusher_queue);
3995 INIT_LIST_HEAD(&wq->flusher_overflow);
493a1724 3996 INIT_LIST_HEAD(&wq->maydays);
502ca9d8 3997
eb13ba87 3998 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
cce1a165 3999 INIT_LIST_HEAD(&wq->list);
3af24433 4000
30cdf249 4001 if (alloc_and_link_pwqs(wq) < 0)
d2c1d404 4002 goto err_free_wq;
1537663f 4003
493008a8
TH
4004 /*
4005 * Workqueues which may be used during memory reclaim should
4006 * have a rescuer to guarantee forward progress.
4007 */
4008 if (flags & WQ_MEM_RECLAIM) {
e22bee78
TH
4009 struct worker *rescuer;
4010
f7537df5 4011 rescuer = alloc_worker(NUMA_NO_NODE);
e22bee78 4012 if (!rescuer)
d2c1d404 4013 goto err_destroy;
e22bee78 4014
111c225a
TH
4015 rescuer->rescue_wq = wq;
4016 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
b196be89 4017 wq->name);
d2c1d404
TH
4018 if (IS_ERR(rescuer->task)) {
4019 kfree(rescuer);
4020 goto err_destroy;
4021 }
e22bee78 4022
d2c1d404 4023 wq->rescuer = rescuer;
25834c73 4024 kthread_bind_mask(rescuer->task, cpu_possible_mask);
e22bee78 4025 wake_up_process(rescuer->task);
3af24433
ON
4026 }
4027
226223ab
TH
4028 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4029 goto err_destroy;
4030
a0a1a5fd 4031 /*
68e13a67
LJ
4032 * wq_pool_mutex protects global freeze state and workqueues list.
4033 * Grab it, adjust max_active and add the new @wq to workqueues
4034 * list.
a0a1a5fd 4035 */
68e13a67 4036 mutex_lock(&wq_pool_mutex);
a0a1a5fd 4037
a357fc03 4038 mutex_lock(&wq->mutex);
699ce097
TH
4039 for_each_pwq(pwq, wq)
4040 pwq_adjust_max_active(pwq);
a357fc03 4041 mutex_unlock(&wq->mutex);
a0a1a5fd 4042
e2dca7ad 4043 list_add_tail_rcu(&wq->list, &workqueues);
a0a1a5fd 4044
68e13a67 4045 mutex_unlock(&wq_pool_mutex);
1537663f 4046
3af24433 4047 return wq;
d2c1d404
TH
4048
4049err_free_wq:
6029a918 4050 free_workqueue_attrs(wq->unbound_attrs);
d2c1d404
TH
4051 kfree(wq);
4052 return NULL;
4053err_destroy:
4054 destroy_workqueue(wq);
4690c4ab 4055 return NULL;
3af24433 4056}
d320c038 4057EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
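
/*
 * Illustrative sketch: callers normally go through the alloc_workqueue()
 * wrapper, which supplies the lockdep key, rather than calling
 * __alloc_workqueue_key() directly.  Names and flags here are one
 * plausible combination, not a recommendation.
 */
static struct workqueue_struct *example_wq;

static int __maybe_unused example_init(void)
{
	/* unbound, freezable, usable during memory reclaim, default max_active */
	example_wq = alloc_workqueue("example", WQ_UNBOUND | WQ_FREEZABLE |
				     WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}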
1da177e4 4058
3af24433
ON
4059/**
4060 * destroy_workqueue - safely terminate a workqueue
4061 * @wq: target workqueue
4062 *
4063 * Safely destroy a workqueue. All work currently pending will be done first.
4064 */
4065void destroy_workqueue(struct workqueue_struct *wq)
4066{
49e3cf44 4067 struct pool_workqueue *pwq;
4c16bd32 4068 int node;
3af24433 4069
9c5a2ba7
TH
4070 /* drain it before proceeding with destruction */
4071 drain_workqueue(wq);
c8efcc25 4072
6183c009 4073 /* sanity checks */
b09f4fd3 4074 mutex_lock(&wq->mutex);
49e3cf44 4075 for_each_pwq(pwq, wq) {
6183c009
TH
4076 int i;
4077
76af4d93
TH
4078 for (i = 0; i < WORK_NR_COLORS; i++) {
4079 if (WARN_ON(pwq->nr_in_flight[i])) {
b09f4fd3 4080 mutex_unlock(&wq->mutex);
fa07fb6a 4081 show_workqueue_state();
6183c009 4082 return;
76af4d93
TH
4083 }
4084 }
4085
5c529597 4086 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
8864b4e5 4087 WARN_ON(pwq->nr_active) ||
76af4d93 4088 WARN_ON(!list_empty(&pwq->delayed_works))) {
b09f4fd3 4089 mutex_unlock(&wq->mutex);
fa07fb6a 4090 show_workqueue_state();
6183c009 4091 return;
76af4d93 4092 }
6183c009 4093 }
b09f4fd3 4094 mutex_unlock(&wq->mutex);
6183c009 4095
a0a1a5fd
TH
4096 /*
4097 * wq list is used to freeze wq, remove from list after
4098 * flushing is complete in case freeze races us.
4099 */
68e13a67 4100 mutex_lock(&wq_pool_mutex);
e2dca7ad 4101 list_del_rcu(&wq->list);
68e13a67 4102 mutex_unlock(&wq_pool_mutex);
3af24433 4103
226223ab
TH
4104 workqueue_sysfs_unregister(wq);
4105
e2dca7ad 4106 if (wq->rescuer)
e22bee78 4107 kthread_stop(wq->rescuer->task);
e22bee78 4108
8864b4e5
TH
4109 if (!(wq->flags & WQ_UNBOUND)) {
4110 /*
4111 * The base ref is never dropped on per-cpu pwqs. Directly
e2dca7ad 4112 * schedule RCU free.
8864b4e5 4113 */
e2dca7ad 4114 call_rcu_sched(&wq->rcu, rcu_free_wq);
8864b4e5
TH
4115 } else {
4116 /*
4117 * We're the sole accessor of @wq at this point. Directly
4c16bd32
TH
4118 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4119 * @wq will be freed when the last pwq is released.
8864b4e5 4120 */
4c16bd32
TH
4121 for_each_node(node) {
4122 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4123 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4124 put_pwq_unlocked(pwq);
4125 }
4126
4127 /*
4128 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4129 * put. Don't access it afterwards.
4130 */
4131 pwq = wq->dfl_pwq;
4132 wq->dfl_pwq = NULL;
dce90d47 4133 put_pwq_unlocked(pwq);
29c91e99 4134 }
3af24433
ON
4135}
4136EXPORT_SYMBOL_GPL(destroy_workqueue);
4137
dcd989cb
TH
4138/**
4139 * workqueue_set_max_active - adjust max_active of a workqueue
4140 * @wq: target workqueue
4141 * @max_active: new max_active value.
4142 *
4143 * Set max_active of @wq to @max_active.
4144 *
4145 * CONTEXT:
4146 * Don't call from IRQ context.
4147 */
4148void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4149{
49e3cf44 4150 struct pool_workqueue *pwq;
dcd989cb 4151
8719dcea 4152 /* disallow meddling with max_active for ordered workqueues */
0a94efb5 4153 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
8719dcea
TH
4154 return;
4155
f3421797 4156 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
dcd989cb 4157
a357fc03 4158 mutex_lock(&wq->mutex);
dcd989cb 4159
0a94efb5 4160 wq->flags &= ~__WQ_ORDERED;
dcd989cb
TH
4161 wq->saved_max_active = max_active;
4162
699ce097
TH
4163 for_each_pwq(pwq, wq)
4164 pwq_adjust_max_active(pwq);
93981800 4165
a357fc03 4166 mutex_unlock(&wq->mutex);
15316ba8 4167}
dcd989cb 4168EXPORT_SYMBOL_GPL(workqueue_set_max_active);
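
/*
 * Illustrative sketch: throttling a non-ordered workqueue at runtime, e.g.
 * in response to a sysfs knob.  "example_throttle" is hypothetical.
 */
static void __maybe_unused example_throttle(struct workqueue_struct *wq, int limit)
{
	/* the value is clamped internally; ordered workqueues are rejected */
	workqueue_set_max_active(wq, limit);
}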
15316ba8 4169
e6267616
TH
4170/**
4171 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4172 *
4173 * Determine whether %current is a workqueue rescuer. Can be used from
4174 * work functions to determine whether it's being run off the rescuer task.
d185af30
YB
4175 *
4176 * Return: %true if %current is a workqueue rescuer. %false otherwise.
e6267616
TH
4177 */
4178bool current_is_workqueue_rescuer(void)
4179{
4180 struct worker *worker = current_wq_worker();
4181
6a092dfd 4182 return worker && worker->rescue_wq;
e6267616
TH
4183}
4184
eef6a7d5 4185/**
dcd989cb
TH
4186 * workqueue_congested - test whether a workqueue is congested
4187 * @cpu: CPU in question
4188 * @wq: target workqueue
eef6a7d5 4189 *
dcd989cb
TH
4190 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4191 * no synchronization around this function and the test result is
4192 * unreliable and only useful as advisory hints or for debugging.
eef6a7d5 4193 *
d3251859
TH
4194 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4195 * Note that both per-cpu and unbound workqueues may be associated with
4196 * multiple pool_workqueues which have separate congested states. A
4197 * workqueue being congested on one CPU doesn't mean the workqueue is also
 4198 * congested on other CPUs / NUMA nodes.
4199 *
d185af30 4200 * Return:
dcd989cb 4201 * %true if congested, %false otherwise.
eef6a7d5 4202 */
d84ff051 4203bool workqueue_congested(int cpu, struct workqueue_struct *wq)
1da177e4 4204{
7fb98ea7 4205 struct pool_workqueue *pwq;
76af4d93
TH
4206 bool ret;
4207
88109453 4208 rcu_read_lock_sched();
7fb98ea7 4209
d3251859
TH
4210 if (cpu == WORK_CPU_UNBOUND)
4211 cpu = smp_processor_id();
4212
7fb98ea7
TH
4213 if (!(wq->flags & WQ_UNBOUND))
4214 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4215 else
df2d5ae4 4216 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
dcd989cb 4217
76af4d93 4218 ret = !list_empty(&pwq->delayed_works);
88109453 4219 rcu_read_unlock_sched();
76af4d93
TH
4220
4221 return ret;
1da177e4 4222}
dcd989cb 4223EXPORT_SYMBOL_GPL(workqueue_congested);
1da177e4 4224
dcd989cb
TH
4225/**
4226 * work_busy - test whether a work is currently pending or running
4227 * @work: the work to be tested
4228 *
4229 * Test whether @work is currently pending or running. There is no
4230 * synchronization around this function and the test result is
4231 * unreliable and only useful as advisory hints or for debugging.
dcd989cb 4232 *
d185af30 4233 * Return:
dcd989cb
TH
4234 * OR'd bitmask of WORK_BUSY_* bits.
4235 */
4236unsigned int work_busy(struct work_struct *work)
1da177e4 4237{
fa1b54e6 4238 struct worker_pool *pool;
dcd989cb
TH
4239 unsigned long flags;
4240 unsigned int ret = 0;
1da177e4 4241
dcd989cb
TH
4242 if (work_pending(work))
4243 ret |= WORK_BUSY_PENDING;
1da177e4 4244
fa1b54e6
TH
4245 local_irq_save(flags);
4246 pool = get_work_pool(work);
038366c5 4247 if (pool) {
fa1b54e6 4248 spin_lock(&pool->lock);
038366c5
LJ
4249 if (find_worker_executing_work(pool, work))
4250 ret |= WORK_BUSY_RUNNING;
fa1b54e6 4251 spin_unlock(&pool->lock);
038366c5 4252 }
fa1b54e6 4253 local_irq_restore(flags);
1da177e4 4254
dcd989cb 4255 return ret;
1da177e4 4256}
dcd989cb 4257EXPORT_SYMBOL_GPL(work_busy);
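
/*
 * Illustrative sketch: using work_busy() purely as a debugging hint, since
 * the comment above warns that the result is unsynchronized and advisory.
 * "example_dump_work_state" is hypothetical.
 */
static void __maybe_unused example_dump_work_state(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_debug("work %p: %s%s\n", work,
		 busy & WORK_BUSY_PENDING ? "PENDING " : "",
		 busy & WORK_BUSY_RUNNING ? "RUNNING" : "");
}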
1da177e4 4258
3d1cb205
TH
4259/**
4260 * set_worker_desc - set description for the current work item
4261 * @fmt: printf-style format string
4262 * @...: arguments for the format string
4263 *
4264 * This function can be called by a running work function to describe what
4265 * the work item is about. If the worker task gets dumped, this
4266 * information will be printed out together to help debugging. The
4267 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4268 */
4269void set_worker_desc(const char *fmt, ...)
4270{
4271 struct worker *worker = current_wq_worker();
4272 va_list args;
4273
4274 if (worker) {
4275 va_start(args, fmt);
4276 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4277 va_end(args);
4278 worker->desc_valid = true;
4279 }
4280}
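
/*
 * Illustrative sketch: a work function identifying which device it is
 * operating on so that a task dump is more useful.  The "example_dev"
 * structure and its fields are hypothetical.
 */
struct example_desc_dev {
	struct work_struct io_work;
	const char *name;
};

static void __maybe_unused example_io_workfn(struct work_struct *work)
{
	struct example_desc_dev *dev =
		container_of(work, struct example_desc_dev, io_work);

	/* shows up in print_worker_info() output if this worker is dumped */
	set_worker_desc("flushing %s", dev->name);
}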
4281
4282/**
4283 * print_worker_info - print out worker information and description
4284 * @log_lvl: the log level to use when printing
4285 * @task: target task
4286 *
4287 * If @task is a worker and currently executing a work item, print out the
4288 * name of the workqueue being serviced and worker description set with
4289 * set_worker_desc() by the currently executing work item.
4290 *
4291 * This function can be safely called on any task as long as the
4292 * task_struct itself is accessible. While safe, this function isn't
 4293 * synchronized and may print out mixups or garbage of limited length.
4294 */
4295void print_worker_info(const char *log_lvl, struct task_struct *task)
4296{
4297 work_func_t *fn = NULL;
4298 char name[WQ_NAME_LEN] = { };
4299 char desc[WORKER_DESC_LEN] = { };
4300 struct pool_workqueue *pwq = NULL;
4301 struct workqueue_struct *wq = NULL;
4302 bool desc_valid = false;
4303 struct worker *worker;
4304
4305 if (!(task->flags & PF_WQ_WORKER))
4306 return;
4307
4308 /*
4309 * This function is called without any synchronization and @task
4310 * could be in any state. Be careful with dereferences.
4311 */
e700591a 4312 worker = kthread_probe_data(task);
3d1cb205
TH
4313
4314 /*
4315 * Carefully copy the associated workqueue's workfn and name. Keep
4316 * the original last '\0' in case the original contains garbage.
4317 */
4318 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4319 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4320 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4321 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4322
4323 /* copy worker description */
4324 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4325 if (desc_valid)
4326 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4327
4328 if (fn || name[0] || desc[0]) {
4329 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4330 if (desc[0])
4331 pr_cont(" (%s)", desc);
4332 pr_cont("\n");
4333 }
4334}
4335
3494fc30
TH
4336static void pr_cont_pool_info(struct worker_pool *pool)
4337{
4338 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4339 if (pool->node != NUMA_NO_NODE)
4340 pr_cont(" node=%d", pool->node);
4341 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4342}
4343
4344static void pr_cont_work(bool comma, struct work_struct *work)
4345{
4346 if (work->func == wq_barrier_func) {
4347 struct wq_barrier *barr;
4348
4349 barr = container_of(work, struct wq_barrier, work);
4350
4351 pr_cont("%s BAR(%d)", comma ? "," : "",
4352 task_pid_nr(barr->task));
4353 } else {
4354 pr_cont("%s %pf", comma ? "," : "", work->func);
4355 }
4356}
4357
4358static void show_pwq(struct pool_workqueue *pwq)
4359{
4360 struct worker_pool *pool = pwq->pool;
4361 struct work_struct *work;
4362 struct worker *worker;
4363 bool has_in_flight = false, has_pending = false;
4364 int bkt;
4365
4366 pr_info(" pwq %d:", pool->id);
4367 pr_cont_pool_info(pool);
4368
4369 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4370 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4371
4372 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4373 if (worker->current_pwq == pwq) {
4374 has_in_flight = true;
4375 break;
4376 }
4377 }
4378 if (has_in_flight) {
4379 bool comma = false;
4380
4381 pr_info(" in-flight:");
4382 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4383 if (worker->current_pwq != pwq)
4384 continue;
4385
4386 pr_cont("%s %d%s:%pf", comma ? "," : "",
4387 task_pid_nr(worker->task),
4388 worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4389 worker->current_func);
4390 list_for_each_entry(work, &worker->scheduled, entry)
4391 pr_cont_work(false, work);
4392 comma = true;
4393 }
4394 pr_cont("\n");
4395 }
4396
4397 list_for_each_entry(work, &pool->worklist, entry) {
4398 if (get_work_pwq(work) == pwq) {
4399 has_pending = true;
4400 break;
4401 }
4402 }
4403 if (has_pending) {
4404 bool comma = false;
4405
4406 pr_info(" pending:");
4407 list_for_each_entry(work, &pool->worklist, entry) {
4408 if (get_work_pwq(work) != pwq)
4409 continue;
4410
4411 pr_cont_work(comma, work);
4412 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4413 }
4414 pr_cont("\n");
4415 }
4416
4417 if (!list_empty(&pwq->delayed_works)) {
4418 bool comma = false;
4419
4420 pr_info(" delayed:");
4421 list_for_each_entry(work, &pwq->delayed_works, entry) {
4422 pr_cont_work(comma, work);
4423 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4424 }
4425 pr_cont("\n");
4426 }
4427}
4428
4429/**
4430 * show_workqueue_state - dump workqueue state
4431 *
7b776af6
RL
4432 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4433 * all busy workqueues and pools.
3494fc30
TH
4434 */
4435void show_workqueue_state(void)
4436{
4437 struct workqueue_struct *wq;
4438 struct worker_pool *pool;
4439 unsigned long flags;
4440 int pi;
4441
4442 rcu_read_lock_sched();
4443
4444 pr_info("Showing busy workqueues and worker pools:\n");
4445
4446 list_for_each_entry_rcu(wq, &workqueues, list) {
4447 struct pool_workqueue *pwq;
4448 bool idle = true;
4449
4450 for_each_pwq(pwq, wq) {
4451 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4452 idle = false;
4453 break;
4454 }
4455 }
4456 if (idle)
4457 continue;
4458
4459 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4460
4461 for_each_pwq(pwq, wq) {
4462 spin_lock_irqsave(&pwq->pool->lock, flags);
4463 if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4464 show_pwq(pwq);
4465 spin_unlock_irqrestore(&pwq->pool->lock, flags);
4466 }
4467 }
4468
4469 for_each_pool(pool, pi) {
4470 struct worker *worker;
4471 bool first = true;
4472
4473 spin_lock_irqsave(&pool->lock, flags);
4474 if (pool->nr_workers == pool->nr_idle)
4475 goto next_pool;
4476
4477 pr_info("pool %d:", pool->id);
4478 pr_cont_pool_info(pool);
82607adc
TH
4479 pr_cont(" hung=%us workers=%d",
4480 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4481 pool->nr_workers);
3494fc30
TH
4482 if (pool->manager)
4483 pr_cont(" manager: %d",
4484 task_pid_nr(pool->manager->task));
4485 list_for_each_entry(worker, &pool->idle_list, entry) {
4486 pr_cont(" %s%d", first ? "idle: " : "",
4487 task_pid_nr(worker->task));
4488 first = false;
4489 }
4490 pr_cont("\n");
4491 next_pool:
4492 spin_unlock_irqrestore(&pool->lock, flags);
4493 }
4494
4495 rcu_read_unlock_sched();
4496}
4497
db7bccf4
TH
4498/*
4499 * CPU hotplug.
4500 *
e22bee78 4501 * There are two challenges in supporting CPU hotplug. Firstly, there
112202d9 4502 * are a lot of assumptions on strong associations among work, pwq and
706026c2 4503 * pool which make migrating pending and scheduled works very
e22bee78 4504 * difficult to implement without impacting hot paths. Secondly,
94cf58bb 4505 * worker pools serve mix of short, long and very long running works making
e22bee78
TH
4506 * blocked draining impractical.
4507 *
24647570 4508 * This is solved by allowing the pools to be disassociated from the CPU
628c78e7
TH
4509 * running as an unbound one and allowing it to be reattached later if the
4510 * cpu comes back online.
db7bccf4 4511 */
1da177e4 4512
706026c2 4513static void wq_unbind_fn(struct work_struct *work)
3af24433 4514{
38db41d9 4515 int cpu = smp_processor_id();
4ce62e9e 4516 struct worker_pool *pool;
db7bccf4 4517 struct worker *worker;
3af24433 4518
f02ae73a 4519 for_each_cpu_worker_pool(pool, cpu) {
92f9c5c4 4520 mutex_lock(&pool->attach_mutex);
94cf58bb 4521 spin_lock_irq(&pool->lock);
3af24433 4522
94cf58bb 4523 /*
92f9c5c4 4524 * We've blocked all attach/detach operations. Make all workers
94cf58bb
TH
4525 * unbound and set DISASSOCIATED. Before this, all workers
4526 * except for the ones which are still executing works from
4527 * before the last CPU down must be on the cpu. After
4528 * this, they may become diasporas.
4529 */
da028469 4530 for_each_pool_worker(worker, pool)
c9e7cf27 4531 worker->flags |= WORKER_UNBOUND;
06ba38a9 4532
24647570 4533 pool->flags |= POOL_DISASSOCIATED;
f2d5a0ee 4534
94cf58bb 4535 spin_unlock_irq(&pool->lock);
92f9c5c4 4536 mutex_unlock(&pool->attach_mutex);
628c78e7 4537
eb283428
LJ
4538 /*
4539 * Call schedule() so that we cross rq->lock and thus can
4540 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4541 * This is necessary as scheduler callbacks may be invoked
4542 * from other cpus.
4543 */
4544 schedule();
06ba38a9 4545
eb283428
LJ
4546 /*
4547 * Sched callbacks are disabled now. Zap nr_running.
4548 * After this, nr_running stays zero and need_more_worker()
4549 * and keep_working() are always true as long as the
4550 * worklist is not empty. This pool now behaves as an
4551 * unbound (in terms of concurrency management) pool which
4552 * are served by workers tied to the pool.
4553 */
e19e397a 4554 atomic_set(&pool->nr_running, 0);
eb283428
LJ
4555
4556 /*
4557 * With concurrency management just turned off, a busy
4558 * worker blocking could lead to lengthy stalls. Kick off
4559 * unbound chain execution of currently pending work items.
4560 */
4561 spin_lock_irq(&pool->lock);
4562 wake_up_worker(pool);
4563 spin_unlock_irq(&pool->lock);
4564 }
3af24433 4565}
3af24433 4566
bd7c089e
TH
4567/**
4568 * rebind_workers - rebind all workers of a pool to the associated CPU
4569 * @pool: pool of interest
4570 *
a9ab775b 4571 * @pool->cpu is coming online. Rebind all workers to the CPU.
bd7c089e
TH
4572 */
4573static void rebind_workers(struct worker_pool *pool)
4574{
a9ab775b 4575 struct worker *worker;
bd7c089e 4576
92f9c5c4 4577 lockdep_assert_held(&pool->attach_mutex);
bd7c089e 4578
a9ab775b
TH
4579 /*
4580 * Restore CPU affinity of all workers. As all idle workers should
4581 * be on the run-queue of the associated CPU before any local
402dd89d 4582 * wake-ups for concurrency management happen, restore CPU affinity
a9ab775b
TH
4583 * of all workers first and then clear UNBOUND. As we're called
4584 * from CPU_ONLINE, the following shouldn't fail.
4585 */
da028469 4586 for_each_pool_worker(worker, pool)
a9ab775b
TH
4587 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4588 pool->attrs->cpumask) < 0);
bd7c089e 4589
a9ab775b 4590 spin_lock_irq(&pool->lock);
f7c17d26
WL
4591
4592 /*
4593 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4594 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
4595 * being reworked and this can go away in time.
4596 */
4597 if (!(pool->flags & POOL_DISASSOCIATED)) {
4598 spin_unlock_irq(&pool->lock);
4599 return;
4600 }
4601
3de5e884 4602 pool->flags &= ~POOL_DISASSOCIATED;
bd7c089e 4603
da028469 4604 for_each_pool_worker(worker, pool) {
a9ab775b 4605 unsigned int worker_flags = worker->flags;
bd7c089e
TH
4606
4607 /*
a9ab775b
TH
4608 * A bound idle worker should actually be on the runqueue
4609 * of the associated CPU for local wake-ups targeting it to
4610 * work. Kick all idle workers so that they migrate to the
4611 * associated CPU. Doing this in the same loop as
4612 * replacing UNBOUND with REBOUND is safe as no worker will
4613 * be bound before @pool->lock is released.
bd7c089e 4614 */
a9ab775b
TH
4615 if (worker_flags & WORKER_IDLE)
4616 wake_up_process(worker->task);
bd7c089e 4617
a9ab775b
TH
4618 /*
4619 * We want to clear UNBOUND but can't directly call
4620 * worker_clr_flags() or adjust nr_running. Atomically
4621 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4622 * @worker will clear REBOUND using worker_clr_flags() when
4623 * it initiates the next execution cycle thus restoring
4624 * concurrency management. Note that when or whether
4625 * @worker clears REBOUND doesn't affect correctness.
4626 *
c95491ed 4627 * WRITE_ONCE() is necessary because @worker->flags may be
a9ab775b
TH
4628 * tested without holding any lock in
4629 * wq_worker_waking_up(). Without it, NOT_RUNNING test may
4630 * fail incorrectly leading to premature concurrency
4631 * management operations.
4632 */
4633 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4634 worker_flags |= WORKER_REBOUND;
4635 worker_flags &= ~WORKER_UNBOUND;
c95491ed 4636 WRITE_ONCE(worker->flags, worker_flags);
bd7c089e 4637 }
a9ab775b
TH
4638
4639 spin_unlock_irq(&pool->lock);
bd7c089e
TH
4640}
4641
7dbc725e
TH
4642/**
4643 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4644 * @pool: unbound pool of interest
4645 * @cpu: the CPU which is coming up
4646 *
4647 * An unbound pool may end up with a cpumask which doesn't have any online
 4648 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
4649 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
4650 * online CPU before, cpus_allowed of all its workers should be restored.
4651 */
4652static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4653{
4654 static cpumask_t cpumask;
4655 struct worker *worker;
7dbc725e 4656
92f9c5c4 4657 lockdep_assert_held(&pool->attach_mutex);
7dbc725e
TH
4658
4659 /* is @cpu allowed for @pool? */
4660 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4661 return;
4662
7dbc725e 4663 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
7dbc725e
TH
4664
4665 /* as we're called from CPU_ONLINE, the following shouldn't fail */
da028469 4666 for_each_pool_worker(worker, pool)
d945b5e9 4667 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
7dbc725e
TH
4668}
4669
7ee681b2
TG
4670int workqueue_prepare_cpu(unsigned int cpu)
4671{
4672 struct worker_pool *pool;
4673
4674 for_each_cpu_worker_pool(pool, cpu) {
4675 if (pool->nr_workers)
4676 continue;
4677 if (!create_worker(pool))
4678 return -ENOMEM;
4679 }
4680 return 0;
4681}
4682
4683int workqueue_online_cpu(unsigned int cpu)
3af24433 4684{
4ce62e9e 4685 struct worker_pool *pool;
4c16bd32 4686 struct workqueue_struct *wq;
7dbc725e 4687 int pi;
3ce63377 4688
7ee681b2 4689 mutex_lock(&wq_pool_mutex);
7dbc725e 4690
7ee681b2
TG
4691 for_each_pool(pool, pi) {
4692 mutex_lock(&pool->attach_mutex);
94cf58bb 4693
7ee681b2
TG
4694 if (pool->cpu == cpu)
4695 rebind_workers(pool);
4696 else if (pool->cpu < 0)
4697 restore_unbound_workers_cpumask(pool, cpu);
94cf58bb 4698
7ee681b2
TG
4699 mutex_unlock(&pool->attach_mutex);
4700 }
6ba94429 4701
7ee681b2
TG
4702 /* update NUMA affinity of unbound workqueues */
4703 list_for_each_entry(wq, &workqueues, list)
4704 wq_update_unbound_numa(wq, cpu, true);
6ba94429 4705
7ee681b2
TG
4706 mutex_unlock(&wq_pool_mutex);
4707 return 0;
6ba94429
FW
4708}
4709
7ee681b2 4710int workqueue_offline_cpu(unsigned int cpu)
6ba94429 4711{
6ba94429
FW
4712 struct work_struct unbind_work;
4713 struct workqueue_struct *wq;
4714
7ee681b2
TG
4715 /* unbinding per-cpu workers should happen on the local CPU */
4716 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4717 queue_work_on(cpu, system_highpri_wq, &unbind_work);
4718
4719 /* update NUMA affinity of unbound workqueues */
4720 mutex_lock(&wq_pool_mutex);
4721 list_for_each_entry(wq, &workqueues, list)
4722 wq_update_unbound_numa(wq, cpu, false);
4723 mutex_unlock(&wq_pool_mutex);
4724
4725 /* wait for per-cpu unbinding to finish */
4726 flush_work(&unbind_work);
4727 destroy_work_on_stack(&unbind_work);
4728 return 0;
6ba94429
FW
4729}
4730
4731#ifdef CONFIG_SMP
4732
4733struct work_for_cpu {
4734 struct work_struct work;
4735 long (*fn)(void *);
4736 void *arg;
4737 long ret;
4738};
4739
4740static void work_for_cpu_fn(struct work_struct *work)
4741{
4742 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4743
4744 wfc->ret = wfc->fn(wfc->arg);
4745}
4746
4747/**
22aceb31 4748 * work_on_cpu - run a function in thread context on a particular cpu
6ba94429
FW
4749 * @cpu: the cpu to run on
4750 * @fn: the function to run
 4751 * @arg: the function argument
4752 *
4753 * It is up to the caller to ensure that the cpu doesn't go offline.
4754 * The caller must not hold any locks which would prevent @fn from completing.
4755 *
4756 * Return: The value @fn returns.
4757 */
4758long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4759{
4760 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4761
4762 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4763 schedule_work_on(cpu, &wfc.work);
4764 flush_work(&wfc.work);
4765 destroy_work_on_stack(&wfc.work);
4766 return wfc.ret;
4767}
4768EXPORT_SYMBOL_GPL(work_on_cpu);
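/*
 * Editorial usage sketch, not part of the original file: run a short helper
 * on a specific CPU in process context and collect its result. example_fn()
 * and the CPU number are made up for illustration; the caller is assumed to
 * keep that CPU online for the duration (see work_on_cpu_safe() below for
 * the variant that handles hotplug itself).
 */
#if 0	/* example only */
static long example_fn(void *arg)
{
	int *ran_on = arg;

	/* executes in a kworker bound to the CPU passed to work_on_cpu() */
	*ran_on = raw_smp_processor_id();
	return 0;
}

static long example_run_on_cpu(int cpu, int *ran_on)
{
	return work_on_cpu(cpu, example_fn, ran_on);
}
#endif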
0e8d6a93
TG
4769
4770/**
4771 * work_on_cpu_safe - run a function in thread context on a particular cpu
4772 * @cpu: the cpu to run on
4773 * @fn: the function to run
4774 * @arg: the function argument
4775 *
4776 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
4777 * any locks which would prevent @fn from completing.
4778 *
4779 * Return: The value @fn returns.
4780 */
4781long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
4782{
4783 long ret = -ENODEV;
4784
4785 get_online_cpus();
4786 if (cpu_online(cpu))
4787 ret = work_on_cpu(cpu, fn, arg);
4788 put_online_cpus();
4789 return ret;
4790}
4791EXPORT_SYMBOL_GPL(work_on_cpu_safe);
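/*
 * Editorial sketch: the same call as above, but work_on_cpu_safe() pins CPU
 * hotplug around it, so the caller no longer has to guarantee that @cpu
 * stays online. example_fn() is the hypothetical helper from the previous
 * sketch.
 */
#if 0	/* example only */
static long example_run_on_cpu_safe(int cpu, int *ran_on)
{
	return work_on_cpu_safe(cpu, example_fn, ran_on);
}
#endif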
6ba94429
FW
4792#endif /* CONFIG_SMP */
4793
4794#ifdef CONFIG_FREEZER
4795
4796/**
4797 * freeze_workqueues_begin - begin freezing workqueues
4798 *
4799 * Start freezing workqueues. After this function returns, all freezable
4800 * workqueues will queue new works to their delayed_works list instead of
4801 * pool->worklist.
4802 *
4803 * CONTEXT:
4804 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4805 */
4806void freeze_workqueues_begin(void)
4807{
4808 struct workqueue_struct *wq;
4809 struct pool_workqueue *pwq;
4810
4811 mutex_lock(&wq_pool_mutex);
4812
4813 WARN_ON_ONCE(workqueue_freezing);
4814 workqueue_freezing = true;
4815
4816 list_for_each_entry(wq, &workqueues, list) {
4817 mutex_lock(&wq->mutex);
4818 for_each_pwq(pwq, wq)
4819 pwq_adjust_max_active(pwq);
4820 mutex_unlock(&wq->mutex);
4821 }
4822
4823 mutex_unlock(&wq_pool_mutex);
4824}
4825
4826/**
4827 * freeze_workqueues_busy - are freezable workqueues still busy?
4828 *
4829 * Check whether freezing is complete. This function must be called
4830 * between freeze_workqueues_begin() and thaw_workqueues().
4831 *
4832 * CONTEXT:
4833 * Grabs and releases wq_pool_mutex.
4834 *
4835 * Return:
4836 * %true if some freezable workqueues are still busy. %false if freezing
4837 * is complete.
4838 */
4839bool freeze_workqueues_busy(void)
4840{
4841 bool busy = false;
4842 struct workqueue_struct *wq;
4843 struct pool_workqueue *pwq;
4844
4845 mutex_lock(&wq_pool_mutex);
4846
4847 WARN_ON_ONCE(!workqueue_freezing);
4848
4849 list_for_each_entry(wq, &workqueues, list) {
4850 if (!(wq->flags & WQ_FREEZABLE))
4851 continue;
4852 /*
4853 * nr_active is monotonically decreasing. It's safe
4854 * to peek without lock.
4855 */
4856 rcu_read_lock_sched();
4857 for_each_pwq(pwq, wq) {
4858 WARN_ON_ONCE(pwq->nr_active < 0);
4859 if (pwq->nr_active) {
4860 busy = true;
4861 rcu_read_unlock_sched();
4862 goto out_unlock;
4863 }
4864 }
4865 rcu_read_unlock_sched();
4866 }
4867out_unlock:
4868 mutex_unlock(&wq_pool_mutex);
4869 return busy;
4870}
4871
4872/**
4873 * thaw_workqueues - thaw workqueues
4874 *
4875 * Thaw workqueues. Normal queueing is restored and all collected
4876 * frozen works are transferred to their respective pool worklists.
4877 *
4878 * CONTEXT:
4879 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4880 */
4881void thaw_workqueues(void)
4882{
4883 struct workqueue_struct *wq;
4884 struct pool_workqueue *pwq;
4885
4886 mutex_lock(&wq_pool_mutex);
4887
4888 if (!workqueue_freezing)
4889 goto out_unlock;
4890
4891 workqueue_freezing = false;
4892
4893 /* restore max_active and repopulate worklist */
4894 list_for_each_entry(wq, &workqueues, list) {
4895 mutex_lock(&wq->mutex);
4896 for_each_pwq(pwq, wq)
4897 pwq_adjust_max_active(pwq);
4898 mutex_unlock(&wq->mutex);
4899 }
4900
4901out_unlock:
4902 mutex_unlock(&wq_pool_mutex);
4903}
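/*
 * Editorial sketch, not part of the original file: the expected calling
 * sequence for the three freezer entry points above, as a suspend path
 * would use them. The 20 second timeout and msleep() polling (which assumes
 * <linux/delay.h>) are simplifications of what kernel/power/process.c
 * actually does.
 */
#if 0	/* example only */
static int example_freeze_wqs(void)
{
	unsigned long end = jiffies + 20 * HZ;

	freeze_workqueues_begin();

	/* wait for in-flight freezable work items to drain */
	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, end)) {
			thaw_workqueues();
			return -EBUSY;
		}
		msleep(10);
	}
	return 0;	/* the resume path later calls thaw_workqueues() */
}
#endif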
4904#endif /* CONFIG_FREEZER */
4905
042f7df1
LJ
4906static int workqueue_apply_unbound_cpumask(void)
4907{
4908 LIST_HEAD(ctxs);
4909 int ret = 0;
4910 struct workqueue_struct *wq;
4911 struct apply_wqattrs_ctx *ctx, *n;
4912
4913 lockdep_assert_held(&wq_pool_mutex);
4914
4915 list_for_each_entry(wq, &workqueues, list) {
4916 if (!(wq->flags & WQ_UNBOUND))
4917 continue;
4918 /* creating multiple pwqs breaks ordering guarantee */
4919 if (wq->flags & __WQ_ORDERED)
4920 continue;
4921
4922 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
4923 if (!ctx) {
4924 ret = -ENOMEM;
4925 break;
4926 }
4927
4928 list_add_tail(&ctx->list, &ctxs);
4929 }
4930
4931 list_for_each_entry_safe(ctx, n, &ctxs, list) {
4932 if (!ret)
4933 apply_wqattrs_commit(ctx);
4934 apply_wqattrs_cleanup(ctx);
4935 }
4936
4937 return ret;
4938}
4939
4940/**
4941 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
4942 * @cpumask: the cpumask to set
4943 *
4944 * The low-level workqueues cpumask is a global cpumask that limits
 4945 * the affinity of all unbound workqueues. This function checks @cpumask,
 4946 * applies it to all unbound workqueues and updates all of their pwqs.
4947 *
 4948 * Return: 0 - Success
4949 * -EINVAL - Invalid @cpumask
4950 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
4951 */
4952int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4953{
4954 int ret = -EINVAL;
4955 cpumask_var_t saved_cpumask;
4956
4957 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4958 return -ENOMEM;
4959
042f7df1
LJ
4960 cpumask_and(cpumask, cpumask, cpu_possible_mask);
4961 if (!cpumask_empty(cpumask)) {
a0111cf6 4962 apply_wqattrs_lock();
042f7df1
LJ
4963
4964 /* save the old wq_unbound_cpumask. */
4965 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
4966
4967 /* update wq_unbound_cpumask at first and apply it to wqs. */
4968 cpumask_copy(wq_unbound_cpumask, cpumask);
4969 ret = workqueue_apply_unbound_cpumask();
4970
4971 /* restore the wq_unbound_cpumask when failed. */
4972 if (ret < 0)
4973 cpumask_copy(wq_unbound_cpumask, saved_cpumask);
4974
a0111cf6 4975 apply_wqattrs_unlock();
042f7df1 4976 }
042f7df1
LJ
4977
4978 free_cpumask_var(saved_cpumask);
4979 return ret;
4980}
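/*
 * Editorial sketch, not part of the original file: restricting all unbound
 * workqueues to CPUs 0 and 1 by calling the function above directly. The
 * helper name is hypothetical; in-tree users normally reach this through the
 * sysfs cpumask attribute defined further below.
 */
#if 0	/* example only */
static int example_restrict_unbound(void)
{
	cpumask_var_t mask;
	int ret;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	ret = workqueue_set_unbound_cpumask(mask);
	free_cpumask_var(mask);
	return ret;
}
#endif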
4981
6ba94429
FW
4982#ifdef CONFIG_SYSFS
4983/*
 4984 * Workqueues with the WQ_SYSFS flag set are visible to userland via
4985 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
4986 * following attributes.
4987 *
4988 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
4989 * max_active RW int : maximum number of in-flight work items
4990 *
4991 * Unbound workqueues have the following extra attributes.
4992 *
4993 * id RO int : the associated pool ID
4994 * nice RW int : nice value of the workers
4995 * cpumask RW mask : bitmask of allowed CPUs for the workers
4996 */
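/*
 * Editorial sketch, not part of the original file: a user-space helper that
 * raises max_active of a WQ_SYSFS workqueue through the interface described
 * above. The workqueue name "writeback" and the value 16 are assumptions
 * made purely for illustration.
 */
#if 0	/* user-space example, not kernel code */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/workqueue/devices/writeback/max_active", "w");

	if (!f)
		return 1;
	fprintf(f, "16\n");	/* new maximum number of in-flight work items */
	return fclose(f) ? 1 : 0;
}
#endif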
4997struct wq_device {
4998 struct workqueue_struct *wq;
4999 struct device dev;
5000};
5001
5002static struct workqueue_struct *dev_to_wq(struct device *dev)
5003{
5004 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5005
5006 return wq_dev->wq;
5007}
5008
5009static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5010 char *buf)
5011{
5012 struct workqueue_struct *wq = dev_to_wq(dev);
5013
5014 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5015}
5016static DEVICE_ATTR_RO(per_cpu);
5017
5018static ssize_t max_active_show(struct device *dev,
5019 struct device_attribute *attr, char *buf)
5020{
5021 struct workqueue_struct *wq = dev_to_wq(dev);
5022
5023 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5024}
5025
5026static ssize_t max_active_store(struct device *dev,
5027 struct device_attribute *attr, const char *buf,
5028 size_t count)
5029{
5030 struct workqueue_struct *wq = dev_to_wq(dev);
5031 int val;
5032
5033 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5034 return -EINVAL;
5035
5036 workqueue_set_max_active(wq, val);
5037 return count;
5038}
5039static DEVICE_ATTR_RW(max_active);
5040
5041static struct attribute *wq_sysfs_attrs[] = {
5042 &dev_attr_per_cpu.attr,
5043 &dev_attr_max_active.attr,
5044 NULL,
5045};
5046ATTRIBUTE_GROUPS(wq_sysfs);
5047
5048static ssize_t wq_pool_ids_show(struct device *dev,
5049 struct device_attribute *attr, char *buf)
5050{
5051 struct workqueue_struct *wq = dev_to_wq(dev);
5052 const char *delim = "";
5053 int node, written = 0;
5054
5055 rcu_read_lock_sched();
5056 for_each_node(node) {
5057 written += scnprintf(buf + written, PAGE_SIZE - written,
5058 "%s%d:%d", delim, node,
5059 unbound_pwq_by_node(wq, node)->pool->id);
5060 delim = " ";
5061 }
5062 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5063 rcu_read_unlock_sched();
5064
5065 return written;
5066}
5067
5068static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5069 char *buf)
5070{
5071 struct workqueue_struct *wq = dev_to_wq(dev);
5072 int written;
5073
5074 mutex_lock(&wq->mutex);
5075 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5076 mutex_unlock(&wq->mutex);
5077
5078 return written;
5079}
5080
5081/* prepare workqueue_attrs for sysfs store operations */
5082static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5083{
5084 struct workqueue_attrs *attrs;
5085
899a94fe
LJ
5086 lockdep_assert_held(&wq_pool_mutex);
5087
6ba94429
FW
5088 attrs = alloc_workqueue_attrs(GFP_KERNEL);
5089 if (!attrs)
5090 return NULL;
5091
6ba94429 5092 copy_workqueue_attrs(attrs, wq->unbound_attrs);
6ba94429
FW
5093 return attrs;
5094}
5095
5096static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5097 const char *buf, size_t count)
5098{
5099 struct workqueue_struct *wq = dev_to_wq(dev);
5100 struct workqueue_attrs *attrs;
d4d3e257
LJ
5101 int ret = -ENOMEM;
5102
5103 apply_wqattrs_lock();
6ba94429
FW
5104
5105 attrs = wq_sysfs_prep_attrs(wq);
5106 if (!attrs)
d4d3e257 5107 goto out_unlock;
6ba94429
FW
5108
5109 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5110 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
d4d3e257 5111 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429
FW
5112 else
5113 ret = -EINVAL;
5114
d4d3e257
LJ
5115out_unlock:
5116 apply_wqattrs_unlock();
6ba94429
FW
5117 free_workqueue_attrs(attrs);
5118 return ret ?: count;
5119}
5120
5121static ssize_t wq_cpumask_show(struct device *dev,
5122 struct device_attribute *attr, char *buf)
5123{
5124 struct workqueue_struct *wq = dev_to_wq(dev);
5125 int written;
5126
5127 mutex_lock(&wq->mutex);
5128 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5129 cpumask_pr_args(wq->unbound_attrs->cpumask));
5130 mutex_unlock(&wq->mutex);
5131 return written;
5132}
5133
5134static ssize_t wq_cpumask_store(struct device *dev,
5135 struct device_attribute *attr,
5136 const char *buf, size_t count)
5137{
5138 struct workqueue_struct *wq = dev_to_wq(dev);
5139 struct workqueue_attrs *attrs;
d4d3e257
LJ
5140 int ret = -ENOMEM;
5141
5142 apply_wqattrs_lock();
6ba94429
FW
5143
5144 attrs = wq_sysfs_prep_attrs(wq);
5145 if (!attrs)
d4d3e257 5146 goto out_unlock;
6ba94429
FW
5147
5148 ret = cpumask_parse(buf, attrs->cpumask);
5149 if (!ret)
d4d3e257 5150 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429 5151
d4d3e257
LJ
5152out_unlock:
5153 apply_wqattrs_unlock();
6ba94429
FW
5154 free_workqueue_attrs(attrs);
5155 return ret ?: count;
5156}
5157
5158static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5159 char *buf)
5160{
5161 struct workqueue_struct *wq = dev_to_wq(dev);
5162 int written;
7dbc725e 5163
6ba94429
FW
5164 mutex_lock(&wq->mutex);
5165 written = scnprintf(buf, PAGE_SIZE, "%d\n",
5166 !wq->unbound_attrs->no_numa);
5167 mutex_unlock(&wq->mutex);
4c16bd32 5168
6ba94429 5169 return written;
65758202
TH
5170}
5171
6ba94429
FW
5172static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5173 const char *buf, size_t count)
65758202 5174{
6ba94429
FW
5175 struct workqueue_struct *wq = dev_to_wq(dev);
5176 struct workqueue_attrs *attrs;
d4d3e257
LJ
5177 int v, ret = -ENOMEM;
5178
5179 apply_wqattrs_lock();
4c16bd32 5180
6ba94429
FW
5181 attrs = wq_sysfs_prep_attrs(wq);
5182 if (!attrs)
d4d3e257 5183 goto out_unlock;
4c16bd32 5184
6ba94429
FW
5185 ret = -EINVAL;
5186 if (sscanf(buf, "%d", &v) == 1) {
5187 attrs->no_numa = !v;
d4d3e257 5188 ret = apply_workqueue_attrs_locked(wq, attrs);
65758202 5189 }
6ba94429 5190
d4d3e257
LJ
5191out_unlock:
5192 apply_wqattrs_unlock();
6ba94429
FW
5193 free_workqueue_attrs(attrs);
5194 return ret ?: count;
65758202
TH
5195}
5196
6ba94429
FW
5197static struct device_attribute wq_sysfs_unbound_attrs[] = {
5198 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5199 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5200 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5201 __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5202 __ATTR_NULL,
5203};
8ccad40d 5204
6ba94429
FW
5205static struct bus_type wq_subsys = {
5206 .name = "workqueue",
5207 .dev_groups = wq_sysfs_groups,
2d3854a3
RR
5208};
5209
b05a7928
FW
5210static ssize_t wq_unbound_cpumask_show(struct device *dev,
5211 struct device_attribute *attr, char *buf)
5212{
5213 int written;
5214
042f7df1 5215 mutex_lock(&wq_pool_mutex);
b05a7928
FW
5216 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5217 cpumask_pr_args(wq_unbound_cpumask));
042f7df1 5218 mutex_unlock(&wq_pool_mutex);
b05a7928
FW
5219
5220 return written;
5221}
5222
042f7df1
LJ
5223static ssize_t wq_unbound_cpumask_store(struct device *dev,
5224 struct device_attribute *attr, const char *buf, size_t count)
5225{
5226 cpumask_var_t cpumask;
5227 int ret;
5228
5229 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5230 return -ENOMEM;
5231
5232 ret = cpumask_parse(buf, cpumask);
5233 if (!ret)
5234 ret = workqueue_set_unbound_cpumask(cpumask);
5235
5236 free_cpumask_var(cpumask);
5237 return ret ? ret : count;
5238}
5239
b05a7928 5240static struct device_attribute wq_sysfs_cpumask_attr =
042f7df1
LJ
5241 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5242 wq_unbound_cpumask_store);
b05a7928 5243
6ba94429 5244static int __init wq_sysfs_init(void)
2d3854a3 5245{
b05a7928
FW
5246 int err;
5247
5248 err = subsys_virtual_register(&wq_subsys, NULL);
5249 if (err)
5250 return err;
5251
5252 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
2d3854a3 5253}
6ba94429 5254core_initcall(wq_sysfs_init);
2d3854a3 5255
6ba94429 5256static void wq_device_release(struct device *dev)
2d3854a3 5257{
6ba94429 5258 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6b44003e 5259
6ba94429 5260 kfree(wq_dev);
2d3854a3 5261}
a0a1a5fd
TH
5262
5263/**
6ba94429
FW
5264 * workqueue_sysfs_register - make a workqueue visible in sysfs
5265 * @wq: the workqueue to register
a0a1a5fd 5266 *
6ba94429
FW
5267 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5268 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5269 * which is the preferred method.
a0a1a5fd 5270 *
6ba94429
FW
 5271 * A workqueue user should use this function directly iff it wants to apply
5272 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5273 * apply_workqueue_attrs() may race against userland updating the
5274 * attributes.
5275 *
5276 * Return: 0 on success, -errno on failure.
a0a1a5fd 5277 */
6ba94429 5278int workqueue_sysfs_register(struct workqueue_struct *wq)
a0a1a5fd 5279{
6ba94429
FW
5280 struct wq_device *wq_dev;
5281 int ret;
a0a1a5fd 5282
6ba94429 5283 /*
402dd89d 5284 * Adjusting max_active or creating new pwqs by applying
6ba94429
FW
5285 * attributes breaks ordering guarantee. Disallow exposing ordered
5286 * workqueues.
5287 */
0a94efb5 5288 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6ba94429 5289 return -EINVAL;
a0a1a5fd 5290
6ba94429
FW
5291 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5292 if (!wq_dev)
5293 return -ENOMEM;
5bcab335 5294
6ba94429
FW
5295 wq_dev->wq = wq;
5296 wq_dev->dev.bus = &wq_subsys;
6ba94429 5297 wq_dev->dev.release = wq_device_release;
23217b44 5298 dev_set_name(&wq_dev->dev, "%s", wq->name);
a0a1a5fd 5299
6ba94429
FW
5300 /*
5301 * unbound_attrs are created separately. Suppress uevent until
5302 * everything is ready.
5303 */
5304 dev_set_uevent_suppress(&wq_dev->dev, true);
a0a1a5fd 5305
6ba94429
FW
5306 ret = device_register(&wq_dev->dev);
5307 if (ret) {
5308 kfree(wq_dev);
5309 wq->wq_dev = NULL;
5310 return ret;
5311 }
a0a1a5fd 5312
6ba94429
FW
5313 if (wq->flags & WQ_UNBOUND) {
5314 struct device_attribute *attr;
a0a1a5fd 5315
6ba94429
FW
5316 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5317 ret = device_create_file(&wq_dev->dev, attr);
5318 if (ret) {
5319 device_unregister(&wq_dev->dev);
5320 wq->wq_dev = NULL;
5321 return ret;
a0a1a5fd
TH
5322 }
5323 }
5324 }
6ba94429
FW
5325
5326 dev_set_uevent_suppress(&wq_dev->dev, false);
5327 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5328 return 0;
a0a1a5fd
TH
5329}
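/*
 * Editorial sketch, not part of the original file: the flow the comment
 * above recommends - create the workqueue without WQ_SYSFS, apply custom
 * attributes, and only then expose it in sysfs. "example_wq" and the nice
 * value are made up for illustration.
 */
#if 0	/* example only */
static struct workqueue_struct *example_create_sysfs_wq(void)
{
	struct workqueue_struct *wq;
	struct workqueue_attrs *attrs;

	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!wq)
		return NULL;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (attrs) {
		attrs->nice = -5;
		apply_workqueue_attrs(wq, attrs);	/* tune before exposing */
		free_workqueue_attrs(attrs);
	}

	if (workqueue_sysfs_register(wq))
		pr_warn("example_wq: sysfs registration failed\n");
	return wq;
}
#endif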
5330
5331/**
6ba94429
FW
5332 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5333 * @wq: the workqueue to unregister
a0a1a5fd 5334 *
6ba94429 5335 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
a0a1a5fd 5336 */
6ba94429 5337static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
a0a1a5fd 5338{
6ba94429 5339 struct wq_device *wq_dev = wq->wq_dev;
8b03ae3c 5340
6ba94429
FW
5341 if (!wq->wq_dev)
5342 return;
a0a1a5fd 5343
6ba94429
FW
5344 wq->wq_dev = NULL;
5345 device_unregister(&wq_dev->dev);
a0a1a5fd 5346}
6ba94429
FW
5347#else /* CONFIG_SYSFS */
5348static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
5349#endif /* CONFIG_SYSFS */
a0a1a5fd 5350
82607adc
TH
5351/*
5352 * Workqueue watchdog.
5353 *
 5354 * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
 5355 * illegal flush dependency, or a concurrency managed work item which stays RUNNING
5356 * indefinitely. Workqueue stalls can be very difficult to debug as the
5357 * usual warning mechanisms don't trigger and internal workqueue state is
5358 * largely opaque.
5359 *
5360 * Workqueue watchdog monitors all worker pools periodically and dumps
5361 * state if some pools failed to make forward progress for a while where
5362 * forward progress is defined as the first item on ->worklist changing.
5363 *
5364 * This mechanism is controlled through the kernel parameter
5365 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5366 * corresponding sysfs parameter file.
5367 */
5368#ifdef CONFIG_WQ_WATCHDOG
5369
82607adc 5370static unsigned long wq_watchdog_thresh = 30;
5cd79d6a 5371static struct timer_list wq_watchdog_timer;
82607adc
TH
5372
5373static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5374static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5375
5376static void wq_watchdog_reset_touched(void)
5377{
5378 int cpu;
5379
5380 wq_watchdog_touched = jiffies;
5381 for_each_possible_cpu(cpu)
5382 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5383}
5384
5cd79d6a 5385static void wq_watchdog_timer_fn(struct timer_list *unused)
82607adc
TH
5386{
5387 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5388 bool lockup_detected = false;
5389 struct worker_pool *pool;
5390 int pi;
5391
5392 if (!thresh)
5393 return;
5394
5395 rcu_read_lock();
5396
5397 for_each_pool(pool, pi) {
5398 unsigned long pool_ts, touched, ts;
5399
5400 if (list_empty(&pool->worklist))
5401 continue;
5402
5403 /* get the latest of pool and touched timestamps */
5404 pool_ts = READ_ONCE(pool->watchdog_ts);
5405 touched = READ_ONCE(wq_watchdog_touched);
5406
5407 if (time_after(pool_ts, touched))
5408 ts = pool_ts;
5409 else
5410 ts = touched;
5411
5412 if (pool->cpu >= 0) {
5413 unsigned long cpu_touched =
5414 READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5415 pool->cpu));
5416 if (time_after(cpu_touched, ts))
5417 ts = cpu_touched;
5418 }
5419
5420 /* did we stall? */
5421 if (time_after(jiffies, ts + thresh)) {
5422 lockup_detected = true;
5423 pr_emerg("BUG: workqueue lockup - pool");
5424 pr_cont_pool_info(pool);
5425 pr_cont(" stuck for %us!\n",
5426 jiffies_to_msecs(jiffies - pool_ts) / 1000);
5427 }
5428 }
5429
5430 rcu_read_unlock();
5431
5432 if (lockup_detected)
5433 show_workqueue_state();
5434
5435 wq_watchdog_reset_touched();
5436 mod_timer(&wq_watchdog_timer, jiffies + thresh);
5437}
5438
5439void wq_watchdog_touch(int cpu)
5440{
5441 if (cpu >= 0)
5442 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5443 else
5444 wq_watchdog_touched = jiffies;
5445}
5446
5447static void wq_watchdog_set_thresh(unsigned long thresh)
5448{
5449 wq_watchdog_thresh = 0;
5450 del_timer_sync(&wq_watchdog_timer);
5451
5452 if (thresh) {
5453 wq_watchdog_thresh = thresh;
5454 wq_watchdog_reset_touched();
5455 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5456 }
5457}
5458
5459static int wq_watchdog_param_set_thresh(const char *val,
5460 const struct kernel_param *kp)
5461{
5462 unsigned long thresh;
5463 int ret;
5464
5465 ret = kstrtoul(val, 0, &thresh);
5466 if (ret)
5467 return ret;
5468
5469 if (system_wq)
5470 wq_watchdog_set_thresh(thresh);
5471 else
5472 wq_watchdog_thresh = thresh;
5473
5474 return 0;
5475}
5476
5477static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5478 .set = wq_watchdog_param_set_thresh,
5479 .get = param_get_ulong,
5480};
5481
5482module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5483 0644);
5484
5485static void wq_watchdog_init(void)
5486{
5cd79d6a 5487 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
82607adc
TH
5488 wq_watchdog_set_thresh(wq_watchdog_thresh);
5489}
5490
5491#else /* CONFIG_WQ_WATCHDOG */
5492
5493static inline void wq_watchdog_init(void) { }
5494
5495#endif /* CONFIG_WQ_WATCHDOG */
5496
bce90380
TH
5497static void __init wq_numa_init(void)
5498{
5499 cpumask_var_t *tbl;
5500 int node, cpu;
5501
bce90380
TH
5502 if (num_possible_nodes() <= 1)
5503 return;
5504
d55262c4
TH
5505 if (wq_disable_numa) {
5506 pr_info("workqueue: NUMA affinity support disabled\n");
5507 return;
5508 }
5509
4c16bd32
TH
5510 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5511 BUG_ON(!wq_update_unbound_numa_attrs_buf);
5512
bce90380
TH
5513 /*
5514 * We want masks of possible CPUs of each node which isn't readily
5515 * available. Build one from cpu_to_node() which should have been
5516 * fully initialized by now.
5517 */
ddcb57e2 5518 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
bce90380
TH
5519 BUG_ON(!tbl);
5520
5521 for_each_node(node)
5a6024f1 5522 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
1be0c25d 5523 node_online(node) ? node : NUMA_NO_NODE));
bce90380
TH
5524
5525 for_each_possible_cpu(cpu) {
5526 node = cpu_to_node(cpu);
5527 if (WARN_ON(node == NUMA_NO_NODE)) {
5528 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5529 /* happens iff arch is bonkers, let's just proceed */
5530 return;
5531 }
5532 cpumask_set_cpu(cpu, tbl[node]);
5533 }
5534
5535 wq_numa_possible_cpumask = tbl;
5536 wq_numa_enabled = true;
5537}
5538
3347fa09
TH
5539/**
5540 * workqueue_init_early - early init for workqueue subsystem
5541 *
5542 * This is the first half of two-staged workqueue subsystem initialization
5543 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5544 * idr are up. It sets up all the data structures and system workqueues
5545 * and allows early boot code to create workqueues and queue/cancel work
5546 * items. Actual work item execution starts only after kthreads can be
5547 * created and scheduled right before early initcalls.
5548 */
5549int __init workqueue_init_early(void)
1da177e4 5550{
7a4e344c
TH
5551 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5552 int i, cpu;
c34056a3 5553
e904e6c2
TH
5554 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5555
b05a7928
FW
5556 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5557 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
5558
e904e6c2
TH
5559 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5560
706026c2 5561 /* initialize CPU pools */
29c91e99 5562 for_each_possible_cpu(cpu) {
4ce62e9e 5563 struct worker_pool *pool;
8b03ae3c 5564
7a4e344c 5565 i = 0;
f02ae73a 5566 for_each_cpu_worker_pool(pool, cpu) {
7a4e344c 5567 BUG_ON(init_worker_pool(pool));
ec22ca5e 5568 pool->cpu = cpu;
29c91e99 5569 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
7a4e344c 5570 pool->attrs->nice = std_nice[i++];
f3f90ad4 5571 pool->node = cpu_to_node(cpu);
7a4e344c 5572
9daf9e67 5573 /* alloc pool ID */
68e13a67 5574 mutex_lock(&wq_pool_mutex);
9daf9e67 5575 BUG_ON(worker_pool_assign_id(pool));
68e13a67 5576 mutex_unlock(&wq_pool_mutex);
4ce62e9e 5577 }
8b03ae3c
TH
5578 }
5579
8a2b7538 5580 /* create default unbound and ordered wq attrs */
29c91e99
TH
5581 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5582 struct workqueue_attrs *attrs;
5583
5584 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
29c91e99 5585 attrs->nice = std_nice[i];
29c91e99 5586 unbound_std_wq_attrs[i] = attrs;
8a2b7538
TH
5587
5588 /*
5589 * An ordered wq should have only one pwq as ordering is
5590 * guaranteed by max_active which is enforced by pwqs.
5591 * Turn off NUMA so that dfl_pwq is used for all nodes.
5592 */
5593 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5594 attrs->nice = std_nice[i];
5595 attrs->no_numa = true;
5596 ordered_wq_attrs[i] = attrs;
29c91e99
TH
5597 }
5598
d320c038 5599 system_wq = alloc_workqueue("events", 0, 0);
1aabe902 5600 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
d320c038 5601 system_long_wq = alloc_workqueue("events_long", 0, 0);
f3421797
TH
5602 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5603 WQ_UNBOUND_MAX_ACTIVE);
24d51add
TH
5604 system_freezable_wq = alloc_workqueue("events_freezable",
5605 WQ_FREEZABLE, 0);
0668106c
VK
5606 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5607 WQ_POWER_EFFICIENT, 0);
5608 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5609 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5610 0);
1aabe902 5611 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
0668106c
VK
5612 !system_unbound_wq || !system_freezable_wq ||
5613 !system_power_efficient_wq ||
5614 !system_freezable_power_efficient_wq);
82607adc 5615
3347fa09
TH
5616 return 0;
5617}
5618
5619/**
5620 * workqueue_init - bring workqueue subsystem fully online
5621 *
5622 * This is the latter half of two-staged workqueue subsystem initialization
5623 * and invoked as soon as kthreads can be created and scheduled.
5624 * Workqueues have been created and work items queued on them, but there
5625 * are no kworkers executing the work items yet. Populate the worker pools
5626 * with the initial workers and enable future kworker creations.
5627 */
5628int __init workqueue_init(void)
5629{
2186d9f9 5630 struct workqueue_struct *wq;
3347fa09
TH
5631 struct worker_pool *pool;
5632 int cpu, bkt;
5633
2186d9f9
TH
5634 /*
5635 * It'd be simpler to initialize NUMA in workqueue_init_early() but
5636 * CPU to node mapping may not be available that early on some
5637 * archs such as power and arm64. As per-cpu pools created
5638 * previously could be missing node hint and unbound pools NUMA
5639 * affinity, fix them up.
5640 */
5641 wq_numa_init();
5642
5643 mutex_lock(&wq_pool_mutex);
5644
5645 for_each_possible_cpu(cpu) {
5646 for_each_cpu_worker_pool(pool, cpu) {
5647 pool->node = cpu_to_node(cpu);
5648 }
5649 }
5650
5651 list_for_each_entry(wq, &workqueues, list)
5652 wq_update_unbound_numa(wq, smp_processor_id(), true);
5653
5654 mutex_unlock(&wq_pool_mutex);
5655
3347fa09
TH
5656 /* create the initial workers */
5657 for_each_online_cpu(cpu) {
5658 for_each_cpu_worker_pool(pool, cpu) {
5659 pool->flags &= ~POOL_DISASSOCIATED;
5660 BUG_ON(!create_worker(pool));
5661 }
5662 }
5663
5664 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5665 BUG_ON(!create_worker(pool));
5666
5667 wq_online = true;
82607adc
TH
5668 wq_watchdog_init();
5669
6ee0578b 5670 return 0;
1da177e4 5671}