// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism. Work items are
 * executed in process context. The worker pool is shared and
 * automatically managed. There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>

#include "workqueue_internal.h"

enum {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU. The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 */
	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */

	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,

	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus. Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 24,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected. Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected. Access with pool->lock held.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes. RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

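/*
 * Illustrative sketch (not from the original file): a field annotated "L:"
 * above may only be touched with the pool lock held, e.g.
 *
 *	raw_spin_lock_irq(&pool->lock);
 *	pending = !list_empty(&pool->worklist);	<- "L: list of pending works"
 *	nr = pool->nr_workers;			<- "L: total number of workers"
 *	raw_spin_unlock_irq(&pool->lock);
 *
 * while an "I:" field such as pool->cpu is read-only after initialization
 * and can be read without any lock.
 */
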
/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the associated CPU
	 * w/ preemption disabled, and decremented or reset in the same context
	 * but w/ pool->lock held. The readers grab pool->lock and are
	 * guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */
	struct list_head	dying_workers;	/* A: workers about to die */
	struct completion	*detach_completion; /* all workers detached */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into scope */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, a new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate
	 * in pwq->nr_active and all work items in pwq->inactive_works are
	 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
	 * work items are in pwq->inactive_works. Some of them are ready to
	 * run in pool->worklist or worker->scheduled. Those work items are
	 * only struct wq_barrier which is used for flush_work() and should
	 * not participate in pwq->nr_active. For a non-barrier work item, it
	 * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

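/*
 * Simplified, illustrative sketch (assuming pool->lock is already held) of
 * the queueing decision described in the nr_active comment above; the
 * authoritative logic lives in __queue_work():
 *
 *	if (pwq->nr_active < pwq->max_active) {
 *		pwq->nr_active++;
 *		list_add_tail(&work->entry, &pwq->pool->worklist);
 *	} else {
 *		work_flags |= WORK_STRUCT_INACTIVE;
 *		list_add_tail(&work->entry, &pwq->inactive_works);
 *	}
 */
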
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue. It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */
	int			saved_max_active; /* WQ: saved pwq max_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};

static struct kmem_cache *pwq_cache;

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_pod_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* to further constrain wq_unbound_cpumask by the cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed. The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;

struct workqueue_struct *system_wq __ro_after_init;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked. If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

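/*
 * Illustrative usage (not from the original file): walking a workqueue's
 * pwqs from an RCU read-side critical section, which satisfies
 * for_each_pwq()'s locking requirement just as holding wq->mutex would:
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		nr_pwqs++;
 *	rcu_read_unlock();
 */
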
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq. Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data. These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work. Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled. pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled. While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)pwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id)
{
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
		      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards. This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work. E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                 LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1. If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in the hope that CPU#1 will eventually
	 * finish the queued @work. Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_pool_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

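/*
 * Worked example of the encoding above (illustrative, with a made-up pwq
 * address): while queued, a work item whose pwq lives at
 * 0xffff888010100000 carries
 *
 *	data == 0xffff888010100000 | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ
 *
 * and the pointer is recovered by masking with WORK_STRUCT_WQ_DATA_MASK.
 * Once execution starts, the same field instead holds
 * (pool_id << WORK_OFFQ_POOL_SHIFT) plus OFFQ flags, so only the pool,
 * not the pwq, can be derived from an off-queue work item.
 */
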
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock. As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect. If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with. %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

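/*
 * Illustrative caller pattern (a sketch, mirroring what e.g.
 * try_to_grab_pending() does): RCU keeps the pool's memory valid while
 * pool->lock stabilizes its contents.
 *
 *	rcu_read_lock();
 *	pool = get_work_pool(work);
 *	if (pool) {
 *		raw_spin_lock_irq(&pool->lock);
 *		... inspect pool state ...
 *		raw_spin_unlock_irq(&pool->lock);
 *	}
 *	rcu_read_unlock();
 */
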
/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool->id;

	return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
	unsigned long pool_id = get_work_pool_id(work);

	pool_id <<= WORK_OFFQ_POOL_SHIFT;
	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions. These define the policies on how the global worker
 * pools are managed. Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker? Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working? Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working? Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker? Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

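/*
 * Worked example for too_many_workers() (illustrative, made-up numbers):
 * a pool with nr_workers == 16, nr_idle == 6 and no active manager has
 * nr_busy == 10, and (6 - 2) * MAX_IDLE_WORKERS_RATIO == 16 >= 10, so the
 * function returns true and idle workers start getting culled once they
 * have been idle for IDLE_WORKER_TIMEOUT.
 */
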
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);

	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		pool->nr_running--;
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	lockdep_assert_held(&pool->lock);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running. Note
	 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			pool->nr_running++;
}

/* Return the first idle worker. Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state. Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
	    WARN_ON_ONCE(!list_empty(&worker->entry) &&
			 (worker->hentry.next || worker->hentry.pprev)))
		return;

	/* can't use worker_set_flags(), also called from create_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/* Sanity check nr_running. */
	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state. Update stats.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
		return;
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work. For a worker
 * to match, its current execution should match the address of @work and
 * its work function. This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky. A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item. If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives. Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item. Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
						 struct work_struct *work)
{
	struct worker *worker;

	hash_for_each_possible(pool->busy_hash, worker, hentry,
			       (unsigned long)work)
		if (worker->current_work == work &&
		    worker->current_func == work->func)
			return worker;

	return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head. Work series to be
 * scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
 * @nextp.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

/**
 * assign_work - assign a work item and its linked work items to a worker
 * @work: work to assign
 * @worker: worker to assign to
 * @nextp: out parameter for nested worklist walking
 *
 * Assign @work and its linked work items to @worker. If @work is already being
 * executed by another worker in the same pool, it'll be punted there.
 *
 * If @nextp is not NULL, it's updated to point to the next work of the last
 * scheduled work. This allows assign_work() to be nested inside
 * list_for_each_entry_safe().
 *
 * Returns %true if @work was successfully assigned to @worker. %false if @work
 * was punted to another worker already executing it.
 */
static bool assign_work(struct work_struct *work, struct worker *worker,
			struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/*
	 * A single work shouldn't be executed concurrently by multiple workers.
	 * __queue_work() ensures that @work doesn't jump to a different pool
	 * while still running in the previous pool. Here, we should ensure that
	 * @work is not executed concurrently by multiple workers from the same
	 * pool. Check whether anyone is already processing the work. If so,
	 * defer the work to the currently executing one.
	 */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}

/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);
	struct task_struct *p;

	lockdep_assert_held(&pool->lock);

	if (!need_more_worker(pool) || !worker)
		return false;

	p = worker->task;

#ifdef CONFIG_SMP
	/*
	 * Idle @worker is about to execute @work and waking up provides an
	 * opportunity to migrate @worker at a lower cost by setting the task's
	 * wake_cpu field. Let's see if we want to move @worker to improve
	 * execution locality.
	 *
	 * We're waking the worker that went idle the latest and there's some
	 * chance that @worker is marked idle but hasn't gone off CPU yet. If
	 * so, setting the wake_cpu won't do anything. As this is a best-effort
	 * optimization and the race window is narrow, let's leave as-is for
	 * now. If this becomes pronounced, we can skip over workers which are
	 * still on cpu when picking an idle worker.
	 *
	 * If @pool has non-strict affinity, @worker might have ended up outside
	 * its affinity scope. Repatriate.
	 */
	if (!pool->attrs->affn_strict &&
	    !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
		struct work_struct *work = list_first_entry(&pool->worklist,
						struct work_struct, entry);
		p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
		get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
	}
#endif
	wake_up_process(p);
	return true;
}

#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT

/*
 * Concurrency-managed per-cpu work items that hog CPU for longer than
 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
 * which prevents them from stalling other concurrency-managed work items. If a
 * work function keeps triggering this mechanism, it's likely that the work item
 * should be using an unbound workqueue instead.
 *
 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
 * and reports them so that they can be examined and converted to use unbound
 * workqueues as appropriate. To avoid flooding the console, each violating work
 * function is tracked and reported with exponential backoff.
 */
#define WCI_MAX_ENTS 128

struct wci_ent {
	work_func_t		func;
	atomic64_t		cnt;
	struct hlist_node	hash_node;
};

static struct wci_ent wci_ents[WCI_MAX_ENTS];
static int wci_nr_ents;
static DEFINE_RAW_SPINLOCK(wci_lock);
static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));

static struct wci_ent *wci_find_ent(work_func_t func)
{
	struct wci_ent *ent;

	hash_for_each_possible_rcu(wci_hash, ent, hash_node,
				   (unsigned long)func) {
		if (ent->func == func)
			return ent;
	}
	return NULL;
}

static void wq_cpu_intensive_report(work_func_t func)
{
	struct wci_ent *ent;

restart:
	ent = wci_find_ent(func);
	if (ent) {
		u64 cnt;

		/*
		 * Start reporting from the fourth time and back off
		 * exponentially.
		 */
		cnt = atomic64_inc_return_relaxed(&ent->cnt);
		if (cnt >= 4 && is_power_of_2(cnt))
			printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
					ent->func, wq_cpu_intensive_thresh_us,
					atomic64_read(&ent->cnt));
		return;
	}

	/*
	 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
	 * is exhausted, something went really wrong and we probably made enough
	 * noise already.
	 */
	if (wci_nr_ents >= WCI_MAX_ENTS)
		return;

	raw_spin_lock(&wci_lock);

	if (wci_nr_ents >= WCI_MAX_ENTS) {
		raw_spin_unlock(&wci_lock);
		return;
	}

	if (wci_find_ent(func)) {
		raw_spin_unlock(&wci_lock);
		goto restart;
	}

	ent = &wci_ents[wci_nr_ents++];
	ent->func = func;
	atomic64_set(&ent->cnt, 1);
	hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);

	raw_spin_unlock(&wci_lock);
}

#else	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */
static void wq_cpu_intensive_report(work_func_t func) {}
#endif	/* CONFIG_WQ_CPU_INTENSIVE_REPORT */

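/*
 * Reporting cadence of the above: a work function that keeps violating the
 * threshold is logged only when its count satisfies
 * cnt >= 4 && is_power_of_2(cnt), i.e. at 4, 8, 16, 32, ..., so n
 * violations produce O(log n) printks rather than n.
 */
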
/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!READ_ONCE(worker->sleeping))
		return;

	/*
	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
	 * and the nr_running increment below, we may ruin the nr_running reset
	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
	 * pool. Protect against such race.
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
		worker->pool->nr_running++;
	preempt_enable();

	/*
	 * CPU intensive auto-detection cares about how long a work item hogged
	 * CPU without sleeping. Reset the starting timestamp on wakeup.
	 */
	worker->current_at = worker->task->se.sum_exec_runtime;

	WRITE_ONCE(worker->sleeping, 0);
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
	 * Rescuers, which may not have all the fields set up like normal
	 * workers, also reach here, let's not access anything before
	 * checking NOT_RUNNING.
	 */
	if (worker->flags & WORKER_NOT_RUNNING)
		return;

	pool = worker->pool;

	/* Return if preempted before wq_worker_running() was reached */
	if (READ_ONCE(worker->sleeping))
		return;

	WRITE_ONCE(worker->sleeping, 1);
	raw_spin_lock_irq(&pool->lock);

	/*
	 * Recheck in case unbind_workers() preempted us. We don't
	 * want to decrement nr_running after the worker is unbound
	 * and nr_running has been reset.
	 */
	if (worker->flags & WORKER_NOT_RUNNING) {
		raw_spin_unlock_irq(&pool->lock);
		return;
	}

	pool->nr_running--;
	if (kick_pool(pool))
		worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_tick - a scheduler tick occurred while a kworker is running
 * @task: task currently running
 *
 * Called from scheduler_tick(). We're in the IRQ context and the current
 * worker's fields which follow the 'K' locking rule can be accessed safely.
 */
void wq_worker_tick(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct pool_workqueue *pwq = worker->current_pwq;
	struct worker_pool *pool = worker->pool;

	if (!pwq)
		return;

	pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;

	if (!wq_cpu_intensive_thresh_us)
		return;

	/*
	 * If the current worker is concurrency managed and hogged the CPU for
	 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
	 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
	 *
	 * A set @worker->sleeping means that @worker is in the process of
	 * switching out voluntarily and won't be contributing to
	 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
	 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
	 * double decrements. The task is releasing the CPU anyway. Let's skip.
	 * We probably want to make this prettier in the future.
	 */
	if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
	    worker->task->se.sum_exec_runtime - worker->current_at <
	    wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
		return;

	raw_spin_lock(&pool->lock);

	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	wq_cpu_intensive_report(worker->current_func);
	pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;

	if (kick_pool(pool))
		pwq->stats[PWQ_STAT_CM_WAKEUP]++;

	raw_spin_unlock(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * raw_spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut-off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	return worker->last_func;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq. The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	WARN_ON_ONCE(pwq->refcnt <= 0);
	pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
 * destruction. The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
	lockdep_assert_held(&pwq->pool->lock);
	if (likely(--pwq->refcnt))
		return;
	/*
	 * @pwq can't be released under pool->lock, bounce to a dedicated
	 * kthread_worker to avoid A-A deadlocks.
	 */
	kthread_queue_work(pwq_release_worker, &pwq->release_work);
}

/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking. This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
	if (pwq) {
		/*
		 * As both pwqs and pools are RCU protected, the
		 * following lock operations are safe.
		 */
		raw_spin_lock_irq(&pwq->pool->lock);
		put_pwq(pwq);
		raw_spin_unlock_irq(&pwq->pool->lock);
	}
}

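/*
 * Illustrative lifetime sketch (not from the original file): a queued work
 * item effectively pins its pwq. Roughly, under pwq->pool->lock each time:
 *
 *	get_pwq(pwq);		when the work item is queued
 *	...			work item executes
 *	put_pwq(pwq);		from pwq_dec_nr_in_flight() below
 *
 * with the final put bouncing actual destruction to pwq_release_worker.
 */
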
static void pwq_activate_inactive_work(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);

	trace_workqueue_activate_work(work);
	if (list_empty(&pwq->pool->worklist))
		pwq->pool->watchdog_ts = jiffies;
	move_linked_works(work, &pwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
	pwq->nr_active++;
}

static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
{
	struct work_struct *work = list_first_entry(&pwq->inactive_works,
						    struct work_struct, entry);

	pwq_activate_inactive_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @work_data: work_data of work which left the queue
 *
 * A work item has either completed or been removed from the pending
 * queue; decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
{
	int color = get_work_color(work_data);

	if (!(work_data & WORK_STRUCT_INACTIVE)) {
		pwq->nr_active--;
		if (!list_empty(&pwq->inactive_works)) {
			/* one down, submit an inactive one */
			if (pwq->nr_active < pwq->max_active)
				pwq_activate_first_inactive(pwq);
		}
	}

	pwq->nr_in_flight[color]--;

	/* is flush in progress and are we at the flushing tip? */
	if (likely(pwq->flush_color != color))
		goto out_put;

	/* are there still in-flight works? */
	if (pwq->nr_in_flight[color])
		goto out_put;

	/* this pwq is done, clear flush_color */
	pwq->flush_color = -1;

	/*
	 * If this was the last pwq, wake up the first flusher. It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
		complete(&pwq->wq->first_flusher->done);
out_put:
	put_pwq(pwq);
}

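/*
 * Worked example (editor's note): with pwq->max_active == 2, queueing
 * three work items W1..W3 leaves W1 and W2 on pool->worklist with
 * nr_active == 2 and W3 on pwq->inactive_works with
 * WORK_STRUCT_INACTIVE set. When W1 finishes, pwq_dec_nr_in_flight()
 * drops nr_active to 1 and immediately activates W3 via
 * pwq_activate_first_inactive(), restoring nr_active == 2.
 */
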
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work. This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *
 *  ========	================================================================
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *  ========	================================================================
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry. This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe. If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	rcu_read_lock();
	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	pool = get_work_pool(work);
	if (!pool)
		goto fail;

	raw_spin_lock(&pool->lock);
	/*
	 * work->data is guaranteed to point to pwq only while the work
	 * item is queued on pwq->wq, and both updating work->data to point
	 * to pwq on queueing and to pool on dequeueing are done under
	 * pwq->pool->lock. This in turn guarantees that, if work->data
	 * points to pwq which is associated with a locked pool, the work
	 * item is currently queued on that pool.
	 */
	pwq = get_work_pwq(work);
	if (pwq && pwq->pool == pool) {
		debug_work_deactivate(work);

		/*
		 * A cancelable inactive work item must be in the
		 * pwq->inactive_works since a queued barrier can't be
		 * canceled (see the comments in insert_wq_barrier()).
		 *
		 * An inactive work item cannot be grabbed directly because
		 * it might have linked barrier work items which, if left
		 * on the inactive_works list, will confuse pwq->nr_active
		 * management later on and cause stall. Make sure the work
		 * item is activated before grabbing.
		 */
		if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
			pwq_activate_inactive_work(work);

		list_del_init(&work->entry);
		pwq_dec_nr_in_flight(pwq, *work_data_bits(work));

		/* work->data points to pwq iff queued, point to pool */
		set_work_pool_and_keep_pending(work, pool->id);

		raw_spin_unlock(&pool->lock);
		rcu_read_unlock();
		return 1;
	}
	raw_spin_unlock(&pool->lock);
fail:
	rcu_read_unlock();
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}

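/*
 * Example (editor's sketch): the canonical caller pattern, busy-retrying
 * on -EAGAIN exactly as mod_delayed_work_on() below does. The function
 * name is hypothetical; the release sequence mirrors the cancel path in
 * this file.
 *
 *	static bool grab_or_give_up(struct work_struct *work)
 *	{
 *		unsigned long flags;
 *		int ret;
 *
 *		do {
 *			ret = try_to_grab_pending(work, false, &flags);
 *		} while (unlikely(ret == -EAGAIN));
 *
 *		if (ret < 0)
 *			return false;	// someone else is canceling it
 *
 *		// PENDING is ours and irqs are off; act, then release
 *		set_work_pool_and_clear_pending(work, get_work_pool_id(work));
 *		local_irq_restore(flags);
 *		return true;
 *	}
 */
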
/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
			struct list_head *head, unsigned int extra_flags)
{
	debug_work_activate(work);

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack_noalloc(work);

	/* we own @work, set data and link */
	set_work_pwq(work, pwq, extra_flags);
	list_add_tail(&work->entry, head);
	get_pwq(pwq);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	struct worker *worker;

	worker = current_wq_worker();
	/*
	 * Return %true iff I'm a worker executing a work item on @wq. If
	 * I'm @worker, it's safe to dereference it without locking.
	 */
	return worker && worker->current_pwq->wq == wq;
}

/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
	int new_cpu;

	if (likely(!wq_debug_force_rr_cpu)) {
		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
			return cpu;
	} else {
		pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
	}

	if (cpumask_empty(wq_unbound_cpumask))
		return cpu;

	new_cpu = __this_cpu_read(wq_rr_cpu_last);
	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
	if (unlikely(new_cpu >= nr_cpu_ids)) {
		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
		if (unlikely(new_cpu >= nr_cpu_ids))
			return cpu;
	}
	__this_cpu_write(wq_rr_cpu_last, new_cpu);

	return new_cpu;
}

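/*
 * Worked example (editor's note): with wq_unbound_cpumask = {1,3} and
 * both CPUs online, successive round-robin calls walk wq_rr_cpu_last
 * through 1, 3, 1, ... A cpumask_next_and() result >= nr_cpu_ids marks
 * the end of the mask, at which point the scan wraps around via
 * cpumask_first_and(); only if the mask intersects no online CPU does
 * the function fall back to the requested @cpu.
 */
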
static void __queue_work(int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct pool_workqueue *pwq;
	struct worker_pool *last_pool, *pool;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING. Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * For a draining wq, only works from the same workqueue are
	 * allowed. The __WQ_DESTROYING helps to spot the issue that
	 * queues a new work item to a wq after destroy_workqueue(wq).
	 */
	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
		     WARN_ON_ONCE(!is_chained_work(wq))))
		return;
	rcu_read_lock();
retry:
	/* pwq which will be used unless @work is executing elsewhere */
	if (req_cpu == WORK_CPU_UNBOUND) {
		if (wq->flags & WQ_UNBOUND)
			cpu = wq_select_unbound_cpu(raw_smp_processor_id());
		else
			cpu = raw_smp_processor_id();
	}

	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
	pool = pwq->pool;

	/*
	 * If @work was previously on a different pool, it might still be
	 * running there, in which case the work needs to be queued on that
	 * pool to guarantee non-reentrancy.
	 */
	last_pool = get_work_pool(work);
	if (last_pool && last_pool != pool) {
		struct worker *worker;

		raw_spin_lock(&last_pool->lock);

		worker = find_worker_executing_work(last_pool, work);

		if (worker && worker->current_pwq->wq == wq) {
			pwq = worker->current_pwq;
			pool = pwq->pool;
			WARN_ON_ONCE(pool != last_pool);
		} else {
			/* meh... not running there, queue here */
			raw_spin_unlock(&last_pool->lock);
			raw_spin_lock(&pool->lock);
		}
	} else {
		raw_spin_lock(&pool->lock);
	}

	/*
	 * pwq is determined and locked. For unbound pools, we could have raced
	 * with pwq release and it could already be dead. If its refcnt is zero,
	 * repeat pwq selection. Note that unbound pwqs never die without
	 * another pwq replacing it in cpu_pwq or while work items are executing
	 * on it, so the retrying is guaranteed to make forward-progress.
	 */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			raw_spin_unlock(&pool->lock);
			cpu_relax();
			goto retry;
		}
		/* oops */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}

	/* pwq determined, queue */
	trace_workqueue_queue_work(req_cpu, pwq, work);

	if (WARN_ON(!list_empty(&work->entry)))
		goto out;

	pwq->nr_in_flight[pwq->work_color]++;
	work_flags = work_color_to_flags(pwq->work_color);

	if (likely(pwq->nr_active < pwq->max_active)) {
		if (list_empty(&pool->worklist))
			pool->watchdog_ts = jiffies;

		trace_workqueue_activate_work(work);
		pwq->nr_active++;
		insert_work(pwq, work, &pool->worklist, work_flags);
		kick_pool(pool);
	} else {
		work_flags |= WORK_STRUCT_INACTIVE;
		insert_work(pwq, work, &pwq->inactive_works, work_flags);
	}

out:
	raw_spin_unlock(&pool->lock);
	rcu_read_unlock();
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a specific CPU; the caller must ensure it
 * can't go away. Callers that fail to ensure that the specified
 * CPU cannot go away will execute on a randomly chosen CPU.
 * But note well that callers specifying a CPU that never has been
 * online will get a splat.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_work_on);
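
/*
 * Example (editor's sketch): pinning per-CPU statistics flushing to a
 * specific CPU. The work item and flush function are hypothetical;
 * cpus_read_lock() is one way to satisfy the "CPU can't go away"
 * requirement stated above.
 *
 *	static struct work_struct flush_work_item;
 *
 *	static void flush_stats_fn(struct work_struct *work)
 *	{
 *		// runs on the CPU it was queued on (if it stays online)
 *	}
 *
 *	static void flush_stats_on(int cpu)
 *	{
 *		INIT_WORK(&flush_work_item, flush_stats_fn);
 *		cpus_read_lock();
 *		if (cpu_online(cpu))
 *			queue_work_on(cpu, system_wq, &flush_work_item);
 *		cpus_read_unlock();
 *	}
 */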

/**
 * select_numa_node_cpu - Select a CPU based on NUMA node
 * @node: NUMA node ID that we want to select a CPU from
 *
 * This function will attempt to find a "random" cpu available on a given
 * node. If there are no CPUs available on the given node it will return
 * WORK_CPU_UNBOUND indicating that we should just schedule to any
 * available CPU if we need to schedule this work.
 */
static int select_numa_node_cpu(int node)
{
	int cpu;

	/* Delay binding to CPU if node is not valid or online */
	if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
		return WORK_CPU_UNBOUND;

	/* Use local node/cpu if we are already there */
	cpu = raw_smp_processor_id();
	if (node == cpu_to_node(cpu))
		return cpu;

	/* Use "random", otherwise known as "first", online CPU of node */
	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);

	/* If CPU is valid return that, otherwise just defer */
	return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
}

/**
 * queue_work_node - queue work on a "random" cpu for a given NUMA node
 * @node: NUMA node that we are targeting the work for
 * @wq: workqueue to use
 * @work: work to queue
 *
 * We queue the work to a "random" CPU within a given NUMA node. The basic
 * idea here is to provide a way to somehow associate work with a given
 * NUMA node.
 *
 * This function will only make a best effort attempt at getting this onto
 * the right NUMA node. If no node is requested or the requested node is
 * offline then we just fall back to standard queue_work behavior.
 *
 * Currently the "random" CPU ends up being the first available CPU in the
 * intersection of cpu_online_mask and the cpumask of the node, unless we
 * are running on the node. In that case we just use the current CPU.
 *
 * Return: %false if @work was already on a queue, %true otherwise.
 */
bool queue_work_node(int node, struct workqueue_struct *wq,
		     struct work_struct *work)
{
	unsigned long flags;
	bool ret = false;

	/*
	 * This current implementation is specific to unbound workqueues.
	 * Specifically we only return the first available CPU for a given
	 * node instead of cycling through individual CPUs within the node.
	 *
	 * If this is used with a per-cpu workqueue then the logic in
	 * select_numa_node_cpu() would need to be updated to allow for
	 * some round robin type logic.
	 */
	WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		int cpu = select_numa_node_cpu(node);

		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_node);
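
/*
 * Example (editor's sketch): keeping deferred processing near the memory
 * it touches. The names below are hypothetical; the node would typically
 * come from dev_to_node() or page_to_nid().
 *
 *	static void process_buffer_fn(struct work_struct *work)
 *	{
 *		// touches a buffer allocated on a specific NUMA node
 *	}
 *
 *	static void kick_buffer_processing(struct device *dev,
 *					   struct work_struct *work)
 *	{
 *		INIT_WORK(work, process_buffer_fn);
 *		queue_work_node(dev_to_node(dev), system_unbound_wq, work);
 *	}
 */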

void delayed_work_timer_fn(struct timer_list *t)
{
	struct delayed_work *dwork = from_timer(dwork, t, timer);

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, dwork->wq, &dwork->work);
}
EXPORT_SYMBOL(delayed_work_timer_fn);

static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	WARN_ON_ONCE(!wq);
	WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	dwork->wq = wq;
	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Return: %false if @work was already on a queue, %true otherwise. If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
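
/*
 * Example (editor's sketch): periodic polling that rearms itself. The
 * poll function and interval are hypothetical; DECLARE_DELAYED_WORK()
 * statically initializes dwork->timer with delayed_work_timer_fn, as
 * the WARN_ON_ONCE() in __queue_delayed_work() above requires.
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		// ... sample hardware state ...
 *		queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
 *				      &poll_dwork, msecs_to_jiffies(500));
 *	}
 */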

/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is
 * zero, @work is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Return: %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
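
/*
 * Example (editor's sketch): debouncing a burst of events so the handler
 * runs once, 100ms after the last event. Unlike queue_delayed_work_on(),
 * each call here pushes the expiry back. Names are hypothetical.
 *
 *	static void handle_burst_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(burst_dwork, handle_burst_fn);
 *
 *	static void on_event(void)
 *	{
 *		mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
 *				    &burst_dwork, msecs_to_jiffies(100));
 *	}
 */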

static void rcu_work_rcufn(struct rcu_head *rcu)
{
	struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);

	/* read the comment in __queue_work() */
	local_irq_disable();
	__queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
	local_irq_enable();
}

/**
 * queue_rcu_work - queue work after a RCU grace period
 * @wq: workqueue to use
 * @rwork: work to queue
 *
 * Return: %false if @rwork was already pending, %true otherwise. Note
 * that a full RCU grace period is guaranteed only after a %true return.
 * While @rwork is guaranteed to be executed after a %false return, the
 * execution may happen before a full RCU grace period has passed.
 */
bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
{
	struct work_struct *work = &rwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		rwork->wq = wq;
		call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(queue_rcu_work);
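
/*
 * Example (editor's sketch): tearing an object down in process context
 * after all RCU readers are done with it, without open-coding a
 * call_rcu() + schedule_work() chain. The object type is hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *		// ... fields accessed by RCU readers ...
 *	};
 *
 *	static void my_obj_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *
 *		kfree(obj);	// process context: sleeping is allowed here
 *	}
 *
 *	static void my_obj_release(struct my_obj *obj)
 *	{
 *		INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
 *		queue_rcu_work(system_wq, &obj->rwork);
 *	}
 */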

static struct worker *alloc_worker(int node)
{
	struct worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_LIST_HEAD(&worker->node);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
{
	if (pool->cpu < 0 && pool->attrs->affn_strict)
		return pool->attrs->__pod_cpumask;
	else
		return pool->attrs->cpumask;
}

/**
 * worker_attach_to_pool() - attach a worker to a pool
 * @worker: worker to be attached
 * @pool: the target pool
 *
 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
 * cpu-binding of @worker are kept coordinated with the pool across
 * cpu-[un]hotplugs.
 */
static void worker_attach_to_pool(struct worker *worker,
				  struct worker_pool *pool)
{
	mutex_lock(&wq_pool_attach_mutex);

	/*
	 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
	 * stable across this function. See the comments above the flag
	 * definition for details.
	 */
	if (pool->flags & POOL_DISASSOCIATED)
		worker->flags |= WORKER_UNBOUND;
	else
		kthread_set_per_cpu(worker->task, pool->cpu);

	if (worker->rescue_wq)
		set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));

	list_add_tail(&worker->node, &pool->workers);
	worker->pool = pool;

	mutex_unlock(&wq_pool_attach_mutex);
}

/**
 * worker_detach_from_pool() - detach a worker from its pool
 * @worker: worker which is attached to its pool
 *
 * Undo the attaching which had been done in worker_attach_to_pool(). The
 * caller worker shouldn't access the pool after detaching unless it has
 * another reference to the pool.
 */
static void worker_detach_from_pool(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct completion *detach_completion = NULL;

	mutex_lock(&wq_pool_attach_mutex);

	kthread_set_per_cpu(worker->task, -1);
	list_del(&worker->node);
	worker->pool = NULL;

	if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
		detach_completion = pool->detach_completion;
	mutex_unlock(&wq_pool_attach_mutex);

	/* clear leftover flags without pool->lock after it is detached */
	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);

	if (detach_completion)
		complete(detach_completion);
}

/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create and start a new worker which is attached to @pool.
 *
 * CONTEXT:
 * Might sleep. Does GFP_KERNEL allocations.
 *
 * Return:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker;
	int id;
	char id_buf[23];

	/* ID is needed to determine kthread name */
	id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
	if (id < 0) {
		pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
			    ERR_PTR(id));
		return NULL;
	}

	worker = alloc_worker(pool->node);
	if (!worker) {
		pr_err_once("workqueue: Failed to allocate a worker\n");
		goto fail;
	}

	worker->id = id;

	if (pool->cpu >= 0)
		snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
			 pool->attrs->nice < 0 ? "H" : "");
	else
		snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);

	worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
					      "kworker/%s", id_buf);
	if (IS_ERR(worker->task)) {
		if (PTR_ERR(worker->task) == -EINTR) {
			pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
			       id_buf);
		} else {
			pr_err_once("workqueue: Failed to create a worker thread: %pe",
				    worker->task);
		}
		goto fail;
	}

	set_user_nice(worker->task, pool->attrs->nice);
	kthread_bind_mask(worker->task, pool_allowed_cpus(pool));

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/* start the newly created worker */
	raw_spin_lock_irq(&pool->lock);

	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	kick_pool(pool);

	/*
	 * @worker is waiting on a completion in kthread() and will trigger hung
	 * check if not woken up soon. As kick_pool() might not have woken it
	 * up, wake it up explicitly once more.
	 */
	wake_up_process(worker->task);

	raw_spin_unlock_irq(&pool->lock);

	return worker;

fail:
	ida_free(&pool->worker_ida, id);
	kfree(worker);
	return NULL;
}

static void unbind_worker(struct worker *worker)
{
	lockdep_assert_held(&wq_pool_attach_mutex);

	kthread_set_per_cpu(worker->task, -1);
	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
	else
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
}

static void wake_dying_workers(struct list_head *cull_list)
{
	struct worker *worker, *tmp;

	list_for_each_entry_safe(worker, tmp, cull_list, entry) {
		list_del_init(&worker->entry);
		unbind_worker(worker);
		/*
		 * If the worker was somehow already running, then it had to be
		 * in pool->idle_list when set_worker_dying() happened or we
		 * wouldn't have gotten here.
		 *
		 * Thus, the worker must either have observed the WORKER_DIE
		 * flag, or have set its state to TASK_IDLE. Either way, the
		 * below will be observed by the worker and is safe to do
		 * outside of pool->lock.
		 */
		wake_up_process(worker->task);
	}
}

/**
 * set_worker_dying - Tag a worker for destruction
 * @worker: worker to be destroyed
 * @list: transfer worker away from its pool->idle_list and into list
 *
 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
 * should be idle.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void set_worker_dying(struct worker *worker, struct list_head *list)
{
	struct worker_pool *pool = worker->pool;

	lockdep_assert_held(&pool->lock);
	lockdep_assert_held(&wq_pool_attach_mutex);

	/* sanity check frenzy */
	if (WARN_ON(worker->current_work) ||
	    WARN_ON(!list_empty(&worker->scheduled)) ||
	    WARN_ON(!(worker->flags & WORKER_IDLE)))
		return;

	pool->nr_workers--;
	pool->nr_idle--;

	worker->flags |= WORKER_DIE;

	list_move(&worker->entry, list);
	list_move(&worker->node, &pool->dying_workers);
}

/**
 * idle_worker_timeout - check if some idle workers can now be deleted.
 * @t: The pool's idle_timer that just expired
 *
 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
 * worker_leave_idle(), as a worker flicking between idle and active while its
 * pool is at the too_many_workers() tipping point would cause too much timer
 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
 * it expire and re-evaluate things from there.
 */
static void idle_worker_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, idle_timer);
	bool do_cull = false;

	if (work_pending(&pool->idle_cull_work))
		return;

	raw_spin_lock_irq(&pool->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;
		do_cull = !time_before(jiffies, expires);

		if (!do_cull)
			mod_timer(&pool->idle_timer, expires);
	}
	raw_spin_unlock_irq(&pool->lock);

	if (do_cull)
		queue_work(system_unbound_wq, &pool->idle_cull_work);
}

/**
 * idle_cull_fn - cull workers that have been idle for too long.
 * @work: the pool's work for handling these idle workers
 *
 * This goes through a pool's idle workers and gets rid of those that have been
 * idle for at least IDLE_WORKER_TIMEOUT seconds.
 *
 * We don't want to disturb isolated CPUs because of a pcpu kworker being
 * culled, so this also resets worker affinity. This requires a sleepable
 * context, hence the split between timer callback and work item.
 */
static void idle_cull_fn(struct work_struct *work)
{
	struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
	LIST_HEAD(cull_list);

	/*
	 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
	 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
	 * path. This is required as a previously-preempted worker could run after
	 * set_worker_dying() has happened but before wake_dying_workers() did.
	 */
	mutex_lock(&wq_pool_attach_mutex);
	raw_spin_lock_irq(&pool->lock);

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		set_worker_dying(worker, &cull_list);
	}

	raw_spin_unlock_irq(&pool->lock);
	wake_dying_workers(&cull_list);
	mutex_unlock(&wq_pool_attach_mutex);
}

static void send_mayday(struct work_struct *work)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct workqueue_struct *wq = pwq->wq;

	lockdep_assert_held(&wq_mayday_lock);

	if (!wq->rescuer)
		return;

	/* mayday mayday mayday */
	if (list_empty(&pwq->mayday_node)) {
		/*
		 * If @pwq is for an unbound wq, its base ref may be put at
		 * any time due to an attribute change. Pin @pwq until the
		 * rescuer is done with it.
		 */
		get_pwq(pwq);
		list_add_tail(&pwq->mayday_node, &wq->maydays);
		wake_up_process(wq->rescuer->task);
		pwq->stats[PWQ_STAT_MAYDAY]++;
	}
}

static void pool_mayday_timeout(struct timer_list *t)
{
	struct worker_pool *pool = from_timer(pool, t, mayday_timer);
	struct work_struct *work;

	raw_spin_lock_irq(&pool->lock);
	raw_spin_lock(&wq_mayday_lock);	/* for wq->maydays */

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful. We might be hitting an
		 * allocation deadlock. Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &pool->worklist, entry)
			send_mayday(work);
	}

	raw_spin_unlock(&wq_mayday_lock);
	raw_spin_unlock_irq(&pool->lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}

/**
 * maybe_create_worker - create a new worker if necessary
 * @pool: pool to create a new worker for
 *
 * Create a new worker for @pool if necessary. @pool is guaranteed to
 * have at least one idle worker on return from this function. If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @pool to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be %false and
 * may_start_working() %true.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations. Called only from
 * manager.
 */
static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
	raw_spin_unlock_irq(&pool->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		if (create_worker(pool) || !need_to_create_worker(pool))
			break;

		schedule_timeout_interruptible(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	raw_spin_lock_irq(&pool->lock);
	/*
	 * This is necessary even after a new worker was just successfully
	 * created as @pool->lock was dropped and the new worker might have
	 * already become busy.
	 */
	if (need_to_create_worker(pool))
		goto restart;
}

/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage the worker pool @worker belongs
 * to. At any given time, there can be only zero or one manager per
 * pool. The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return. On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations.
 *
 * Return:
 * %false if the pool doesn't need management and the caller can safely
 * start processing works, %true if management function was performed and
 * the conditions that the caller verified before calling the function may
 * no longer be true.
 */
static bool manage_workers(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	if (pool->flags & POOL_MANAGER_ACTIVE)
		return false;

	pool->flags |= POOL_MANAGER_ACTIVE;
	pool->manager = worker;

	maybe_create_worker(pool);

	pool->manager = NULL;
	pool->flags &= ~POOL_MANAGER_ACTIVE;
	rcuwait_wake_up(&manager_wait);
	return true;
}

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work. This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing. As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
__acquires(&pool->lock)
{
	struct pool_workqueue *pwq = get_work_pwq(work);
	struct worker_pool *pool = worker->pool;
	unsigned long work_data;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/* ensure we're on the correct CPU */
	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
		     raw_smp_processor_id() != pool->cpu);

	/* claim and dequeue */
	debug_work_deactivate(work);
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
	worker->current_work = work;
	worker->current_func = work->func;
	worker->current_pwq = pwq;
	worker->current_at = worker->task->se.sum_exec_runtime;
	work_data = *work_data_bits(work);
	worker->current_color = get_work_color(work_data);

	/*
	 * Record wq name for cmdline and debug reporting, may get
	 * overridden through set_worker_desc().
	 */
	strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);

	list_del_init(&work->entry);

	/*
	 * CPU intensive works don't participate in concurrency management.
	 * They're the scheduler's responsibility. This takes @worker out
	 * of concurrency management and the next code block will chain
	 * execution of the pending work items.
	 */
	if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE);

	/*
	 * Kick @pool if necessary. It's always noop for per-cpu worker pools
	 * since nr_running would always be >= 1 at this point. This is used to
	 * chain execution of the pending work items for WORKER_NOT_RUNNING
	 * workers such as the UNBOUND and CPU_INTENSIVE ones.
	 */
	kick_pool(pool);

	/*
	 * Record the last pool and clear PENDING which should be the last
	 * update to @work. Also, do this inside @pool->lock so that
	 * PENDING and queued state changes happen together while IRQ is
	 * disabled.
	 */
	set_work_pool_and_clear_pending(work, pool->id);

	pwq->stats[PWQ_STAT_STARTED]++;
	raw_spin_unlock_irq(&pool->lock);

	lock_map_acquire(&pwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	/*
	 * Strictly speaking we should mark the invariant state without holding
	 * any locks, that is, before these two lock_map_acquire()'s.
	 *
	 * However, that would result in:
	 *
	 *	A(W1)
	 *	WFC(C)
	 *		A(W1)
	 *		C(C)
	 *
	 * Which would create W1->C->W1 dependencies, even though there is no
	 * actual deadlock possible. There are two solutions, using a
	 * read-recursive acquire on the work(queue) 'locks', but this will then
	 * hit the lockdep limitation on recursive locks, or simply discard
	 * these locks.
	 *
	 * AFAICT there is no possible deadlock scenario between the
	 * flush_work() and complete() primitives (except for single-threaded
	 * workqueues), so hiding them isn't a problem.
	 */
	lockdep_invariant_state(true);
	trace_workqueue_execute_start(work);
	worker->current_func(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work, worker->current_func);
	pwq->stats[PWQ_STAT_COMPLETED]++;
	lock_map_release(&lockdep_map);
	lock_map_release(&pwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
		       "     last function: %ps\n",
		       current->comm, preempt_count(), task_pid_nr(current),
		       worker->current_func);
		debug_show_held_locks(current);
		dump_stack();
	}

	/*
	 * The following prevents a kworker from hogging CPU on !PREEMPTION
	 * kernels, where a requeueing work item waiting for something to
	 * happen could deadlock with stop_machine as such work item could
	 * indefinitely requeue itself while all other CPUs are trapped in
	 * stop_machine. At the same time, report a quiescent RCU state so
	 * the same condition doesn't freeze RCU.
	 */
	cond_resched();

	raw_spin_lock_irq(&pool->lock);

	/*
	 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
	 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
	 * wq_cpu_intensive_thresh_us. Clear it.
	 */
	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* tag the worker for identification in schedule() */
	worker->last_func = worker->current_func;

	/* we're done with it, release */
	hash_del(&worker->hentry);
	worker->current_work = NULL;
	worker->current_func = NULL;
	worker->current_pwq = NULL;
	worker->current_color = INT_MAX;
	pwq_dec_nr_in_flight(pwq, work_data);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works. Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	struct work_struct *work;
	bool first = true;

	while ((work = list_first_entry_or_null(&worker->scheduled,
						struct work_struct, entry))) {
		if (first) {
			worker->pool->watchdog_ts = jiffies;
			first = false;
		}
		process_one_work(worker, work);
	}
}

static void set_pf_worker(bool val)
{
	mutex_lock(&wq_pool_attach_mutex);
	if (val)
		current->flags |= PF_WQ_WORKER;
	else
		current->flags &= ~PF_WQ_WORKER;
	mutex_unlock(&wq_pool_attach_mutex);
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The worker thread function. All workers belong to a worker_pool -
 * either a per-cpu one or dynamic unbound one. These workers process all
 * work items regardless of their specific target workqueue. The only
 * exception is work items which belong to workqueues with a rescuer which
 * will be explained in rescuer_thread().
 *
 * Return: 0
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;

	/* tell the scheduler that this is a workqueue worker */
	set_pf_worker(true);
woke_up:
	raw_spin_lock_irq(&pool->lock);

	/* am I supposed to die? */
	if (unlikely(worker->flags & WORKER_DIE)) {
		raw_spin_unlock_irq(&pool->lock);
		set_pf_worker(false);

		set_task_comm(worker->task, "kworker/dying");
		ida_free(&pool->worker_ida, worker->id);
		worker_detach_from_pool(worker);
		WARN_ON_ONCE(!list_empty(&worker->entry));
		kfree(worker);
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	WARN_ON_ONCE(!list_empty(&worker->scheduled));

	/*
	 * Finish PREP stage. We're guaranteed to have at least one idle
	 * worker or that someone else has already assumed the manager
	 * role. This is where @worker starts participating in concurrency
	 * management if applicable and concurrency management is restored
	 * after being rebound. See rebind_workers() for details.
	 */
	worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		if (assign_work(work, worker, NULL))
			process_scheduled_works(worker);
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP);
sleep:
	/*
	 * pool->lock is held and there's no work to process and no need to
	 * manage, sleep. Workers are woken up only while holding
	 * pool->lock or from local cpu, so setting the current state
	 * before releasing pool->lock is enough to prevent losing any
	 * event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_IDLE);
	raw_spin_unlock_irq(&pool->lock);
	schedule();
	goto woke_up;
}

/**
 * rescuer_thread - the rescuer thread function
 * @__rescuer: self
 *
 * Workqueue rescuer thread function. There's one rescuer for each
 * workqueue which has WQ_MEM_RECLAIM set.
 *
 * Regular work processing on a pool may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation. This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the pool summons rescuers of all
 * workqueues which have works queued on the pool and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 *
 * Return: 0
 */
static int rescuer_thread(void *__rescuer)
{
	struct worker *rescuer = __rescuer;
	struct workqueue_struct *wq = rescuer->rescue_wq;
	bool should_stop;

	set_user_nice(current, RESCUER_NICE_LEVEL);

	/*
	 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
	 * doesn't participate in concurrency management.
	 */
	set_pf_worker(true);
repeat:
	set_current_state(TASK_IDLE);

	/*
	 * By the time the rescuer is requested to stop, the workqueue
	 * shouldn't have any work pending, but @wq->maydays may still have
	 * pwq(s) queued. This can happen by non-rescuer workers consuming
	 * all the work items before the rescuer got to them. Go through
	 * @wq->maydays processing before acting on should_stop so that the
	 * list is always empty on exit.
	 */
	should_stop = kthread_should_stop();

	/* see whether any pwq is asking for help */
	raw_spin_lock_irq(&wq_mayday_lock);

	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
					struct pool_workqueue, mayday_node);
		struct worker_pool *pool = pwq->pool;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		list_del_init(&pwq->mayday_node);

		raw_spin_unlock_irq(&wq_mayday_lock);

		worker_attach_to_pool(rescuer, pool);

		raw_spin_lock_irq(&pool->lock);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry) {
			if (get_work_pwq(work) == pwq &&
			    assign_work(work, rescuer, &n))
				pwq->stats[PWQ_STAT_RESCUED]++;
		}

		if (!list_empty(&rescuer->scheduled)) {
			process_scheduled_works(rescuer);

			/*
			 * The above execution of rescued work items could
			 * have created more to rescue through
			 * pwq_activate_first_inactive() or chained
			 * queueing. Let's put @pwq back on mayday list so
			 * that such back-to-back work items, which may be
			 * being used to relieve memory pressure, don't
			 * incur MAYDAY_INTERVAL delay in between.
			 */
			if (pwq->nr_active && need_to_create_worker(pool)) {
				raw_spin_lock(&wq_mayday_lock);
				/*
				 * Queue iff we aren't racing destruction
				 * and somebody else hasn't queued it already.
				 */
				if (wq->rescuer && list_empty(&pwq->mayday_node)) {
					get_pwq(pwq);
					list_add_tail(&pwq->mayday_node, &wq->maydays);
				}
				raw_spin_unlock(&wq_mayday_lock);
			}
		}

		/*
		 * Put the reference grabbed by send_mayday(). @pool won't
		 * go away while we're still attached to it.
		 */
		put_pwq(pwq);

		/*
		 * Leave this pool. Notify regular workers; otherwise, we end up
		 * with 0 concurrency and stalling the execution.
		 */
		kick_pool(pool);

		raw_spin_unlock_irq(&pool->lock);

		worker_detach_from_pool(rescuer);

		raw_spin_lock_irq(&wq_mayday_lock);
	}

	raw_spin_unlock_irq(&wq_mayday_lock);

	if (should_stop) {
		__set_current_state(TASK_RUNNING);
		set_pf_worker(false);
		return 0;
	}

	/* rescuers should never participate in concurrency management */
	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
	schedule();
	goto repeat;
}

/**
 * check_flush_dependency - check for flush dependency sanity
 * @target_wq: workqueue being flushed
 * @target_work: work item being flushed (NULL for workqueue flushes)
 *
 * %current is trying to flush the whole @target_wq or @target_work on it.
 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
 * reclaiming memory or running on a workqueue which doesn't have
 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
 * a deadlock.
 */
static void check_flush_dependency(struct workqueue_struct *target_wq,
				   struct work_struct *target_work)
{
	work_func_t target_func = target_work ? target_work->func : NULL;
	struct worker *worker;

	if (target_wq->flags & WQ_MEM_RECLAIM)
		return;

	worker = current_wq_worker();

	WARN_ONCE(current->flags & PF_MEMALLOC,
		  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
		  current->pid, current->comm, target_wq->name, target_func);
	WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
			      (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
		  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
		  worker->current_pwq->wq->name, worker->current_func,
		  target_wq->name, target_func);
}

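/*
 * Example (editor's sketch): the kind of dependency this check catches.
 * Both workqueues and the work function are hypothetical.
 *
 *	reclaim_wq = alloc_workqueue("reclaim", WQ_MEM_RECLAIM, 0);
 *	plain_wq   = alloc_workqueue("plain", 0, 0);
 *
 *	static void reclaim_fn(struct work_struct *work)
 *	{
 *		// Running on reclaim_wq, whose forward progress is
 *		// guaranteed by its rescuer. Flushing plain_wq here ties
 *		// that guarantee to a workqueue with no such guarantee;
 *		// under memory pressure plain_wq may never make progress,
 *		// so check_flush_dependency() emits a WARN_ONCE().
 *		flush_workqueue(plain_wq);
 *	}
 */
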
fc2e4d70
ON
2970struct wq_barrier {
2971 struct work_struct work;
2972 struct completion done;
2607d7a6 2973 struct task_struct *task; /* purely informational */
fc2e4d70
ON
2974};
2975
2976static void wq_barrier_func(struct work_struct *work)
2977{
2978 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2979 complete(&barr->done);
2980}
2981
4690c4ab
TH
2982/**
2983 * insert_wq_barrier - insert a barrier work
112202d9 2984 * @pwq: pwq to insert barrier into
4690c4ab 2985 * @barr: wq_barrier to insert
affee4b2
TH
2986 * @target: target work to attach @barr to
2987 * @worker: worker currently executing @target, NULL if @target is not executing
4690c4ab 2988 *
affee4b2
TH
2989 * @barr is linked to @target such that @barr is completed only after
2990 * @target finishes execution. Please note that the ordering
2991 * guarantee is observed only with respect to @target and on the local
2992 * cpu.
2993 *
2994 * Currently, a queued barrier can't be canceled. This is because
2995 * try_to_grab_pending() can't determine whether the work to be
2996 * grabbed is at the head of the queue and thus can't clear LINKED
2997 * flag of the previous work while there must be a valid next work
2998 * after a work with LINKED flag set.
2999 *
3000 * Note that when @worker is non-NULL, @target may be modified
112202d9 3001 * underneath us, so we can't reliably determine pwq from @target.
4690c4ab
TH
3002 *
3003 * CONTEXT:
a9b8a985 3004 * raw_spin_lock_irq(pool->lock).
4690c4ab 3005 */
112202d9 3006static void insert_wq_barrier(struct pool_workqueue *pwq,
affee4b2
TH
3007 struct wq_barrier *barr,
3008 struct work_struct *target, struct worker *worker)
fc2e4d70 3009{
d812796e
LJ
3010 unsigned int work_flags = 0;
3011 unsigned int work_color;
affee4b2 3012 struct list_head *head;
affee4b2 3013
dc186ad7 3014 /*
d565ed63 3015 * debugobject calls are safe here even with pool->lock locked
dc186ad7
TG
3016 * as we know for sure that this will not trigger any of the
3017 * checks and call back into the fixup functions where we
3018 * might deadlock.
3019 */
ca1cab37 3020 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
22df02bb 3021 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
52fa5bc5 3022
fd1a5b04
BP
3023 init_completion_map(&barr->done, &target->lockdep_map);
3024
2607d7a6 3025 barr->task = current;
83c22520 3026
018f3a13
LJ
3027 /* The barrier work item does not participate in pwq->nr_active. */
3028 work_flags |= WORK_STRUCT_INACTIVE;
3029
affee4b2
TH
3030 /*
3031 * If @target is currently being executed, schedule the
3032 * barrier to the worker; otherwise, put it after @target.
3033 */
d812796e 3034 if (worker) {
affee4b2 3035 head = worker->scheduled.next;
d812796e
LJ
3036 work_color = worker->current_color;
3037 } else {
affee4b2
TH
3038 unsigned long *bits = work_data_bits(target);
3039
3040 head = target->entry.next;
3041 /* there can already be other linked works, inherit and set */
d21cece0 3042 work_flags |= *bits & WORK_STRUCT_LINKED;
d812796e 3043 work_color = get_work_color(*bits);
affee4b2
TH
3044 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3045 }
3046
d812796e
LJ
3047 pwq->nr_in_flight[work_color]++;
3048 work_flags |= work_color_to_flags(work_color);
3049
d21cece0 3050 insert_work(pwq, &barr->work, head, work_flags);
fc2e4d70
ON
3051}

/**
 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare pwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all pwqs should be
 * -1.  If no pwq has in-flight commands at the specified color, all
 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
 * has in flight commands, its pwq->flush_color is set to
 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all pwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->mutex).
 *
 * Return:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	struct pool_workqueue *pwq;

	if (flush_color >= 0) {
		WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
		atomic_set(&wq->nr_pwqs_to_flush, 1);
	}

	for_each_pwq(pwq, wq) {
		struct worker_pool *pool = pwq->pool;

		raw_spin_lock_irq(&pool->lock);

		if (flush_color >= 0) {
			WARN_ON_ONCE(pwq->flush_color != -1);

			if (pwq->nr_in_flight[flush_color]) {
				pwq->flush_color = flush_color;
				atomic_inc(&wq->nr_pwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
			pwq->work_color = work_color;
		}

		raw_spin_unlock_irq(&pool->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}

/**
 * __flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * This function sleeps until all work items which were queued on entry
 * have finished execution, but it is not livelocked by new incoming ones.
 */
void __flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
	};
	int next_color;

	if (WARN_ON(!wq_online))
		return;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	check_flush_dependency(wq, NULL);

	mutex_unlock(&wq->mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (READ_ONCE(wq->first_flusher) != &this_flusher)
		return;

	mutex_lock(&wq->mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	WRITE_ONCE(wq->first_flusher, NULL);

	WARN_ON_ONCE(!list_empty(&this_flusher.list));
	WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
			     wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			WARN_ON_ONCE(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm pwqs.
		 */
		WARN_ON_ONCE(wq->flush_color == wq->work_color);
		WARN_ON_ONCE(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL(__flush_workqueue);
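
/*
 * Illustrative usage sketch (editorial addition, not part of the kernel
 * source): a hypothetical driver queues a work item and then uses
 * flush_workqueue() -- the public wrapper around __flush_workqueue() -- to
 * wait for everything queued so far to finish.  All identifiers prefixed
 * with "ex_" are made up for the example.
 */
static void ex_work_fn(struct work_struct *work)
{
	/* some deferred processing */
}

static DECLARE_WORK(ex_work, ex_work_fn);

static void __maybe_unused ex_flush_example(void)
{
	struct workqueue_struct *ex_wq;

	ex_wq = alloc_workqueue("ex_wq", WQ_MEM_RECLAIM, 0);
	if (!ex_wq)
		return;

	queue_work(ex_wq, &ex_work);

	/* sleeps until ex_work (and anything queued before now) has run */
	flush_workqueue(ex_wq);

	destroy_workqueue(ex_wq);
}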

/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty.  While draining is in progress,
 * only chain queueing is allowed.  IOW, only currently pending or running
 * work items on @wq can queue further work items on it.  @wq is flushed
 * repeatedly until it becomes empty.  The number of flushes is determined
 * by the depth of chaining and should be relatively short.  Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	struct pool_workqueue *pwq;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	mutex_lock(&wq->mutex);
	if (!wq->nr_drainers++)
		wq->flags |= __WQ_DRAINING;
	mutex_unlock(&wq->mutex);
reflush:
	__flush_workqueue(wq);

	mutex_lock(&wq->mutex);

	for_each_pwq(pwq, wq) {
		bool drained;

		raw_spin_lock_irq(&pwq->pool->lock);
		drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
		raw_spin_unlock_irq(&pwq->pool->lock);

		if (drained)
			continue;

		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
				wq->name, __func__, flush_cnt);

		mutex_unlock(&wq->mutex);
		goto reflush;
	}

	if (!--wq->nr_drainers)
		wq->flags &= ~__WQ_DRAINING;
	mutex_unlock(&wq->mutex);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
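
/*
 * Illustrative sketch (editorial addition): drain_workqueue() suits
 * shutdown paths where work items may requeue themselves.  A plain flush
 * only waits for the current generation; draining waits until the chain
 * dies out.  "ex_" identifiers are hypothetical.
 */
static void __maybe_unused ex_drain_example(struct workqueue_struct *ex_wq)
{
	/*
	 * After this returns, ex_wq is empty; while it was draining, only
	 * already-pending or running items on ex_wq could queue more work.
	 */
	drain_workqueue(ex_wq);
	destroy_workqueue(ex_wq);
}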

static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
			     bool from_cancel)
{
	struct worker *worker = NULL;
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	might_sleep();

	rcu_read_lock();
	pool = get_work_pool(work);
	if (!pool) {
		rcu_read_unlock();
		return false;
	}

	raw_spin_lock_irq(&pool->lock);
	/* see the comment in try_to_grab_pending() with the same code */
	pwq = get_work_pwq(work);
	if (pwq) {
		if (unlikely(pwq->pool != pool))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(pool, work);
		if (!worker)
			goto already_gone;
		pwq = worker->current_pwq;
	}

	check_flush_dependency(pwq->wq, work);

	insert_wq_barrier(pwq, barr, work, worker);
	raw_spin_unlock_irq(&pool->lock);

	/*
	 * Force a lock recursion deadlock when using flush_work() inside a
	 * single-threaded or rescuer equipped workqueue.
	 *
	 * For single threaded workqueues the deadlock happens when the work
	 * is after the work issuing the flush_work().  For rescuer equipped
	 * workqueues the deadlock happens when the rescuer stalls, blocking
	 * forward progress.
	 */
	if (!from_cancel &&
	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
		lock_map_acquire(&pwq->wq->lockdep_map);
		lock_map_release(&pwq->wq->lockdep_map);
	}
	rcu_read_unlock();
	return true;
already_gone:
	raw_spin_unlock_irq(&pool->lock);
	rcu_read_unlock();
	return false;
}

static bool __flush_work(struct work_struct *work, bool from_cancel)
{
	struct wq_barrier barr;

	if (WARN_ON(!wq_online))
		return false;

	if (WARN_ON(!work->func))
		return false;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr, from_cancel)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else {
		return false;
	}
}

/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  @work is guaranteed to be idle
 * on return if it hasn't been requeued since flush started.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	return __flush_work(work, false);
}
EXPORT_SYMBOL_GPL(flush_work);
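
/*
 * Illustrative sketch (editorial addition): flush_work() waits for one
 * specific work item rather than a whole workqueue, and only for its last
 * queueing instance.  "ex_" identifiers are hypothetical.
 */
static void __maybe_unused ex_flush_work_example(struct work_struct *ex_work)
{
	if (flush_work(ex_work))
		pr_debug("ex: waited for the work item to finish\n");
	else
		pr_debug("ex: work item was already idle\n");
}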

struct cwt_wait {
	wait_queue_entry_t	wait;
	struct work_struct	*work;
};

static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);

	if (cwait->work != key)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * If someone else is already canceling, wait for it to
		 * finish.  flush_work() doesn't work for PREEMPT_NONE
		 * because we may get scheduled between @work's completion
		 * and the other canceling task resuming and clearing
		 * CANCELING - flush_work() will return false immediately
		 * as @work is no longer busy, try_to_grab_pending() will
		 * return -ENOENT as @work is still being canceled and the
		 * other canceling task won't be able to clear CANCELING as
		 * we're hogging the CPU.
		 *
		 * Let's wait for completion using a waitqueue.  As this
		 * may lead to the thundering herd problem, use a custom
		 * wake function which matches @work along with exclusive
		 * wait and wakeup.
		 */
		if (unlikely(ret == -ENOENT)) {
			struct cwt_wait cwait;

			init_wait(&cwait.wait);
			cwait.wait.func = cwt_wakefn;
			cwait.work = work;

			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
						  TASK_UNINTERRUPTIBLE);
			if (work_is_canceling(work))
				schedule();
			finish_wait(&cancel_waitq, &cwait.wait);
		}
	} while (unlikely(ret < 0));

	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
	local_irq_restore(flags);

	/*
	 * This allows canceling during early boot.  We know that @work
	 * isn't executing.
	 */
	if (wq_online)
		__flush_work(work, true);

	clear_work_data(work);

	/*
	 * Paired with prepare_to_wait() above so that either
	 * waitqueue_active() is visible here or !work_is_canceling() is
	 * visible there.
	 */
	smp_mb();
	if (waitqueue_active(&cancel_waitq))
		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);

	return ret;
}

/**
 * cancel_work_sync - cancel a work and wait for it to finish
 * @work: the work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself or migrates to
 * another workqueue.  On return from this function, @work is
 * guaranteed to be not pending or executing on any CPU.
 *
 * cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the workqueue on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return:
 * %true if @work was pending, %false otherwise.
 */
bool cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, false);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
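
/*
 * Illustrative sketch (editorial addition): cancel_work_sync() is the
 * usual teardown idiom -- after it returns, the work item is neither
 * pending nor running anywhere, so backing data can be freed safely.
 * "ex_" identifiers are hypothetical.
 */
struct ex_dev {
	struct work_struct	refresh_work;
	void			*state;
};

static void __maybe_unused ex_dev_teardown(struct ex_dev *dev)
{
	cancel_work_sync(&dev->refresh_work);
	kfree(dev->state);	/* safe: refresh_work can no longer run */
}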

/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * Return:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	local_irq_disable();
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu, dwork->wq, &dwork->work);
	local_irq_enable();
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * flush_rcu_work - wait for a rwork to finish executing the last queueing
 * @rwork: the rcu work to flush
 *
 * Return:
 * %true if flush_rcu_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_rcu_work(struct rcu_work *rwork)
{
	if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
		rcu_barrier();
		flush_work(&rwork->work);
		return true;
	} else {
		return flush_work(&rwork->work);
	}
}
EXPORT_SYMBOL(flush_rcu_work);

static bool __cancel_work(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	set_work_pool_and_clear_pending(work, get_work_pool_id(work));
	local_irq_restore(flags);
	return ret;
}

/*
 * See cancel_delayed_work()
 */
bool cancel_work(struct work_struct *work)
{
	return __cancel_work(work, false);
}
EXPORT_SYMBOL(cancel_work);

/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.
 *
 * Return: %true if @dwork was pending and canceled; %false if it wasn't
 * pending.
 *
 * Note:
 * The work callback function may still be running on return, unless
 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
 * use cancel_delayed_work_sync() to wait on it.
 *
 * This function is safe to call from any context including IRQ handler.
 */
bool cancel_delayed_work(struct delayed_work *dwork)
{
	return __cancel_work(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work);
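
/*
 * Illustrative sketch (editorial addition): cancel_delayed_work() only
 * stops a *pending* dwork; per the note above, the callback may still be
 * running on return, so pair it with a sync variant before freeing
 * anything the callback touches.  "ex_" identifiers are hypothetical.
 */
static void __maybe_unused ex_retrigger(struct delayed_work *ex_dwork)
{
	/* safe from IRQ context: drop the pending instance, if any ... */
	cancel_delayed_work(ex_dwork);
	/* ... and re-arm with a fresh timeout */
	schedule_delayed_work(ex_dwork, msecs_to_jiffies(100));
}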

/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * Return:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	cpus_read_lock();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	cpus_read_unlock();
	free_percpu(works);
	return 0;
}
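
/*
 * Illustrative sketch (editorial addition): schedule_on_each_cpu() runs a
 * function once on every online CPU and blocks until all instances have
 * completed, e.g. to drain per-CPU caches.  "ex_" identifiers are
 * hypothetical.
 */
static void ex_drain_pcpu_cache(struct work_struct *work)
{
	/* runs once on each online CPU, in process context */
}

static int __maybe_unused ex_drain_all_cpus(void)
{
	return schedule_on_each_cpu(ex_drain_pcpu_cache);	/* may be slow */
}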

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Return:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
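
/*
 * Illustrative sketch (editorial addition): execute_in_process_context()
 * runs @fn inline when the caller is already in process context and only
 * defers to the system workqueue from interrupt context.  The execute_work
 * storage must outlive the deferred call, so it usually lives in a
 * long-lived object.  "ex_" identifiers are hypothetical.
 */
struct ex_obj {
	struct execute_work	cleanup_ew;
};

static void ex_cleanup_fn(struct work_struct *work)
{
	/* may run inline or later from a worker, always in process context */
}

static void __maybe_unused ex_request_cleanup(struct ex_obj *obj)
{
	execute_in_process_context(ex_cleanup_fn, &obj->cleanup_ew);
}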

/**
 * free_workqueue_attrs - free a workqueue_attrs
 * @attrs: workqueue_attrs to free
 *
 * Undo alloc_workqueue_attrs().
 */
void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
	if (attrs) {
		free_cpumask_var(attrs->cpumask);
		free_cpumask_var(attrs->__pod_cpumask);
		kfree(attrs);
	}
}

/**
 * alloc_workqueue_attrs - allocate a workqueue_attrs
 *
 * Allocate a new workqueue_attrs, initialize with default settings and
 * return it.
 *
 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
 */
struct workqueue_attrs *alloc_workqueue_attrs(void)
{
	struct workqueue_attrs *attrs;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		goto fail;
	if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
		goto fail;
	if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
		goto fail;

	cpumask_copy(attrs->cpumask, cpu_possible_mask);
	attrs->affn_scope = WQ_AFFN_DFL;
	return attrs;
fail:
	free_workqueue_attrs(attrs);
	return NULL;
}

static void copy_workqueue_attrs(struct workqueue_attrs *to,
				 const struct workqueue_attrs *from)
{
	to->nice = from->nice;
	cpumask_copy(to->cpumask, from->cpumask);
	cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
	to->affn_strict = from->affn_strict;

	/*
	 * Unlike hash and equality test, copying shouldn't ignore wq-only
	 * fields as copying is used for both pool and wq attrs.  Instead,
	 * get_unbound_pool() explicitly clears the fields.
	 */
	to->affn_scope = from->affn_scope;
	to->ordered = from->ordered;
}

/*
 * Some attrs fields are workqueue-only.  Clear them for worker_pool's.  See the
 * comments in 'struct workqueue_attrs' definition.
 */
static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
{
	attrs->affn_scope = WQ_AFFN_NR_TYPES;
	attrs->ordered = false;
}

/* hash value of the content of @attrs */
static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
{
	u32 hash = 0;

	hash = jhash_1word(attrs->nice, hash);
	hash = jhash(cpumask_bits(attrs->cpumask),
		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
	hash = jhash(cpumask_bits(attrs->__pod_cpumask),
		     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
	hash = jhash_1word(attrs->affn_strict, hash);
	return hash;
}

/* content equality test */
static bool wqattrs_equal(const struct workqueue_attrs *a,
			  const struct workqueue_attrs *b)
{
	if (a->nice != b->nice)
		return false;
	if (!cpumask_equal(a->cpumask, b->cpumask))
		return false;
	if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
		return false;
	if (a->affn_strict != b->affn_strict)
		return false;
	return true;
}

/* Update @attrs with actually available CPUs */
static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
				      const cpumask_t *unbound_cpumask)
{
	/*
	 * Calculate the effective CPU mask of @attrs given @unbound_cpumask.
	 * If @attrs->cpumask doesn't overlap with @unbound_cpumask, we fall
	 * back to @unbound_cpumask.
	 */
	cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
	if (unlikely(cpumask_empty(attrs->cpumask)))
		cpumask_copy(attrs->cpumask, unbound_cpumask);
}

/* find wq_pod_type to use for @attrs */
static const struct wq_pod_type *
wqattrs_pod_type(const struct workqueue_attrs *attrs)
{
	enum wq_affn_scope scope;
	struct wq_pod_type *pt;

	/* to synchronize access to wq_affn_dfl */
	lockdep_assert_held(&wq_pool_mutex);

	if (attrs->affn_scope == WQ_AFFN_DFL)
		scope = wq_affn_dfl;
	else
		scope = attrs->affn_scope;

	pt = &wq_pod_types[scope];

	if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
	    likely(pt->nr_pods))
		return pt;

	/*
	 * Before workqueue_init_topology(), only SYSTEM is available which is
	 * initialized in workqueue_init_early().
	 */
	pt = &wq_pod_types[WQ_AFFN_SYSTEM];
	BUG_ON(!pt->nr_pods);
	return pt;
}

/**
 * init_worker_pool - initialize a newly zalloc'd worker_pool
 * @pool: worker_pool to initialize
 *
 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
 *
 * Return: 0 on success, -errno on failure.  Even on failure, all fields
 * inside @pool proper are initialized and put_unbound_pool() can be called
 * on @pool safely to release it.
 */
static int init_worker_pool(struct worker_pool *pool)
{
	raw_spin_lock_init(&pool->lock);
	pool->id = -1;
	pool->cpu = -1;
	pool->node = NUMA_NO_NODE;
	pool->flags |= POOL_DISASSOCIATED;
	pool->watchdog_ts = jiffies;
	INIT_LIST_HEAD(&pool->worklist);
	INIT_LIST_HEAD(&pool->idle_list);
	hash_init(pool->busy_hash);

	timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
	INIT_WORK(&pool->idle_cull_work, idle_cull_fn);

	timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);

	INIT_LIST_HEAD(&pool->workers);
	INIT_LIST_HEAD(&pool->dying_workers);

	ida_init(&pool->worker_ida);
	INIT_HLIST_NODE(&pool->hash_node);
	pool->refcnt = 1;

	/* shouldn't fail above this point */
	pool->attrs = alloc_workqueue_attrs();
	if (!pool->attrs)
		return -ENOMEM;

	wqattrs_clear_for_pool(pool->attrs);

	return 0;
}

#ifdef CONFIG_LOCKDEP
static void wq_init_lockdep(struct workqueue_struct *wq)
{
	char *lock_name;

	lockdep_register_key(&wq->key);
	lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
	if (!lock_name)
		lock_name = wq->name;

	wq->lock_name = lock_name;
	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
}

static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
	lockdep_unregister_key(&wq->key);
}

static void wq_free_lockdep(struct workqueue_struct *wq)
{
	if (wq->lock_name != wq->name)
		kfree(wq->lock_name);
}
#else
static void wq_init_lockdep(struct workqueue_struct *wq)
{
}

static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
}

static void wq_free_lockdep(struct workqueue_struct *wq)
{
}
#endif

static void rcu_free_wq(struct rcu_head *rcu)
{
	struct workqueue_struct *wq =
		container_of(rcu, struct workqueue_struct, rcu);

	wq_free_lockdep(wq);
	free_percpu(wq->cpu_pwq);
	free_workqueue_attrs(wq->unbound_attrs);
	kfree(wq);
}

static void rcu_free_pool(struct rcu_head *rcu)
{
	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);

	ida_destroy(&pool->worker_ida);
	free_workqueue_attrs(pool->attrs);
	kfree(pool);
}

/**
 * put_unbound_pool - put a worker_pool
 * @pool: worker_pool to put
 *
 * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
 * safe manner.  get_unbound_pool() calls this function on its failure path
 * and this function should be able to release pools which went through,
 * successfully or not, init_worker_pool().
 *
 * Should be called with wq_pool_mutex held.
 */
static void put_unbound_pool(struct worker_pool *pool)
{
	DECLARE_COMPLETION_ONSTACK(detach_completion);
	struct worker *worker;
	LIST_HEAD(cull_list);

	lockdep_assert_held(&wq_pool_mutex);

	if (--pool->refcnt)
		return;

	/* sanity checks */
	if (WARN_ON(!(pool->cpu < 0)) ||
	    WARN_ON(!list_empty(&pool->worklist)))
		return;

	/* release id and unhash */
	if (pool->id >= 0)
		idr_remove(&worker_pool_idr, pool->id);
	hash_del(&pool->hash_node);

	/*
	 * Become the manager and destroy all workers.  This prevents
	 * @pool's workers from blocking on attach_mutex.  We're the last
	 * manager and @pool gets freed with the flag set.
	 *
	 * Having a concurrent manager is quite unlikely to happen as we can
	 * only get here with
	 *   pwq->refcnt == pool->refcnt == 0
	 * which implies no work queued to the pool, which implies no worker
	 * can become the manager.  However a worker could have taken the role
	 * of manager before the refcnts dropped to 0, since
	 * maybe_create_worker() drops pool->lock.
	 */
	while (true) {
		rcuwait_wait_event(&manager_wait,
				   !(pool->flags & POOL_MANAGER_ACTIVE),
				   TASK_UNINTERRUPTIBLE);

		mutex_lock(&wq_pool_attach_mutex);
		raw_spin_lock_irq(&pool->lock);
		if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
			pool->flags |= POOL_MANAGER_ACTIVE;
			break;
		}
		raw_spin_unlock_irq(&pool->lock);
		mutex_unlock(&wq_pool_attach_mutex);
	}

	while ((worker = first_idle_worker(pool)))
		set_worker_dying(worker, &cull_list);
	WARN_ON(pool->nr_workers || pool->nr_idle);
	raw_spin_unlock_irq(&pool->lock);

	wake_dying_workers(&cull_list);

	if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
		pool->detach_completion = &detach_completion;
	mutex_unlock(&wq_pool_attach_mutex);

	if (pool->detach_completion)
		wait_for_completion(pool->detach_completion);

	/* shut down the timers */
	del_timer_sync(&pool->idle_timer);
	cancel_work_sync(&pool->idle_cull_work);
	del_timer_sync(&pool->mayday_timer);

	/* RCU protected to allow dereferences from get_work_pool() */
	call_rcu(&pool->rcu, rcu_free_pool);
}

/**
 * get_unbound_pool - get a worker_pool with the specified attributes
 * @attrs: the attributes of the worker_pool to get
 *
 * Obtain a worker_pool which has the same attributes as @attrs, bump the
 * reference count and return it.  If there already is a matching
 * worker_pool, it will be used; otherwise, this function attempts to
 * create a new one.
 *
 * Should be called with wq_pool_mutex held.
 *
 * Return: On success, a worker_pool with the same attributes as @attrs.
 * On failure, %NULL.
 */
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
	u32 hash = wqattrs_hash(attrs);
	struct worker_pool *pool;
	int pod, node = NUMA_NO_NODE;

	lockdep_assert_held(&wq_pool_mutex);

	/* do we already have a matching pool? */
	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
		if (wqattrs_equal(pool->attrs, attrs)) {
			pool->refcnt++;
			return pool;
		}
	}

	/* If __pod_cpumask is contained inside a NUMA pod, that's our node */
	for (pod = 0; pod < pt->nr_pods; pod++) {
		if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
			node = pt->pod_node[pod];
			break;
		}
	}

	/* nope, create a new one */
	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
	if (!pool || init_worker_pool(pool) < 0)
		goto fail;

	pool->node = node;
	copy_workqueue_attrs(pool->attrs, attrs);
	wqattrs_clear_for_pool(pool->attrs);

	if (worker_pool_assign_id(pool) < 0)
		goto fail;

	/* create and start the initial worker */
	if (wq_online && !create_worker(pool))
		goto fail;

	/* install */
	hash_add(unbound_pool_hash, &pool->hash_node, hash);

	return pool;
fail:
	if (pool)
		put_unbound_pool(pool);
	return NULL;
}

226223ab 4094
6ba94429 4095static void rcu_free_pwq(struct rcu_head *rcu)
7a4e344c 4096{
6ba94429
FW
4097 kmem_cache_free(pwq_cache,
4098 container_of(rcu, struct pool_workqueue, rcu));
7a4e344c
TH
4099}
4100
6ba94429 4101/*
967b494e
TH
4102 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
4103 * refcnt and needs to be destroyed.
7a4e344c 4104 */
687a9aa5 4105static void pwq_release_workfn(struct kthread_work *work)
7a4e344c 4106{
6ba94429 4107 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
687a9aa5 4108 release_work);
6ba94429
FW
4109 struct workqueue_struct *wq = pwq->wq;
4110 struct worker_pool *pool = pwq->pool;
b42b0bdd 4111 bool is_last = false;
7a4e344c 4112
b42b0bdd 4113 /*
687a9aa5 4114 * When @pwq is not linked, it doesn't hold any reference to the
b42b0bdd
YY
4115 * @wq, and @wq is invalid to access.
4116 */
4117 if (!list_empty(&pwq->pwqs_node)) {
b42b0bdd
YY
4118 mutex_lock(&wq->mutex);
4119 list_del_rcu(&pwq->pwqs_node);
4120 is_last = list_empty(&wq->pwqs);
4121 mutex_unlock(&wq->mutex);
4122 }
6ba94429 4123
687a9aa5
TH
4124 if (wq->flags & WQ_UNBOUND) {
4125 mutex_lock(&wq_pool_mutex);
4126 put_unbound_pool(pool);
4127 mutex_unlock(&wq_pool_mutex);
4128 }
6ba94429 4129
25b00775 4130 call_rcu(&pwq->rcu, rcu_free_pwq);
7a4e344c 4131
2865a8fb 4132 /*
6ba94429
FW
4133 * If we're the last pwq going away, @wq is already dead and no one
4134 * is gonna access it anymore. Schedule RCU free.
2865a8fb 4135 */
669de8bd
BVA
4136 if (is_last) {
4137 wq_unregister_lockdep(wq);
25b00775 4138 call_rcu(&wq->rcu, rcu_free_wq);
669de8bd 4139 }
29c91e99
TH
4140}
4141
7a4e344c 4142/**
6ba94429
FW
4143 * pwq_adjust_max_active - update a pwq's max_active to the current setting
4144 * @pwq: target pool_workqueue
d185af30 4145 *
6ba94429 4146 * If @pwq isn't freezing, set @pwq->max_active to the associated
f97a4a1a 4147 * workqueue's saved_max_active and activate inactive work items
6ba94429 4148 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
7a4e344c 4149 */
6ba94429 4150static void pwq_adjust_max_active(struct pool_workqueue *pwq)
4e1a1f9a 4151{
6ba94429
FW
4152 struct workqueue_struct *wq = pwq->wq;
4153 bool freezable = wq->flags & WQ_FREEZABLE;
3347fa09 4154 unsigned long flags;
4e1a1f9a 4155
6ba94429
FW
4156 /* for @wq->saved_max_active */
4157 lockdep_assert_held(&wq->mutex);
4e1a1f9a 4158
6ba94429
FW
4159 /* fast exit for non-freezable wqs */
4160 if (!freezable && pwq->max_active == wq->saved_max_active)
4161 return;
7a4e344c 4162
3347fa09 4163 /* this function can be called during early boot w/ irq disabled */
a9b8a985 4164 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
29c91e99 4165
6ba94429
FW
4166 /*
4167 * During [un]freezing, the caller is responsible for ensuring that
4168 * this function is called at least once after @workqueue_freezing
4169 * is updated and visible.
4170 */
4171 if (!freezable || !workqueue_freezing) {
4172 pwq->max_active = wq->saved_max_active;
4e1a1f9a 4173
f97a4a1a 4174 while (!list_empty(&pwq->inactive_works) &&
0219a352 4175 pwq->nr_active < pwq->max_active)
f97a4a1a 4176 pwq_activate_first_inactive(pwq);
e2dca7ad 4177
0219a352 4178 kick_pool(pwq->pool);
6ba94429
FW
4179 } else {
4180 pwq->max_active = 0;
4181 }
e2dca7ad 4182
a9b8a985 4183 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
e2dca7ad
TH
4184}
4185
67dc8325 4186/* initialize newly allocated @pwq which is associated with @wq and @pool */
6ba94429
FW
4187static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4188 struct worker_pool *pool)
29c91e99 4189{
6ba94429 4190 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
29c91e99 4191
6ba94429
FW
4192 memset(pwq, 0, sizeof(*pwq));
4193
4194 pwq->pool = pool;
4195 pwq->wq = wq;
4196 pwq->flush_color = -1;
4197 pwq->refcnt = 1;
f97a4a1a 4198 INIT_LIST_HEAD(&pwq->inactive_works);
6ba94429
FW
4199 INIT_LIST_HEAD(&pwq->pwqs_node);
4200 INIT_LIST_HEAD(&pwq->mayday_node);
687a9aa5 4201 kthread_init_work(&pwq->release_work, pwq_release_workfn);
29c91e99
TH
4202}
4203
6ba94429
FW
4204/* sync @pwq with the current state of its associated wq and link it */
4205static void link_pwq(struct pool_workqueue *pwq)
29c91e99 4206{
6ba94429 4207 struct workqueue_struct *wq = pwq->wq;
29c91e99 4208
6ba94429 4209 lockdep_assert_held(&wq->mutex);
a892cacc 4210
6ba94429
FW
4211 /* may be called multiple times, ignore if already linked */
4212 if (!list_empty(&pwq->pwqs_node))
29c91e99 4213 return;
29c91e99 4214
6ba94429
FW
4215 /* set the matching work_color */
4216 pwq->work_color = wq->work_color;
29c91e99 4217
6ba94429
FW
4218 /* sync max_active to the current setting */
4219 pwq_adjust_max_active(pwq);
29c91e99 4220
6ba94429
FW
4221 /* link in @pwq */
4222 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
4223}
29c91e99 4224
/* obtain a pool matching @attrs and create a pwq associating the pool and @wq */
static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct worker_pool *pool;
	struct pool_workqueue *pwq;

	lockdep_assert_held(&wq_pool_mutex);

	pool = get_unbound_pool(attrs);
	if (!pool)
		return NULL;

	pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
	if (!pwq) {
		put_unbound_pool(pool);
		return NULL;
	}

	init_pwq(pwq, wq, pool);
	return pwq;
}

/**
 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
 * @attrs: the wq_attrs of the default pwq of the target workqueue
 * @cpu: the target CPU
 * @cpu_going_down: if >= 0, the CPU to consider as offline
 *
 * Calculate the cpumask a workqueue with @attrs should use on @cpu's pod.
 * If @cpu_going_down is >= 0, that cpu is considered offline during
 * calculation.  The result is stored in @attrs->__pod_cpumask.
 *
 * If pod affinity is not enabled, @attrs->cpumask is always used.  If enabled
 * and the pod has online CPUs requested by @attrs, the returned cpumask is
 * the intersection of the possible CPUs of the pod and @attrs->cpumask.
 *
 * The caller is responsible for ensuring that the cpumask of the pod stays
 * stable.
 */
static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
				int cpu_going_down)
{
	const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
	int pod = pt->cpu_pod[cpu];

	/* does @pod have any online CPUs @attrs wants? */
	cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
	cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
	if (cpu_going_down >= 0)
		cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);

	if (cpumask_empty(attrs->__pod_cpumask)) {
		cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
		return;
	}

	/* yeap, return possible CPUs in @pod that @attrs wants */
	cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);

	if (cpumask_empty(attrs->__pod_cpumask))
		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
			     "possible intersect\n");
}

/* install @pwq into @wq's cpu_pwq and return the old pwq */
static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
					int cpu, struct pool_workqueue *pwq)
{
	struct pool_workqueue *old_pwq;

	lockdep_assert_held(&wq_pool_mutex);
	lockdep_assert_held(&wq->mutex);

	/* link_pwq() can handle duplicate calls */
	link_pwq(pwq);

	old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
	rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
	return old_pwq;
}

/* context to store the prepared attrs & pwqs before applying */
struct apply_wqattrs_ctx {
	struct workqueue_struct	*wq;		/* target workqueue */
	struct workqueue_attrs	*attrs;		/* attrs to apply */
	struct list_head	list;		/* queued for batching commit */
	struct pool_workqueue	*dfl_pwq;
	struct pool_workqueue	*pwq_tbl[];
};

/* free the resources after success or abort */
static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
{
	if (ctx) {
		int cpu;

		for_each_possible_cpu(cpu)
			put_pwq_unlocked(ctx->pwq_tbl[cpu]);
		put_pwq_unlocked(ctx->dfl_pwq);

		free_workqueue_attrs(ctx->attrs);

		kfree(ctx);
	}
}

/* allocate the attrs and pwqs for later installation */
static struct apply_wqattrs_ctx *
apply_wqattrs_prepare(struct workqueue_struct *wq,
		      const struct workqueue_attrs *attrs,
		      const cpumask_var_t unbound_cpumask)
{
	struct apply_wqattrs_ctx *ctx;
	struct workqueue_attrs *new_attrs;
	int cpu;

	lockdep_assert_held(&wq_pool_mutex);

	if (WARN_ON(attrs->affn_scope < 0 ||
		    attrs->affn_scope >= WQ_AFFN_NR_TYPES))
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);

	new_attrs = alloc_workqueue_attrs();
	if (!ctx || !new_attrs)
		goto out_free;

	/*
	 * If something goes wrong during CPU up/down, we'll fall back to
	 * the default pwq covering whole @attrs->cpumask.  Always create
	 * it even if we don't use it immediately.
	 */
	copy_workqueue_attrs(new_attrs, attrs);
	wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
	if (!ctx->dfl_pwq)
		goto out_free;

	for_each_possible_cpu(cpu) {
		if (new_attrs->ordered) {
			ctx->dfl_pwq->refcnt++;
			ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
		} else {
			wq_calc_pod_cpumask(new_attrs, cpu, -1);
			ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
			if (!ctx->pwq_tbl[cpu])
				goto out_free;
		}
	}

	/* save the user configured attrs and sanitize it. */
	copy_workqueue_attrs(new_attrs, attrs);
	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
	cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
	ctx->attrs = new_attrs;

	ctx->wq = wq;
	return ctx;

out_free:
	free_workqueue_attrs(new_attrs);
	apply_wqattrs_cleanup(ctx);
	return ERR_PTR(-ENOMEM);
}

/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
	int cpu;

	/* all pwqs have been created successfully, let's install'em */
	mutex_lock(&ctx->wq->mutex);

	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);

	/* save the previous pwq and install the new one */
	for_each_possible_cpu(cpu)
		ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
							ctx->pwq_tbl[cpu]);

	/* @dfl_pwq might not have been used, ensure it's linked */
	link_pwq(ctx->dfl_pwq);
	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);

	mutex_unlock(&ctx->wq->mutex);
}

static void apply_wqattrs_lock(void)
{
	/* CPUs should stay stable across pwq creations and installations */
	cpus_read_lock();
	mutex_lock(&wq_pool_mutex);
}

static void apply_wqattrs_unlock(void)
{
	mutex_unlock(&wq_pool_mutex);
	cpus_read_unlock();
}

static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
					const struct workqueue_attrs *attrs)
{
	struct apply_wqattrs_ctx *ctx;

	/* only unbound workqueues can change attributes */
	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
		return -EINVAL;

	/* creating multiple pwqs breaks ordering guarantee */
	if (!list_empty(&wq->pwqs)) {
		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
			return -EINVAL;

		wq->flags &= ~__WQ_ORDERED;
	}

	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* the ctx has been prepared successfully, let's commit it */
	apply_wqattrs_commit(ctx);
	apply_wqattrs_cleanup(ctx);

	return 0;
}

/**
 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
 * @wq: the target workqueue
 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
 *
 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, this function
 * maps a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask
 * so that work items are affine to the pod they were issued on.  Older pwqs
 * are released as in-flight work items finish.  Note that a work item which
 * repeatedly requeues itself back-to-back will stay on its current pwq.
 *
 * Performs GFP_KERNEL allocations.
 *
 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
 *
 * Return: 0 on success and -errno on failure.
 */
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs)
{
	int ret;

	lockdep_assert_cpus_held();

	mutex_lock(&wq_pool_mutex);
	ret = apply_workqueue_attrs_locked(wq, attrs);
	mutex_unlock(&wq_pool_mutex);

	return ret;
}
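
/*
 * Illustrative sketch (editorial addition): a caller reconfigures an
 * unbound workqueue by allocating attrs, tweaking them, and applying them
 * under CPU hotplug read protection, as the comment above requires.
 * "ex_" identifiers are hypothetical.
 */
static int __maybe_unused ex_set_wq_nice(struct workqueue_struct *ex_wq, int nice)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = nice;	/* e.g. raise or lower worker priority */

	cpus_read_lock();
	ret = apply_workqueue_attrs(ex_wq, attrs);
	cpus_read_unlock();

	free_workqueue_attrs(attrs);
	return ret;
}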
4485
4c16bd32 4486/**
fef59c9c 4487 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
4c16bd32 4488 * @wq: the target workqueue
4cbfd3de
TH
4489 * @cpu: the CPU to update pool association for
4490 * @hotplug_cpu: the CPU coming up or going down
4c16bd32
TH
4491 * @online: whether @cpu is coming up or going down
4492 *
4493 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
fef59c9c 4494 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
4c16bd32
TH
4495 * @wq accordingly.
4496 *
fef59c9c
TH
4497 *
4498 * If pod affinity can't be adjusted due to memory allocation failure, it falls
4499 * back to @wq->dfl_pwq which may not be optimal but is always correct.
4500 *
4501 * Note that when the last allowed CPU of a pod goes offline for a workqueue
4502 * with a cpumask spanning multiple pods, the workers which were already
4503 * executing the work items for the workqueue will lose their CPU affinity and
4504 * may execute on any CPU. This is similar to how per-cpu workqueues behave on
4505 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
4506 * responsibility to flush the work item from CPU_DOWN_PREPARE.
4c16bd32 4507 */
fef59c9c
TH
4508static void wq_update_pod(struct workqueue_struct *wq, int cpu,
4509 int hotplug_cpu, bool online)
4c16bd32 4510{
4cbfd3de 4511 int off_cpu = online ? -1 : hotplug_cpu;
4c16bd32
TH
4512 struct pool_workqueue *old_pwq = NULL, *pwq;
4513 struct workqueue_attrs *target_attrs;
4c16bd32
TH
4514
4515 lockdep_assert_held(&wq_pool_mutex);
4516
84193c07 4517 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
4c16bd32
TH
4518 return;
4519
4520 /*
4521 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4522 * Let's use a preallocated one. The following buf is protected by
4523 * CPU hotplug exclusion.
4524 */
fef59c9c 4525 target_attrs = wq_update_pod_attrs_buf;
4c16bd32 4526
4c16bd32 4527 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
0f36ee24 4528 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
4c16bd32 4529
636b927e 4530 /* nothing to do if the target cpumask matches the current pwq */
9546b29e 4531 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
636b927e
TH
4532 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
4533 lockdep_is_held(&wq_pool_mutex));
9546b29e 4534 if (wqattrs_equal(target_attrs, pwq->pool->attrs))
636b927e 4535 return;
4c16bd32 4536
4c16bd32
TH
4537 /* create a new pwq */
4538 pwq = alloc_unbound_pwq(wq, target_attrs);
4539 if (!pwq) {
fef59c9c 4540 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
2d916033 4541 wq->name);
77f300b1 4542 goto use_dfl_pwq;
4c16bd32
TH
4543 }
4544
f7142ed4 4545 /* Install the new pwq. */
4c16bd32 4546 mutex_lock(&wq->mutex);
636b927e 4547 old_pwq = install_unbound_pwq(wq, cpu, pwq);
4c16bd32
TH
4548 goto out_unlock;
4549
4550use_dfl_pwq:
f7142ed4 4551 mutex_lock(&wq->mutex);
a9b8a985 4552 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4c16bd32 4553 get_pwq(wq->dfl_pwq);
a9b8a985 4554 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
636b927e 4555 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
4c16bd32
TH
4556out_unlock:
4557 mutex_unlock(&wq->mutex);
4558 put_pwq_unlocked(old_pwq);
4559}
4560
30cdf249 4561static int alloc_and_link_pwqs(struct workqueue_struct *wq)
0f900049 4562{
49e3cf44 4563 bool highpri = wq->flags & WQ_HIGHPRI;
8a2b7538 4564 int cpu, ret;
30cdf249 4565
636b927e
TH
4566 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
4567 if (!wq->cpu_pwq)
4568 goto enomem;
30cdf249 4569
636b927e 4570 if (!(wq->flags & WQ_UNBOUND)) {
30cdf249 4571 for_each_possible_cpu(cpu) {
687a9aa5 4572 struct pool_workqueue **pwq_p =
ee1ceef7 4573 per_cpu_ptr(wq->cpu_pwq, cpu);
687a9aa5
TH
4574 struct worker_pool *pool =
4575 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
4576
4577 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
4578 pool->node);
4579 if (!*pwq_p)
4580 goto enomem;
f3421797 4581
687a9aa5 4582 init_pwq(*pwq_p, wq, pool);
f147f29e
TH
4583
4584 mutex_lock(&wq->mutex);
687a9aa5 4585 link_pwq(*pwq_p);
f147f29e 4586 mutex_unlock(&wq->mutex);
30cdf249 4587 }
9e8cd2f5 4588 return 0;
509b3204
DJ
4589 }
4590
ffd8bea8 4591 cpus_read_lock();
509b3204 4592 if (wq->flags & __WQ_ORDERED) {
8a2b7538
TH
4593 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4594 /* there should only be single pwq for ordering guarantee */
4595 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4596 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4597 "ordering guarantee broken for workqueue %s\n", wq->name);
30cdf249 4598 } else {
509b3204 4599 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
30cdf249 4600 }
ffd8bea8 4601 cpus_read_unlock();
509b3204 4602
64344553
Z
4603 /* For unbound pwqs, flushing pwq_release_worker ensures that
4604 * pwq_release_workfn() completes before kfree(wq) is called.
4605 */
4606 if (ret)
4607 kthread_flush_worker(pwq_release_worker);
4608
509b3204 4609 return ret;
687a9aa5
TH
4610
4611enomem:
4612 if (wq->cpu_pwq) {
7b42f401
Z
4613 for_each_possible_cpu(cpu) {
4614 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4615
4616 if (pwq)
4617 kmem_cache_free(pwq_cache, pwq);
4618 }
687a9aa5
TH
4619 free_percpu(wq->cpu_pwq);
4620 wq->cpu_pwq = NULL;
4621 }
4622 return -ENOMEM;
0f900049
TH
4623}
4624
f3421797
TH
4625static int wq_clamp_max_active(int max_active, unsigned int flags,
4626 const char *name)
b71ab8c2 4627{
636b927e 4628 if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
044c782c 4629 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
636b927e 4630 max_active, name, 1, WQ_MAX_ACTIVE);
b71ab8c2 4631
636b927e 4632 return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
b71ab8c2
TH
4633}
4634
983c7515
TH
4635/*
4636 * Workqueues which may be used during memory reclaim should have a rescuer
4637 * to guarantee forward progress.
4638 */
4639static int init_rescuer(struct workqueue_struct *wq)
4640{
4641 struct worker *rescuer;
b92b36ea 4642 int ret;
983c7515
TH
4643
4644 if (!(wq->flags & WQ_MEM_RECLAIM))
4645 return 0;
4646
4647 rescuer = alloc_worker(NUMA_NO_NODE);
4c0736a7
PM
4648 if (!rescuer) {
4649 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
4650 wq->name);
983c7515 4651 return -ENOMEM;
4c0736a7 4652 }
983c7515
TH
4653
4654 rescuer->rescue_wq = wq;
b6a46f72 4655 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
f187b697 4656 if (IS_ERR(rescuer->task)) {
b92b36ea 4657 ret = PTR_ERR(rescuer->task);
4c0736a7
PM
4658 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
4659 wq->name, ERR_PTR(ret));
983c7515 4660 kfree(rescuer);
b92b36ea 4661 return ret;
983c7515
TH
4662 }
4663
4664 wq->rescuer = rescuer;
4665 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4666 wake_up_process(rescuer->task);
4667
4668 return 0;
4669}
4670
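/*
 * Caller-side sketch (names are illustrative): any workqueue which may
 * be flushed on the memory-reclaim path should be created with
 * WQ_MEM_RECLAIM so that the rescuer set up above exists.
 *
 *	example_wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 0);
 *	if (!example_wq)
 *		return -ENOMEM;
 */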
a2775bbc 4671__printf(1, 4)
669de8bd
BVA
4672struct workqueue_struct *alloc_workqueue(const char *fmt,
4673 unsigned int flags,
4674 int max_active, ...)
1da177e4 4675{
ecf6881f 4676 va_list args;
1da177e4 4677 struct workqueue_struct *wq;
49e3cf44 4678 struct pool_workqueue *pwq;
b196be89 4679
5c0338c6 4680 /*
fef59c9c
TH
4681 * Unbound && max_active == 1 used to imply ordered, which is no longer
4682 * the case on many machines due to per-pod pools. While
5c0338c6 4683 * alloc_ordered_workqueue() is the right way to create an ordered
fef59c9c 4684 * workqueue, keep the previous behavior to avoid subtle breakages.
5c0338c6
TH
4685 */
4686 if ((flags & WQ_UNBOUND) && max_active == 1)
4687 flags |= __WQ_ORDERED;
4688
cee22a15
VK
4689 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4690 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4691 flags |= WQ_UNBOUND;
4692
ecf6881f 4693 /* allocate wq and format name */
636b927e 4694 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
b196be89 4695 if (!wq)
d2c1d404 4696 return NULL;
b196be89 4697
6029a918 4698 if (flags & WQ_UNBOUND) {
be69d00d 4699 wq->unbound_attrs = alloc_workqueue_attrs();
6029a918
TH
4700 if (!wq->unbound_attrs)
4701 goto err_free_wq;
4702 }
4703
669de8bd 4704 va_start(args, max_active);
ecf6881f 4705 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
b196be89 4706 va_end(args);
1da177e4 4707
d320c038 4708 max_active = max_active ?: WQ_DFL_ACTIVE;
b196be89 4709 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3af24433 4710
b196be89 4711 /* init wq */
97e37d7b 4712 wq->flags = flags;
a0a1a5fd 4713 wq->saved_max_active = max_active;
3c25a55d 4714 mutex_init(&wq->mutex);
112202d9 4715 atomic_set(&wq->nr_pwqs_to_flush, 0);
30cdf249 4716 INIT_LIST_HEAD(&wq->pwqs);
73f53c4a
TH
4717 INIT_LIST_HEAD(&wq->flusher_queue);
4718 INIT_LIST_HEAD(&wq->flusher_overflow);
493a1724 4719 INIT_LIST_HEAD(&wq->maydays);
502ca9d8 4720
669de8bd 4721 wq_init_lockdep(wq);
cce1a165 4722 INIT_LIST_HEAD(&wq->list);
3af24433 4723
30cdf249 4724 if (alloc_and_link_pwqs(wq) < 0)
82efcab3 4725 goto err_unreg_lockdep;
1537663f 4726
40c17f75 4727 if (wq_online && init_rescuer(wq) < 0)
983c7515 4728 goto err_destroy;
3af24433 4729
226223ab
TH
4730 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4731 goto err_destroy;
4732
a0a1a5fd 4733 /*
68e13a67
LJ
4734 * wq_pool_mutex protects global freeze state and workqueues list.
4735 * Grab it, adjust max_active and add the new @wq to workqueues
4736 * list.
a0a1a5fd 4737 */
68e13a67 4738 mutex_lock(&wq_pool_mutex);
a0a1a5fd 4739
a357fc03 4740 mutex_lock(&wq->mutex);
699ce097
TH
4741 for_each_pwq(pwq, wq)
4742 pwq_adjust_max_active(pwq);
a357fc03 4743 mutex_unlock(&wq->mutex);
a0a1a5fd 4744
e2dca7ad 4745 list_add_tail_rcu(&wq->list, &workqueues);
a0a1a5fd 4746
68e13a67 4747 mutex_unlock(&wq_pool_mutex);
1537663f 4748
3af24433 4749 return wq;
d2c1d404 4750
82efcab3 4751err_unreg_lockdep:
009bb421
BVA
4752 wq_unregister_lockdep(wq);
4753 wq_free_lockdep(wq);
82efcab3 4754err_free_wq:
6029a918 4755 free_workqueue_attrs(wq->unbound_attrs);
d2c1d404
TH
4756 kfree(wq);
4757 return NULL;
4758err_destroy:
4759 destroy_workqueue(wq);
4690c4ab 4760 return NULL;
3af24433 4761}
669de8bd 4762EXPORT_SYMBOL_GPL(alloc_workqueue);
1da177e4 4763
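/*
 * Typical calls (illustrative names; semantics per the code above). A
 * per-cpu workqueue with the default max_active, and an unbound
 * freezable one limited to four in-flight work items:
 *
 *	wq = alloc_workqueue("example", 0, 0);
 *	wq = alloc_workqueue("example_unbound", WQ_UNBOUND | WQ_FREEZABLE, 4);
 *
 * Note that WQ_UNBOUND with max_active == 1 is promoted to __WQ_ORDERED
 * above; use alloc_ordered_workqueue() when ordering is actually wanted.
 */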
c29eb853
TH
4764static bool pwq_busy(struct pool_workqueue *pwq)
4765{
4766 int i;
4767
4768 for (i = 0; i < WORK_NR_COLORS; i++)
4769 if (pwq->nr_in_flight[i])
4770 return true;
4771
4772 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4773 return true;
f97a4a1a 4774 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
c29eb853
TH
4775 return true;
4776
4777 return false;
4778}
4779
3af24433
ON
4780/**
4781 * destroy_workqueue - safely terminate a workqueue
4782 * @wq: target workqueue
4783 *
4784 * Safely destroy a workqueue. All work currently pending will be done first.
4785 */
4786void destroy_workqueue(struct workqueue_struct *wq)
4787{
49e3cf44 4788 struct pool_workqueue *pwq;
636b927e 4789 int cpu;
3af24433 4790
def98c84
TH
4791 /*
4792 * Remove it from sysfs first so that sanity check failure doesn't
4793 * lead to sysfs name conflicts.
4794 */
4795 workqueue_sysfs_unregister(wq);
4796
33e3f0a3
RC
4797 /* mark the workqueue destruction is in progress */
4798 mutex_lock(&wq->mutex);
4799 wq->flags |= __WQ_DESTROYING;
4800 mutex_unlock(&wq->mutex);
4801
9c5a2ba7
TH
4802 /* drain it before proceeding with destruction */
4803 drain_workqueue(wq);
c8efcc25 4804
def98c84
TH
4805 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4806 if (wq->rescuer) {
4807 struct worker *rescuer = wq->rescuer;
4808
4809 /* this prevents new queueing */
a9b8a985 4810 raw_spin_lock_irq(&wq_mayday_lock);
def98c84 4811 wq->rescuer = NULL;
a9b8a985 4812 raw_spin_unlock_irq(&wq_mayday_lock);
def98c84
TH
4813
4814 /* rescuer will empty maydays list before exiting */
4815 kthread_stop(rescuer->task);
8efe1223 4816 kfree(rescuer);
def98c84
TH
4817 }
4818
c29eb853
TH
4819 /*
4820 * Sanity checks - grab all the locks so that we wait for all
4821 * in-flight operations which may do put_pwq().
4822 */
4823 mutex_lock(&wq_pool_mutex);
b09f4fd3 4824 mutex_lock(&wq->mutex);
49e3cf44 4825 for_each_pwq(pwq, wq) {
a9b8a985 4826 raw_spin_lock_irq(&pwq->pool->lock);
c29eb853 4827 if (WARN_ON(pwq_busy(pwq))) {
1d9a6159
KW
4828 pr_warn("%s: %s has the following busy pwq\n",
4829 __func__, wq->name);
c29eb853 4830 show_pwq(pwq);
a9b8a985 4831 raw_spin_unlock_irq(&pwq->pool->lock);
b09f4fd3 4832 mutex_unlock(&wq->mutex);
c29eb853 4833 mutex_unlock(&wq_pool_mutex);
55df0933 4834 show_one_workqueue(wq);
6183c009 4835 return;
76af4d93 4836 }
a9b8a985 4837 raw_spin_unlock_irq(&pwq->pool->lock);
6183c009 4838 }
b09f4fd3 4839 mutex_unlock(&wq->mutex);
6183c009 4840
a0a1a5fd
TH
4841 /*
4842 * wq list is used to freeze wq, remove from list after
4843 * flushing is complete in case freeze races us.
4844 */
e2dca7ad 4845 list_del_rcu(&wq->list);
68e13a67 4846 mutex_unlock(&wq_pool_mutex);
3af24433 4847
636b927e
TH
4848 /*
4849 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
4850 * to put the base refs. @wq will be auto-destroyed from the last
4851 * put_pwq(). RCU read lock prevents @wq from going away from under us.
4852 */
4853 rcu_read_lock();
4c16bd32 4854
636b927e
TH
4855 for_each_possible_cpu(cpu) {
4856 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4857 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
dce90d47 4858 put_pwq_unlocked(pwq);
29c91e99 4859 }
636b927e
TH
4860
4861 put_pwq_unlocked(wq->dfl_pwq);
4862 wq->dfl_pwq = NULL;
4863
4864 rcu_read_unlock();
3af24433
ON
4865}
4866EXPORT_SYMBOL_GPL(destroy_workqueue);
4867
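/*
 * Typical teardown sketch (illustrative names): stop anything that can
 * requeue first; destroy_workqueue() then drains whatever is left.
 *
 *	cancel_delayed_work_sync(&example->dwork);
 *	destroy_workqueue(example->wq);
 *	example->wq = NULL;
 */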
dcd989cb
TH
4868/**
4869 * workqueue_set_max_active - adjust max_active of a workqueue
4870 * @wq: target workqueue
4871 * @max_active: new max_active value.
4872 *
4873 * Set max_active of @wq to @max_active.
4874 *
4875 * CONTEXT:
4876 * Don't call from IRQ context.
4877 */
4878void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4879{
49e3cf44 4880 struct pool_workqueue *pwq;
dcd989cb 4881
8719dcea 4882 /* disallow meddling with max_active for ordered workqueues */
0a94efb5 4883 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
8719dcea
TH
4884 return;
4885
f3421797 4886 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
dcd989cb 4887
a357fc03 4888 mutex_lock(&wq->mutex);
dcd989cb 4889
0a94efb5 4890 wq->flags &= ~__WQ_ORDERED;
dcd989cb
TH
4891 wq->saved_max_active = max_active;
4892
699ce097
TH
4893 for_each_pwq(pwq, wq)
4894 pwq_adjust_max_active(pwq);
93981800 4895
a357fc03 4896 mutex_unlock(&wq->mutex);
15316ba8 4897}
dcd989cb 4898EXPORT_SYMBOL_GPL(workqueue_set_max_active);
15316ba8 4899
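/*
 * Sketch (illustrative value): throttle a non-ordered workqueue to at
 * most 16 concurrent work items; the value is clamped as above.
 *
 *	workqueue_set_max_active(example_wq, 16);
 */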
27d4ee03
LW
4900/**
4901 * current_work - retrieve %current task's work struct
4902 *
4903 * Determine if %current task is a workqueue worker and what it's working on.
4904 * Useful to find out the context that the %current task is running in.
4905 *
4906 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4907 */
4908struct work_struct *current_work(void)
4909{
4910 struct worker *worker = current_wq_worker();
4911
4912 return worker ? worker->current_work : NULL;
4913}
4914EXPORT_SYMBOL(current_work);
4915
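/*
 * Sketch (illustrative names): a helper callable both from inside and
 * outside the work item can use current_work() to avoid flushing
 * itself, which would deadlock.
 *
 *	if (current_work() != &example->work)
 *		flush_work(&example->work);
 */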
e6267616
TH
4916/**
4917 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4918 *
4919 * Determine whether %current is a workqueue rescuer. Can be used from
4920 * work functions to determine whether it's being run off the rescuer task.
d185af30
YB
4921 *
4922 * Return: %true if %current is a workqueue rescuer. %false otherwise.
e6267616
TH
4923 */
4924bool current_is_workqueue_rescuer(void)
4925{
4926 struct worker *worker = current_wq_worker();
4927
6a092dfd 4928 return worker && worker->rescue_wq;
e6267616
TH
4929}
4930
eef6a7d5 4931/**
dcd989cb
TH
4932 * workqueue_congested - test whether a workqueue is congested
4933 * @cpu: CPU in question
4934 * @wq: target workqueue
eef6a7d5 4935 *
dcd989cb
TH
4936 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4937 * no synchronization around this function and the test result is
4938 * unreliable and only useful as advisory hints or for debugging.
eef6a7d5 4939 *
d3251859 4940 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
636b927e
TH
4941 *
4942 * With the exception of ordered workqueues, all workqueues have per-cpu
4943 * pool_workqueues, each with its own congested state. A workqueue being
4944 * congested on one CPU doesn't mean that the workqueue is congested on any
4945 * other CPUs.
d3251859 4946 *
d185af30 4947 * Return:
dcd989cb 4948 * %true if congested, %false otherwise.
eef6a7d5 4949 */
d84ff051 4950bool workqueue_congested(int cpu, struct workqueue_struct *wq)
1da177e4 4951{
7fb98ea7 4952 struct pool_workqueue *pwq;
76af4d93
TH
4953 bool ret;
4954
24acfb71
TG
4955 rcu_read_lock();
4956 preempt_disable();
7fb98ea7 4957
d3251859
TH
4958 if (cpu == WORK_CPU_UNBOUND)
4959 cpu = smp_processor_id();
4960
636b927e 4961 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
f97a4a1a 4962 ret = !list_empty(&pwq->inactive_works);
636b927e 4963
24acfb71
TG
4964 preempt_enable();
4965 rcu_read_unlock();
76af4d93
TH
4966
4967 return ret;
1da177e4 4968}
dcd989cb 4969EXPORT_SYMBOL_GPL(workqueue_congested);
1da177e4 4970
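/*
 * Advisory-only sketch (illustrative names): since the result is
 * unreliable, use it at most to steer heuristics such as batching.
 *
 *	if (workqueue_congested(WORK_CPU_UNBOUND, example_wq))
 *		example_batch_locally(example);
 *	else
 *		queue_work(example_wq, &example->work);
 */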
dcd989cb
TH
4971/**
4972 * work_busy - test whether a work is currently pending or running
4973 * @work: the work to be tested
4974 *
4975 * Test whether @work is currently pending or running. There is no
4976 * synchronization around this function and the test result is
4977 * unreliable and only useful as advisory hints or for debugging.
dcd989cb 4978 *
d185af30 4979 * Return:
dcd989cb
TH
4980 * OR'd bitmask of WORK_BUSY_* bits.
4981 */
4982unsigned int work_busy(struct work_struct *work)
1da177e4 4983{
fa1b54e6 4984 struct worker_pool *pool;
dcd989cb
TH
4985 unsigned long flags;
4986 unsigned int ret = 0;
1da177e4 4987
dcd989cb
TH
4988 if (work_pending(work))
4989 ret |= WORK_BUSY_PENDING;
1da177e4 4990
24acfb71 4991 rcu_read_lock();
fa1b54e6 4992 pool = get_work_pool(work);
038366c5 4993 if (pool) {
a9b8a985 4994 raw_spin_lock_irqsave(&pool->lock, flags);
038366c5
LJ
4995 if (find_worker_executing_work(pool, work))
4996 ret |= WORK_BUSY_RUNNING;
a9b8a985 4997 raw_spin_unlock_irqrestore(&pool->lock, flags);
038366c5 4998 }
24acfb71 4999 rcu_read_unlock();
1da177e4 5000
dcd989cb 5001 return ret;
1da177e4 5002}
dcd989cb 5003EXPORT_SYMBOL_GPL(work_busy);
1da177e4 5004
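/*
 * Debugging sketch (illustrative names):
 *
 *	unsigned int busy = work_busy(&example->work);
 *
 *	pr_debug("example work:%s%s\n",
 *		 (busy & WORK_BUSY_PENDING) ? " pending" : "",
 *		 (busy & WORK_BUSY_RUNNING) ? " running" : "");
 */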
3d1cb205
TH
5005/**
5006 * set_worker_desc - set description for the current work item
5007 * @fmt: printf-style format string
5008 * @...: arguments for the format string
5009 *
5010 * This function can be called by a running work function to describe what
5011 * the work item is about. If the worker task gets dumped, this
5012 * information will be printed out together to help debugging. The
5013 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
5014 */
5015void set_worker_desc(const char *fmt, ...)
5016{
5017 struct worker *worker = current_wq_worker();
5018 va_list args;
5019
5020 if (worker) {
5021 va_start(args, fmt);
5022 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
5023 va_end(args);
3d1cb205
TH
5024 }
5025}
5c750d58 5026EXPORT_SYMBOL_GPL(set_worker_desc);
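/*
 * Sketch (illustrative names): a work function can identify the object
 * it is operating on so that print_worker_info() below has something
 * useful to dump.
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		struct example_dev *ed = container_of(work, struct example_dev, work);
 *
 *		set_worker_desc("example/%s", ed->name);
 *		...
 *	}
 */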
3d1cb205
TH
5027
5028/**
5029 * print_worker_info - print out worker information and description
5030 * @log_lvl: the log level to use when printing
5031 * @task: target task
5032 *
5033 * If @task is a worker and currently executing a work item, print out the
5034 * name of the workqueue being serviced and worker description set with
5035 * set_worker_desc() by the currently executing work item.
5036 *
5037 * This function can be safely called on any task as long as the
5038 * task_struct itself is accessible. While safe, this function isn't
5039 * synchronized and may print out mixed-up or garbled output of limited length.
5040 */
5041void print_worker_info(const char *log_lvl, struct task_struct *task)
5042{
5043 work_func_t *fn = NULL;
5044 char name[WQ_NAME_LEN] = { };
5045 char desc[WORKER_DESC_LEN] = { };
5046 struct pool_workqueue *pwq = NULL;
5047 struct workqueue_struct *wq = NULL;
3d1cb205
TH
5048 struct worker *worker;
5049
5050 if (!(task->flags & PF_WQ_WORKER))
5051 return;
5052
5053 /*
5054 * This function is called without any synchronization and @task
5055 * could be in any state. Be careful with dereferences.
5056 */
e700591a 5057 worker = kthread_probe_data(task);
3d1cb205
TH
5058
5059 /*
8bf89593
TH
5060 * Carefully copy the associated workqueue's workfn, name and desc.
5061 * Keep the original last '\0' in case the original is garbage.
3d1cb205 5062 */
fe557319
CH
5063 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
5064 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
5065 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
5066 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
5067 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
3d1cb205
TH
5068
5069 if (fn || name[0] || desc[0]) {
d75f773c 5070 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
8bf89593 5071 if (strcmp(name, desc))
3d1cb205
TH
5072 pr_cont(" (%s)", desc);
5073 pr_cont("\n");
5074 }
5075}
5076
3494fc30
TH
5077static void pr_cont_pool_info(struct worker_pool *pool)
5078{
5079 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
5080 if (pool->node != NUMA_NO_NODE)
5081 pr_cont(" node=%d", pool->node);
5082 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
5083}
5084
c76feb0d
PM
5085struct pr_cont_work_struct {
5086 bool comma;
5087 work_func_t func;
5088 long ctr;
5089};
5090
5091static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
5092{
5093 if (!pcwsp->ctr)
5094 goto out_record;
5095 if (func == pcwsp->func) {
5096 pcwsp->ctr++;
5097 return;
5098 }
5099 if (pcwsp->ctr == 1)
5100 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
5101 else
5102 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
5103 pcwsp->ctr = 0;
5104out_record:
5105 if ((long)func == -1L)
5106 return;
5107 pcwsp->comma = comma;
5108 pcwsp->func = func;
5109 pcwsp->ctr = 1;
5110}
5111
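/*
 * With the run-length compression above, five consecutive pending items
 * sharing one function print as, e.g.:
 *
 *	pending: 5*example_work_fn
 *
 * instead of repeating the function name five times.
 */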
5112static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
3494fc30
TH
5113{
5114 if (work->func == wq_barrier_func) {
5115 struct wq_barrier *barr;
5116
5117 barr = container_of(work, struct wq_barrier, work);
5118
c76feb0d 5119 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
3494fc30
TH
5120 pr_cont("%s BAR(%d)", comma ? "," : "",
5121 task_pid_nr(barr->task));
5122 } else {
c76feb0d
PM
5123 if (!comma)
5124 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5125 pr_cont_work_flush(comma, work->func, pcwsp);
3494fc30
TH
5126 }
5127}
5128
5129static void show_pwq(struct pool_workqueue *pwq)
5130{
c76feb0d 5131 struct pr_cont_work_struct pcws = { .ctr = 0, };
3494fc30
TH
5132 struct worker_pool *pool = pwq->pool;
5133 struct work_struct *work;
5134 struct worker *worker;
5135 bool has_in_flight = false, has_pending = false;
5136 int bkt;
5137
5138 pr_info(" pwq %d:", pool->id);
5139 pr_cont_pool_info(pool);
5140
e66b39af
TH
5141 pr_cont(" active=%d/%d refcnt=%d%s\n",
5142 pwq->nr_active, pwq->max_active, pwq->refcnt,
3494fc30
TH
5143 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
5144
5145 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5146 if (worker->current_pwq == pwq) {
5147 has_in_flight = true;
5148 break;
5149 }
5150 }
5151 if (has_in_flight) {
5152 bool comma = false;
5153
5154 pr_info(" in-flight:");
5155 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5156 if (worker->current_pwq != pwq)
5157 continue;
5158
d75f773c 5159 pr_cont("%s %d%s:%ps", comma ? "," : "",
3494fc30 5160 task_pid_nr(worker->task),
30ae2fc0 5161 worker->rescue_wq ? "(RESCUER)" : "",
3494fc30
TH
5162 worker->current_func);
5163 list_for_each_entry(work, &worker->scheduled, entry)
c76feb0d
PM
5164 pr_cont_work(false, work, &pcws);
5165 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5166 comma = true;
5167 }
5168 pr_cont("\n");
5169 }
5170
5171 list_for_each_entry(work, &pool->worklist, entry) {
5172 if (get_work_pwq(work) == pwq) {
5173 has_pending = true;
5174 break;
5175 }
5176 }
5177 if (has_pending) {
5178 bool comma = false;
5179
5180 pr_info(" pending:");
5181 list_for_each_entry(work, &pool->worklist, entry) {
5182 if (get_work_pwq(work) != pwq)
5183 continue;
5184
c76feb0d 5185 pr_cont_work(comma, work, &pcws);
3494fc30
TH
5186 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5187 }
c76feb0d 5188 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5189 pr_cont("\n");
5190 }
5191
f97a4a1a 5192 if (!list_empty(&pwq->inactive_works)) {
3494fc30
TH
5193 bool comma = false;
5194
f97a4a1a
LJ
5195 pr_info(" inactive:");
5196 list_for_each_entry(work, &pwq->inactive_works, entry) {
c76feb0d 5197 pr_cont_work(comma, work, &pcws);
3494fc30
TH
5198 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5199 }
c76feb0d 5200 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5201 pr_cont("\n");
5202 }
5203}
5204
5205/**
55df0933
IK
5206 * show_one_workqueue - dump state of specified workqueue
5207 * @wq: workqueue whose state will be printed
3494fc30 5208 */
55df0933 5209void show_one_workqueue(struct workqueue_struct *wq)
3494fc30 5210{
55df0933
IK
5211 struct pool_workqueue *pwq;
5212 bool idle = true;
3494fc30 5213 unsigned long flags;
3494fc30 5214
55df0933
IK
5215 for_each_pwq(pwq, wq) {
5216 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5217 idle = false;
5218 break;
3494fc30 5219 }
55df0933
IK
5220 }
5221 if (idle) /* Nothing to print for idle workqueue */
5222 return;
3494fc30 5223
55df0933 5224 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
3494fc30 5225
55df0933
IK
5226 for_each_pwq(pwq, wq) {
5227 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5228 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
62635ea8 5229 /*
55df0933
IK
5230 * Defer printing to avoid deadlocks in console
5231 * drivers that queue work while holding locks
5232 * also taken in their write paths.
62635ea8 5233 */
55df0933
IK
5234 printk_deferred_enter();
5235 show_pwq(pwq);
5236 printk_deferred_exit();
3494fc30 5237 }
55df0933 5238 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
62635ea8
SS
5239 /*
5240 * We could be printing a lot from atomic context, e.g.
55df0933 5241 * sysrq-t -> show_all_workqueues(). Avoid triggering
62635ea8
SS
5242 * hard lockup.
5243 */
5244 touch_nmi_watchdog();
3494fc30
TH
5245 }
5246
55df0933
IK
5247}
5248
5249/**
5250 * show_one_worker_pool - dump state of specified worker pool
5251 * @pool: worker pool whose state will be printed
5252 */
5253static void show_one_worker_pool(struct worker_pool *pool)
5254{
5255 struct worker *worker;
5256 bool first = true;
5257 unsigned long flags;
335a42eb 5258 unsigned long hung = 0;
55df0933
IK
5259
5260 raw_spin_lock_irqsave(&pool->lock, flags);
5261 if (pool->nr_workers == pool->nr_idle)
5262 goto next_pool;
335a42eb
PM
5263
5264 /* How long the first pending work is waiting for a worker. */
5265 if (!list_empty(&pool->worklist))
5266 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5267
55df0933
IK
5268 /*
5269 * Defer printing to avoid deadlocks in console drivers that
5270 * queue work while holding locks also taken in their write
5271 * paths.
5272 */
5273 printk_deferred_enter();
5274 pr_info("pool %d:", pool->id);
5275 pr_cont_pool_info(pool);
335a42eb 5276 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
55df0933
IK
5277 if (pool->manager)
5278 pr_cont(" manager: %d",
5279 task_pid_nr(pool->manager->task));
5280 list_for_each_entry(worker, &pool->idle_list, entry) {
5281 pr_cont(" %s%d", first ? "idle: " : "",
5282 task_pid_nr(worker->task));
5283 first = false;
5284 }
5285 pr_cont("\n");
5286 printk_deferred_exit();
5287next_pool:
5288 raw_spin_unlock_irqrestore(&pool->lock, flags);
5289 /*
5290 * We could be printing a lot from atomic context, e.g.
5291 * sysrq-t -> show_all_workqueues(). Avoid triggering
5292 * hard lockup.
5293 */
5294 touch_nmi_watchdog();
5295
5296}
5297
5298/**
5299 * show_all_workqueues - dump workqueue state
5300 *
704bc669 5301 * Called from a sysrq handler and prints out all busy workqueues and pools.
55df0933
IK
5302 */
5303void show_all_workqueues(void)
5304{
5305 struct workqueue_struct *wq;
5306 struct worker_pool *pool;
5307 int pi;
5308
5309 rcu_read_lock();
5310
5311 pr_info("Showing busy workqueues and worker pools:\n");
5312
5313 list_for_each_entry_rcu(wq, &workqueues, list)
5314 show_one_workqueue(wq);
5315
5316 for_each_pool(pool, pi)
5317 show_one_worker_pool(pool);
5318
24acfb71 5319 rcu_read_unlock();
3494fc30
TH
5320}
5321
704bc669
JL
5322/**
5323 * show_freezable_workqueues - dump freezable workqueue state
5324 *
5325 * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5326 * still busy.
5327 */
5328void show_freezable_workqueues(void)
5329{
5330 struct workqueue_struct *wq;
5331
5332 rcu_read_lock();
5333
5334 pr_info("Showing freezable workqueues that are still busy:\n");
5335
5336 list_for_each_entry_rcu(wq, &workqueues, list) {
5337 if (!(wq->flags & WQ_FREEZABLE))
5338 continue;
5339 show_one_workqueue(wq);
5340 }
5341
5342 rcu_read_unlock();
5343}
5344
6b59808b
TH
5345/* used to show worker information through /proc/PID/{comm,stat,status} */
5346void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5347{
6b59808b
TH
5348 int off;
5349
5350 /* always show the actual comm */
5351 off = strscpy(buf, task->comm, size);
5352 if (off < 0)
5353 return;
5354
197f6acc 5355 /* stabilize PF_WQ_WORKER and worker pool association */
6b59808b
TH
5356 mutex_lock(&wq_pool_attach_mutex);
5357
197f6acc
TH
5358 if (task->flags & PF_WQ_WORKER) {
5359 struct worker *worker = kthread_data(task);
5360 struct worker_pool *pool = worker->pool;
6b59808b 5361
197f6acc 5362 if (pool) {
a9b8a985 5363 raw_spin_lock_irq(&pool->lock);
197f6acc
TH
5364 /*
5365 * ->desc tracks information (wq name or
5366 * set_worker_desc()) for the latest execution. If
5367 * current, prepend '+', otherwise '-'.
5368 */
5369 if (worker->desc[0] != '\0') {
5370 if (worker->current_work)
5371 scnprintf(buf + off, size - off, "+%s",
5372 worker->desc);
5373 else
5374 scnprintf(buf + off, size - off, "-%s",
5375 worker->desc);
5376 }
a9b8a985 5377 raw_spin_unlock_irq(&pool->lock);
6b59808b 5378 }
6b59808b
TH
5379 }
5380
5381 mutex_unlock(&wq_pool_attach_mutex);
5382}
5383
66448bc2
MM
5384#ifdef CONFIG_SMP
5385
db7bccf4
TH
5386/*
5387 * CPU hotplug.
5388 *
e22bee78 5389 * There are two challenges in supporting CPU hotplug. Firstly, there
112202d9 5390 * are a lot of assumptions on strong associations among work, pwq and
706026c2 5391 * pool which make migrating pending and scheduled works very
e22bee78 5392 * difficult to implement without impacting hot paths. Secondly,
94cf58bb 5393 * worker pools serve mix of short, long and very long running works making
e22bee78
TH
5394 * blocked draining impractical.
5395 *
24647570 5396 * This is solved by allowing the pools to be disassociated from the CPU
628c78e7
TH
5397 * running as an unbound one and allowing it to be reattached later if the
5398 * cpu comes back online.
db7bccf4 5399 */
1da177e4 5400
e8b3f8db 5401static void unbind_workers(int cpu)
3af24433 5402{
4ce62e9e 5403 struct worker_pool *pool;
db7bccf4 5404 struct worker *worker;
3af24433 5405
f02ae73a 5406 for_each_cpu_worker_pool(pool, cpu) {
1258fae7 5407 mutex_lock(&wq_pool_attach_mutex);
a9b8a985 5408 raw_spin_lock_irq(&pool->lock);
3af24433 5409
94cf58bb 5410 /*
92f9c5c4 5411 * We've blocked all attach/detach operations. Make all workers
94cf58bb 5412 * unbound and set DISASSOCIATED. Before this, all workers
11b45b0b 5413 * must be on the cpu. After this, they may become diasporas.
b4ac9384
LJ
5414 * And the preemption-disabled sections in their sched callbacks
5415 * are guaranteed to see WORKER_UNBOUND since the code here
5416 * is on the same cpu.
94cf58bb 5417 */
da028469 5418 for_each_pool_worker(worker, pool)
c9e7cf27 5419 worker->flags |= WORKER_UNBOUND;
06ba38a9 5420
24647570 5421 pool->flags |= POOL_DISASSOCIATED;
f2d5a0ee 5422
eb283428 5423 /*
989442d7
LJ
5424 * The handling of nr_running in sched callbacks is disabled
5425 * now. Zap nr_running. After this, nr_running stays zero and
5426 * need_more_worker() and keep_working() are always true as
5427 * long as the worklist is not empty. This pool now behaves as
5428 * an unbound (in terms of concurrency management) pool which
eb283428
LJ
5429 * is served by workers tied to the pool.
5430 */
bc35f7ef 5431 pool->nr_running = 0;
eb283428
LJ
5432
5433 /*
5434 * With concurrency management just turned off, a busy
5435 * worker blocking could lead to lengthy stalls. Kick off
5436 * unbound chain execution of currently pending work items.
5437 */
0219a352 5438 kick_pool(pool);
989442d7 5439
a9b8a985 5440 raw_spin_unlock_irq(&pool->lock);
989442d7 5441
793777bc
VS
5442 for_each_pool_worker(worker, pool)
5443 unbind_worker(worker);
989442d7
LJ
5444
5445 mutex_unlock(&wq_pool_attach_mutex);
eb283428 5446 }
3af24433 5447}
3af24433 5448
bd7c089e
TH
5449/**
5450 * rebind_workers - rebind all workers of a pool to the associated CPU
5451 * @pool: pool of interest
5452 *
a9ab775b 5453 * @pool->cpu is coming online. Rebind all workers to the CPU.
bd7c089e
TH
5454 */
5455static void rebind_workers(struct worker_pool *pool)
5456{
a9ab775b 5457 struct worker *worker;
bd7c089e 5458
1258fae7 5459 lockdep_assert_held(&wq_pool_attach_mutex);
bd7c089e 5460
a9ab775b
TH
5461 /*
5462 * Restore CPU affinity of all workers. As all idle workers should
5463 * be on the run-queue of the associated CPU before any local
402dd89d 5464 * wake-ups for concurrency management happen, restore CPU affinity
a9ab775b
TH
5465 * of all workers first and then clear UNBOUND. As we're called
5466 * from CPU_ONLINE, the following shouldn't fail.
5467 */
c63a2e52
VS
5468 for_each_pool_worker(worker, pool) {
5469 kthread_set_per_cpu(worker->task, pool->cpu);
5470 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
9546b29e 5471 pool_allowed_cpus(pool)) < 0);
c63a2e52 5472 }
bd7c089e 5473
a9b8a985 5474 raw_spin_lock_irq(&pool->lock);
f7c17d26 5475
3de5e884 5476 pool->flags &= ~POOL_DISASSOCIATED;
bd7c089e 5477
da028469 5478 for_each_pool_worker(worker, pool) {
a9ab775b 5479 unsigned int worker_flags = worker->flags;
bd7c089e 5480
a9ab775b
TH
5481 /*
5482 * We want to clear UNBOUND but can't directly call
5483 * worker_clr_flags() or adjust nr_running. Atomically
5484 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5485 * @worker will clear REBOUND using worker_clr_flags() when
5486 * it initiates the next execution cycle thus restoring
5487 * concurrency management. Note that when or whether
5488 * @worker clears REBOUND doesn't affect correctness.
5489 *
c95491ed 5490 * WRITE_ONCE() is necessary because @worker->flags may be
a9ab775b 5491 * tested without holding any lock in
6d25be57 5492 * wq_worker_running(). Without it, NOT_RUNNING test may
a9ab775b
TH
5493 * fail incorrectly leading to premature concurrency
5494 * management operations.
5495 */
5496 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5497 worker_flags |= WORKER_REBOUND;
5498 worker_flags &= ~WORKER_UNBOUND;
c95491ed 5499 WRITE_ONCE(worker->flags, worker_flags);
bd7c089e 5500 }
a9ab775b 5501
a9b8a985 5502 raw_spin_unlock_irq(&pool->lock);
bd7c089e
TH
5503}
5504
7dbc725e
TH
5505/**
5506 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5507 * @pool: unbound pool of interest
5508 * @cpu: the CPU which is coming up
5509 *
5510 * An unbound pool may end up with a cpumask which doesn't have any online
5511 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5512 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
5513 * online CPU before, cpus_allowed of all its workers should be restored.
5514 */
5515static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5516{
5517 static cpumask_t cpumask;
5518 struct worker *worker;
7dbc725e 5519
1258fae7 5520 lockdep_assert_held(&wq_pool_attach_mutex);
7dbc725e
TH
5521
5522 /* is @cpu allowed for @pool? */
5523 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5524 return;
5525
7dbc725e 5526 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
7dbc725e
TH
5527
5528 /* as we're called from CPU_ONLINE, the following shouldn't fail */
da028469 5529 for_each_pool_worker(worker, pool)
d945b5e9 5530 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
7dbc725e
TH
5531}
5532
7ee681b2
TG
5533int workqueue_prepare_cpu(unsigned int cpu)
5534{
5535 struct worker_pool *pool;
5536
5537 for_each_cpu_worker_pool(pool, cpu) {
5538 if (pool->nr_workers)
5539 continue;
5540 if (!create_worker(pool))
5541 return -ENOMEM;
5542 }
5543 return 0;
5544}
5545
5546int workqueue_online_cpu(unsigned int cpu)
3af24433 5547{
4ce62e9e 5548 struct worker_pool *pool;
4c16bd32 5549 struct workqueue_struct *wq;
7dbc725e 5550 int pi;
3ce63377 5551
7ee681b2 5552 mutex_lock(&wq_pool_mutex);
7dbc725e 5553
7ee681b2 5554 for_each_pool(pool, pi) {
1258fae7 5555 mutex_lock(&wq_pool_attach_mutex);
94cf58bb 5556
7ee681b2
TG
5557 if (pool->cpu == cpu)
5558 rebind_workers(pool);
5559 else if (pool->cpu < 0)
5560 restore_unbound_workers_cpumask(pool, cpu);
94cf58bb 5561
1258fae7 5562 mutex_unlock(&wq_pool_attach_mutex);
7ee681b2 5563 }
6ba94429 5564
fef59c9c 5565 /* update pod affinity of unbound workqueues */
4cbfd3de 5566 list_for_each_entry(wq, &workqueues, list) {
84193c07
TH
5567 struct workqueue_attrs *attrs = wq->unbound_attrs;
5568
5569 if (attrs) {
5570 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5571 int tcpu;
4cbfd3de 5572
84193c07 5573 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
fef59c9c 5574 wq_update_pod(wq, tcpu, cpu, true);
4cbfd3de
TH
5575 }
5576 }
6ba94429 5577
7ee681b2
TG
5578 mutex_unlock(&wq_pool_mutex);
5579 return 0;
6ba94429
FW
5580}
5581
7ee681b2 5582int workqueue_offline_cpu(unsigned int cpu)
6ba94429 5583{
6ba94429
FW
5584 struct workqueue_struct *wq;
5585
7ee681b2 5586 /* unbinding per-cpu workers should happen on the local CPU */
e8b3f8db
LJ
5587 if (WARN_ON(cpu != smp_processor_id()))
5588 return -1;
5589
5590 unbind_workers(cpu);
7ee681b2 5591
fef59c9c 5592 /* update pod affinity of unbound workqueues */
7ee681b2 5593 mutex_lock(&wq_pool_mutex);
4cbfd3de 5594 list_for_each_entry(wq, &workqueues, list) {
84193c07
TH
5595 struct workqueue_attrs *attrs = wq->unbound_attrs;
5596
5597 if (attrs) {
5598 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5599 int tcpu;
4cbfd3de 5600
84193c07 5601 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
fef59c9c 5602 wq_update_pod(wq, tcpu, cpu, false);
4cbfd3de
TH
5603 }
5604 }
7ee681b2
TG
5605 mutex_unlock(&wq_pool_mutex);
5606
7ee681b2 5607 return 0;
6ba94429
FW
5608}
5609
6ba94429
FW
5610struct work_for_cpu {
5611 struct work_struct work;
5612 long (*fn)(void *);
5613 void *arg;
5614 long ret;
5615};
5616
5617static void work_for_cpu_fn(struct work_struct *work)
5618{
5619 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5620
5621 wfc->ret = wfc->fn(wfc->arg);
5622}
5623
5624/**
265f3ed0 5625 * work_on_cpu_key - run a function in thread context on a particular cpu
6ba94429
FW
5626 * @cpu: the cpu to run on
5627 * @fn: the function to run
5628 * @arg: the function arg
265f3ed0 5629 * @key: The lock class key for lock debugging purposes
6ba94429
FW
5630 *
5631 * It is up to the caller to ensure that the cpu doesn't go offline.
5632 * The caller must not hold any locks which would prevent @fn from completing.
5633 *
5634 * Return: The value @fn returns.
5635 */
265f3ed0
FW
5636long work_on_cpu_key(int cpu, long (*fn)(void *),
5637 void *arg, struct lock_class_key *key)
6ba94429
FW
5638{
5639 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5640
265f3ed0 5641 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
6ba94429
FW
5642 schedule_work_on(cpu, &wfc.work);
5643 flush_work(&wfc.work);
5644 destroy_work_on_stack(&wfc.work);
5645 return wfc.ret;
5646}
265f3ed0 5647EXPORT_SYMBOL_GPL(work_on_cpu_key);
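/*
 * Sketch (illustrative names): run a CPU-local query on a specific CPU
 * through the work_on_cpu() convenience wrapper, which supplies @key.
 *
 *	static long example_read_local(void *arg)
 *	{
 *		*(unsigned int *)arg = smp_processor_id();
 *		return 0;
 *	}
 *
 *	unsigned int id;
 *	long ret = work_on_cpu(3, example_read_local, &id);
 */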
0e8d6a93
TG
5648
5649/**
265f3ed0 5650 * work_on_cpu_safe_key - run a function in thread context on a particular cpu
0e8d6a93
TG
5651 * @cpu: the cpu to run on
5652 * @fn: the function to run
5653 * @arg: the function argument
265f3ed0 5654 * @key: The lock class key for lock debugging purposes
0e8d6a93
TG
5655 *
5656 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5657 * any locks which would prevent @fn from completing.
5658 *
5659 * Return: The value @fn returns.
5660 */
265f3ed0
FW
5661long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
5662 void *arg, struct lock_class_key *key)
0e8d6a93
TG
5663{
5664 long ret = -ENODEV;
5665
ffd8bea8 5666 cpus_read_lock();
0e8d6a93 5667 if (cpu_online(cpu))
265f3ed0 5668 ret = work_on_cpu_key(cpu, fn, arg, key);
ffd8bea8 5669 cpus_read_unlock();
0e8d6a93
TG
5670 return ret;
5671}
265f3ed0 5672EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
6ba94429
FW
5673#endif /* CONFIG_SMP */
5674
5675#ifdef CONFIG_FREEZER
5676
5677/**
5678 * freeze_workqueues_begin - begin freezing workqueues
5679 *
5680 * Start freezing workqueues. After this function returns, all freezable
f97a4a1a 5681 * workqueues will queue new works to their inactive_works list instead of
6ba94429
FW
5682 * pool->worklist.
5683 *
5684 * CONTEXT:
5685 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5686 */
5687void freeze_workqueues_begin(void)
5688{
5689 struct workqueue_struct *wq;
5690 struct pool_workqueue *pwq;
5691
5692 mutex_lock(&wq_pool_mutex);
5693
5694 WARN_ON_ONCE(workqueue_freezing);
5695 workqueue_freezing = true;
5696
5697 list_for_each_entry(wq, &workqueues, list) {
5698 mutex_lock(&wq->mutex);
5699 for_each_pwq(pwq, wq)
5700 pwq_adjust_max_active(pwq);
5701 mutex_unlock(&wq->mutex);
5702 }
5703
5704 mutex_unlock(&wq_pool_mutex);
5705}
5706
5707/**
5708 * freeze_workqueues_busy - are freezable workqueues still busy?
5709 *
5710 * Check whether freezing is complete. This function must be called
5711 * between freeze_workqueues_begin() and thaw_workqueues().
5712 *
5713 * CONTEXT:
5714 * Grabs and releases wq_pool_mutex.
5715 *
5716 * Return:
5717 * %true if some freezable workqueues are still busy. %false if freezing
5718 * is complete.
5719 */
5720bool freeze_workqueues_busy(void)
5721{
5722 bool busy = false;
5723 struct workqueue_struct *wq;
5724 struct pool_workqueue *pwq;
5725
5726 mutex_lock(&wq_pool_mutex);
5727
5728 WARN_ON_ONCE(!workqueue_freezing);
5729
5730 list_for_each_entry(wq, &workqueues, list) {
5731 if (!(wq->flags & WQ_FREEZABLE))
5732 continue;
5733 /*
5734 * nr_active is monotonically decreasing. It's safe
5735 * to peek without lock.
5736 */
24acfb71 5737 rcu_read_lock();
6ba94429
FW
5738 for_each_pwq(pwq, wq) {
5739 WARN_ON_ONCE(pwq->nr_active < 0);
5740 if (pwq->nr_active) {
5741 busy = true;
24acfb71 5742 rcu_read_unlock();
6ba94429
FW
5743 goto out_unlock;
5744 }
5745 }
24acfb71 5746 rcu_read_unlock();
6ba94429
FW
5747 }
5748out_unlock:
5749 mutex_unlock(&wq_pool_mutex);
5750 return busy;
5751}
5752
5753/**
5754 * thaw_workqueues - thaw workqueues
5755 *
5756 * Thaw workqueues. Normal queueing is restored and all collected
5757 * frozen works are transferred to their respective pool worklists.
5758 *
5759 * CONTEXT:
5760 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5761 */
5762void thaw_workqueues(void)
5763{
5764 struct workqueue_struct *wq;
5765 struct pool_workqueue *pwq;
5766
5767 mutex_lock(&wq_pool_mutex);
5768
5769 if (!workqueue_freezing)
5770 goto out_unlock;
5771
5772 workqueue_freezing = false;
5773
5774 /* restore max_active and repopulate worklist */
5775 list_for_each_entry(wq, &workqueues, list) {
5776 mutex_lock(&wq->mutex);
5777 for_each_pwq(pwq, wq)
5778 pwq_adjust_max_active(pwq);
5779 mutex_unlock(&wq->mutex);
5780 }
5781
5782out_unlock:
5783 mutex_unlock(&wq_pool_mutex);
5784}
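/*
 * Rough sequence driven by the PM core (simplified sketch, not the
 * actual kernel/power code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	(system sleeps)
 *	thaw_workqueues();
 */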
5785#endif /* CONFIG_FREEZER */
5786
99c621ef 5787static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
042f7df1
LJ
5788{
5789 LIST_HEAD(ctxs);
5790 int ret = 0;
5791 struct workqueue_struct *wq;
5792 struct apply_wqattrs_ctx *ctx, *n;
5793
5794 lockdep_assert_held(&wq_pool_mutex);
5795
5796 list_for_each_entry(wq, &workqueues, list) {
5797 if (!(wq->flags & WQ_UNBOUND))
5798 continue;
ca10d851 5799
042f7df1 5800 /* creating multiple pwqs breaks ordering guarantee */
ca10d851
WL
5801 if (!list_empty(&wq->pwqs)) {
5802 if (wq->flags & __WQ_ORDERED_EXPLICIT)
5803 continue;
5804 wq->flags &= ~__WQ_ORDERED;
5805 }
042f7df1 5806
99c621ef 5807 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
84193c07
TH
5808 if (IS_ERR(ctx)) {
5809 ret = PTR_ERR(ctx);
042f7df1
LJ
5810 break;
5811 }
5812
5813 list_add_tail(&ctx->list, &ctxs);
5814 }
5815
5816 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5817 if (!ret)
5818 apply_wqattrs_commit(ctx);
5819 apply_wqattrs_cleanup(ctx);
5820 }
5821
99c621ef
LJ
5822 if (!ret) {
5823 mutex_lock(&wq_pool_attach_mutex);
5824 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5825 mutex_unlock(&wq_pool_attach_mutex);
5826 }
042f7df1
LJ
5827 return ret;
5828}
5829
5830/**
5831 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5832 * @cpumask: the cpumask to set
5833 *
5834 * The low-level workqueues cpumask is a global cpumask that limits
5835 * the affinity of all unbound workqueues. This function checks the @cpumask
5836 * and applies it to all unbound workqueues, updating all of their pwqs.
5837 *
67dc8325 5838 * Return: 0 - Success
042f7df1
LJ
5839 * -EINVAL - Invalid @cpumask
5840 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
5841 */
5842int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5843{
5844 int ret = -EINVAL;
042f7df1 5845
c98a9805
TS
5846 /*
5847 * Not excluding isolated cpus on purpose.
5848 * If the user wishes to include them, we allow that.
5849 */
042f7df1
LJ
5850 cpumask_and(cpumask, cpumask, cpu_possible_mask);
5851 if (!cpumask_empty(cpumask)) {
a0111cf6 5852 apply_wqattrs_lock();
d25302e4
MD
5853 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
5854 ret = 0;
5855 goto out_unlock;
5856 }
5857
99c621ef 5858 ret = workqueue_apply_unbound_cpumask(cpumask);
042f7df1 5859
d25302e4 5860out_unlock:
a0111cf6 5861 apply_wqattrs_unlock();
042f7df1 5862 }
042f7df1 5863
042f7df1
LJ
5864 return ret;
5865}
5866
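/*
 * Caller-side sketch (illustrative; mirrors wq_unbound_cpumask_store()
 * below): the caller retains ownership of the mask and must free it.
 *
 *	cpumask_var_t mask;
 *	int ret;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_online_mask);
 *	cpumask_clear_cpu(0, mask);
 *	ret = workqueue_set_unbound_cpumask(mask);
 *	free_cpumask_var(mask);
 */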
63c5484e
TH
5867static int parse_affn_scope(const char *val)
5868{
5869 int i;
5870
5871 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
5872 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
5873 return i;
5874 }
5875 return -EINVAL;
5876}
5877
5878static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
5879{
523a301e
TH
5880 struct workqueue_struct *wq;
5881 int affn, cpu;
63c5484e
TH
5882
5883 affn = parse_affn_scope(val);
5884 if (affn < 0)
5885 return affn;
523a301e
TH
5886 if (affn == WQ_AFFN_DFL)
5887 return -EINVAL;
5888
5889 cpus_read_lock();
5890 mutex_lock(&wq_pool_mutex);
63c5484e
TH
5891
5892 wq_affn_dfl = affn;
523a301e
TH
5893
5894 list_for_each_entry(wq, &workqueues, list) {
5895 for_each_online_cpu(cpu) {
5896 wq_update_pod(wq, cpu, cpu, true);
5897 }
5898 }
5899
5900 mutex_unlock(&wq_pool_mutex);
5901 cpus_read_unlock();
5902
63c5484e
TH
5903 return 0;
5904}
5905
5906static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
5907{
5908 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
5909}
5910
5911static const struct kernel_param_ops wq_affn_dfl_ops = {
5912 .set = wq_affn_dfl_set,
5913 .get = wq_affn_dfl_get,
5914};
5915
5916module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
5917
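/*
 * The default scope can be chosen at boot or at runtime, e.g.:
 *
 *	workqueue.default_affinity_scope=cache		(kernel cmdline)
 *	echo numa > /sys/module/workqueue/parameters/default_affinity_scope
 */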
6ba94429
FW
5918#ifdef CONFIG_SYSFS
5919/*
5920 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5921 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5922 * following attributes.
5923 *
63c5484e
TH
5924 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5925 * max_active RW int : maximum number of in-flight work items
6ba94429
FW
5926 *
5927 * Unbound workqueues have the following extra attributes.
5928 *
63c5484e
TH
5929 * nice RW int : nice value of the workers
5930 * cpumask RW mask : bitmask of allowed CPUs for the workers
5931 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none)
8639eceb 5932 * affinity_strict RW bool : worker CPU affinity is strict
6ba94429
FW
5933 */
5934struct wq_device {
5935 struct workqueue_struct *wq;
5936 struct device dev;
5937};
5938
5939static struct workqueue_struct *dev_to_wq(struct device *dev)
5940{
5941 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5942
5943 return wq_dev->wq;
5944}
5945
5946static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5947 char *buf)
5948{
5949 struct workqueue_struct *wq = dev_to_wq(dev);
5950
5951 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5952}
5953static DEVICE_ATTR_RO(per_cpu);
5954
5955static ssize_t max_active_show(struct device *dev,
5956 struct device_attribute *attr, char *buf)
5957{
5958 struct workqueue_struct *wq = dev_to_wq(dev);
5959
5960 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5961}
5962
5963static ssize_t max_active_store(struct device *dev,
5964 struct device_attribute *attr, const char *buf,
5965 size_t count)
5966{
5967 struct workqueue_struct *wq = dev_to_wq(dev);
5968 int val;
5969
5970 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5971 return -EINVAL;
5972
5973 workqueue_set_max_active(wq, val);
5974 return count;
5975}
5976static DEVICE_ATTR_RW(max_active);
5977
5978static struct attribute *wq_sysfs_attrs[] = {
5979 &dev_attr_per_cpu.attr,
5980 &dev_attr_max_active.attr,
5981 NULL,
5982};
5983ATTRIBUTE_GROUPS(wq_sysfs);
5984
6ba94429
FW
5985static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5986 char *buf)
5987{
5988 struct workqueue_struct *wq = dev_to_wq(dev);
5989 int written;
5990
5991 mutex_lock(&wq->mutex);
5992 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5993 mutex_unlock(&wq->mutex);
5994
5995 return written;
5996}
5997
5998/* prepare workqueue_attrs for sysfs store operations */
5999static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
6000{
6001 struct workqueue_attrs *attrs;
6002
899a94fe
LJ
6003 lockdep_assert_held(&wq_pool_mutex);
6004
be69d00d 6005 attrs = alloc_workqueue_attrs();
6ba94429
FW
6006 if (!attrs)
6007 return NULL;
6008
6ba94429 6009 copy_workqueue_attrs(attrs, wq->unbound_attrs);
6ba94429
FW
6010 return attrs;
6011}
6012
6013static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
6014 const char *buf, size_t count)
6015{
6016 struct workqueue_struct *wq = dev_to_wq(dev);
6017 struct workqueue_attrs *attrs;
d4d3e257
LJ
6018 int ret = -ENOMEM;
6019
6020 apply_wqattrs_lock();
6ba94429
FW
6021
6022 attrs = wq_sysfs_prep_attrs(wq);
6023 if (!attrs)
d4d3e257 6024 goto out_unlock;
6ba94429
FW
6025
6026 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
6027 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
d4d3e257 6028 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429
FW
6029 else
6030 ret = -EINVAL;
6031
d4d3e257
LJ
6032out_unlock:
6033 apply_wqattrs_unlock();
6ba94429
FW
6034 free_workqueue_attrs(attrs);
6035 return ret ?: count;
6036}
6037
6038static ssize_t wq_cpumask_show(struct device *dev,
6039 struct device_attribute *attr, char *buf)
6040{
6041 struct workqueue_struct *wq = dev_to_wq(dev);
6042 int written;
6043
6044 mutex_lock(&wq->mutex);
6045 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6046 cpumask_pr_args(wq->unbound_attrs->cpumask));
6047 mutex_unlock(&wq->mutex);
6048 return written;
6049}
6050
6051static ssize_t wq_cpumask_store(struct device *dev,
6052 struct device_attribute *attr,
6053 const char *buf, size_t count)
6054{
6055 struct workqueue_struct *wq = dev_to_wq(dev);
6056 struct workqueue_attrs *attrs;
d4d3e257
LJ
6057 int ret = -ENOMEM;
6058
6059 apply_wqattrs_lock();
6ba94429
FW
6060
6061 attrs = wq_sysfs_prep_attrs(wq);
6062 if (!attrs)
d4d3e257 6063 goto out_unlock;
6ba94429
FW
6064
6065 ret = cpumask_parse(buf, attrs->cpumask);
6066 if (!ret)
d4d3e257 6067 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429 6068
d4d3e257
LJ
6069out_unlock:
6070 apply_wqattrs_unlock();
6ba94429
FW
6071 free_workqueue_attrs(attrs);
6072 return ret ?: count;
6073}
6074
63c5484e
TH
6075static ssize_t wq_affn_scope_show(struct device *dev,
6076 struct device_attribute *attr, char *buf)
6077{
6078 struct workqueue_struct *wq = dev_to_wq(dev);
6079 int written;
6080
6081 mutex_lock(&wq->mutex);
523a301e
TH
6082 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
6083 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
6084 wq_affn_names[WQ_AFFN_DFL],
6085 wq_affn_names[wq_affn_dfl]);
6086 else
6087 written = scnprintf(buf, PAGE_SIZE, "%s\n",
6088 wq_affn_names[wq->unbound_attrs->affn_scope]);
63c5484e
TH
6089 mutex_unlock(&wq->mutex);
6090
6091 return written;
6092}
6093
6094static ssize_t wq_affn_scope_store(struct device *dev,
6095 struct device_attribute *attr,
6096 const char *buf, size_t count)
6097{
6098 struct workqueue_struct *wq = dev_to_wq(dev);
6099 struct workqueue_attrs *attrs;
6100 int affn, ret = -ENOMEM;
6101
6102 affn = parse_affn_scope(buf);
6103 if (affn < 0)
6104 return affn;
6105
6106 apply_wqattrs_lock();
6107 attrs = wq_sysfs_prep_attrs(wq);
6108 if (attrs) {
6109 attrs->affn_scope = affn;
6110 ret = apply_workqueue_attrs_locked(wq, attrs);
6111 }
6112 apply_wqattrs_unlock();
6113 free_workqueue_attrs(attrs);
6114 return ret ?: count;
6115}
6116
8639eceb
TH
6117static ssize_t wq_affinity_strict_show(struct device *dev,
6118 struct device_attribute *attr, char *buf)
6119{
6120 struct workqueue_struct *wq = dev_to_wq(dev);
6121
6122 return scnprintf(buf, PAGE_SIZE, "%d\n",
6123 wq->unbound_attrs->affn_strict);
6124}
6125
6126static ssize_t wq_affinity_strict_store(struct device *dev,
6127 struct device_attribute *attr,
6128 const char *buf, size_t count)
6129{
6130 struct workqueue_struct *wq = dev_to_wq(dev);
6131 struct workqueue_attrs *attrs;
6132 int v, ret = -ENOMEM;
6133
6134 if (sscanf(buf, "%d", &v) != 1)
6135 return -EINVAL;
6136
6137 apply_wqattrs_lock();
6138 attrs = wq_sysfs_prep_attrs(wq);
6139 if (attrs) {
6140 attrs->affn_strict = (bool)v;
6141 ret = apply_workqueue_attrs_locked(wq, attrs);
6142 }
6143 apply_wqattrs_unlock();
6144 free_workqueue_attrs(attrs);
6145 return ret ?: count;
6146}
6147
6ba94429 6148static struct device_attribute wq_sysfs_unbound_attrs[] = {
6ba94429
FW
6149 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
6150 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
63c5484e 6151 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
8639eceb 6152 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
6ba94429
FW
6153 __ATTR_NULL,
6154};
8ccad40d 6155
6ba94429
FW
6156static struct bus_type wq_subsys = {
6157 .name = "workqueue",
6158 .dev_groups = wq_sysfs_groups,
2d3854a3
RR
6159};
6160
b05a7928
FW
6161static ssize_t wq_unbound_cpumask_show(struct device *dev,
6162 struct device_attribute *attr, char *buf)
6163{
6164 int written;
6165
042f7df1 6166 mutex_lock(&wq_pool_mutex);
b05a7928
FW
6167 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6168 cpumask_pr_args(wq_unbound_cpumask));
042f7df1 6169 mutex_unlock(&wq_pool_mutex);
b05a7928
FW
6170
6171 return written;
6172}
6173
042f7df1
LJ
6174static ssize_t wq_unbound_cpumask_store(struct device *dev,
6175 struct device_attribute *attr, const char *buf, size_t count)
6176{
6177 cpumask_var_t cpumask;
6178 int ret;
6179
6180 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6181 return -ENOMEM;
6182
6183 ret = cpumask_parse(buf, cpumask);
6184 if (!ret)
6185 ret = workqueue_set_unbound_cpumask(cpumask);
6186
6187 free_cpumask_var(cpumask);
6188 return ret ? ret : count;
6189}
6190
b05a7928 6191static struct device_attribute wq_sysfs_cpumask_attr =
042f7df1
LJ
6192 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
6193 wq_unbound_cpumask_store);
b05a7928 6194
6ba94429 6195static int __init wq_sysfs_init(void)
2d3854a3 6196{
686f6697 6197 struct device *dev_root;
b05a7928
FW
6198 int err;
6199
6200 err = subsys_virtual_register(&wq_subsys, NULL);
6201 if (err)
6202 return err;
6203
686f6697
GKH
6204 dev_root = bus_get_dev_root(&wq_subsys);
6205 if (dev_root) {
6206 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
6207 put_device(dev_root);
6208 }
6209 return err;
2d3854a3 6210}
6ba94429 6211core_initcall(wq_sysfs_init);
2d3854a3 6212
6ba94429 6213static void wq_device_release(struct device *dev)
2d3854a3 6214{
6ba94429 6215 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6b44003e 6216
6ba94429 6217 kfree(wq_dev);
2d3854a3 6218}
a0a1a5fd
TH
6219
6220/**
6ba94429
FW
6221 * workqueue_sysfs_register - make a workqueue visible in sysfs
6222 * @wq: the workqueue to register
a0a1a5fd 6223 *
6ba94429
FW
6224 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
6225 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
6226 * which is the preferred method.
a0a1a5fd 6227 *
6ba94429
FW
6228 * A workqueue user should use this function directly iff it wants to apply
6229 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
6230 * apply_workqueue_attrs() may race against userland updating the
6231 * attributes.
6232 *
6233 * Return: 0 on success, -errno on failure.
a0a1a5fd 6234 */
6ba94429 6235int workqueue_sysfs_register(struct workqueue_struct *wq)
a0a1a5fd 6236{
6ba94429
FW
6237 struct wq_device *wq_dev;
6238 int ret;
a0a1a5fd 6239
6ba94429 6240 /*
402dd89d 6241 * Adjusting max_active or creating new pwqs by applying
6ba94429
FW
6242 * attributes breaks ordering guarantee. Disallow exposing ordered
6243 * workqueues.
6244 */
0a94efb5 6245 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6ba94429 6246 return -EINVAL;
a0a1a5fd 6247
6ba94429
FW
6248 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
6249 if (!wq_dev)
6250 return -ENOMEM;
5bcab335 6251
6ba94429
FW
6252 wq_dev->wq = wq;
6253 wq_dev->dev.bus = &wq_subsys;
6ba94429 6254 wq_dev->dev.release = wq_device_release;
23217b44 6255 dev_set_name(&wq_dev->dev, "%s", wq->name);
a0a1a5fd 6256
6ba94429
FW
6257 /*
6258 * unbound_attrs are created separately. Suppress uevent until
6259 * everything is ready.
6260 */
6261 dev_set_uevent_suppress(&wq_dev->dev, true);
a0a1a5fd 6262
6ba94429
FW
6263 ret = device_register(&wq_dev->dev);
6264 if (ret) {
537f4146 6265 put_device(&wq_dev->dev);
6ba94429
FW
6266 wq->wq_dev = NULL;
6267 return ret;
6268 }
a0a1a5fd 6269
6ba94429
FW
6270 if (wq->flags & WQ_UNBOUND) {
6271 struct device_attribute *attr;
a0a1a5fd 6272
6ba94429
FW
6273 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
6274 ret = device_create_file(&wq_dev->dev, attr);
6275 if (ret) {
6276 device_unregister(&wq_dev->dev);
6277 wq->wq_dev = NULL;
6278 return ret;
a0a1a5fd
TH
6279 }
6280 }
6281 }
6ba94429
FW
6282
6283 dev_set_uevent_suppress(&wq_dev->dev, false);
6284 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
6285 return 0;
a0a1a5fd
TH
6286}
6287
6288/**
6ba94429
FW
6289 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
6290 * @wq: the workqueue to unregister
a0a1a5fd 6291 *
6ba94429 6292 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
a0a1a5fd 6293 */
6ba94429 6294static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
a0a1a5fd 6295{
6ba94429 6296 struct wq_device *wq_dev = wq->wq_dev;
8b03ae3c 6297
6ba94429
FW
6298 if (!wq->wq_dev)
6299 return;
a0a1a5fd 6300
6ba94429
FW
6301 wq->wq_dev = NULL;
6302 device_unregister(&wq_dev->dev);
a0a1a5fd 6303}
6ba94429
FW
6304#else /* CONFIG_SYSFS */
6305static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
6306#endif /* CONFIG_SYSFS */
a0a1a5fd 6307
82607adc
TH
6308/*
6309 * Workqueue watchdog.
6310 *
6311 * A stall may be caused by various bugs - a missing WQ_MEM_RECLAIM, an
6312 * illegal flush dependency, or a concurrency-managed work item which stays
6313 * RUNNING indefinitely. Workqueue stalls can be very difficult to debug as the
6314 * usual warning mechanisms don't trigger and internal workqueue state is
6315 * largely opaque.
6316 *
6317 * The workqueue watchdog monitors all worker pools periodically and dumps
6318 * state if some pools fail to make forward progress for a while, where
6319 * forward progress is defined as the first item on ->worklist changing.
6320 *
6321 * This mechanism is controlled through the kernel parameter
6322 * "workqueue.watchdog_thresh" which can be updated at runtime through the
6323 * corresponding sysfs parameter file.
6324 */
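/*
 * For example, booting with "workqueue.watchdog_thresh=60", or writing
 *
 *   echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
 *
 * at runtime, raises the threshold to 60 seconds; writing 0 disables the
 * watchdog entirely.
 */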
6325#ifdef CONFIG_WQ_WATCHDOG
6326
82607adc 6327static unsigned long wq_watchdog_thresh = 30;
5cd79d6a 6328static struct timer_list wq_watchdog_timer;
82607adc
TH
6329
6330static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6331static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6332
cd2440d6
PM
6333/*
6334 * Show workers that might prevent the processing of pending work items.
6335 * The only candidates are CPU-bound workers in the running state.
6336 * In all other situations, pending work items should be handled by
6337 * another idle worker.
6338 */
6339static void show_cpu_pool_hog(struct worker_pool *pool)
6340{
6341 struct worker *worker;
6342 unsigned long flags;
6343 int bkt;
6344
6345 raw_spin_lock_irqsave(&pool->lock, flags);
6346
6347 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6348 if (task_is_running(worker->task)) {
6349 /*
6350 * Defer printing to avoid deadlocks in console
6351 * drivers that queue work while holding locks
6352 * also taken in their write paths.
6353 */
6354 printk_deferred_enter();
6355
6356 pr_info("pool %d:\n", pool->id);
6357 sched_show_task(worker->task);
6358
6359 printk_deferred_exit();
6360 }
6361 }
6362
6363 raw_spin_unlock_irqrestore(&pool->lock, flags);
6364}
6365
6366static void show_cpu_pools_hogs(void)
6367{
6368 struct worker_pool *pool;
6369 int pi;
6370
6371 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6372
6373 rcu_read_lock();
6374
6375 for_each_pool(pool, pi) {
6376 if (pool->cpu_stall)
6377 show_cpu_pool_hog(pool);
6379 }
6380
6381 rcu_read_unlock();
6382}
6383
82607adc
TH
6384static void wq_watchdog_reset_touched(void)
6385{
6386 int cpu;
6387
6388 wq_watchdog_touched = jiffies;
6389 for_each_possible_cpu(cpu)
6390 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6391}
6392
5cd79d6a 6393static void wq_watchdog_timer_fn(struct timer_list *unused)
82607adc
TH
6394{
6395 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6396 bool lockup_detected = false;
cd2440d6 6397 bool cpu_pool_stall = false;
940d71c6 6398 unsigned long now = jiffies;
82607adc
TH
6399 struct worker_pool *pool;
6400 int pi;
6401
6402 if (!thresh)
6403 return;
6404
6405 rcu_read_lock();
6406
6407 for_each_pool(pool, pi) {
6408 unsigned long pool_ts, touched, ts;
6409
cd2440d6 6410 pool->cpu_stall = false;
82607adc
TH
6411 if (list_empty(&pool->worklist))
6412 continue;
6413
940d71c6
SS
6414 /*
6415 * If a virtual machine is stopped by the host, it can look
6416 * like a stall to the watchdog.
6417 */
6418 kvm_check_and_clear_guest_paused();
6419
82607adc 6420 /* get the latest of pool and touched timestamps */
89e28ce6
WQ
6421 if (pool->cpu >= 0)
6422 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
6423 else
6424 touched = READ_ONCE(wq_watchdog_touched);
82607adc 6425 pool_ts = READ_ONCE(pool->watchdog_ts);
82607adc
TH
6426
6427 if (time_after(pool_ts, touched))
6428 ts = pool_ts;
6429 else
6430 ts = touched;
6431
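		/*
		 * e.g. with the default 30s thresh, a stall is reported only
		 * if neither the pool made progress nor the watchdog was
		 * touched within the last 30 seconds.
		 */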
82607adc 6432 /* did we stall? */
940d71c6 6433 if (time_after(now, ts + thresh)) {
82607adc 6434 lockup_detected = true;
cd2440d6
PM
6435 if (pool->cpu >= 0) {
6436 pool->cpu_stall = true;
6437 cpu_pool_stall = true;
6438 }
82607adc
TH
6439 pr_emerg("BUG: workqueue lockup - pool");
6440 pr_cont_pool_info(pool);
6441 pr_cont(" stuck for %us!\n",
940d71c6 6442 jiffies_to_msecs(now - pool_ts) / 1000);
82607adc 6443 }
cd2440d6
PM
82607adc
TH
6446 }
6447
6448 rcu_read_unlock();
6449
6450 if (lockup_detected)
55df0933 6451 show_all_workqueues();
82607adc 6452
cd2440d6
PM
6453 if (cpu_pool_stall)
6454 show_cpu_pools_hogs();
6455
82607adc
TH
6456 wq_watchdog_reset_touched();
6457 mod_timer(&wq_watchdog_timer, jiffies + thresh);
6458}
6459
cb9d7fd5 6460notrace void wq_watchdog_touch(int cpu)
82607adc
TH
6461{
6462 if (cpu >= 0)
6463 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
89e28ce6
WQ
6464
6465 wq_watchdog_touched = jiffies;
82607adc
TH
6466}
6467
6468static void wq_watchdog_set_thresh(unsigned long thresh)
6469{
6470 wq_watchdog_thresh = 0;
6471 del_timer_sync(&wq_watchdog_timer);
6472
6473 if (thresh) {
6474 wq_watchdog_thresh = thresh;
6475 wq_watchdog_reset_touched();
6476 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
6477 }
6478}
6479
6480static int wq_watchdog_param_set_thresh(const char *val,
6481 const struct kernel_param *kp)
6482{
6483 unsigned long thresh;
6484 int ret;
6485
6486 ret = kstrtoul(val, 0, &thresh);
6487 if (ret)
6488 return ret;
6489
6490 if (system_wq)
6491 wq_watchdog_set_thresh(thresh);
6492 else
6493 wq_watchdog_thresh = thresh;
6494
6495 return 0;
6496}
6497
6498static const struct kernel_param_ops wq_watchdog_thresh_ops = {
6499 .set = wq_watchdog_param_set_thresh,
6500 .get = param_get_ulong,
6501};
6502
6503module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
6504 0644);
6505
6506static void wq_watchdog_init(void)
6507{
5cd79d6a 6508 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
82607adc
TH
6509 wq_watchdog_set_thresh(wq_watchdog_thresh);
6510}
6511
6512#else /* CONFIG_WQ_WATCHDOG */
6513
6514static inline void wq_watchdog_init(void) { }
6515
6516#endif /* CONFIG_WQ_WATCHDOG */
6517
3347fa09
TH
6518/**
6519 * workqueue_init_early - early init for workqueue subsystem
6520 *
2930155b
TH
6521 * This is the first step of the three-staged workqueue subsystem
6522 * initialization and is invoked as soon as the bare basics - memory
6523 * allocation, cpumasks and idr - are up. It sets up all the data structures
6524 * and system workqueues and allows early boot code to create workqueues and
6525 * queue/cancel work items. Actual work item execution starts only after
6526 * kthreads can be created and scheduled, right before early initcalls.
3347fa09 6527 */
2333e829 6528void __init workqueue_init_early(void)
1da177e4 6529{
84193c07 6530 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
7a4e344c
TH
6531 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6532 int i, cpu;
c34056a3 6533
10cdb157 6534 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
e904e6c2 6535
b05a7928 6536 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
04d4e665
FW
6537 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
6538 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
b05a7928 6539
ace3c549 6540 if (!cpumask_empty(&wq_cmdline_cpumask))
6541 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
6542
e904e6c2
TH
6543 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6544
2930155b
TH
6545 wq_update_pod_attrs_buf = alloc_workqueue_attrs();
6546 BUG_ON(!wq_update_pod_attrs_buf);
6547
84193c07
TH
6548 /* initialize WQ_AFFN_SYSTEM pods */
6549 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6550 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
6551 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6552 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
6553
6554 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
6555
84193c07
TH
6556 pt->nr_pods = 1;
6557 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
6558 pt->pod_node[0] = NUMA_NO_NODE;
6559 pt->cpu_pod[0] = 0;
6560
706026c2 6561 /* initialize CPU pools */
29c91e99 6562 for_each_possible_cpu(cpu) {
4ce62e9e 6563 struct worker_pool *pool;
8b03ae3c 6564
7a4e344c 6565 i = 0;
f02ae73a 6566 for_each_cpu_worker_pool(pool, cpu) {
7a4e344c 6567 BUG_ON(init_worker_pool(pool));
ec22ca5e 6568 pool->cpu = cpu;
29c91e99 6569 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
9546b29e 6570 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
7a4e344c 6571 pool->attrs->nice = std_nice[i++];
8639eceb 6572 pool->attrs->affn_strict = true;
f3f90ad4 6573 pool->node = cpu_to_node(cpu);
7a4e344c 6574
9daf9e67 6575 /* alloc pool ID */
68e13a67 6576 mutex_lock(&wq_pool_mutex);
9daf9e67 6577 BUG_ON(worker_pool_assign_id(pool));
68e13a67 6578 mutex_unlock(&wq_pool_mutex);
4ce62e9e 6579 }
8b03ae3c
TH
6580 }
6581
8a2b7538 6582 /* create default unbound and ordered wq attrs */
29c91e99
TH
6583 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6584 struct workqueue_attrs *attrs;
6585
be69d00d 6586 BUG_ON(!(attrs = alloc_workqueue_attrs()));
29c91e99 6587 attrs->nice = std_nice[i];
29c91e99 6588 unbound_std_wq_attrs[i] = attrs;
8a2b7538
TH
6589
6590 /*
6591 * An ordered wq should have only one pwq as ordering is
6592 * guaranteed by max_active, which is enforced by pwqs.
8a2b7538 6593 */
be69d00d 6594 BUG_ON(!(attrs = alloc_workqueue_attrs()));
8a2b7538 6595 attrs->nice = std_nice[i];
af73f5c9 6596 attrs->ordered = true;
8a2b7538 6597 ordered_wq_attrs[i] = attrs;
29c91e99
TH
6598 }
6599
d320c038 6600 system_wq = alloc_workqueue("events", 0, 0);
1aabe902 6601 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
d320c038 6602 system_long_wq = alloc_workqueue("events_long", 0, 0);
f3421797 6603 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
636b927e 6604 WQ_MAX_ACTIVE);
24d51add
TH
6605 system_freezable_wq = alloc_workqueue("events_freezable",
6606 WQ_FREEZABLE, 0);
0668106c
VK
6607 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6608 WQ_POWER_EFFICIENT, 0);
6609 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6610 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6611 0);
1aabe902 6612 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
0668106c
VK
6613 !system_unbound_wq || !system_freezable_wq ||
6614 !system_power_efficient_wq ||
6615 !system_freezable_power_efficient_wq);
3347fa09
TH
6616}
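/*
 * For orientation: the three stages run in order from early init code
 * (typically init/main.c) - workqueue_init_early() above, then
 * workqueue_init() and finally workqueue_init_topology() below.
 */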
6617
aa6fde93
TH
6618static void __init wq_cpu_intensive_thresh_init(void)
6619{
6620 unsigned long thresh;
6621 unsigned long bogo;
6622
dd64c873
Z
6623 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
6624 BUG_ON(IS_ERR(pwq_release_worker));
6625
aa6fde93
TH
6626 /* if the user set it to a specific value, keep it */
6627 if (wq_cpu_intensive_thresh_us != ULONG_MAX)
6628 return;
6629
6630 /*
6631 * The default of 10ms is derived from the fact that most modern (as of
6632 * 2023) processors can do a lot in 10ms and that it's just below what
6633 * most consider human-perceivable. However, the kernel also runs on much
6634 * slower CPUs, including microcontrollers, where the threshold is far
6635 * too low.
6636 *
6637 * Let's scale the threshold up to 1 second if BogoMIPS is below 4000.
6638 * This is by no means accurate but it doesn't have to be. The mechanism
6639 * is still useful even when the threshold is fully scaled up. Also, as
6640 * the reports would usually be applicable to everyone, having some machines
6641 * operate on longer thresholds won't significantly diminish their
6642 * usefulness.
6643 */
6644 thresh = 10 * USEC_PER_MSEC;
6645
6646 /* see init/calibrate.c for lpj -> BogoMIPS calculation */
6647 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
6648 if (bogo < 4000)
6649 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
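	/*
	 * Worked example: a CPU reporting ~1000 BogoMIPS scales the default
	 * to 10ms * 4000 / 1000 = 40ms; only below ~40 BogoMIPS does the
	 * one second (USEC_PER_SEC) cap kick in.
	 */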
6650
6651 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
6652 loops_per_jiffy, bogo, thresh);
6653
6654 wq_cpu_intensive_thresh_us = thresh;
6655}
6656
3347fa09
TH
6657/**
6658 * workqueue_init - bring workqueue subsystem fully online
6659 *
2930155b
TH
6660 * This is the second step of the three-staged workqueue subsystem initialization
6661 * and is invoked as soon as kthreads can be created and scheduled. Workqueues have
6662 * been created and work items queued on them, but there are no kworkers
6663 * executing the work items yet. Populate the worker pools with the initial
6664 * workers and enable future kworker creations.
3347fa09 6665 */
2333e829 6666void __init workqueue_init(void)
3347fa09 6667{
2186d9f9 6668 struct workqueue_struct *wq;
3347fa09
TH
6669 struct worker_pool *pool;
6670 int cpu, bkt;
6671
aa6fde93
TH
6672 wq_cpu_intensive_thresh_init();
6673
2186d9f9
TH
6674 mutex_lock(&wq_pool_mutex);
6675
2930155b
TH
6676 /*
6677 * Per-cpu pools created earlier could be missing the node hint. Fix them
6678 * up. Also, create a rescuer for workqueues that requested it.
6679 */
2186d9f9
TH
6680 for_each_possible_cpu(cpu) {
6681 for_each_cpu_worker_pool(pool, cpu) {
6682 pool->node = cpu_to_node(cpu);
6683 }
6684 }
6685
40c17f75 6686 list_for_each_entry(wq, &workqueues, list) {
40c17f75
TH
6687 WARN(init_rescuer(wq),
6688 "workqueue: failed to create early rescuer for %s",
6689 wq->name);
6690 }
2186d9f9
TH
6691
6692 mutex_unlock(&wq_pool_mutex);
6693
3347fa09
TH
6694 /* create the initial workers */
6695 for_each_online_cpu(cpu) {
6696 for_each_cpu_worker_pool(pool, cpu) {
6697 pool->flags &= ~POOL_DISASSOCIATED;
6698 BUG_ON(!create_worker(pool));
6699 }
6700 }
6701
6702 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6703 BUG_ON(!create_worker(pool));
6704
6705 wq_online = true;
82607adc 6706 wq_watchdog_init();
1da177e4 6707}
c4f135d6 6708
025e1684
TH
6709/*
6710 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
6711 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6712 * and consecutive pod ID. The rest of @pt is initialized accordingly.
6713 */
6714static void __init init_pod_type(struct wq_pod_type *pt,
6715 bool (*cpus_share_pod)(int, int))
6716{
6717 int cur, pre, cpu, pod;
6718
6719 pt->nr_pods = 0;
6720
6721 /* init @pt->cpu_pod[] according to @cpus_share_pod() */
6722 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6723 BUG_ON(!pt->cpu_pod);
6724
6725 for_each_possible_cpu(cur) {
6726 for_each_possible_cpu(pre) {
6727 if (pre >= cur) {
6728 pt->cpu_pod[cur] = pt->nr_pods++;
6729 break;
6730 }
6731 if (cpus_share_pod(cur, pre)) {
6732 pt->cpu_pod[cur] = pt->cpu_pod[pre];
6733 break;
6734 }
6735 }
6736 }
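	/*
	 * e.g. with four possible CPUs where CPUs 0-1 share a pod and CPUs
	 * 2-3 share another, the loop above yields cpu_pod[] = { 0, 0, 1, 1 }
	 * and nr_pods == 2; pod_cpus[] below becomes { 0-1 } and { 2-3 }.
	 */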
6737
6738 /* init the rest to match @pt->cpu_pod[] */
6739 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6740 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6741 BUG_ON(!pt->pod_cpus || !pt->pod_node);
6742
6743 for (pod = 0; pod < pt->nr_pods; pod++)
6744 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6745
6746 for_each_possible_cpu(cpu) {
6747 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6748 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6749 }
6750}
6751
63c5484e
TH
6752static bool __init cpus_dont_share(int cpu0, int cpu1)
6753{
6754 return false;
6755}
6756
6757static bool __init cpus_share_smt(int cpu0, int cpu1)
6758{
6759#ifdef CONFIG_SCHED_SMT
6760 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
6761#else
6762 return false;
6763#endif
6764}
6765
025e1684
TH
6766static bool __init cpus_share_numa(int cpu0, int cpu1)
6767{
6768 return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6769}
6770
2930155b
TH
6771/**
6772 * workqueue_init_topology - initialize CPU pods for unbound workqueues
6773 *
6774 * This is the third step of the three-staged workqueue subsystem initialization
6775 * and is invoked after SMP and topology information are fully initialized. It
6776 * initializes the unbound CPU pods accordingly.
6777 */
6778void __init workqueue_init_topology(void)
a86feae6 6779{
2930155b 6780 struct workqueue_struct *wq;
025e1684 6781 int cpu;
a86feae6 6782
63c5484e
TH
6783 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
6784 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
6785 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
025e1684 6786 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
a86feae6 6787
2930155b 6788 mutex_lock(&wq_pool_mutex);
a86feae6 6789
2930155b
TH
6790 /*
6791 * Workqueues allocated earlier would have all CPUs sharing the default
6792 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
6793 * combinations to apply per-pod sharing.
6794 */
6795 list_for_each_entry(wq, &workqueues, list) {
6796 for_each_online_cpu(cpu) {
6797 wq_update_pod(wq, cpu, cpu, true);
6798 }
6799 }
6800
6801 mutex_unlock(&wq_pool_mutex);
a86feae6
TH
6802}
6803
20bdedaf
TH
6804void __warn_flushing_systemwide_wq(void)
6805{
6806 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in the near future.\n");
6807 dump_stack();
6808}
c4f135d6 6809EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
ace3c549 6810
6811static int __init workqueue_unbound_cpus_setup(char *str)
6812{
6813 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
6814 cpumask_clear(&wq_cmdline_cpumask);
6815 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
6816 }
6817
6818 return 1;
6819}
6820__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
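/*
 * Example: booting with "workqueue.unbound_cpus=0-3,8" confines unbound
 * workqueue workers to CPUs 0-3 and 8. Unlike the sysfs cpumask file,
 * this parameter takes the cpulist format parsed by cpulist_parse().
 */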