457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
c54fce6e 3 * kernel/workqueue.c - generic async execution with shared worker pool
1da177e4 4 *
c54fce6e 5 * Copyright (C) 2002 Ingo Molnar
1da177e4 6 *
c54fce6e
TH
7 * Derived from the taskqueue/keventd code by:
8 * David Woodhouse <dwmw2@infradead.org>
9 * Andrew Morton
10 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
11 * Theodore Ts'o <tytso@mit.edu>
1da177e4 12 *
c54fce6e 13 * Made to use alloc_percpu by Christoph Lameter.
1da177e4 14 *
c54fce6e
TH
15 * Copyright (C) 2010 SUSE Linux Products GmbH
16 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
89ada679 17 *
c54fce6e
TH
18 * This is the generic async execution mechanism. Work items are
19 * executed in process context. The worker pool is shared and
b11895c4
L
20 * automatically managed. There are two worker pools for each CPU (one for
21 * normal work items and the other for high priority ones) and some extra
22 * pools for workqueues which are not bound to any specific CPU - the
23 * number of these backing pools is dynamic.
c54fce6e 24 *
9a261491 25 * Please read Documentation/core-api/workqueue.rst for details.
1da177e4
LT
26 */
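/*
 * Illustrative sketch (not part of this file): minimal use of the API that
 * this file implements. The names example_fn and example_work are made up
 * for the example; DECLARE_WORK(), schedule_work() (queue on the shared
 * per-cpu pool) and flush_work() (wait for completion) are the interfaces
 * declared in <linux/workqueue.h>.
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("example work item ran in process context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *
 *	schedule_work(&example_work);
 *	flush_work(&example_work);
 */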
27
9984de1a 28#include <linux/export.h>
1da177e4
LT
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/init.h>
32#include <linux/signal.h>
33#include <linux/completion.h>
34#include <linux/workqueue.h>
35#include <linux/slab.h>
36#include <linux/cpu.h>
37#include <linux/notifier.h>
38#include <linux/kthread.h>
1fa44eca 39#include <linux/hardirq.h>
46934023 40#include <linux/mempolicy.h>
341a5958 41#include <linux/freezer.h>
d5abe669 42#include <linux/debug_locks.h>
4e6045f1 43#include <linux/lockdep.h>
c34056a3 44#include <linux/idr.h>
29c91e99 45#include <linux/jhash.h>
42f8570f 46#include <linux/hashtable.h>
76af4d93 47#include <linux/rculist.h>
bce90380 48#include <linux/nodemask.h>
4c16bd32 49#include <linux/moduleparam.h>
3d1cb205 50#include <linux/uaccess.h>
c98a9805 51#include <linux/sched/isolation.h>
cd2440d6 52#include <linux/sched/debug.h>
62635ea8 53#include <linux/nmi.h>
940d71c6 54#include <linux/kvm_para.h>
aa6fde93 55#include <linux/delay.h>
e22bee78 56
ea138446 57#include "workqueue_internal.h"
1da177e4 58
c8e55f36 59enum {
24647570
TH
60 /*
61 * worker_pool flags
bc2ae0f5 62 *
24647570 63 * A bound pool is either associated or disassociated with its CPU.
bc2ae0f5
TH
64 * While associated (!DISASSOCIATED), all workers are bound to the
65 * CPU and none has %WORKER_UNBOUND set and concurrency management
66 * is in effect.
67 *
68 * While DISASSOCIATED, the cpu may be offline and all workers have
69 * %WORKER_UNBOUND set and concurrency management disabled, and may
24647570 70 * be executing on any CPU. The pool behaves as an unbound one.
bc2ae0f5 71 *
bc3a1afc 72 * Note that DISASSOCIATED should be flipped only while holding
1258fae7 73 * wq_pool_attach_mutex to avoid changing binding state while
4736cbf7 74 * worker_attach_to_pool() is in progress.
bc2ae0f5 75 */
692b4825 76 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
24647570 77 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
db7bccf4 78
c8e55f36 79 /* worker flags */
c8e55f36
TH
80 WORKER_DIE = 1 << 1, /* die die die */
81 WORKER_IDLE = 1 << 2, /* is idle */
e22bee78 82 WORKER_PREP = 1 << 3, /* preparing to run works */
fb0e7beb 83 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
f3421797 84 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
a9ab775b 85 WORKER_REBOUND = 1 << 8, /* worker was rebound */
e22bee78 86
a9ab775b
TH
87 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_CPU_INTENSIVE |
88 WORKER_UNBOUND | WORKER_REBOUND,
db7bccf4 89
e34cdddb 90 NR_STD_WORKER_POOLS = 2, /* # standard pools per cpu */
4ce62e9e 91
29c91e99 92 UNBOUND_POOL_HASH_ORDER = 6, /* hashed by pool->attrs */
c8e55f36 93 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
db7bccf4 94
e22bee78
TH
95 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
96 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
97
3233cdbd
TH
98 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
99 /* call for help after 10ms
100 (min two ticks) */
e22bee78
TH
101 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
 102 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
e22bee78
TH
103
104 /*
 105 * Rescue workers are used only in emergencies and shared by
8698a745 106 * all cpus. Give MIN_NICE.
e22bee78 107 */
8698a745
DY
108 RESCUER_NICE_LEVEL = MIN_NICE,
109 HIGHPRI_NICE_LEVEL = MIN_NICE,
ecf6881f
TH
110
111 WQ_NAME_LEN = 24,
c8e55f36 112};
1da177e4
LT
113
114/*
4690c4ab
TH
115 * Structure fields follow one of the following exclusion rules.
116 *
e41e704b
TH
117 * I: Modifiable by initialization/destruction paths and read-only for
118 * everyone else.
4690c4ab 119 *
e22bee78
TH
120 * P: Preemption protected. Disabling preemption is enough and should
121 * only be modified and accessed from the local cpu.
122 *
d565ed63 123 * L: pool->lock protected. Access with pool->lock held.
4690c4ab 124 *
bdf8b9bf
TH
125 * K: Only modified by worker while holding pool->lock. Can be safely read by
126 * self, while holding pool->lock or from IRQ context if %current is the
127 * kworker.
128 *
129 * S: Only modified by worker self.
130 *
1258fae7 131 * A: wq_pool_attach_mutex protected.
822d8405 132 *
68e13a67 133 * PL: wq_pool_mutex protected.
5bcab335 134 *
24acfb71 135 * PR: wq_pool_mutex protected for writes. RCU protected for reads.
76af4d93 136 *
5b95e1af
LJ
137 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
138 *
139 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
24acfb71 140 * RCU for reads.
5b95e1af 141 *
3c25a55d
LJ
142 * WQ: wq->mutex protected.
143 *
24acfb71 144 * WR: wq->mutex protected for writes. RCU protected for reads.
2e109a28
TH
145 *
146 * MD: wq_mayday_lock protected.
cd2440d6
PM
147 *
148 * WD: Used internally by the watchdog.
1da177e4 149 */
1da177e4 150
2eaebdb3 151/* struct worker is defined in workqueue_internal.h */
c34056a3 152
bd7bdd43 153struct worker_pool {
a9b8a985 154 raw_spinlock_t lock; /* the pool lock */
d84ff051 155 int cpu; /* I: the associated cpu */
f3f90ad4 156 int node; /* I: the associated node ID */
9daf9e67 157 int id; /* I: pool ID */
bc8b50c2 158 unsigned int flags; /* L: flags */
bd7bdd43 159
82607adc 160 unsigned long watchdog_ts; /* L: watchdog timestamp */
cd2440d6 161 bool cpu_stall; /* WD: stalled cpu bound pool */
82607adc 162
bc35f7ef
LJ
163 /*
164 * The counter is incremented in a process context on the associated CPU
165 * w/ preemption disabled, and decremented or reset in the same context
166 * but w/ pool->lock held. The readers grab pool->lock and are
167 * guaranteed to see if the counter reached zero.
168 */
169 int nr_running;
84f91c62 170
bd7bdd43 171 struct list_head worklist; /* L: list of pending works */
ea1abd61 172
5826cc8f
LJ
173 int nr_workers; /* L: total number of workers */
174 int nr_idle; /* L: currently idle workers */
bd7bdd43 175
2c1f1a91 176 struct list_head idle_list; /* L: list of idle workers */
bd7bdd43 177 struct timer_list idle_timer; /* L: worker idle timeout */
3f959aa3
VS
178 struct work_struct idle_cull_work; /* L: worker idle cleanup */
179
180 struct timer_list mayday_timer; /* L: SOS timer for workers */
bd7bdd43 181
c5aa87bb 182 /* a worker is either on busy_hash or idle_list, or the manager */
c9e7cf27
TH
183 DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
184 /* L: hash of busy workers */
185
2607d7a6 186 struct worker *manager; /* L: purely informational */
92f9c5c4 187 struct list_head workers; /* A: attached workers */
e02b9312 188 struct list_head dying_workers; /* A: workers about to die */
60f5a4bc 189 struct completion *detach_completion; /* all workers detached */
e19e397a 190
7cda9aae 191 struct ida worker_ida; /* worker IDs for task name */
e19e397a 192
7a4e344c 193 struct workqueue_attrs *attrs; /* I: worker attributes */
68e13a67
LJ
194 struct hlist_node hash_node; /* PL: unbound_pool_hash node */
195 int refcnt; /* PL: refcnt for unbound pools */
7a4e344c 196
29c91e99 197 /*
24acfb71 198 * Destruction of pool is RCU protected to allow dereferences
29c91e99
TH
199 * from get_work_pool().
200 */
201 struct rcu_head rcu;
84f91c62 202};
8b03ae3c 203
725e8ec5
TH
204/*
205 * Per-pool_workqueue statistics. These can be monitored using
206 * tools/workqueue/wq_monitor.py.
207 */
208enum pool_workqueue_stats {
209 PWQ_STAT_STARTED, /* work items started execution */
210 PWQ_STAT_COMPLETED, /* work items completed execution */
8a1dd1e5 211 PWQ_STAT_CPU_TIME, /* total CPU time consumed */
616db877 212 PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
725e8ec5 213 PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */
8639eceb 214 PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */
725e8ec5
TH
215 PWQ_STAT_MAYDAY, /* maydays to rescuer */
216 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
217
218 PWQ_NR_STATS,
219};
220
1da177e4 221/*
112202d9
TH
222 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
223 * of work_struct->data are used for flags and the remaining high bits
224 * point to the pwq; thus, pwqs need to be aligned at two's power of the
225 * number of flag bits.
1da177e4 226 */
112202d9 227struct pool_workqueue {
bd7bdd43 228 struct worker_pool *pool; /* I: the associated pool */
4690c4ab 229 struct workqueue_struct *wq; /* I: the owning workqueue */
73f53c4a
TH
230 int work_color; /* L: current color */
231 int flush_color; /* L: flushing color */
8864b4e5 232 int refcnt; /* L: reference count */
73f53c4a
TH
233 int nr_in_flight[WORK_NR_COLORS];
234 /* L: nr of in_flight works */
018f3a13
LJ
235
236 /*
237 * nr_active management and WORK_STRUCT_INACTIVE:
238 *
239 * When pwq->nr_active >= max_active, new work item is queued to
240 * pwq->inactive_works instead of pool->worklist and marked with
241 * WORK_STRUCT_INACTIVE.
242 *
243 * All work items marked with WORK_STRUCT_INACTIVE do not participate
244 * in pwq->nr_active and all work items in pwq->inactive_works are
245 * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
246 * work items are in pwq->inactive_works. Some of them are ready to
247 * run in pool->worklist or worker->scheduled. Those work items are
 248 * only struct wq_barrier which is used for flush_work() and should
 249 * not participate in pwq->nr_active. A non-barrier work item is
 250 * marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
251 */
1e19ffc6 252 int nr_active; /* L: nr of active works */
a0a1a5fd 253 int max_active; /* L: max active works */
f97a4a1a 254 struct list_head inactive_works; /* L: inactive works */
3c25a55d 255 struct list_head pwqs_node; /* WR: node on wq->pwqs */
2e109a28 256 struct list_head mayday_node; /* MD: node on wq->maydays */
8864b4e5 257
725e8ec5
TH
258 u64 stats[PWQ_NR_STATS];
259
8864b4e5 260 /*
967b494e 261 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
687a9aa5
TH
262 * and pwq_release_workfn() for details. pool_workqueue itself is also
263 * RCU protected so that the first pwq can be determined without
967b494e 264 * grabbing wq->mutex.
8864b4e5 265 */
687a9aa5 266 struct kthread_work release_work;
8864b4e5 267 struct rcu_head rcu;
e904e6c2 268} __aligned(1 << WORK_STRUCT_FLAG_BITS);
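/*
 * Illustrative sketch (not part of this file) of the nr_active management
 * described in struct pool_workqueue above. This is a simplified rendering
 * of what __queue_work() does with a ready work item; watchdog updates and
 * the remaining queueing steps are omitted.
 *
 *	if (likely(pwq->nr_active < pwq->max_active)) {
 *		trace_workqueue_activate_work(work);
 *		pwq->nr_active++;
 *		list_add_tail(&work->entry, &pwq->pool->worklist);
 *	} else {
 *		work_flags |= WORK_STRUCT_INACTIVE;
 *		list_add_tail(&work->entry, &pwq->inactive_works);
 *	}
 */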
1da177e4 269
73f53c4a
TH
270/*
271 * Structure used to wait for workqueue flush.
272 */
273struct wq_flusher {
3c25a55d
LJ
274 struct list_head list; /* WQ: list of flushers */
275 int flush_color; /* WQ: flush color waiting for */
73f53c4a
TH
276 struct completion done; /* flush completion */
277};
278
226223ab
TH
279struct wq_device;
280
1da177e4 281/*
c5aa87bb
TH
282 * The externally visible workqueue. It relays the issued work items to
283 * the appropriate worker_pool through its pool_workqueues.
1da177e4
LT
284 */
285struct workqueue_struct {
3c25a55d 286 struct list_head pwqs; /* WR: all pwqs of this wq */
e2dca7ad 287 struct list_head list; /* PR: list of all workqueues */
73f53c4a 288
3c25a55d
LJ
289 struct mutex mutex; /* protects this wq */
290 int work_color; /* WQ: current work color */
291 int flush_color; /* WQ: current flush color */
112202d9 292 atomic_t nr_pwqs_to_flush; /* flush in progress */
3c25a55d
LJ
293 struct wq_flusher *first_flusher; /* WQ: first flusher */
294 struct list_head flusher_queue; /* WQ: flush waiters */
295 struct list_head flusher_overflow; /* WQ: flush overflow list */
73f53c4a 296
2e109a28 297 struct list_head maydays; /* MD: pwqs requesting rescue */
30ae2fc0 298 struct worker *rescuer; /* MD: rescue worker */
e22bee78 299
87fc741e 300 int nr_drainers; /* WQ: drain in progress */
a357fc03 301 int saved_max_active; /* WQ: saved pwq max_active */
226223ab 302
5b95e1af
LJ
303 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
304 struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
6029a918 305
226223ab
TH
306#ifdef CONFIG_SYSFS
307 struct wq_device *wq_dev; /* I: for sysfs interface */
308#endif
4e6045f1 309#ifdef CONFIG_LOCKDEP
669de8bd
BVA
310 char *lock_name;
311 struct lock_class_key key;
4690c4ab 312 struct lockdep_map lockdep_map;
4e6045f1 313#endif
ecf6881f 314 char name[WQ_NAME_LEN]; /* I: workqueue name */
2728fd2f 315
e2dca7ad 316 /*
24acfb71
TG
317 * Destruction of workqueue_struct is RCU protected to allow walking
318 * the workqueues list without grabbing wq_pool_mutex.
e2dca7ad
TH
319 * This is used to dump all workqueues from sysrq.
320 */
321 struct rcu_head rcu;
322
2728fd2f
TH
323 /* hot fields used during command issue, aligned to cacheline */
324 unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
636b927e 325 struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
1da177e4
LT
326};
327
e904e6c2
TH
328static struct kmem_cache *pwq_cache;
329
84193c07
TH
330/*
331 * Each pod type describes how CPUs should be grouped for unbound workqueues.
332 * See the comment above workqueue_attrs->affn_scope.
333 */
334struct wq_pod_type {
335 int nr_pods; /* number of pods */
336 cpumask_var_t *pod_cpus; /* pod -> cpus */
337 int *pod_node; /* pod -> node */
338 int *cpu_pod; /* cpu -> pod */
339};
340
341static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
523a301e 342static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
63c5484e
TH
343
344static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
523a301e 345 [WQ_AFFN_DFL] = "default",
63c5484e
TH
346 [WQ_AFFN_CPU] = "cpu",
347 [WQ_AFFN_SMT] = "smt",
348 [WQ_AFFN_CACHE] = "cache",
349 [WQ_AFFN_NUMA] = "numa",
350 [WQ_AFFN_SYSTEM] = "system",
351};
bce90380 352
616db877
TH
353/*
354 * Per-cpu work items which run for longer than the following threshold are
355 * automatically considered CPU intensive and excluded from concurrency
356 * management to prevent them from noticeably delaying other per-cpu work items.
aa6fde93
TH
357 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
358 * The actual value is initialized in wq_cpu_intensive_thresh_init().
616db877 359 */
aa6fde93 360static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
616db877
TH
361module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
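/*
 * Illustrative sketch (not part of this file): because of the
 * module_param_named() above, the threshold can be overridden at boot time
 * or at runtime; the 10000us value below is only an example.
 *
 *	workqueue.cpu_intensive_thresh_us=10000		(kernel command line)
 *	echo 10000 > /sys/module/workqueue/parameters/cpu_intensive_thresh_us
 */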
362
cee22a15 363/* see the comment above the definition of WQ_POWER_EFFICIENT */
552f530c 364static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
cee22a15
VK
365module_param_named(power_efficient, wq_power_efficient, bool, 0444);
366
863b710b 367static bool wq_online; /* can kworkers be created yet? */
3347fa09 368
fef59c9c
TH
369/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
370static struct workqueue_attrs *wq_update_pod_attrs_buf;
4c16bd32 371
68e13a67 372static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
1258fae7 373static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
a9b8a985 374static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
d8bb65ab
SAS
375/* wait for manager to go away */
376static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
5bcab335 377
e2dca7ad 378static LIST_HEAD(workqueues); /* PR: list of all workqueues */
68e13a67 379static bool workqueue_freezing; /* PL: have wqs started freezing? */
7d19c5ce 380
99c621ef 381/* PL&A: allowable cpus for unbound wqs and work items */
ef557180
MG
382static cpumask_var_t wq_unbound_cpumask;
383
fe28f631
WL
384/* PL: user requested unbound cpumask via sysfs */
385static cpumask_var_t wq_requested_unbound_cpumask;
386
387/* PL: isolated cpumask to be excluded from unbound cpumask */
388static cpumask_var_t wq_isolated_cpumask;
389
ace3c549 390/* to further constrain wq_unbound_cpumask by cmdline parameter */
391static struct cpumask wq_cmdline_cpumask __initdata;
392
ef557180
MG
393/* CPU where unbound work was last round robin scheduled from this CPU */
394static DEFINE_PER_CPU(int, wq_rr_cpu_last);
b05a7928 395
f303fccb
TH
396/*
397 * Local execution of unbound work items is no longer guaranteed. The
398 * following always forces round-robin CPU selection on unbound work items
399 * to uncover usages which depend on it.
400 */
401#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
402static bool wq_debug_force_rr_cpu = true;
403#else
404static bool wq_debug_force_rr_cpu = false;
405#endif
406module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
407
7d19c5ce 408/* the per-cpu worker pools */
25528213 409static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
7d19c5ce 410
68e13a67 411static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
7d19c5ce 412
68e13a67 413/* PL: hash of all unbound pools keyed by pool->attrs */
29c91e99
TH
414static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
415
c5aa87bb 416/* I: attributes used when instantiating standard unbound pools on demand */
29c91e99
TH
417static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
418
8a2b7538
TH
419/* I: attributes used when instantiating ordered pools on demand */
420static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
421
967b494e
TH
422/*
423 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
424 * process context while holding a pool lock. Bounce to a dedicated kthread
425 * worker to avoid A-A deadlocks.
426 */
68279f9c 427static struct kthread_worker *pwq_release_worker __ro_after_init;
967b494e 428
68279f9c 429struct workqueue_struct *system_wq __ro_after_init;
ad7b1f84 430EXPORT_SYMBOL(system_wq);
68279f9c 431struct workqueue_struct *system_highpri_wq __ro_after_init;
1aabe902 432EXPORT_SYMBOL_GPL(system_highpri_wq);
68279f9c 433struct workqueue_struct *system_long_wq __ro_after_init;
d320c038 434EXPORT_SYMBOL_GPL(system_long_wq);
68279f9c 435struct workqueue_struct *system_unbound_wq __ro_after_init;
f3421797 436EXPORT_SYMBOL_GPL(system_unbound_wq);
68279f9c 437struct workqueue_struct *system_freezable_wq __ro_after_init;
24d51add 438EXPORT_SYMBOL_GPL(system_freezable_wq);
68279f9c 439struct workqueue_struct *system_power_efficient_wq __ro_after_init;
0668106c 440EXPORT_SYMBOL_GPL(system_power_efficient_wq);
68279f9c 441struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
0668106c 442EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
d320c038 443
7d19c5ce 444static int worker_thread(void *__worker);
6ba94429 445static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
c29eb853 446static void show_pwq(struct pool_workqueue *pwq);
55df0933 447static void show_one_worker_pool(struct worker_pool *pool);
7d19c5ce 448
97bd2347
TH
449#define CREATE_TRACE_POINTS
450#include <trace/events/workqueue.h>
451
68e13a67 452#define assert_rcu_or_pool_mutex() \
24acfb71 453 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
f78f5b90 454 !lockdep_is_held(&wq_pool_mutex), \
24acfb71 455 "RCU or wq_pool_mutex should be held")
5bcab335 456
5b95e1af 457#define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
24acfb71 458 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
f78f5b90
PM
459 !lockdep_is_held(&wq->mutex) && \
460 !lockdep_is_held(&wq_pool_mutex), \
24acfb71 461 "RCU, wq->mutex or wq_pool_mutex should be held")
5b95e1af 462
f02ae73a
TH
463#define for_each_cpu_worker_pool(pool, cpu) \
464 for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
465 (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
7a62c2c8 466 (pool)++)
4ce62e9e 467
17116969
TH
468/**
469 * for_each_pool - iterate through all worker_pools in the system
470 * @pool: iteration cursor
611c92a0 471 * @pi: integer used for iteration
fa1b54e6 472 *
24acfb71 473 * This must be called either with wq_pool_mutex held or RCU read
68e13a67
LJ
474 * locked. If the pool needs to be used beyond the locking in effect, the
475 * caller is responsible for guaranteeing that the pool stays online.
fa1b54e6
TH
476 *
477 * The if/else clause exists only for the lockdep assertion and can be
478 * ignored.
17116969 479 */
611c92a0
TH
480#define for_each_pool(pool, pi) \
481 idr_for_each_entry(&worker_pool_idr, pool, pi) \
68e13a67 482 if (({ assert_rcu_or_pool_mutex(); false; })) { } \
fa1b54e6 483 else
17116969 484
822d8405
TH
485/**
486 * for_each_pool_worker - iterate through all workers of a worker_pool
487 * @worker: iteration cursor
822d8405
TH
488 * @pool: worker_pool to iterate workers of
489 *
1258fae7 490 * This must be called with wq_pool_attach_mutex held.
822d8405
TH
491 *
492 * The if/else clause exists only for the lockdep assertion and can be
493 * ignored.
494 */
da028469
LJ
495#define for_each_pool_worker(worker, pool) \
496 list_for_each_entry((worker), &(pool)->workers, node) \
1258fae7 497 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
822d8405
TH
498 else
499
49e3cf44
TH
500/**
501 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
502 * @pwq: iteration cursor
503 * @wq: the target workqueue
76af4d93 504 *
24acfb71 505 * This must be called either with wq->mutex held or RCU read locked.
794b18bc
TH
506 * If the pwq needs to be used beyond the locking in effect, the caller is
507 * responsible for guaranteeing that the pwq stays online.
76af4d93
TH
508 *
509 * The if/else clause exists only for the lockdep assertion and can be
510 * ignored.
49e3cf44
TH
511 */
512#define for_each_pwq(pwq, wq) \
49e9d1a9 513 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
5a644662 514 lockdep_is_held(&(wq->mutex)))
f3421797 515
dc186ad7
TG
516#ifdef CONFIG_DEBUG_OBJECTS_WORK
517
f9e62f31 518static const struct debug_obj_descr work_debug_descr;
dc186ad7 519
99777288
SG
520static void *work_debug_hint(void *addr)
521{
522 return ((struct work_struct *) addr)->func;
523}
524
b9fdac7f
DC
525static bool work_is_static_object(void *addr)
526{
527 struct work_struct *work = addr;
528
529 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
530}
531
dc186ad7
TG
532/*
533 * fixup_init is called when:
534 * - an active object is initialized
535 */
02a982a6 536static bool work_fixup_init(void *addr, enum debug_obj_state state)
dc186ad7
TG
537{
538 struct work_struct *work = addr;
539
540 switch (state) {
541 case ODEBUG_STATE_ACTIVE:
542 cancel_work_sync(work);
543 debug_object_init(work, &work_debug_descr);
02a982a6 544 return true;
dc186ad7 545 default:
02a982a6 546 return false;
dc186ad7
TG
547 }
548}
549
dc186ad7
TG
550/*
551 * fixup_free is called when:
552 * - an active object is freed
553 */
02a982a6 554static bool work_fixup_free(void *addr, enum debug_obj_state state)
dc186ad7
TG
555{
556 struct work_struct *work = addr;
557
558 switch (state) {
559 case ODEBUG_STATE_ACTIVE:
560 cancel_work_sync(work);
561 debug_object_free(work, &work_debug_descr);
02a982a6 562 return true;
dc186ad7 563 default:
02a982a6 564 return false;
dc186ad7
TG
565 }
566}
567
f9e62f31 568static const struct debug_obj_descr work_debug_descr = {
dc186ad7 569 .name = "work_struct",
99777288 570 .debug_hint = work_debug_hint,
b9fdac7f 571 .is_static_object = work_is_static_object,
dc186ad7 572 .fixup_init = work_fixup_init,
dc186ad7
TG
573 .fixup_free = work_fixup_free,
574};
575
576static inline void debug_work_activate(struct work_struct *work)
577{
578 debug_object_activate(work, &work_debug_descr);
579}
580
581static inline void debug_work_deactivate(struct work_struct *work)
582{
583 debug_object_deactivate(work, &work_debug_descr);
584}
585
586void __init_work(struct work_struct *work, int onstack)
587{
588 if (onstack)
589 debug_object_init_on_stack(work, &work_debug_descr);
590 else
591 debug_object_init(work, &work_debug_descr);
592}
593EXPORT_SYMBOL_GPL(__init_work);
594
595void destroy_work_on_stack(struct work_struct *work)
596{
597 debug_object_free(work, &work_debug_descr);
598}
599EXPORT_SYMBOL_GPL(destroy_work_on_stack);
600
ea2e64f2
TG
601void destroy_delayed_work_on_stack(struct delayed_work *work)
602{
603 destroy_timer_on_stack(&work->timer);
604 debug_object_free(&work->work, &work_debug_descr);
605}
606EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
607
dc186ad7
TG
608#else
609static inline void debug_work_activate(struct work_struct *work) { }
610static inline void debug_work_deactivate(struct work_struct *work) { }
611#endif
612
4e8b22bd 613/**
67dc8325 614 * worker_pool_assign_id - allocate ID and assign it to @pool
4e8b22bd
LB
615 * @pool: the pool pointer of interest
616 *
617 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
618 * successfully, -errno on failure.
619 */
9daf9e67
TH
620static int worker_pool_assign_id(struct worker_pool *pool)
621{
622 int ret;
623
68e13a67 624 lockdep_assert_held(&wq_pool_mutex);
5bcab335 625
4e8b22bd
LB
626 ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
627 GFP_KERNEL);
229641a6 628 if (ret >= 0) {
e68035fb 629 pool->id = ret;
229641a6
TH
630 return 0;
631 }
fa1b54e6 632 return ret;
7c3eed5c
TH
633}
634
73f53c4a
TH
635static unsigned int work_color_to_flags(int color)
636{
637 return color << WORK_STRUCT_COLOR_SHIFT;
638}
639
c4560c2c 640static int get_work_color(unsigned long work_data)
73f53c4a 641{
c4560c2c 642 return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
73f53c4a
TH
643 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
644}
645
646static int work_next_color(int color)
647{
648 return (color + 1) % WORK_NR_COLORS;
649}
1da177e4 650
14441960 651/*
112202d9
TH
652 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
653 * contain the pointer to the queued pwq. Once execution starts, the flag
7c3eed5c 654 * is cleared and the high bits contain OFFQ flags and pool ID.
7a22ad75 655 *
112202d9
TH
656 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
657 * and clear_work_data() can be used to set the pwq, pool or clear
bbb68dfa
TH
658 * work->data. These functions should only be called while the work is
659 * owned - ie. while the PENDING bit is set.
7a22ad75 660 *
112202d9 661 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
7c3eed5c 662 * corresponding to a work. Pool is available once the work has been
112202d9 663 * queued anywhere after initialization until it is sync canceled. pwq is
7c3eed5c 664 * available only while the work item is queued.
7a22ad75 665 *
bbb68dfa
TH
666 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
667 * canceled. While being canceled, a work item may have its PENDING set
668 * but stay off timer and worklist for arbitrarily long and nobody should
669 * try to steal the PENDING bit.
14441960 670 */
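/*
 * Illustrative sketch (not part of this file) of the two encodings described
 * above, in terms of the helpers that follow:
 *
 *	while queued (set_work_pwq()):
 *		work->data == (unsigned long)pwq |
 *			      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags
 *
 *	after execution starts (set_work_pool_and_clear_pending()):
 *		work->data == (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT
 */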
7a22ad75
TH
671static inline void set_work_data(struct work_struct *work, unsigned long data,
672 unsigned long flags)
365970a1 673{
6183c009 674 WARN_ON_ONCE(!work_pending(work));
7a22ad75
TH
675 atomic_long_set(&work->data, data | flags | work_static(work));
676}
365970a1 677
112202d9 678static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
7a22ad75
TH
679 unsigned long extra_flags)
680{
112202d9
TH
681 set_work_data(work, (unsigned long)pwq,
682 WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
365970a1
DH
683}
684
4468a00f
LJ
685static void set_work_pool_and_keep_pending(struct work_struct *work,
686 int pool_id)
687{
688 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
689 WORK_STRUCT_PENDING);
690}
691
7c3eed5c
TH
692static void set_work_pool_and_clear_pending(struct work_struct *work,
693 int pool_id)
7a22ad75 694{
23657bb1
TH
695 /*
696 * The following wmb is paired with the implied mb in
697 * test_and_set_bit(PENDING) and ensures all updates to @work made
698 * here are visible to and precede any updates by the next PENDING
699 * owner.
700 */
701 smp_wmb();
7c3eed5c 702 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
346c09f8
RP
703 /*
704 * The following mb guarantees that previous clear of a PENDING bit
705 * will not be reordered with any speculative LOADS or STORES from
706 * work->current_func, which is executed afterwards. This possible
8bdc6201 707 * reordering can lead to a missed execution on an attempt to queue
346c09f8
RP
708 * the same @work. E.g. consider this case:
709 *
710 * CPU#0 CPU#1
711 * ---------------------------- --------------------------------
712 *
713 * 1 STORE event_indicated
714 * 2 queue_work_on() {
715 * 3 test_and_set_bit(PENDING)
716 * 4 } set_..._and_clear_pending() {
717 * 5 set_work_data() # clear bit
718 * 6 smp_mb()
719 * 7 work->current_func() {
720 * 8 LOAD event_indicated
721 * }
722 *
723 * Without an explicit full barrier, the speculative LOAD on line 8 can
 724 * be executed before CPU#0 does the STORE on line 1. If that happens,
 725 * CPU#0 observes that the PENDING bit is still set and a new execution
 726 * of @work is not queued, in the hope that CPU#1 will eventually
 727 * finish the queued @work. Meanwhile, CPU#1 does not see that
 728 * event_indicated is set, because the speculative LOAD was executed
 729 * before the actual STORE.
730 */
731 smp_mb();
7a22ad75 732}
f756d5e2 733
7a22ad75 734static void clear_work_data(struct work_struct *work)
1da177e4 735{
7c3eed5c
TH
736 smp_wmb(); /* see set_work_pool_and_clear_pending() */
737 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
1da177e4
LT
738}
739
afa4bb77
LT
740static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
741{
742 return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
743}
744
112202d9 745static struct pool_workqueue *get_work_pwq(struct work_struct *work)
b1f4ec17 746{
e120153d 747 unsigned long data = atomic_long_read(&work->data);
7a22ad75 748
112202d9 749 if (data & WORK_STRUCT_PWQ)
afa4bb77 750 return work_struct_pwq(data);
e120153d
TH
751 else
752 return NULL;
4d707b9f
ON
753}
754
7c3eed5c
TH
755/**
756 * get_work_pool - return the worker_pool a given work was associated with
757 * @work: the work item of interest
758 *
68e13a67 759 * Pools are created and destroyed under wq_pool_mutex, and allow read
24acfb71
TG
760 * access under RCU read lock. As such, this function should be
761 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
fa1b54e6
TH
762 *
763 * All fields of the returned pool are accessible as long as the above
764 * mentioned locking is in effect. If the returned pool needs to be used
765 * beyond the critical section, the caller is responsible for ensuring the
766 * returned pool is and stays online.
d185af30
YB
767 *
768 * Return: The worker_pool @work was last associated with. %NULL if none.
7c3eed5c
TH
769 */
770static struct worker_pool *get_work_pool(struct work_struct *work)
365970a1 771{
e120153d 772 unsigned long data = atomic_long_read(&work->data);
7c3eed5c 773 int pool_id;
7a22ad75 774
68e13a67 775 assert_rcu_or_pool_mutex();
fa1b54e6 776
112202d9 777 if (data & WORK_STRUCT_PWQ)
afa4bb77 778 return work_struct_pwq(data)->pool;
7a22ad75 779
7c3eed5c
TH
780 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
781 if (pool_id == WORK_OFFQ_POOL_NONE)
7a22ad75
TH
782 return NULL;
783
fa1b54e6 784 return idr_find(&worker_pool_idr, pool_id);
7c3eed5c
TH
785}
786
787/**
788 * get_work_pool_id - return the worker pool ID a given work is associated with
789 * @work: the work item of interest
790 *
d185af30 791 * Return: The worker_pool ID @work was last associated with.
7c3eed5c
TH
792 * %WORK_OFFQ_POOL_NONE if none.
793 */
794static int get_work_pool_id(struct work_struct *work)
795{
54d5b7d0
LJ
796 unsigned long data = atomic_long_read(&work->data);
797
112202d9 798 if (data & WORK_STRUCT_PWQ)
afa4bb77 799 return work_struct_pwq(data)->pool->id;
7c3eed5c 800
54d5b7d0 801 return data >> WORK_OFFQ_POOL_SHIFT;
7c3eed5c
TH
802}
803
bbb68dfa
TH
804static void mark_work_canceling(struct work_struct *work)
805{
7c3eed5c 806 unsigned long pool_id = get_work_pool_id(work);
bbb68dfa 807
7c3eed5c
TH
808 pool_id <<= WORK_OFFQ_POOL_SHIFT;
809 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
bbb68dfa
TH
810}
811
812static bool work_is_canceling(struct work_struct *work)
813{
814 unsigned long data = atomic_long_read(&work->data);
815
112202d9 816 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
bbb68dfa
TH
817}
818
e22bee78 819/*
3270476a
TH
820 * Policy functions. These define the policies on how the global worker
821 * pools are managed. Unless noted otherwise, these functions assume that
d565ed63 822 * they're being called with pool->lock held.
e22bee78
TH
823 */
824
4594bf15 825/*
e22bee78
TH
826 * Need to wake up a worker? Called from anything but currently
827 * running workers.
974271c4
TH
828 *
829 * Note that, because unbound workers never contribute to nr_running, this
706026c2 830 * function will always return %true for unbound pools as long as the
974271c4 831 * worklist isn't empty.
4594bf15 832 */
63d95a91 833static bool need_more_worker(struct worker_pool *pool)
365970a1 834{
0219a352 835 return !list_empty(&pool->worklist) && !pool->nr_running;
e22bee78 836}
4594bf15 837
e22bee78 838/* Can I start working? Called from busy but !running workers. */
63d95a91 839static bool may_start_working(struct worker_pool *pool)
e22bee78 840{
63d95a91 841 return pool->nr_idle;
e22bee78
TH
842}
843
844/* Do I need to keep working? Called from currently running workers. */
63d95a91 845static bool keep_working(struct worker_pool *pool)
e22bee78 846{
bc35f7ef 847 return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
e22bee78
TH
848}
849
850/* Do we need a new worker? Called from manager. */
63d95a91 851static bool need_to_create_worker(struct worker_pool *pool)
e22bee78 852{
63d95a91 853 return need_more_worker(pool) && !may_start_working(pool);
e22bee78 854}
365970a1 855
e22bee78 856/* Do we have too many workers and should some go away? */
63d95a91 857static bool too_many_workers(struct worker_pool *pool)
e22bee78 858{
692b4825 859 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
63d95a91
TH
860 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
861 int nr_busy = pool->nr_workers - nr_idle;
e22bee78
TH
862
863 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
365970a1
DH
864}
865
c54d5046
TH
866/**
867 * worker_set_flags - set worker flags and adjust nr_running accordingly
868 * @worker: self
869 * @flags: flags to set
870 *
871 * Set @flags in @worker->flags and adjust nr_running accordingly.
c54d5046
TH
872 */
873static inline void worker_set_flags(struct worker *worker, unsigned int flags)
874{
875 struct worker_pool *pool = worker->pool;
876
bc8b50c2 877 lockdep_assert_held(&pool->lock);
c54d5046
TH
878
879 /* If transitioning into NOT_RUNNING, adjust nr_running. */
880 if ((flags & WORKER_NOT_RUNNING) &&
881 !(worker->flags & WORKER_NOT_RUNNING)) {
882 pool->nr_running--;
883 }
884
885 worker->flags |= flags;
886}
887
888/**
889 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
890 * @worker: self
891 * @flags: flags to clear
892 *
893 * Clear @flags in @worker->flags and adjust nr_running accordingly.
c54d5046
TH
894 */
895static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
896{
897 struct worker_pool *pool = worker->pool;
898 unsigned int oflags = worker->flags;
899
bc8b50c2 900 lockdep_assert_held(&pool->lock);
c54d5046
TH
901
902 worker->flags &= ~flags;
903
904 /*
905 * If transitioning out of NOT_RUNNING, increment nr_running. Note
906 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask
907 * of multiple flags, not a single flag.
908 */
909 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
910 if (!(worker->flags & WORKER_NOT_RUNNING))
911 pool->nr_running++;
912}
913
797e8345
TH
914/* Return the first idle worker. Called with pool->lock held. */
915static struct worker *first_idle_worker(struct worker_pool *pool)
916{
917 if (unlikely(list_empty(&pool->idle_list)))
918 return NULL;
919
920 return list_first_entry(&pool->idle_list, struct worker, entry);
921}
922
923/**
924 * worker_enter_idle - enter idle state
925 * @worker: worker which is entering idle state
926 *
927 * @worker is entering idle state. Update stats and idle timer if
928 * necessary.
929 *
930 * LOCKING:
931 * raw_spin_lock_irq(pool->lock).
932 */
933static void worker_enter_idle(struct worker *worker)
934{
935 struct worker_pool *pool = worker->pool;
936
937 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
938 WARN_ON_ONCE(!list_empty(&worker->entry) &&
939 (worker->hentry.next || worker->hentry.pprev)))
940 return;
941
942 /* can't use worker_set_flags(), also called from create_worker() */
943 worker->flags |= WORKER_IDLE;
944 pool->nr_idle++;
945 worker->last_active = jiffies;
946
947 /* idle_list is LIFO */
948 list_add(&worker->entry, &pool->idle_list);
949
950 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
951 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
952
953 /* Sanity check nr_running. */
954 WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
955}
956
957/**
958 * worker_leave_idle - leave idle state
959 * @worker: worker which is leaving idle state
960 *
961 * @worker is leaving idle state. Update stats.
962 *
963 * LOCKING:
964 * raw_spin_lock_irq(pool->lock).
965 */
966static void worker_leave_idle(struct worker *worker)
967{
968 struct worker_pool *pool = worker->pool;
969
970 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
971 return;
972 worker_clr_flags(worker, WORKER_IDLE);
973 pool->nr_idle--;
974 list_del_init(&worker->entry);
975}
976
977/**
978 * find_worker_executing_work - find worker which is executing a work
979 * @pool: pool of interest
980 * @work: work to find worker for
981 *
982 * Find a worker which is executing @work on @pool by searching
983 * @pool->busy_hash which is keyed by the address of @work. For a worker
984 * to match, its current execution should match the address of @work and
985 * its work function. This is to avoid unwanted dependency between
986 * unrelated work executions through a work item being recycled while still
987 * being executed.
988 *
989 * This is a bit tricky. A work item may be freed once its execution
990 * starts and nothing prevents the freed area from being recycled for
991 * another work item. If the same work item address ends up being reused
992 * before the original execution finishes, workqueue will identify the
993 * recycled work item as currently executing and make it wait until the
994 * current execution finishes, introducing an unwanted dependency.
995 *
996 * This function checks the work item address and work function to avoid
997 * false positives. Note that this isn't complete as one may construct a
998 * work function which can introduce dependency onto itself through a
999 * recycled work item. Well, if somebody wants to shoot oneself in the
1000 * foot that badly, there's only so much we can do, and if such deadlock
1001 * actually occurs, it should be easy to locate the culprit work function.
1002 *
1003 * CONTEXT:
1004 * raw_spin_lock_irq(pool->lock).
1005 *
1006 * Return:
1007 * Pointer to worker which is executing @work if found, %NULL
1008 * otherwise.
1009 */
1010static struct worker *find_worker_executing_work(struct worker_pool *pool,
1011 struct work_struct *work)
1012{
1013 struct worker *worker;
1014
1015 hash_for_each_possible(pool->busy_hash, worker, hentry,
1016 (unsigned long)work)
1017 if (worker->current_work == work &&
1018 worker->current_func == work->func)
1019 return worker;
1020
1021 return NULL;
1022}
1023
1024/**
1025 * move_linked_works - move linked works to a list
1026 * @work: start of series of works to be scheduled
1027 * @head: target list to append @work to
1028 * @nextp: out parameter for nested worklist walking
1029 *
873eaca6
TH
1030 * Schedule linked works starting from @work to @head. Work series to be
1031 * scheduled starts at @work and includes any consecutive work with
1032 * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
1033 * @nextp.
797e8345
TH
1034 *
1035 * CONTEXT:
1036 * raw_spin_lock_irq(pool->lock).
1037 */
1038static void move_linked_works(struct work_struct *work, struct list_head *head,
1039 struct work_struct **nextp)
1040{
1041 struct work_struct *n;
1042
1043 /*
1044 * Linked worklist will always end before the end of the list,
1045 * use NULL for list head.
1046 */
1047 list_for_each_entry_safe_from(work, n, NULL, entry) {
1048 list_move_tail(&work->entry, head);
1049 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1050 break;
1051 }
1052
1053 /*
1054 * If we're already inside safe list traversal and have moved
1055 * multiple works to the scheduled queue, the next position
1056 * needs to be updated.
1057 */
1058 if (nextp)
1059 *nextp = n;
1060}
1061
873eaca6
TH
1062/**
1063 * assign_work - assign a work item and its linked work items to a worker
1064 * @work: work to assign
1065 * @worker: worker to assign to
1066 * @nextp: out parameter for nested worklist walking
1067 *
1068 * Assign @work and its linked work items to @worker. If @work is already being
1069 * executed by another worker in the same pool, it'll be punted there.
1070 *
1071 * If @nextp is not NULL, it's updated to point to the next work of the last
1072 * scheduled work. This allows assign_work() to be nested inside
1073 * list_for_each_entry_safe().
1074 *
1075 * Returns %true if @work was successfully assigned to @worker. %false if @work
1076 * was punted to another worker already executing it.
1077 */
1078static bool assign_work(struct work_struct *work, struct worker *worker,
1079 struct work_struct **nextp)
1080{
1081 struct worker_pool *pool = worker->pool;
1082 struct worker *collision;
1083
1084 lockdep_assert_held(&pool->lock);
1085
1086 /*
1087 * A single work shouldn't be executed concurrently by multiple workers.
1088 * __queue_work() ensures that @work doesn't jump to a different pool
1089 * while still running in the previous pool. Here, we should ensure that
1090 * @work is not executed concurrently by multiple workers from the same
1091 * pool. Check whether anyone is already processing the work. If so,
1092 * defer the work to the currently executing one.
1093 */
1094 collision = find_worker_executing_work(pool, work);
1095 if (unlikely(collision)) {
1096 move_linked_works(work, &collision->scheduled, nextp);
1097 return false;
1098 }
1099
1100 move_linked_works(work, &worker->scheduled, nextp);
1101 return true;
1102}
1103
797e8345 1104/**
0219a352
TH
1105 * kick_pool - wake up an idle worker if necessary
1106 * @pool: pool to kick
797e8345 1107 *
0219a352
TH
1108 * @pool may have pending work items. Wake up worker if necessary. Returns
1109 * whether a worker was woken up.
797e8345 1110 */
0219a352 1111static bool kick_pool(struct worker_pool *pool)
797e8345
TH
1112{
1113 struct worker *worker = first_idle_worker(pool);
8639eceb 1114 struct task_struct *p;
797e8345 1115
0219a352
TH
1116 lockdep_assert_held(&pool->lock);
1117
1118 if (!need_more_worker(pool) || !worker)
1119 return false;
1120
8639eceb
TH
1121 p = worker->task;
1122
1123#ifdef CONFIG_SMP
1124 /*
1125 * Idle @worker is about to execute @work and waking up provides an
1126 * opportunity to migrate @worker at a lower cost by setting the task's
1127 * wake_cpu field. Let's see if we want to move @worker to improve
1128 * execution locality.
1129 *
1130 * We're waking the worker that went idle the latest and there's some
1131 * chance that @worker is marked idle but hasn't gone off CPU yet. If
1132 * so, setting the wake_cpu won't do anything. As this is a best-effort
1133 * optimization and the race window is narrow, let's leave as-is for
1134 * now. If this becomes pronounced, we can skip over workers which are
1135 * still on cpu when picking an idle worker.
1136 *
1137 * If @pool has non-strict affinity, @worker might have ended up outside
1138 * its affinity scope. Repatriate.
1139 */
1140 if (!pool->attrs->affn_strict &&
1141 !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
1142 struct work_struct *work = list_first_entry(&pool->worklist,
1143 struct work_struct, entry);
1144 p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
1145 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
1146 }
1147#endif
1148 wake_up_process(p);
0219a352 1149 return true;
797e8345
TH
1150}
1151
63638450
TH
1152#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
1153
1154/*
1155 * Concurrency-managed per-cpu work items that hog CPU for longer than
1156 * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
1157 * which prevents them from stalling other concurrency-managed work items. If a
1158 * work function keeps triggering this mechanism, it's likely that the work item
1159 * should be using an unbound workqueue instead.
1160 *
1161 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1162 * and reports them so that they can be examined and converted to use unbound
1163 * workqueues as appropriate. To avoid flooding the console, each violating work
1164 * function is tracked and reported with exponential backoff.
1165 */
1166#define WCI_MAX_ENTS 128
1167
1168struct wci_ent {
1169 work_func_t func;
1170 atomic64_t cnt;
1171 struct hlist_node hash_node;
1172};
1173
1174static struct wci_ent wci_ents[WCI_MAX_ENTS];
1175static int wci_nr_ents;
1176static DEFINE_RAW_SPINLOCK(wci_lock);
1177static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
1178
1179static struct wci_ent *wci_find_ent(work_func_t func)
1180{
1181 struct wci_ent *ent;
1182
1183 hash_for_each_possible_rcu(wci_hash, ent, hash_node,
1184 (unsigned long)func) {
1185 if (ent->func == func)
1186 return ent;
1187 }
1188 return NULL;
1189}
1190
1191static void wq_cpu_intensive_report(work_func_t func)
1192{
1193 struct wci_ent *ent;
1194
1195restart:
1196 ent = wci_find_ent(func);
1197 if (ent) {
1198 u64 cnt;
1199
1200 /*
1201 * Start reporting from the fourth time and back off
1202 * exponentially.
1203 */
1204 cnt = atomic64_inc_return_relaxed(&ent->cnt);
1205 if (cnt >= 4 && is_power_of_2(cnt))
1206 printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
1207 ent->func, wq_cpu_intensive_thresh_us,
1208 atomic64_read(&ent->cnt));
1209 return;
1210 }
1211
1212 /*
1213 * @func is a new violation. Allocate a new entry for it. If wci_ents[]
1214 * is exhausted, something went really wrong and we probably made enough
1215 * noise already.
1216 */
1217 if (wci_nr_ents >= WCI_MAX_ENTS)
1218 return;
1219
1220 raw_spin_lock(&wci_lock);
1221
1222 if (wci_nr_ents >= WCI_MAX_ENTS) {
1223 raw_spin_unlock(&wci_lock);
1224 return;
1225 }
1226
1227 if (wci_find_ent(func)) {
1228 raw_spin_unlock(&wci_lock);
1229 goto restart;
1230 }
1231
1232 ent = &wci_ents[wci_nr_ents++];
1233 ent->func = func;
1234 atomic64_set(&ent->cnt, 1);
1235 hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
1236
1237 raw_spin_unlock(&wci_lock);
1238}
1239
1240#else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1241static void wq_cpu_intensive_report(work_func_t func) {}
1242#endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1243
d302f017 1244/**
6d25be57 1245 * wq_worker_running - a worker is running again
e22bee78 1246 * @task: task waking up
e22bee78 1247 *
6d25be57 1248 * This function is called when a worker returns from schedule()
e22bee78 1249 */
6d25be57 1250void wq_worker_running(struct task_struct *task)
e22bee78
TH
1251{
1252 struct worker *worker = kthread_data(task);
1253
c8f6219b 1254 if (!READ_ONCE(worker->sleeping))
6d25be57 1255 return;
07edfece
FW
1256
1257 /*
1258 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1259 * and the nr_running increment below, we may ruin the nr_running reset
1260 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1261 * pool. Protect against such race.
1262 */
1263 preempt_disable();
6d25be57 1264 if (!(worker->flags & WORKER_NOT_RUNNING))
bc35f7ef 1265 worker->pool->nr_running++;
07edfece 1266 preempt_enable();
616db877
TH
1267
1268 /*
1269 * CPU intensive auto-detection cares about how long a work item hogged
1270 * CPU without sleeping. Reset the starting timestamp on wakeup.
1271 */
1272 worker->current_at = worker->task->se.sum_exec_runtime;
1273
c8f6219b 1274 WRITE_ONCE(worker->sleeping, 0);
e22bee78
TH
1275}
1276
1277/**
1278 * wq_worker_sleeping - a worker is going to sleep
1279 * @task: task going to sleep
e22bee78 1280 *
6d25be57 1281 * This function is called from schedule() when a busy worker is
ccf45156 1282 * going to sleep.
e22bee78 1283 */
6d25be57 1284void wq_worker_sleeping(struct task_struct *task)
e22bee78 1285{
cc5bff38 1286 struct worker *worker = kthread_data(task);
111c225a 1287 struct worker_pool *pool;
e22bee78 1288
111c225a
TH
1289 /*
1290 * Rescuers, which may not have all the fields set up like normal
1291 * workers, also reach here, let's not access anything before
1292 * checking NOT_RUNNING.
1293 */
2d64672e 1294 if (worker->flags & WORKER_NOT_RUNNING)
6d25be57 1295 return;
e22bee78 1296
111c225a 1297 pool = worker->pool;
111c225a 1298
62849a96 1299 /* Return if preempted before wq_worker_running() was reached */
c8f6219b 1300 if (READ_ONCE(worker->sleeping))
6d25be57
TG
1301 return;
1302
c8f6219b 1303 WRITE_ONCE(worker->sleeping, 1);
a9b8a985 1304 raw_spin_lock_irq(&pool->lock);
e22bee78 1305
45c753f5
FW
1306 /*
1307 * Recheck in case unbind_workers() preempted us. We don't
1308 * want to decrement nr_running after the worker is unbound
1309 * and nr_running has been reset.
1310 */
1311 if (worker->flags & WORKER_NOT_RUNNING) {
1312 raw_spin_unlock_irq(&pool->lock);
1313 return;
1314 }
1315
bc35f7ef 1316 pool->nr_running--;
0219a352 1317 if (kick_pool(pool))
725e8ec5 1318 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
0219a352 1319
a9b8a985 1320 raw_spin_unlock_irq(&pool->lock);
e22bee78
TH
1321}
1322
616db877
TH
1323/**
1324 * wq_worker_tick - a scheduler tick occurred while a kworker is running
1325 * @task: task currently running
1326 *
1327 * Called from scheduler_tick(). We're in the IRQ context and the current
1328 * worker's fields which follow the 'K' locking rule can be accessed safely.
1329 */
1330void wq_worker_tick(struct task_struct *task)
1331{
1332 struct worker *worker = kthread_data(task);
1333 struct pool_workqueue *pwq = worker->current_pwq;
1334 struct worker_pool *pool = worker->pool;
1335
1336 if (!pwq)
1337 return;
1338
8a1dd1e5
TH
1339 pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
1340
18c8ae81
Z
1341 if (!wq_cpu_intensive_thresh_us)
1342 return;
1343
616db877
TH
1344 /*
1345 * If the current worker is concurrency managed and hogged the CPU for
1346 * longer than wq_cpu_intensive_thresh_us, it's automatically marked
1347 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
c8f6219b
Z
1348 *
1349 * @worker->sleeping being set means that @worker is in the process of
1350 * switching out voluntarily and won't be contributing to
1351 * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1352 * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
1353 * double decrements. The task is releasing the CPU anyway. Let's skip.
1354 * We probably want to make this prettier in the future.
616db877 1355 */
c8f6219b 1356 if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
616db877
TH
1357 worker->task->se.sum_exec_runtime - worker->current_at <
1358 wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
1359 return;
1360
1361 raw_spin_lock(&pool->lock);
1362
1363 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
63638450 1364 wq_cpu_intensive_report(worker->current_func);
616db877
TH
1365 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
1366
0219a352 1367 if (kick_pool(pool))
616db877 1368 pwq->stats[PWQ_STAT_CM_WAKEUP]++;
616db877
TH
1369
1370 raw_spin_unlock(&pool->lock);
1371}
1372
1b69ac6b
JW
1373/**
1374 * wq_worker_last_func - retrieve worker's last work function
8194fe94 1375 * @task: Task to retrieve last work function of.
1b69ac6b
JW
1376 *
1377 * Determine the last function a worker executed. This is called from
1378 * the scheduler to get a worker's last known identity.
1379 *
1380 * CONTEXT:
a9b8a985 1381 * raw_spin_lock_irq(rq->lock)
1b69ac6b 1382 *
4b047002
JW
1383 * This function is called during schedule() when a kworker is going
1384 * to sleep. It's used by psi to identify aggregation workers during
1385 * dequeuing, to allow periodic aggregation to shut-off when that
1386 * worker is the last task in the system or cgroup to go to sleep.
1387 *
1388 * As this function doesn't involve any workqueue-related locking, it
1389 * only returns stable values when called from inside the scheduler's
1390 * queuing and dequeuing paths, when @task, which must be a kworker,
1391 * is guaranteed to not be processing any works.
1392 *
1b69ac6b
JW
1393 * Return:
1394 * The last work function %current executed as a worker, NULL if it
1395 * hasn't executed any work yet.
1396 */
1397work_func_t wq_worker_last_func(struct task_struct *task)
1398{
1399 struct worker *worker = kthread_data(task);
1400
1401 return worker->last_func;
1402}
1403
8864b4e5
TH
1404/**
1405 * get_pwq - get an extra reference on the specified pool_workqueue
1406 * @pwq: pool_workqueue to get
1407 *
1408 * Obtain an extra reference on @pwq. The caller should guarantee that
1409 * @pwq has positive refcnt and be holding the matching pool->lock.
1410 */
1411static void get_pwq(struct pool_workqueue *pwq)
1412{
1413 lockdep_assert_held(&pwq->pool->lock);
1414 WARN_ON_ONCE(pwq->refcnt <= 0);
1415 pwq->refcnt++;
1416}
1417
1418/**
1419 * put_pwq - put a pool_workqueue reference
1420 * @pwq: pool_workqueue to put
1421 *
1422 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its
1423 * destruction. The caller should be holding the matching pool->lock.
1424 */
1425static void put_pwq(struct pool_workqueue *pwq)
1426{
1427 lockdep_assert_held(&pwq->pool->lock);
1428 if (likely(--pwq->refcnt))
1429 return;
8864b4e5 1430 /*
967b494e
TH
1431 * @pwq can't be released under pool->lock, bounce to a dedicated
1432 * kthread_worker to avoid A-A deadlocks.
8864b4e5 1433 */
687a9aa5 1434 kthread_queue_work(pwq_release_worker, &pwq->release_work);
8864b4e5
TH
1435}
1436
dce90d47
TH
1437/**
1438 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1439 * @pwq: pool_workqueue to put (can be %NULL)
1440 *
1441 * put_pwq() with locking. This function also allows %NULL @pwq.
1442 */
1443static void put_pwq_unlocked(struct pool_workqueue *pwq)
1444{
1445 if (pwq) {
1446 /*
24acfb71 1447 * As both pwqs and pools are RCU protected, the
dce90d47
TH
1448 * following lock operations are safe.
1449 */
a9b8a985 1450 raw_spin_lock_irq(&pwq->pool->lock);
dce90d47 1451 put_pwq(pwq);
a9b8a985 1452 raw_spin_unlock_irq(&pwq->pool->lock);
dce90d47
TH
1453 }
1454}
1455
f97a4a1a 1456static void pwq_activate_inactive_work(struct work_struct *work)
bf4ede01 1457{
112202d9 1458 struct pool_workqueue *pwq = get_work_pwq(work);
bf4ede01
TH
1459
1460 trace_workqueue_activate_work(work);
82607adc
TH
1461 if (list_empty(&pwq->pool->worklist))
1462 pwq->pool->watchdog_ts = jiffies;
112202d9 1463 move_linked_works(work, &pwq->pool->worklist, NULL);
f97a4a1a 1464 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
112202d9 1465 pwq->nr_active++;
bf4ede01
TH
1466}
1467
f97a4a1a 1468static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
3aa62497 1469{
f97a4a1a 1470 struct work_struct *work = list_first_entry(&pwq->inactive_works,
3aa62497
LJ
1471 struct work_struct, entry);
1472
f97a4a1a 1473 pwq_activate_inactive_work(work);
3aa62497
LJ
1474}
1475
bf4ede01 1476/**
112202d9
TH
1477 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1478 * @pwq: pwq of interest
c4560c2c 1479 * @work_data: work_data of work which left the queue
1480 *
 1481 * A work item has either completed or been removed from the pending queue;
112202d9 1482 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1483 *
1484 * CONTEXT:
a9b8a985 1485 * raw_spin_lock_irq(pool->lock).
bf4ede01 1486 */
c4560c2c 1487static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
bf4ede01 1488{
1489 int color = get_work_color(work_data);
1490
1491 if (!(work_data & WORK_STRUCT_INACTIVE)) {
1492 pwq->nr_active--;
1493 if (!list_empty(&pwq->inactive_works)) {
1494 /* one down, submit an inactive one */
1495 if (pwq->nr_active < pwq->max_active)
1496 pwq_activate_first_inactive(pwq);
1497 }
1498 }
1499
112202d9 1500 pwq->nr_in_flight[color]--;
bf4ede01 1501
bf4ede01 1502 /* is flush in progress and are we at the flushing tip? */
112202d9 1503 if (likely(pwq->flush_color != color))
8864b4e5 1504 goto out_put;
1505
1506 /* are there still in-flight works? */
112202d9 1507 if (pwq->nr_in_flight[color])
8864b4e5 1508 goto out_put;
bf4ede01 1509
1510 /* this pwq is done, clear flush_color */
1511 pwq->flush_color = -1;
1512
1513 /*
112202d9 1514 * If this was the last pwq, wake up the first flusher. It
1515 * will handle the rest.
1516 */
1517 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1518 complete(&pwq->wq->first_flusher->done);
1519out_put:
1520 put_pwq(pwq);
1521}
1522
36e227d2 1523/**
bbb68dfa 1524 * try_to_grab_pending - steal work item from worklist and disable irq
1525 * @work: work item to steal
1526 * @is_dwork: @work is a delayed_work
bbb68dfa 1527 * @flags: place to store irq state
1528 *
1529 * Try to grab PENDING bit of @work. This function can handle @work in any
d185af30 1530 * stable state - idle, on timer or on worklist.
36e227d2 1531 *
d185af30 1532 * Return:
1533 *
1534 * ======== ================================================================
1535 * 1 if @work was pending and we successfully stole PENDING
1536 * 0 if @work was idle and we claimed PENDING
1537 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1538 * -ENOENT if someone else is canceling @work, this state may persist
1539 * for arbitrarily long
3eb6b31b 1540 * ======== ================================================================
36e227d2 1541 *
d185af30 1542 * Note:
bbb68dfa 1543 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1544 * interrupted while holding PENDING and @work off queue, irq must be
1545 * disabled on entry. This, combined with delayed_work->timer being
 1546 * irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
1547 *
1548 * On successful return, >= 0, irq is disabled and the caller is
1549 * responsible for releasing it using local_irq_restore(*@flags).
1550 *
e0aecdd8 1551 * This function is safe to call from any context including IRQ handler.
bf4ede01 1552 */
1553static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1554 unsigned long *flags)
bf4ede01 1555{
d565ed63 1556 struct worker_pool *pool;
112202d9 1557 struct pool_workqueue *pwq;
bf4ede01 1558
1559 local_irq_save(*flags);
1560
1561 /* try to steal the timer if it exists */
1562 if (is_dwork) {
1563 struct delayed_work *dwork = to_delayed_work(work);
1564
1565 /*
1566 * dwork->timer is irqsafe. If del_timer() fails, it's
1567 * guaranteed that the timer is not queued anywhere and not
1568 * running on the local CPU.
1569 */
1570 if (likely(del_timer(&dwork->timer)))
1571 return 1;
1572 }
1573
1574 /* try to claim PENDING the normal way */
1575 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1576 return 0;
1577
24acfb71 1578 rcu_read_lock();
1579 /*
1580 * The queueing is in progress, or it is already queued. Try to
1581 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1582 */
1583 pool = get_work_pool(work);
1584 if (!pool)
bbb68dfa 1585 goto fail;
bf4ede01 1586
a9b8a985 1587 raw_spin_lock(&pool->lock);
0b3dae68 1588 /*
1589 * work->data is guaranteed to point to pwq only while the work
1590 * item is queued on pwq->wq, and both updating work->data to point
1591 * to pwq on queueing and to pool on dequeueing are done under
1592 * pwq->pool->lock. This in turn guarantees that, if work->data
1593 * points to pwq which is associated with a locked pool, the work
1594 * item is currently queued on that pool.
1595 */
1596 pwq = get_work_pwq(work);
1597 if (pwq && pwq->pool == pool) {
1598 debug_work_deactivate(work);
1599
1600 /*
1601 * A cancelable inactive work item must be in the
1602 * pwq->inactive_works since a queued barrier can't be
1603 * canceled (see the comments in insert_wq_barrier()).
1604 *
f97a4a1a 1605 * An inactive work item cannot be grabbed directly because
d812796e 1606 * it might have linked barrier work items which, if left
f97a4a1a 1607 * on the inactive_works list, will confuse pwq->nr_active
1608 * management later on and cause stall. Make sure the work
1609 * item is activated before grabbing.
1610 */
1611 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
1612 pwq_activate_inactive_work(work);
1613
1614 list_del_init(&work->entry);
c4560c2c 1615 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
16062836 1616
112202d9 1617 /* work->data points to pwq iff queued, point to pool */
1618 set_work_pool_and_keep_pending(work, pool->id);
1619
a9b8a985 1620 raw_spin_unlock(&pool->lock);
24acfb71 1621 rcu_read_unlock();
16062836 1622 return 1;
bf4ede01 1623 }
a9b8a985 1624 raw_spin_unlock(&pool->lock);
bbb68dfa 1625fail:
24acfb71 1626 rcu_read_unlock();
1627 local_irq_restore(*flags);
1628 if (work_is_canceling(work))
1629 return -ENOENT;
1630 cpu_relax();
36e227d2 1631 return -EAGAIN;
1632}
1633
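/*
 * Illustrative sketch, not part of this file: the canonical caller loop,
 * matching what mod_delayed_work_on() below actually does. -EAGAIN is
 * transient and safe to busy-retry; -ENOENT may persist and is not.
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	do {
 *		ret = try_to_grab_pending(&dwork->work, true, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 *	if (ret >= 0) {
 *		... PENDING is owned, requeue or finish canceling ...
 *		local_irq_restore(flags);
 *	}
 */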
4690c4ab 1634/**
706026c2 1635 * insert_work - insert a work into a pool
112202d9 1636 * @pwq: pwq @work belongs to
1637 * @work: work to insert
1638 * @head: insertion point
1639 * @extra_flags: extra WORK_STRUCT_* flags to set
1640 *
112202d9 1641 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
706026c2 1642 * work_struct flags.
1643 *
1644 * CONTEXT:
a9b8a985 1645 * raw_spin_lock_irq(pool->lock).
4690c4ab 1646 */
1647static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1648 struct list_head *head, unsigned int extra_flags)
b89deed3 1649{
fe089f87 1650 debug_work_activate(work);
e22bee78 1651
e89a85d6 1652 /* record the work call stack in order to print it in KASAN reports */
f70da745 1653 kasan_record_aux_stack_noalloc(work);
e89a85d6 1654
4690c4ab 1655 /* we own @work, set data and link */
112202d9 1656 set_work_pwq(work, pwq, extra_flags);
1a4d9b0a 1657 list_add_tail(&work->entry, head);
8864b4e5 1658 get_pwq(pwq);
1659}
1660
1661/*
1662 * Test whether @work is being queued from another work executing on the
8d03ecfe 1663 * same workqueue.
1664 */
1665static bool is_chained_work(struct workqueue_struct *wq)
1666{
1667 struct worker *worker;
1668
1669 worker = current_wq_worker();
1670 /*
bf393fd4 1671 * Return %true iff I'm a worker executing a work item on @wq. If
1672 * I'm @worker, it's safe to dereference it without locking.
1673 */
112202d9 1674 return worker && worker->current_pwq->wq == wq;
1675}
1676
1677/*
1678 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1679 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1680 * avoid perturbing sensitive tasks.
1681 */
1682static int wq_select_unbound_cpu(int cpu)
1683{
1684 int new_cpu;
1685
1686 if (likely(!wq_debug_force_rr_cpu)) {
1687 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1688 return cpu;
1689 } else {
1690 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
1691 }
1692
1693 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1694 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1695 if (unlikely(new_cpu >= nr_cpu_ids)) {
1696 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1697 if (unlikely(new_cpu >= nr_cpu_ids))
1698 return cpu;
1699 }
1700 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1701
1702 return new_cpu;
1703}
1704
d84ff051 1705static void __queue_work(int cpu, struct workqueue_struct *wq,
1706 struct work_struct *work)
1707{
112202d9 1708 struct pool_workqueue *pwq;
fe089f87 1709 struct worker_pool *last_pool, *pool;
8a2e8e5d 1710 unsigned int work_flags;
b75cac93 1711 unsigned int req_cpu = cpu;
1712
1713 /*
1714 * While a work item is PENDING && off queue, a task trying to
1715 * steal the PENDING will busy-loop waiting for it to either get
1716 * queued or lose PENDING. Grabbing PENDING and queueing should
1717 * happen with IRQ disabled.
1718 */
8e8eb730 1719 lockdep_assert_irqs_disabled();
1da177e4 1720
1e19ffc6 1721
1722 /*
1723 * For a draining wq, only works from the same workqueue are
1724 * allowed. The __WQ_DESTROYING helps to spot the issue that
1725 * queues a new work item to a wq after destroy_workqueue(wq).
1726 */
1727 if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
1728 WARN_ON_ONCE(!is_chained_work(wq))))
e41e704b 1729 return;
24acfb71 1730 rcu_read_lock();
9e8cd2f5 1731retry:
c9178087 1732 /* pwq which will be used unless @work is executing elsewhere */
1733 if (req_cpu == WORK_CPU_UNBOUND) {
1734 if (wq->flags & WQ_UNBOUND)
aa202f1f 1735 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
636b927e 1736 else
aa202f1f 1737 cpu = raw_smp_processor_id();
aa202f1f 1738 }
dbf2576e 1739
636b927e 1740 pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
1741 pool = pwq->pool;
1742
1743 /*
1744 * If @work was previously on a different pool, it might still be
1745 * running there, in which case the work needs to be queued on that
1746 * pool to guarantee non-reentrancy.
1747 */
1748 last_pool = get_work_pool(work);
fe089f87 1749 if (last_pool && last_pool != pool) {
c9178087 1750 struct worker *worker;
18aa9eff 1751
a9b8a985 1752 raw_spin_lock(&last_pool->lock);
18aa9eff 1753
c9178087 1754 worker = find_worker_executing_work(last_pool, work);
18aa9eff 1755
1756 if (worker && worker->current_pwq->wq == wq) {
1757 pwq = worker->current_pwq;
1758 pool = pwq->pool;
1759 WARN_ON_ONCE(pool != last_pool);
8930caba 1760 } else {
c9178087 1761 /* meh... not running there, queue here */
a9b8a985 1762 raw_spin_unlock(&last_pool->lock);
fe089f87 1763 raw_spin_lock(&pool->lock);
8930caba 1764 }
f3421797 1765 } else {
fe089f87 1766 raw_spin_lock(&pool->lock);
1767 }
1768
9e8cd2f5 1769 /*
1770 * pwq is determined and locked. For unbound pools, we could have raced
1771 * with pwq release and it could already be dead. If its refcnt is zero,
1772 * repeat pwq selection. Note that unbound pwqs never die without
1773 * another pwq replacing it in cpu_pwq or while work items are executing
1774 * on it, so the retrying is guaranteed to make forward-progress.
1775 */
1776 if (unlikely(!pwq->refcnt)) {
1777 if (wq->flags & WQ_UNBOUND) {
fe089f87 1778 raw_spin_unlock(&pool->lock);
1779 cpu_relax();
1780 goto retry;
1781 }
1782 /* oops */
1783 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1784 wq->name, cpu);
1785 }
1786
1787 /* pwq determined, queue */
1788 trace_workqueue_queue_work(req_cpu, pwq, work);
502ca9d8 1789
1790 if (WARN_ON(!list_empty(&work->entry)))
1791 goto out;
1e19ffc6 1792
1793 pwq->nr_in_flight[pwq->work_color]++;
1794 work_flags = work_color_to_flags(pwq->work_color);
1e19ffc6 1795
112202d9 1796 if (likely(pwq->nr_active < pwq->max_active)) {
1797 if (list_empty(&pool->worklist))
1798 pool->watchdog_ts = jiffies;
1799
cdadf009 1800 trace_workqueue_activate_work(work);
112202d9 1801 pwq->nr_active++;
fe089f87 1802 insert_work(pwq, work, &pool->worklist, work_flags);
0219a352 1803 kick_pool(pool);
8a2e8e5d 1804 } else {
f97a4a1a 1805 work_flags |= WORK_STRUCT_INACTIVE;
fe089f87 1806 insert_work(pwq, work, &pwq->inactive_works, work_flags);
8a2e8e5d 1807 }
1e19ffc6 1808
24acfb71 1809out:
fe089f87 1810 raw_spin_unlock(&pool->lock);
24acfb71 1811 rcu_read_unlock();
1812}
1813
0fcb78c2 1814/**
1815 * queue_work_on - queue work on specific cpu
1816 * @cpu: CPU number to execute work on
1817 * @wq: workqueue to use
1818 * @work: work to queue
1819 *
c1a220e7 1820 * We queue the work to a specific CPU; the caller must ensure it
1821 * can't go away. Callers that fail to ensure that the specified
1822 * CPU cannot go away will execute on a randomly chosen CPU.
1823 * But note well that callers specifying a CPU that never has been
1824 * online will get a splat.
1825 *
1826 * Return: %false if @work was already on a queue, %true otherwise.
1da177e4 1827 */
1828bool queue_work_on(int cpu, struct workqueue_struct *wq,
1829 struct work_struct *work)
1da177e4 1830{
d4283e93 1831 bool ret = false;
8930caba 1832 unsigned long flags;
ef1ca236 1833
8930caba 1834 local_irq_save(flags);
c1a220e7 1835
22df02bb 1836 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
4690c4ab 1837 __queue_work(cpu, wq, work);
d4283e93 1838 ret = true;
c1a220e7 1839 }
ef1ca236 1840
8930caba 1841 local_irq_restore(flags);
1842 return ret;
1843}
ad7b1f84 1844EXPORT_SYMBOL(queue_work_on);
1da177e4 1845
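/*
 * Illustrative sketch, not part of this file: minimal queue_work_on()
 * usage; my_work_fn and my_work are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("ran on CPU%d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	cpus_read_lock();		keep CPU 2 from going offline
 *	if (cpu_online(2))
 *		queue_work_on(2, system_wq, &my_work);
 *	cpus_read_unlock();
 */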
8204e0c1 1846/**
fef59c9c 1847 * select_numa_node_cpu - Select a CPU based on NUMA node
1848 * @node: NUMA node ID that we want to select a CPU from
1849 *
1850 * This function will attempt to find a "random" cpu available on a given
1851 * node. If there are no CPUs available on the given node it will return
1852 * WORK_CPU_UNBOUND indicating that we should just schedule to any
1853 * available CPU if we need to schedule this work.
1854 */
fef59c9c 1855static int select_numa_node_cpu(int node)
1856{
1857 int cpu;
1858
1859 /* Delay binding to CPU if node is not valid or online */
1860 if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1861 return WORK_CPU_UNBOUND;
1862
1863 /* Use local node/cpu if we are already there */
1864 cpu = raw_smp_processor_id();
1865 if (node == cpu_to_node(cpu))
1866 return cpu;
1867
 1868 /* Use "random", otherwise known as "first", online CPU of node */
1869 cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1870
1871 /* If CPU is valid return that, otherwise just defer */
1872 return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1873}
1874
1875/**
1876 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1877 * @node: NUMA node that we are targeting the work for
1878 * @wq: workqueue to use
1879 * @work: work to queue
1880 *
1881 * We queue the work to a "random" CPU within a given NUMA node. The basic
1882 * idea here is to provide a way to somehow associate work with a given
1883 * NUMA node.
1884 *
1885 * This function will only make a best effort attempt at getting this onto
1886 * the right NUMA node. If no node is requested or the requested node is
1887 * offline then we just fall back to standard queue_work behavior.
1888 *
1889 * Currently the "random" CPU ends up being the first available CPU in the
1890 * intersection of cpu_online_mask and the cpumask of the node, unless we
1891 * are running on the node. In that case we just use the current CPU.
1892 *
1893 * Return: %false if @work was already on a queue, %true otherwise.
1894 */
1895bool queue_work_node(int node, struct workqueue_struct *wq,
1896 struct work_struct *work)
1897{
1898 unsigned long flags;
1899 bool ret = false;
1900
1901 /*
1902 * This current implementation is specific to unbound workqueues.
1903 * Specifically we only return the first available CPU for a given
1904 * node instead of cycling through individual CPUs within the node.
1905 *
1906 * If this is used with a per-cpu workqueue then the logic in
 1907 * select_numa_node_cpu() would need to be updated to allow for
1908 * some round robin type logic.
1909 */
1910 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1911
1912 local_irq_save(flags);
1913
1914 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
fef59c9c 1915 int cpu = select_numa_node_cpu(node);
1916
1917 __queue_work(cpu, wq, work);
1918 ret = true;
1919 }
1920
1921 local_irq_restore(flags);
1922 return ret;
1923}
1924EXPORT_SYMBOL_GPL(queue_work_node);
1925
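/*
 * Illustrative sketch, not part of this file: queueing near the memory a
 * work item will touch. my_unbound_wq (allocated with WQ_UNBOUND), my_work
 * and buf are hypothetical.
 *
 *	int nid = page_to_nid(virt_to_page(buf));
 *
 *	queue_work_node(nid, my_unbound_wq, &my_work);
 */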
8c20feb6 1926void delayed_work_timer_fn(struct timer_list *t)
1da177e4 1927{
8c20feb6 1928 struct delayed_work *dwork = from_timer(dwork, t, timer);
1da177e4 1929
e0aecdd8 1930 /* should have been called from irqsafe timer with irq already off */
60c057bc 1931 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1da177e4 1932}
1438ade5 1933EXPORT_SYMBOL(delayed_work_timer_fn);
1da177e4 1934
1935static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1936 struct delayed_work *dwork, unsigned long delay)
1da177e4 1937{
1938 struct timer_list *timer = &dwork->timer;
1939 struct work_struct *work = &dwork->work;
7beb2edf 1940
637fdbae 1941 WARN_ON_ONCE(!wq);
4b243563 1942 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1943 WARN_ON_ONCE(timer_pending(timer));
1944 WARN_ON_ONCE(!list_empty(&work->entry));
7beb2edf 1945
1946 /*
1947 * If @delay is 0, queue @dwork->work immediately. This is for
1948 * both optimization and correctness. The earliest @timer can
1949 * expire is on the closest next tick and delayed_work users depend
 1950 * on there being no such delay when @delay is 0.
1951 */
1952 if (!delay) {
1953 __queue_work(cpu, wq, &dwork->work);
1954 return;
1955 }
1956
60c057bc 1957 dwork->wq = wq;
1265057f 1958 dwork->cpu = cpu;
1959 timer->expires = jiffies + delay;
1960
1961 if (unlikely(cpu != WORK_CPU_UNBOUND))
1962 add_timer_on(timer, cpu);
1963 else
1964 add_timer(timer);
1965}
1966
1967/**
1968 * queue_delayed_work_on - queue work on specific CPU after delay
1969 * @cpu: CPU number to execute work on
1970 * @wq: workqueue to use
af9997e4 1971 * @dwork: work to queue
1972 * @delay: number of jiffies to wait before queueing
1973 *
d185af30 1974 * Return: %false if @work was already on a queue, %true otherwise. If
1975 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1976 * execution.
0fcb78c2 1977 */
1978bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1979 struct delayed_work *dwork, unsigned long delay)
7a6bc1cd 1980{
52bad64d 1981 struct work_struct *work = &dwork->work;
d4283e93 1982 bool ret = false;
8930caba 1983 unsigned long flags;
7a6bc1cd 1984
1985 /* read the comment in __queue_work() */
1986 local_irq_save(flags);
7a6bc1cd 1987
22df02bb 1988 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
7beb2edf 1989 __queue_delayed_work(cpu, wq, dwork, delay);
d4283e93 1990 ret = true;
7a6bc1cd 1991 }
8a3e77cc 1992
8930caba 1993 local_irq_restore(flags);
1994 return ret;
1995}
ad7b1f84 1996EXPORT_SYMBOL(queue_delayed_work_on);
c7fc77f7 1997
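/*
 * Illustrative sketch, not part of this file: arming a delayed work for
 * ~100ms out; my_dwork_fn and my_dwork are hypothetical names.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &my_dwork,
 *			      msecs_to_jiffies(100));
 */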
1998/**
1999 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2000 * @cpu: CPU number to execute work on
2001 * @wq: workqueue to use
2002 * @dwork: work to queue
2003 * @delay: number of jiffies to wait before queueing
2004 *
2005 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2006 * modify @dwork's timer so that it expires after @delay. If @delay is
2007 * zero, @work is guaranteed to be scheduled immediately regardless of its
2008 * current state.
2009 *
d185af30 2010 * Return: %false if @dwork was idle and queued, %true if @dwork was
2011 * pending and its timer was modified.
2012 *
e0aecdd8 2013 * This function is safe to call from any context including IRQ handler.
2014 * See try_to_grab_pending() for details.
2015 */
2016bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2017 struct delayed_work *dwork, unsigned long delay)
2018{
2019 unsigned long flags;
2020 int ret;
c7fc77f7 2021
2022 do {
2023 ret = try_to_grab_pending(&dwork->work, true, &flags);
2024 } while (unlikely(ret == -EAGAIN));
63bc0362 2025
2026 if (likely(ret >= 0)) {
2027 __queue_delayed_work(cpu, wq, dwork, delay);
2028 local_irq_restore(flags);
7a6bc1cd 2029 }
2030
2031 /* -ENOENT from try_to_grab_pending() becomes %true */
2032 return ret;
2033}
2034EXPORT_SYMBOL_GPL(mod_delayed_work_on);
2035
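/*
 * Illustrative sketch, not part of this file: the debouncing idiom this
 * enables via the mod_delayed_work() wrapper. Every event pushes the
 * deadline out; flush_fn only runs after 200ms of quiet. Names are
 * hypothetical.
 *
 *	static void flush_fn(struct work_struct *work) { ... }
 *	static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);
 *
 *	void on_event(void)
 *	{
 *		mod_delayed_work(system_wq, &flush_dwork,
 *				 msecs_to_jiffies(200));
 *	}
 */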
2036static void rcu_work_rcufn(struct rcu_head *rcu)
2037{
2038 struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2039
2040 /* read the comment in __queue_work() */
2041 local_irq_disable();
2042 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2043 local_irq_enable();
2044}
2045
2046/**
2047 * queue_rcu_work - queue work after a RCU grace period
2048 * @wq: workqueue to use
2049 * @rwork: work to queue
2050 *
2051 * Return: %false if @rwork was already pending, %true otherwise. Note
2052 * that a full RCU grace period is guaranteed only after a %true return.
bf393fd4 2053 * While @rwork is guaranteed to be executed after a %false return, the
2054 * execution may happen before a full RCU grace period has passed.
2055 */
2056bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2057{
2058 struct work_struct *work = &rwork->work;
2059
2060 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2061 rwork->wq = wq;
a7e30c0e 2062 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2063 return true;
2064 }
2065
2066 return false;
2067}
2068EXPORT_SYMBOL(queue_rcu_work);
2069
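/*
 * Illustrative sketch, not part of this file: deferring (possibly
 * sleeping) cleanup of an RCU-protected object until after a grace
 * period. struct my_obj and the function names are hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	static void my_obj_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(to_rcu_work(work),
 *						  struct my_obj, rwork);
 *
 *		kfree(obj);	sleeping is allowed here, unlike in call_rcu() callbacks
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_obj_free_fn);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */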
f7537df5 2070static struct worker *alloc_worker(int node)
2071{
2072 struct worker *worker;
2073
f7537df5 2074 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2075 if (worker) {
2076 INIT_LIST_HEAD(&worker->entry);
affee4b2 2077 INIT_LIST_HEAD(&worker->scheduled);
da028469 2078 INIT_LIST_HEAD(&worker->node);
2079 /* on creation a worker is in !idle && prep state */
2080 worker->flags = WORKER_PREP;
c8e55f36 2081 }
2082 return worker;
2083}
2084
2085static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2086{
2087 if (pool->cpu < 0 && pool->attrs->affn_strict)
2088 return pool->attrs->__pod_cpumask;
2089 else
2090 return pool->attrs->cpumask;
2091}
2092
2093/**
2094 * worker_attach_to_pool() - attach a worker to a pool
2095 * @worker: worker to be attached
2096 * @pool: the target pool
2097 *
2098 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and
2099 * cpu-binding of @worker are kept coordinated with the pool across
2100 * cpu-[un]hotplugs.
2101 */
2102static void worker_attach_to_pool(struct worker *worker,
2103 struct worker_pool *pool)
2104{
1258fae7 2105 mutex_lock(&wq_pool_attach_mutex);
4736cbf7 2106
4736cbf7 2107 /*
2108 * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2109 * stable across this function. See the comments above the flag
2110 * definition for details.
2111 */
2112 if (pool->flags & POOL_DISASSOCIATED)
2113 worker->flags |= WORKER_UNBOUND;
2114 else
2115 kthread_set_per_cpu(worker->task, pool->cpu);
4736cbf7 2116
640f17c8 2117 if (worker->rescue_wq)
9546b29e 2118 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
640f17c8 2119
4736cbf7 2120 list_add_tail(&worker->node, &pool->workers);
a2d812a2 2121 worker->pool = pool;
4736cbf7 2122
1258fae7 2123 mutex_unlock(&wq_pool_attach_mutex);
2124}
2125
2126/**
2127 * worker_detach_from_pool() - detach a worker from its pool
2128 * @worker: worker which is attached to its pool
60f5a4bc 2129 *
2130 * Undo the attaching which had been done in worker_attach_to_pool(). The
 2131 * caller worker shouldn't access the pool after detaching unless it holds
 2132 * another reference to the pool.
60f5a4bc 2133 */
a2d812a2 2134static void worker_detach_from_pool(struct worker *worker)
60f5a4bc 2135{
a2d812a2 2136 struct worker_pool *pool = worker->pool;
2137 struct completion *detach_completion = NULL;
2138
1258fae7 2139 mutex_lock(&wq_pool_attach_mutex);
a2d812a2 2140
5c25b5ff 2141 kthread_set_per_cpu(worker->task, -1);
da028469 2142 list_del(&worker->node);
2143 worker->pool = NULL;
2144
e02b9312 2145 if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
60f5a4bc 2146 detach_completion = pool->detach_completion;
1258fae7 2147 mutex_unlock(&wq_pool_attach_mutex);
60f5a4bc 2148
2149 /* clear leftover flags without pool->lock after it is detached */
2150 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2151
2152 if (detach_completion)
2153 complete(detach_completion);
2154}
2155
2156/**
2157 * create_worker - create a new workqueue worker
63d95a91 2158 * @pool: pool the new worker will belong to
c34056a3 2159 *
051e1850 2160 * Create and start a new worker which is attached to @pool.
2161 *
2162 * CONTEXT:
2163 * Might sleep. Does GFP_KERNEL allocations.
2164 *
d185af30 2165 * Return:
c34056a3
TH
2166 * Pointer to the newly created worker.
2167 */
bc2ae0f5 2168static struct worker *create_worker(struct worker_pool *pool)
c34056a3 2169{
2170 struct worker *worker;
2171 int id;
5d9c7a1e 2172 char id_buf[23];
c34056a3 2173
7cda9aae 2174 /* ID is needed to determine kthread name */
e441b56f 2175 id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2176 if (id < 0) {
2177 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2178 ERR_PTR(id));
e441b56f 2179 return NULL;
3f0ea0b8 2180 }
c34056a3 2181
f7537df5 2182 worker = alloc_worker(pool->node);
2183 if (!worker) {
2184 pr_err_once("workqueue: Failed to allocate a worker\n");
c34056a3 2185 goto fail;
3f0ea0b8 2186 }
TH
2188 worker->id = id;
2189
29c91e99 2190 if (pool->cpu >= 0)
e3c916a4
TH
2191 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
2192 pool->attrs->nice < 0 ? "H" : "");
f3421797 2193 else
2194 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2195
f3f90ad4 2196 worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
e3c916a4 2197 "kworker/%s", id_buf);
3f0ea0b8 2198 if (IS_ERR(worker->task)) {
2199 if (PTR_ERR(worker->task) == -EINTR) {
2200 pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2201 id_buf);
2202 } else {
2203 pr_err_once("workqueue: Failed to create a worker thread: %pe",
2204 worker->task);
2205 }
c34056a3 2206 goto fail;
3f0ea0b8 2207 }
c34056a3 2208
91151228 2209 set_user_nice(worker->task, pool->attrs->nice);
9546b29e 2210 kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
91151228 2211
da028469 2212 /* successful, attach the worker to the pool */
4736cbf7 2213 worker_attach_to_pool(worker, pool);
822d8405 2214
051e1850 2215 /* start the newly created worker */
a9b8a985 2216 raw_spin_lock_irq(&pool->lock);
0219a352 2217
2218 worker->pool->nr_workers++;
2219 worker_enter_idle(worker);
2220 kick_pool(pool);
2221
2222 /*
2223 * @worker is waiting on a completion in kthread() and will trigger hung
 2224 * check if not woken up soon. As kick_pool() might not have woken it
2225 * up, wake it up explicitly once more.
2226 */
051e1850 2227 wake_up_process(worker->task);
0219a352 2228
a9b8a985 2229 raw_spin_unlock_irq(&pool->lock);
051e1850 2230
c34056a3 2231 return worker;
822d8405 2232
c34056a3 2233fail:
e441b56f 2234 ida_free(&pool->worker_ida, id);
2235 kfree(worker);
2236 return NULL;
2237}
2238
2239static void unbind_worker(struct worker *worker)
2240{
2241 lockdep_assert_held(&wq_pool_attach_mutex);
2242
2243 kthread_set_per_cpu(worker->task, -1);
2244 if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2245 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2246 else
2247 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2248}
2249
2250static void wake_dying_workers(struct list_head *cull_list)
2251{
2252 struct worker *worker, *tmp;
2253
2254 list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2255 list_del_init(&worker->entry);
2256 unbind_worker(worker);
2257 /*
2258 * If the worker was somehow already running, then it had to be
2259 * in pool->idle_list when set_worker_dying() happened or we
2260 * wouldn't have gotten here.
2261 *
2262 * Thus, the worker must either have observed the WORKER_DIE
2263 * flag, or have set its state to TASK_IDLE. Either way, the
2264 * below will be observed by the worker and is safe to do
2265 * outside of pool->lock.
2266 */
2267 wake_up_process(worker->task);
2268 }
2269}
2270
c34056a3 2271/**
e02b9312 2272 * set_worker_dying - Tag a worker for destruction
c34056a3 2273 * @worker: worker to be destroyed
e02b9312 2274 * @list: transfer worker away from its pool->idle_list and into list
c34056a3 2275 *
2276 * Tag @worker for destruction and adjust @pool stats accordingly. The worker
2277 * should be idle.
2278 *
2279 * CONTEXT:
a9b8a985 2280 * raw_spin_lock_irq(pool->lock).
c34056a3 2281 */
e02b9312 2282static void set_worker_dying(struct worker *worker, struct list_head *list)
c34056a3 2283{
bd7bdd43 2284 struct worker_pool *pool = worker->pool;
c34056a3 2285
cd549687 2286 lockdep_assert_held(&pool->lock);
e02b9312 2287 lockdep_assert_held(&wq_pool_attach_mutex);
cd549687 2288
c34056a3 2289 /* sanity check frenzy */
6183c009 2290 if (WARN_ON(worker->current_work) ||
2291 WARN_ON(!list_empty(&worker->scheduled)) ||
2292 WARN_ON(!(worker->flags & WORKER_IDLE)))
6183c009 2293 return;
c34056a3 2294
2295 pool->nr_workers--;
2296 pool->nr_idle--;
5bdfff96 2297
cb444766 2298 worker->flags |= WORKER_DIE;
2299
2300 list_move(&worker->entry, list);
2301 list_move(&worker->node, &pool->dying_workers);
2302}
2303
2304/**
2305 * idle_worker_timeout - check if some idle workers can now be deleted.
2306 * @t: The pool's idle_timer that just expired
2307 *
2308 * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2309 * worker_leave_idle(), as a worker flicking between idle and active while its
2310 * pool is at the too_many_workers() tipping point would cause too much timer
2311 * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2312 * it expire and re-evaluate things from there.
2313 */
32a6c723 2314static void idle_worker_timeout(struct timer_list *t)
e22bee78 2315{
32a6c723 2316 struct worker_pool *pool = from_timer(pool, t, idle_timer);
2317 bool do_cull = false;
2318
2319 if (work_pending(&pool->idle_cull_work))
2320 return;
e22bee78 2321
a9b8a985 2322 raw_spin_lock_irq(&pool->lock);
e22bee78 2323
3f959aa3 2324 if (too_many_workers(pool)) {
2325 struct worker *worker;
2326 unsigned long expires;
2327
2328 /* idle_list is kept in LIFO order, check the last one */
2329 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2330 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2331 do_cull = !time_before(jiffies, expires);
2332
2333 if (!do_cull)
2334 mod_timer(&pool->idle_timer, expires);
2335 }
2336 raw_spin_unlock_irq(&pool->lock);
2337
2338 if (do_cull)
2339 queue_work(system_unbound_wq, &pool->idle_cull_work);
2340}
2341
2342/**
2343 * idle_cull_fn - cull workers that have been idle for too long.
2344 * @work: the pool's work for handling these idle workers
2345 *
2346 * This goes through a pool's idle workers and gets rid of those that have been
2347 * idle for at least IDLE_WORKER_TIMEOUT seconds.
2348 *
2349 * We don't want to disturb isolated CPUs because of a pcpu kworker being
2350 * culled, so this also resets worker affinity. This requires a sleepable
2351 * context, hence the split between timer callback and work item.
2352 */
2353static void idle_cull_fn(struct work_struct *work)
2354{
2355 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
9680540c 2356 LIST_HEAD(cull_list);
3f959aa3 2357
2358 /*
2359 * Grabbing wq_pool_attach_mutex here ensures an already-running worker
 2360 * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2361 * path. This is required as a previously-preempted worker could run after
2362 * set_worker_dying() has happened but before wake_dying_workers() did.
2363 */
2364 mutex_lock(&wq_pool_attach_mutex);
2365 raw_spin_lock_irq(&pool->lock);
2366
2367 while (too_many_workers(pool)) {
2368 struct worker *worker;
2369 unsigned long expires;
2370
63d95a91 2371 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2372 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2373
3347fc9f 2374 if (time_before(jiffies, expires)) {
63d95a91 2375 mod_timer(&pool->idle_timer, expires);
3347fc9f 2376 break;
d5abe669 2377 }
3347fc9f 2378
e02b9312 2379 set_worker_dying(worker, &cull_list);
2380 }
2381
a9b8a985 2382 raw_spin_unlock_irq(&pool->lock);
2383 wake_dying_workers(&cull_list);
2384 mutex_unlock(&wq_pool_attach_mutex);
e22bee78 2385}
d5abe669 2386
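/*
 * Illustrative sketch, not part of this file: the general timer-to-work
 * split used by idle_worker_timeout()/idle_cull_fn() above, for any case
 * where an atomic timer callback must hand off to a sleepable context.
 * Names are hypothetical.
 *
 *	static void my_timer_fn(struct timer_list *t)
 *	{
 *		struct my_ctx *ctx = from_timer(ctx, t, timer);
 *
 *		queue_work(system_unbound_wq, &ctx->deferred_work);
 *	}
 *
 *	static void my_deferred_fn(struct work_struct *work)
 *	{
 *		... can sleep, take mutexes, change CPU affinities ...
 *	}
 */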
493a1724 2387static void send_mayday(struct work_struct *work)
e22bee78 2388{
112202d9
TH
2389 struct pool_workqueue *pwq = get_work_pwq(work);
2390 struct workqueue_struct *wq = pwq->wq;
493a1724 2391
2e109a28 2392 lockdep_assert_held(&wq_mayday_lock);
e22bee78 2393
493008a8 2394 if (!wq->rescuer)
493a1724 2395 return;
2396
2397 /* mayday mayday mayday */
493a1724 2398 if (list_empty(&pwq->mayday_node)) {
2399 /*
2400 * If @pwq is for an unbound wq, its base ref may be put at
2401 * any time due to an attribute change. Pin @pwq until the
2402 * rescuer is done with it.
2403 */
2404 get_pwq(pwq);
493a1724 2405 list_add_tail(&pwq->mayday_node, &wq->maydays);
e22bee78 2406 wake_up_process(wq->rescuer->task);
725e8ec5 2407 pwq->stats[PWQ_STAT_MAYDAY]++;
493a1724 2408 }
2409}
2410
32a6c723 2411static void pool_mayday_timeout(struct timer_list *t)
e22bee78 2412{
32a6c723 2413 struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2414 struct work_struct *work;
2415
2416 raw_spin_lock_irq(&pool->lock);
2417 raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
e22bee78 2418
63d95a91 2419 if (need_to_create_worker(pool)) {
2420 /*
2421 * We've been trying to create a new worker but
2422 * haven't been successful. We might be hitting an
2423 * allocation deadlock. Send distress signals to
2424 * rescuers.
2425 */
63d95a91 2426 list_for_each_entry(work, &pool->worklist, entry)
e22bee78 2427 send_mayday(work);
1da177e4 2428 }
e22bee78 2429
2430 raw_spin_unlock(&wq_mayday_lock);
2431 raw_spin_unlock_irq(&pool->lock);
e22bee78 2432
63d95a91 2433 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2434}
2435
2436/**
2437 * maybe_create_worker - create a new worker if necessary
63d95a91 2438 * @pool: pool to create a new worker for
e22bee78 2439 *
63d95a91 2440 * Create a new worker for @pool if necessary. @pool is guaranteed to
2441 * have at least one idle worker on return from this function. If
2442 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
63d95a91 2443 * sent to all rescuers with works scheduled on @pool to resolve
2444 * possible allocation deadlock.
2445 *
2446 * On return, need_to_create_worker() is guaranteed to be %false and
2447 * may_start_working() %true.
2448 *
2449 * LOCKING:
a9b8a985 2450 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2451 * multiple times. Does GFP_KERNEL allocations. Called only from
2452 * manager.
e22bee78 2453 */
29187a9e 2454static void maybe_create_worker(struct worker_pool *pool)
2455__releases(&pool->lock)
2456__acquires(&pool->lock)
1da177e4 2457{
e22bee78 2458restart:
a9b8a985 2459 raw_spin_unlock_irq(&pool->lock);
9f9c2364 2460
e22bee78 2461 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
63d95a91 2462 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2463
2464 while (true) {
051e1850 2465 if (create_worker(pool) || !need_to_create_worker(pool))
e22bee78 2466 break;
1da177e4 2467
e212f361 2468 schedule_timeout_interruptible(CREATE_COOLDOWN);
9f9c2364 2469
63d95a91 2470 if (!need_to_create_worker(pool))
2471 break;
2472 }
2473
63d95a91 2474 del_timer_sync(&pool->mayday_timer);
a9b8a985 2475 raw_spin_lock_irq(&pool->lock);
2476 /*
2477 * This is necessary even after a new worker was just successfully
2478 * created as @pool->lock was dropped and the new worker might have
2479 * already become busy.
2480 */
63d95a91 2481 if (need_to_create_worker(pool))
e22bee78 2482 goto restart;
2483}
2484
73f53c4a 2485/**
2486 * manage_workers - manage worker pool
2487 * @worker: self
73f53c4a 2488 *
706026c2 2489 * Assume the manager role and manage the worker pool @worker belongs
e22bee78 2490 * to. At any given time, there can be only zero or one manager per
706026c2 2491 * pool. The exclusion is handled automatically by this function.
2492 *
2493 * The caller can safely start processing works on false return. On
2494 * true return, it's guaranteed that need_to_create_worker() is false
2495 * and may_start_working() is true.
2496 *
2497 * CONTEXT:
a9b8a985 2498 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2499 * multiple times. Does GFP_KERNEL allocations.
2500 *
d185af30 2501 * Return:
2502 * %false if the pool doesn't need management and the caller can safely
2503 * start processing works, %true if management function was performed and
2504 * the conditions that the caller verified before calling the function may
2505 * no longer be true.
73f53c4a 2506 */
e22bee78 2507static bool manage_workers(struct worker *worker)
73f53c4a 2508{
63d95a91 2509 struct worker_pool *pool = worker->pool;
73f53c4a 2510
692b4825 2511 if (pool->flags & POOL_MANAGER_ACTIVE)
29187a9e 2512 return false;
2513
2514 pool->flags |= POOL_MANAGER_ACTIVE;
2607d7a6 2515 pool->manager = worker;
1e19ffc6 2516
29187a9e 2517 maybe_create_worker(pool);
e22bee78 2518
2607d7a6 2519 pool->manager = NULL;
692b4825 2520 pool->flags &= ~POOL_MANAGER_ACTIVE;
d8bb65ab 2521 rcuwait_wake_up(&manager_wait);
29187a9e 2522 return true;
2523}
2524
2525/**
2526 * process_one_work - process single work
c34056a3 2527 * @worker: self
2528 * @work: work to process
2529 *
 2530 * Process @work. This function contains all the logic necessary to
2531 * process a single work including synchronization against and
2532 * interaction with other workers on the same cpu, queueing and
2533 * flushing. As long as context requirement is met, any worker can
2534 * call this function to process a work.
2535 *
2536 * CONTEXT:
a9b8a985 2537 * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
a62428c0 2538 */
c34056a3 2539static void process_one_work(struct worker *worker, struct work_struct *work)
2540__releases(&pool->lock)
2541__acquires(&pool->lock)
a62428c0 2542{
112202d9 2543 struct pool_workqueue *pwq = get_work_pwq(work);
bd7bdd43 2544 struct worker_pool *pool = worker->pool;
c4560c2c 2545 unsigned long work_data;
2546#ifdef CONFIG_LOCKDEP
2547 /*
2548 * It is permissible to free the struct work_struct from
2549 * inside the function that is called from it, this we need to
2550 * take into account for lockdep too. To avoid bogus "held
2551 * lock freed" warnings as well as problems when looking into
2552 * work->lockdep_map, make a copy and use that here.
2553 */
2554 struct lockdep_map lockdep_map;
2555
2556 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
a62428c0 2557#endif
807407c0 2558 /* ensure we're on the correct CPU */
85327af6 2559 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
ec22ca5e 2560 raw_smp_processor_id() != pool->cpu);
25511a47 2561
8930caba 2562 /* claim and dequeue */
a62428c0 2563 debug_work_deactivate(work);
c9e7cf27 2564 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
c34056a3 2565 worker->current_work = work;
a2c1c57b 2566 worker->current_func = work->func;
112202d9 2567 worker->current_pwq = pwq;
616db877 2568 worker->current_at = worker->task->se.sum_exec_runtime;
c4560c2c 2569 work_data = *work_data_bits(work);
d812796e 2570 worker->current_color = get_work_color(work_data);
7a22ad75 2571
2572 /*
2573 * Record wq name for cmdline and debug reporting, may get
2574 * overridden through set_worker_desc().
2575 */
2576 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2577
2578 list_del_init(&work->entry);
2579
fb0e7beb 2580 /*
2581 * CPU intensive works don't participate in concurrency management.
2582 * They're the scheduler's responsibility. This takes @worker out
2583 * of concurrency management and the next code block will chain
2584 * execution of the pending work items.
fb0e7beb 2585 */
616db877 2586 if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
228f1d00 2587 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
fb0e7beb 2588
974271c4 2589 /*
2590 * Kick @pool if necessary. It's always noop for per-cpu worker pools
2591 * since nr_running would always be >= 1 at this point. This is used to
2592 * chain execution of the pending work items for WORKER_NOT_RUNNING
2593 * workers such as the UNBOUND and CPU_INTENSIVE ones.
974271c4 2594 */
0219a352 2595 kick_pool(pool);
974271c4 2596
8930caba 2597 /*
7c3eed5c 2598 * Record the last pool and clear PENDING which should be the last
d565ed63 2599 * update to @work. Also, do this inside @pool->lock so that
2600 * PENDING and queued state changes happen together while IRQ is
2601 * disabled.
8930caba 2602 */
7c3eed5c 2603 set_work_pool_and_clear_pending(work, pool->id);
a62428c0 2604
fe48ba7d 2605 pwq->stats[PWQ_STAT_STARTED]++;
a9b8a985 2606 raw_spin_unlock_irq(&pool->lock);
a62428c0 2607
a1d14934 2608 lock_map_acquire(&pwq->wq->lockdep_map);
a62428c0 2609 lock_map_acquire(&lockdep_map);
e6f3faa7 2610 /*
2611 * Strictly speaking we should mark the invariant state without holding
2612 * any locks, that is, before these two lock_map_acquire()'s.
2613 *
2614 * However, that would result in:
2615 *
2616 * A(W1)
2617 * WFC(C)
2618 * A(W1)
2619 * C(C)
2620 *
2621 * Which would create W1->C->W1 dependencies, even though there is no
2622 * actual deadlock possible. There are two solutions, using a
2623 * read-recursive acquire on the work(queue) 'locks', but this will then
f52be570 2624 * hit the lockdep limitation on recursive locks, or simply discard
2625 * these locks.
2626 *
2627 * AFAICT there is no possible deadlock scenario between the
2628 * flush_work() and complete() primitives (except for single-threaded
2629 * workqueues), so hiding them isn't a problem.
2630 */
f52be570 2631 lockdep_invariant_state(true);
e36c886a 2632 trace_workqueue_execute_start(work);
a2c1c57b 2633 worker->current_func(work);
2634 /*
2635 * While we must be careful to not use "work" after this, the trace
2636 * point will only record its address.
2637 */
1c5da0ec 2638 trace_workqueue_execute_end(work, worker->current_func);
725e8ec5 2639 pwq->stats[PWQ_STAT_COMPLETED]++;
a62428c0 2640 lock_map_release(&lockdep_map);
112202d9 2641 lock_map_release(&pwq->wq->lockdep_map);
2642
2643 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
044c782c 2644 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
d75f773c 2645 " last function: %ps\n",
2646 current->comm, preempt_count(), task_pid_nr(current),
2647 worker->current_func);
2648 debug_show_held_locks(current);
2649 dump_stack();
2650 }
2651
b22ce278 2652 /*
025f50f3 2653 * The following prevents a kworker from hogging CPU on !PREEMPTION
2654 * kernels, where a requeueing work item waiting for something to
2655 * happen could deadlock with stop_machine as such work item could
2656 * indefinitely requeue itself while all other CPUs are trapped in
2657 * stop_machine. At the same time, report a quiescent RCU state so
2658 * the same condition doesn't freeze RCU.
b22ce278 2659 */
a7e6425e 2660 cond_resched();
b22ce278 2661
a9b8a985 2662 raw_spin_lock_irq(&pool->lock);
a62428c0 2663
2664 /*
2665 * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
2666 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
2667 * wq_cpu_intensive_thresh_us. Clear it.
2668 */
2669 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
fb0e7beb 2670
2671 /* tag the worker for identification in schedule() */
2672 worker->last_func = worker->current_func;
2673
a62428c0 2674 /* we're done with it, release */
42f8570f 2675 hash_del(&worker->hentry);
c34056a3 2676 worker->current_work = NULL;
a2c1c57b 2677 worker->current_func = NULL;
112202d9 2678 worker->current_pwq = NULL;
d812796e 2679 worker->current_color = INT_MAX;
c4560c2c 2680 pwq_dec_nr_in_flight(pwq, work_data);
2681}
2682
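/*
 * Illustrative sketch, not part of this file: a workqueue whose items are
 * known CPU hogs can opt out of concurrency management up front instead
 * of relying on wq_worker_tick() detection. Name is hypothetical.
 *
 *	crunch_wq = alloc_workqueue("crunch", WQ_CPU_INTENSIVE, 0);
 */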
2683/**
2684 * process_scheduled_works - process scheduled works
2685 * @worker: self
2686 *
2687 * Process all scheduled works. Please note that the scheduled list
2688 * may change while processing a work, so this function repeatedly
2689 * fetches a work from the top and executes it.
2690 *
2691 * CONTEXT:
a9b8a985 2692 * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
2693 * multiple times.
2694 */
2695static void process_scheduled_works(struct worker *worker)
1da177e4 2696{
2697 struct work_struct *work;
2698 bool first = true;
2699
2700 while ((work = list_first_entry_or_null(&worker->scheduled,
2701 struct work_struct, entry))) {
2702 if (first) {
2703 worker->pool->watchdog_ts = jiffies;
2704 first = false;
2705 }
c34056a3 2706 process_one_work(worker, work);
1da177e4 2707 }
2708}
2709
2710static void set_pf_worker(bool val)
2711{
2712 mutex_lock(&wq_pool_attach_mutex);
2713 if (val)
2714 current->flags |= PF_WQ_WORKER;
2715 else
2716 current->flags &= ~PF_WQ_WORKER;
2717 mutex_unlock(&wq_pool_attach_mutex);
2718}
2719
2720/**
2721 * worker_thread - the worker thread function
c34056a3 2722 * @__worker: self
4690c4ab 2723 *
2724 * The worker thread function. All workers belong to a worker_pool -
2725 * either a per-cpu one or dynamic unbound one. These workers process all
2726 * work items regardless of their specific target workqueue. The only
2727 * exception is work items which belong to workqueues with a rescuer which
2728 * will be explained in rescuer_thread().
2729 *
2730 * Return: 0
4690c4ab 2731 */
c34056a3 2732static int worker_thread(void *__worker)
1da177e4 2733{
c34056a3 2734 struct worker *worker = __worker;
bd7bdd43 2735 struct worker_pool *pool = worker->pool;
1da177e4 2736
e22bee78 2737 /* tell the scheduler that this is a workqueue worker */
197f6acc 2738 set_pf_worker(true);
c8e55f36 2739woke_up:
a9b8a985 2740 raw_spin_lock_irq(&pool->lock);
1da177e4 2741
2742 /* am I supposed to die? */
2743 if (unlikely(worker->flags & WORKER_DIE)) {
a9b8a985 2744 raw_spin_unlock_irq(&pool->lock);
197f6acc 2745 set_pf_worker(false);
2746
2747 set_task_comm(worker->task, "kworker/dying");
e441b56f 2748 ida_free(&pool->worker_ida, worker->id);
a2d812a2 2749 worker_detach_from_pool(worker);
e02b9312 2750 WARN_ON_ONCE(!list_empty(&worker->entry));
60f5a4bc 2751 kfree(worker);
a9ab775b 2752 return 0;
c8e55f36 2753 }
affee4b2 2754
c8e55f36 2755 worker_leave_idle(worker);
db7bccf4 2756recheck:
e22bee78 2757 /* no more worker necessary? */
63d95a91 2758 if (!need_more_worker(pool))
2759 goto sleep;
2760
2761 /* do we need to manage? */
63d95a91 2762 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2763 goto recheck;
2764
2765 /*
2766 * ->scheduled list can only be filled while a worker is
2767 * preparing to process a work or actually processing it.
2768 * Make sure nobody diddled with it while I was sleeping.
2769 */
6183c009 2770 WARN_ON_ONCE(!list_empty(&worker->scheduled));
c8e55f36 2771
e22bee78 2772 /*
2773 * Finish PREP stage. We're guaranteed to have at least one idle
2774 * worker or that someone else has already assumed the manager
2775 * role. This is where @worker starts participating in concurrency
2776 * management if applicable and concurrency management is restored
2777 * after being rebound. See rebind_workers() for details.
e22bee78 2778 */
a9ab775b 2779 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2780
2781 do {
c8e55f36 2782 struct work_struct *work =
bd7bdd43 2783 list_first_entry(&pool->worklist,
2784 struct work_struct, entry);
2785
2786 if (assign_work(work, worker, NULL))
2787 process_scheduled_works(worker);
63d95a91 2788 } while (keep_working(pool));
e22bee78 2789
228f1d00 2790 worker_set_flags(worker, WORKER_PREP);
d313dd85 2791sleep:
c8e55f36 2792 /*
2793 * pool->lock is held and there's no work to process and no need to
2794 * manage, sleep. Workers are woken up only while holding
2795 * pool->lock or from local cpu, so setting the current state
2796 * before releasing pool->lock is enough to prevent losing any
2797 * event.
2798 */
2799 worker_enter_idle(worker);
c5a94a61 2800 __set_current_state(TASK_IDLE);
a9b8a985 2801 raw_spin_unlock_irq(&pool->lock);
2802 schedule();
2803 goto woke_up;
2804}
2805
2806/**
2807 * rescuer_thread - the rescuer thread function
111c225a 2808 * @__rescuer: self
2809 *
2810 * Workqueue rescuer thread function. There's one rescuer for each
493008a8 2811 * workqueue which has WQ_MEM_RECLAIM set.
e22bee78 2812 *
706026c2 2813 * Regular work processing on a pool may block trying to create a new
 2814 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2815 * developing into deadlock if some works currently on the same queue
2816 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2817 * the problem rescuer solves.
2818 *
 2819 * When such a condition is possible, the pool summons the rescuers of all
 2820 * workqueues which have works queued on the pool and lets them process
2821 * those works so that forward progress can be guaranteed.
2822 *
2823 * This should happen rarely.
2824 *
2825 * Return: 0
e22bee78 2826 */
111c225a 2827static int rescuer_thread(void *__rescuer)
e22bee78 2828{
2829 struct worker *rescuer = __rescuer;
2830 struct workqueue_struct *wq = rescuer->rescue_wq;
4d595b86 2831 bool should_stop;
2832
2833 set_user_nice(current, RESCUER_NICE_LEVEL);
2834
2835 /*
2836 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it
2837 * doesn't participate in concurrency management.
2838 */
197f6acc 2839 set_pf_worker(true);
e22bee78 2840repeat:
c5a94a61 2841 set_current_state(TASK_IDLE);
e22bee78 2842
2843 /*
2844 * By the time the rescuer is requested to stop, the workqueue
2845 * shouldn't have any work pending, but @wq->maydays may still have
2846 * pwq(s) queued. This can happen by non-rescuer workers consuming
2847 * all the work items before the rescuer got to them. Go through
2848 * @wq->maydays processing before acting on should_stop so that the
2849 * list is always empty on exit.
2850 */
2851 should_stop = kthread_should_stop();
e22bee78 2852
493a1724 2853 /* see whether any pwq is asking for help */
a9b8a985 2854 raw_spin_lock_irq(&wq_mayday_lock);
2855
2856 while (!list_empty(&wq->maydays)) {
2857 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2858 struct pool_workqueue, mayday_node);
112202d9 2859 struct worker_pool *pool = pwq->pool;
2860 struct work_struct *work, *n;
2861
2862 __set_current_state(TASK_RUNNING);
2863 list_del_init(&pwq->mayday_node);
2864
a9b8a985 2865 raw_spin_unlock_irq(&wq_mayday_lock);
e22bee78 2866
2867 worker_attach_to_pool(rescuer, pool);
2868
a9b8a985 2869 raw_spin_lock_irq(&pool->lock);
2870
2871 /*
2872 * Slurp in all works issued via this workqueue and
2873 * process'em.
2874 */
873eaca6 2875 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
82607adc 2876 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2877 if (get_work_pwq(work) == pwq &&
2878 assign_work(work, rescuer, &n))
725e8ec5 2879 pwq->stats[PWQ_STAT_RESCUED]++;
82607adc 2880 }
e22bee78 2881
873eaca6 2882 if (!list_empty(&rescuer->scheduled)) {
2883 process_scheduled_works(rescuer);
2884
2885 /*
2886 * The above execution of rescued work items could
2887 * have created more to rescue through
f97a4a1a 2888 * pwq_activate_first_inactive() or chained
2889 * queueing. Let's put @pwq back on mayday list so
2890 * that such back-to-back work items, which may be
2891 * being used to relieve memory pressure, don't
 2892 * incur MAYDAY_INTERVAL delay in between.
2893 */
4f3f4cf3 2894 if (pwq->nr_active && need_to_create_worker(pool)) {
a9b8a985 2895 raw_spin_lock(&wq_mayday_lock);
2896 /*
2897 * Queue iff we aren't racing destruction
2898 * and somebody else hasn't queued it already.
2899 */
2900 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
2901 get_pwq(pwq);
2902 list_add_tail(&pwq->mayday_node, &wq->maydays);
2903 }
a9b8a985 2904 raw_spin_unlock(&wq_mayday_lock);
2905 }
2906 }
7576958a 2907
2908 /*
2909 * Put the reference grabbed by send_mayday(). @pool won't
13b1d625 2910 * go away while we're still attached to it.
2911 */
2912 put_pwq(pwq);
2913
7576958a 2914 /*
2915 * Leave this pool. Notify regular workers; otherwise, we end up
2916 * with 0 concurrency and stalling the execution.
7576958a 2917 */
0219a352 2918 kick_pool(pool);
7576958a 2919
a9b8a985 2920 raw_spin_unlock_irq(&pool->lock);
13b1d625 2921
a2d812a2 2922 worker_detach_from_pool(rescuer);
13b1d625 2923
a9b8a985 2924 raw_spin_lock_irq(&wq_mayday_lock);
2925 }
2926
a9b8a985 2927 raw_spin_unlock_irq(&wq_mayday_lock);
493a1724 2928
2929 if (should_stop) {
2930 __set_current_state(TASK_RUNNING);
197f6acc 2931 set_pf_worker(false);
2932 return 0;
2933 }
2934
2935 /* rescuers should never participate in concurrency management */
2936 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2937 schedule();
2938 goto repeat;
2939}
2940
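/*
 * Illustrative sketch, not part of this file: a workqueue on a memory
 * reclaim path requests a rescuer at allocation time; the name is
 * hypothetical.
 *
 *	my_writeback_wq = alloc_workqueue("my_writeback", WQ_MEM_RECLAIM, 0);
 */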
2941/**
2942 * check_flush_dependency - check for flush dependency sanity
2943 * @target_wq: workqueue being flushed
2944 * @target_work: work item being flushed (NULL for workqueue flushes)
2945 *
2946 * %current is trying to flush the whole @target_wq or @target_work on it.
2947 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2948 * reclaiming memory or running on a workqueue which doesn't have
2949 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2950 * a deadlock.
2951 */
2952static void check_flush_dependency(struct workqueue_struct *target_wq,
2953 struct work_struct *target_work)
2954{
2955 work_func_t target_func = target_work ? target_work->func : NULL;
2956 struct worker *worker;
2957
2958 if (target_wq->flags & WQ_MEM_RECLAIM)
2959 return;
2960
2961 worker = current_wq_worker();
2962
2963 WARN_ONCE(current->flags & PF_MEMALLOC,
d75f773c 2964 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
fca839c0 2965 current->pid, current->comm, target_wq->name, target_func);
2966 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2967 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
d75f773c 2968 "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
fca839c0
TH
2969 worker->current_pwq->wq->name, worker->current_func,
2970 target_wq->name, target_func);
2971}
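
/*
 * Illustrative sketch of the dependency this check warns about (not part of
 * the original file; "reclaim_wq", "plain_wq" and reclaim_work_fn() are
 * hypothetical, only WQ_MEM_RECLAIM, alloc_workqueue() and flush_workqueue()
 * are real APIs):
 *
 *	reclaim_wq = alloc_workqueue("reclaim", WQ_MEM_RECLAIM, 0);
 *	plain_wq = alloc_workqueue("plain", 0, 0);
 *
 *	static void reclaim_work_fn(struct work_struct *work)
 *	{
 *		// runs on reclaim_wq, possibly as part of memory reclaim
 *		flush_workqueue(plain_wq);	// trips the second WARN_ONCE
 *						// above: reclaim progress now
 *						// depends on a wq without a
 *						// rescuer
 *	}
 */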
2972
fc2e4d70
ON
2973struct wq_barrier {
2974 struct work_struct work;
2975 struct completion done;
2607d7a6 2976 struct task_struct *task; /* purely informational */
fc2e4d70
ON
2977};
2978
2979static void wq_barrier_func(struct work_struct *work)
2980{
2981 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2982 complete(&barr->done);
2983}
2984
4690c4ab
TH
2985/**
2986 * insert_wq_barrier - insert a barrier work
112202d9 2987 * @pwq: pwq to insert barrier into
4690c4ab 2988 * @barr: wq_barrier to insert
affee4b2
TH
2989 * @target: target work to attach @barr to
2990 * @worker: worker currently executing @target, NULL if @target is not executing
4690c4ab 2991 *
affee4b2
TH
2992 * @barr is linked to @target such that @barr is completed only after
2993 * @target finishes execution. Please note that the ordering
2994 * guarantee is observed only with respect to @target and on the local
2995 * cpu.
2996 *
2997 * Currently, a queued barrier can't be canceled. This is because
2998 * try_to_grab_pending() can't determine whether the work to be
2999 * grabbed is at the head of the queue and thus can't clear LINKED
3000 * flag of the previous work while there must be a valid next work
3001 * after a work with LINKED flag set.
3002 *
3003 * Note that when @worker is non-NULL, @target may be modified
112202d9 3004 * underneath us, so we can't reliably determine pwq from @target.
4690c4ab
TH
3005 *
3006 * CONTEXT:
a9b8a985 3007 * raw_spin_lock_irq(pool->lock).
4690c4ab 3008 */
112202d9 3009static void insert_wq_barrier(struct pool_workqueue *pwq,
affee4b2
TH
3010 struct wq_barrier *barr,
3011 struct work_struct *target, struct worker *worker)
fc2e4d70 3012{
d812796e
LJ
3013 unsigned int work_flags = 0;
3014 unsigned int work_color;
affee4b2 3015 struct list_head *head;
affee4b2 3016
dc186ad7 3017 /*
d565ed63 3018 * debugobject calls are safe here even with pool->lock locked
dc186ad7
TG
3019 * as we know for sure that this will not trigger any of the
3020 * checks and call back into the fixup functions where we
3021 * might deadlock.
3022 */
ca1cab37 3023 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
22df02bb 3024 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
52fa5bc5 3025
fd1a5b04
BP
3026 init_completion_map(&barr->done, &target->lockdep_map);
3027
2607d7a6 3028 barr->task = current;
83c22520 3029
018f3a13
LJ
3030 /* The barrier work item does not participate in pwq->nr_active. */
3031 work_flags |= WORK_STRUCT_INACTIVE;
3032
affee4b2
TH
3033 /*
3034 * If @target is currently being executed, schedule the
3035 * barrier to the worker; otherwise, put it after @target.
3036 */
d812796e 3037 if (worker) {
affee4b2 3038 head = worker->scheduled.next;
d812796e
LJ
3039 work_color = worker->current_color;
3040 } else {
affee4b2
TH
3041 unsigned long *bits = work_data_bits(target);
3042
3043 head = target->entry.next;
3044 /* there can already be other linked works, inherit and set */
d21cece0 3045 work_flags |= *bits & WORK_STRUCT_LINKED;
d812796e 3046 work_color = get_work_color(*bits);
affee4b2
TH
3047 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3048 }
3049
d812796e
LJ
3050 pwq->nr_in_flight[work_color]++;
3051 work_flags |= work_color_to_flags(work_color);
3052
d21cece0 3053 insert_work(pwq, &barr->work, head, work_flags);
fc2e4d70
ON
3054}
3055
73f53c4a 3056/**
112202d9 3057 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
73f53c4a
TH
3058 * @wq: workqueue being flushed
3059 * @flush_color: new flush color, < 0 for no-op
3060 * @work_color: new work color, < 0 for no-op
3061 *
112202d9 3062 * Prepare pwqs for workqueue flushing.
73f53c4a 3063 *
112202d9
TH
3064 * If @flush_color is non-negative, flush_color on all pwqs should be
3065 * -1. If no pwq has in-flight commands at the specified color, all
3066 * pwq->flush_color's stay at -1 and %false is returned. If any pwq
3067 * has in-flight commands, its pwq->flush_color is set to
3068 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
73f53c4a
TH
3069 * wakeup logic is armed and %true is returned.
3070 *
3071 * The caller should have initialized @wq->first_flusher prior to
3072 * calling this function with non-negative @flush_color. If
3073 * @flush_color is negative, no flush color update is done and %false
3074 * is returned.
3075 *
112202d9 3076 * If @work_color is non-negative, all pwqs should have the same
73f53c4a
TH
3077 * work_color which is previous to @work_color and all will be
3078 * advanced to @work_color.
3079 *
3080 * CONTEXT:
3c25a55d 3081 * mutex_lock(wq->mutex).
73f53c4a 3082 *
d185af30 3083 * Return:
73f53c4a
TH
3084 * %true if @flush_color >= 0 and there's something to flush. %false
3085 * otherwise.
3086 */
112202d9 3087static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
73f53c4a 3088 int flush_color, int work_color)
1da177e4 3089{
73f53c4a 3090 bool wait = false;
49e3cf44 3091 struct pool_workqueue *pwq;
1da177e4 3092
73f53c4a 3093 if (flush_color >= 0) {
6183c009 3094 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
112202d9 3095 atomic_set(&wq->nr_pwqs_to_flush, 1);
1da177e4 3096 }
2355b70f 3097
49e3cf44 3098 for_each_pwq(pwq, wq) {
112202d9 3099 struct worker_pool *pool = pwq->pool;
fc2e4d70 3100
a9b8a985 3101 raw_spin_lock_irq(&pool->lock);
83c22520 3102
73f53c4a 3103 if (flush_color >= 0) {
6183c009 3104 WARN_ON_ONCE(pwq->flush_color != -1);
fc2e4d70 3105
112202d9
TH
3106 if (pwq->nr_in_flight[flush_color]) {
3107 pwq->flush_color = flush_color;
3108 atomic_inc(&wq->nr_pwqs_to_flush);
73f53c4a
TH
3109 wait = true;
3110 }
3111 }
1da177e4 3112
73f53c4a 3113 if (work_color >= 0) {
6183c009 3114 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
112202d9 3115 pwq->work_color = work_color;
73f53c4a 3116 }
1da177e4 3117
a9b8a985 3118 raw_spin_unlock_irq(&pool->lock);
1da177e4 3119 }
2355b70f 3120
112202d9 3121 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
73f53c4a 3122 complete(&wq->first_flusher->done);
14441960 3123
73f53c4a 3124 return wait;
1da177e4
LT
3125}
3126
0fcb78c2 3127/**
c4f135d6 3128 * __flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c2 3129 * @wq: workqueue to flush
1da177e4 3130 *
c5aa87bb
TH
3131 * This function sleeps until all work items which were queued on entry
3132 * have finished execution, but it is not livelocked by new incoming ones.
1da177e4 3133 */
c4f135d6 3134void __flush_workqueue(struct workqueue_struct *wq)
1da177e4 3135{
73f53c4a
TH
3136 struct wq_flusher this_flusher = {
3137 .list = LIST_HEAD_INIT(this_flusher.list),
3138 .flush_color = -1,
fd1a5b04 3139 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
73f53c4a
TH
3140 };
3141 int next_color;
1da177e4 3142
3347fa09
TH
3143 if (WARN_ON(!wq_online))
3144 return;
3145
87915adc
JB
3146 lock_map_acquire(&wq->lockdep_map);
3147 lock_map_release(&wq->lockdep_map);
3148
3c25a55d 3149 mutex_lock(&wq->mutex);
73f53c4a
TH
3150
3151 /*
3152 * Start-to-wait phase
3153 */
3154 next_color = work_next_color(wq->work_color);
3155
3156 if (next_color != wq->flush_color) {
3157 /*
3158 * Color space is not full. The current work_color
3159 * becomes our flush_color and work_color is advanced
3160 * by one.
3161 */
6183c009 3162 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
73f53c4a
TH
3163 this_flusher.flush_color = wq->work_color;
3164 wq->work_color = next_color;
3165
3166 if (!wq->first_flusher) {
3167 /* no flush in progress, become the first flusher */
6183c009 3168 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
73f53c4a
TH
3169
3170 wq->first_flusher = &this_flusher;
3171
112202d9 3172 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
73f53c4a
TH
3173 wq->work_color)) {
3174 /* nothing to flush, done */
3175 wq->flush_color = next_color;
3176 wq->first_flusher = NULL;
3177 goto out_unlock;
3178 }
3179 } else {
3180 /* wait in queue */
6183c009 3181 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
73f53c4a 3182 list_add_tail(&this_flusher.list, &wq->flusher_queue);
112202d9 3183 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
73f53c4a
TH
3184 }
3185 } else {
3186 /*
3187 * Oops, color space is full, wait on overflow queue.
3188 * The next flush completion will assign us
3189 * flush_color and transfer to flusher_queue.
3190 */
3191 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3192 }
3193
fca839c0
TH
3194 check_flush_dependency(wq, NULL);
3195
3c25a55d 3196 mutex_unlock(&wq->mutex);
73f53c4a
TH
3197
3198 wait_for_completion(&this_flusher.done);
3199
3200 /*
3201 * Wake-up-and-cascade phase
3202 *
3203 * First flushers are responsible for cascading flushes and
3204 * handling overflow. Non-first flushers can simply return.
3205 */
00d5d15b 3206 if (READ_ONCE(wq->first_flusher) != &this_flusher)
73f53c4a
TH
3207 return;
3208
3c25a55d 3209 mutex_lock(&wq->mutex);
73f53c4a 3210
4ce48b37
TH
3211 /* we might have raced, check again with mutex held */
3212 if (wq->first_flusher != &this_flusher)
3213 goto out_unlock;
3214
00d5d15b 3215 WRITE_ONCE(wq->first_flusher, NULL);
73f53c4a 3216
6183c009
TH
3217 WARN_ON_ONCE(!list_empty(&this_flusher.list));
3218 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
73f53c4a
TH
3219
3220 while (true) {
3221 struct wq_flusher *next, *tmp;
3222
3223 /* complete all the flushers sharing the current flush color */
3224 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3225 if (next->flush_color != wq->flush_color)
3226 break;
3227 list_del_init(&next->list);
3228 complete(&next->done);
3229 }
3230
6183c009
TH
3231 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
3232 wq->flush_color != work_next_color(wq->work_color));
73f53c4a
TH
3233
3234 /* this flush_color is finished, advance by one */
3235 wq->flush_color = work_next_color(wq->flush_color);
3236
3237 /* one color has been freed, handle overflow queue */
3238 if (!list_empty(&wq->flusher_overflow)) {
3239 /*
3240 * Assign the same color to all overflowed
3241 * flushers, advance work_color and append to
3242 * flusher_queue. This is the start-to-wait
3243 * phase for these overflowed flushers.
3244 */
3245 list_for_each_entry(tmp, &wq->flusher_overflow, list)
3246 tmp->flush_color = wq->work_color;
3247
3248 wq->work_color = work_next_color(wq->work_color);
3249
3250 list_splice_tail_init(&wq->flusher_overflow,
3251 &wq->flusher_queue);
112202d9 3252 flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
73f53c4a
TH
3253 }
3254
3255 if (list_empty(&wq->flusher_queue)) {
6183c009 3256 WARN_ON_ONCE(wq->flush_color != wq->work_color);
73f53c4a
TH
3257 break;
3258 }
3259
3260 /*
3261 * Need to flush more colors. Make the next flusher
112202d9 3262 * the new first flusher and arm pwqs.
73f53c4a 3263 */
6183c009
TH
3264 WARN_ON_ONCE(wq->flush_color == wq->work_color);
3265 WARN_ON_ONCE(wq->flush_color != next->flush_color);
73f53c4a
TH
3266
3267 list_del_init(&next->list);
3268 wq->first_flusher = next;
3269
112202d9 3270 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
73f53c4a
TH
3271 break;
3272
3273 /*
3274 * Meh... this color is already done, clear first
3275 * flusher and repeat cascading.
3276 */
3277 wq->first_flusher = NULL;
3278 }
3279
3280out_unlock:
3c25a55d 3281 mutex_unlock(&wq->mutex);
1da177e4 3282}
c4f135d6 3283EXPORT_SYMBOL(__flush_workqueue);
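
/*
 * Minimal usage sketch (not in the original source; "my_wq" and "my_work"
 * are hypothetical, and callers normally use the flush_workqueue() wrapper):
 * the call returns only after everything queued before it has finished.
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);		// my_work and all earlier items done
 */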
1da177e4 3284
9c5a2ba7
TH
3285/**
3286 * drain_workqueue - drain a workqueue
3287 * @wq: workqueue to drain
3288 *
3289 * Wait until the workqueue becomes empty. While draining is in progress,
3290 * only chain queueing is allowed. IOW, only currently pending or running
3291 * work items on @wq can queue further work items on it. @wq is flushed
b749b1b6 3292 * repeatedly until it becomes empty. The number of flushing is determined
9c5a2ba7
TH
3293 * repeatedly until it becomes empty. The number of flushes is determined
3294 * by the depth of chaining and should be relatively small. Whine if it
3295 */
3296void drain_workqueue(struct workqueue_struct *wq)
3297{
3298 unsigned int flush_cnt = 0;
49e3cf44 3299 struct pool_workqueue *pwq;
9c5a2ba7
TH
3300
3301 /*
3302 * __queue_work() needs to test whether there are drainers, is much
3303 * hotter than drain_workqueue() and already looks at @wq->flags.
618b01eb 3304 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
9c5a2ba7 3305 */
87fc741e 3306 mutex_lock(&wq->mutex);
9c5a2ba7 3307 if (!wq->nr_drainers++)
618b01eb 3308 wq->flags |= __WQ_DRAINING;
87fc741e 3309 mutex_unlock(&wq->mutex);
9c5a2ba7 3310reflush:
c4f135d6 3311 __flush_workqueue(wq);
9c5a2ba7 3312
b09f4fd3 3313 mutex_lock(&wq->mutex);
76af4d93 3314
49e3cf44 3315 for_each_pwq(pwq, wq) {
fa2563e4 3316 bool drained;
9c5a2ba7 3317
a9b8a985 3318 raw_spin_lock_irq(&pwq->pool->lock);
f97a4a1a 3319 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
a9b8a985 3320 raw_spin_unlock_irq(&pwq->pool->lock);
fa2563e4
TT
3321
3322 if (drained)
9c5a2ba7
TH
3323 continue;
3324
3325 if (++flush_cnt == 10 ||
3326 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
e9ad2eb3
SZ
3327 pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
3328 wq->name, __func__, flush_cnt);
76af4d93 3329
b09f4fd3 3330 mutex_unlock(&wq->mutex);
9c5a2ba7
TH
3331 goto reflush;
3332 }
3333
9c5a2ba7 3334 if (!--wq->nr_drainers)
618b01eb 3335 wq->flags &= ~__WQ_DRAINING;
87fc741e 3336 mutex_unlock(&wq->mutex);
9c5a2ba7
TH
3337}
3338EXPORT_SYMBOL_GPL(drain_workqueue);
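
/*
 * Sketch of the chain-queueing case draining is meant for (hypothetical
 * names; more_steps_left() is assumed). A single flush_workqueue() only
 * covers items queued before the call; drain_workqueue() repeats the flush
 * until the chain dies out.
 *
 *	static void step_fn(struct work_struct *work)
 *	{
 *		if (more_steps_left())
 *			queue_work(step_wq, work);	// chain queueing
 *	}
 *	...
 *	drain_workqueue(step_wq);	// returns once nothing is left
 */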
3339
d6e89786
JB
3340static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
3341 bool from_cancel)
db700897 3342{
affee4b2 3343 struct worker *worker = NULL;
c9e7cf27 3344 struct worker_pool *pool;
112202d9 3345 struct pool_workqueue *pwq;
db700897
ON
3346
3347 might_sleep();
fa1b54e6 3348
24acfb71 3349 rcu_read_lock();
c9e7cf27 3350 pool = get_work_pool(work);
fa1b54e6 3351 if (!pool) {
24acfb71 3352 rcu_read_unlock();
baf59022 3353 return false;
fa1b54e6 3354 }
db700897 3355
a9b8a985 3356 raw_spin_lock_irq(&pool->lock);
0b3dae68 3357 /* see the comment in try_to_grab_pending() with the same code */
112202d9
TH
3358 pwq = get_work_pwq(work);
3359 if (pwq) {
3360 if (unlikely(pwq->pool != pool))
4690c4ab 3361 goto already_gone;
606a5020 3362 } else {
c9e7cf27 3363 worker = find_worker_executing_work(pool, work);
affee4b2 3364 if (!worker)
4690c4ab 3365 goto already_gone;
112202d9 3366 pwq = worker->current_pwq;
606a5020 3367 }
db700897 3368
fca839c0
TH
3369 check_flush_dependency(pwq->wq, work);
3370
112202d9 3371 insert_wq_barrier(pwq, barr, work, worker);
a9b8a985 3372 raw_spin_unlock_irq(&pool->lock);
7a22ad75 3373
e159489b 3374 /*
a1d14934
PZ
3375 * Force a lock recursion deadlock when using flush_work() inside a
3376 * single-threaded or rescuer equipped workqueue.
3377 *
3378 * For single threaded workqueues the deadlock happens when the flushed
3379 * work is queued after the work issuing the flush_work(). For rescuer equipped
3380 * workqueues the deadlock happens when the rescuer stalls, blocking
3381 * forward progress.
e159489b 3382 */
d6e89786
JB
3383 if (!from_cancel &&
3384 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
112202d9 3385 lock_map_acquire(&pwq->wq->lockdep_map);
a1d14934
PZ
3386 lock_map_release(&pwq->wq->lockdep_map);
3387 }
24acfb71 3388 rcu_read_unlock();
401a8d04 3389 return true;
4690c4ab 3390already_gone:
a9b8a985 3391 raw_spin_unlock_irq(&pool->lock);
24acfb71 3392 rcu_read_unlock();
401a8d04 3393 return false;
db700897 3394}
baf59022 3395
d6e89786
JB
3396static bool __flush_work(struct work_struct *work, bool from_cancel)
3397{
3398 struct wq_barrier barr;
3399
3400 if (WARN_ON(!wq_online))
3401 return false;
3402
4d43d395
TH
3403 if (WARN_ON(!work->func))
3404 return false;
3405
c0feea59
TH
3406 lock_map_acquire(&work->lockdep_map);
3407 lock_map_release(&work->lockdep_map);
87915adc 3408
d6e89786
JB
3409 if (start_flush_work(work, &barr, from_cancel)) {
3410 wait_for_completion(&barr.done);
3411 destroy_work_on_stack(&barr.work);
3412 return true;
3413 } else {
3414 return false;
3415 }
3416}
3417
baf59022
TH
3418/**
3419 * flush_work - wait for a work to finish executing the last queueing instance
3420 * @work: the work to flush
3421 *
606a5020
TH
3422 * Wait until @work has finished execution. @work is guaranteed to be idle
3423 * on return if it hasn't been requeued since flush started.
baf59022 3424 *
d185af30 3425 * Return:
baf59022
TH
3426 * %true if flush_work() waited for the work to finish execution,
3427 * %false if it was already idle.
3428 */
3429bool flush_work(struct work_struct *work)
3430{
d6e89786 3431 return __flush_work(work, false);
6e84d644 3432}
606a5020 3433EXPORT_SYMBOL_GPL(flush_work);
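
/*
 * Minimal sketch (hypothetical "dev->reset_work"): wait for one specific
 * work item rather than flushing the whole workqueue.
 *
 *	schedule_work(&dev->reset_work);
 *	...
 *	flush_work(&dev->reset_work);	// idle on return unless requeued
 */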
6e84d644 3434
8603e1b3 3435struct cwt_wait {
ac6424b9 3436 wait_queue_entry_t wait;
8603e1b3
TH
3437 struct work_struct *work;
3438};
3439
ac6424b9 3440static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
8603e1b3
TH
3441{
3442 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3443
3444 if (cwait->work != key)
3445 return 0;
3446 return autoremove_wake_function(wait, mode, sync, key);
3447}
3448
36e227d2 3449static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
1f1f642e 3450{
8603e1b3 3451 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
bbb68dfa 3452 unsigned long flags;
1f1f642e
ON
3453 int ret;
3454
3455 do {
bbb68dfa
TH
3456 ret = try_to_grab_pending(work, is_dwork, &flags);
3457 /*
8603e1b3
TH
3458 * If someone else is already canceling, wait for it to
3459 * finish. flush_work() doesn't work for PREEMPT_NONE
3460 * because we may get scheduled between @work's completion
3461 * and the other canceling task resuming and clearing
3462 * CANCELING - flush_work() will return false immediately
3463 * as @work is no longer busy, try_to_grab_pending() will
3464 * return -ENOENT as @work is still being canceled and the
3465 * other canceling task won't be able to clear CANCELING as
3466 * we're hogging the CPU.
3467 *
3468 * Let's wait for completion using a waitqueue. As this
3469 * may lead to the thundering herd problem, use a custom
3470 * wake function which matches @work along with exclusive
3471 * wait and wakeup.
bbb68dfa 3472 */
8603e1b3
TH
3473 if (unlikely(ret == -ENOENT)) {
3474 struct cwt_wait cwait;
3475
3476 init_wait(&cwait.wait);
3477 cwait.wait.func = cwt_wakefn;
3478 cwait.work = work;
3479
3480 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3481 TASK_UNINTERRUPTIBLE);
3482 if (work_is_canceling(work))
3483 schedule();
3484 finish_wait(&cancel_waitq, &cwait.wait);
3485 }
1f1f642e
ON
3486 } while (unlikely(ret < 0));
3487
bbb68dfa
TH
3488 /* tell other tasks trying to grab @work to back off */
3489 mark_work_canceling(work);
3490 local_irq_restore(flags);
3491
3347fa09
TH
3492 /*
3493 * This allows canceling during early boot. We know that @work
3494 * isn't executing.
3495 */
3496 if (wq_online)
d6e89786 3497 __flush_work(work, true);
3347fa09 3498
7a22ad75 3499 clear_work_data(work);
8603e1b3
TH
3500
3501 /*
3502 * Paired with prepare_to_wait() above so that either
3503 * waitqueue_active() is visible here or !work_is_canceling() is
3504 * visible there.
3505 */
3506 smp_mb();
3507 if (waitqueue_active(&cancel_waitq))
3508 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3509
1f1f642e
ON
3510 return ret;
3511}
3512
6e84d644 3513/**
401a8d04
TH
3514 * cancel_work_sync - cancel a work and wait for it to finish
3515 * @work: the work to cancel
6e84d644 3516 *
401a8d04
TH
3517 * Cancel @work and wait for its execution to finish. This function
3518 * can be used even if the work re-queues itself or migrates to
3519 * another workqueue. On return from this function, @work is
3520 * guaranteed to be not pending or executing on any CPU.
1f1f642e 3521 *
401a8d04
TH
3522 * cancel_work_sync(&delayed_work->work) must not be used for
3523 * delayed_work's. Use cancel_delayed_work_sync() instead.
6e84d644 3524 *
401a8d04 3525 * The caller must ensure that the workqueue on which @work was last
6e84d644 3526 * queued can't be destroyed before this function returns.
401a8d04 3527 *
d185af30 3528 * Return:
401a8d04 3529 * %true if @work was pending, %false otherwise.
6e84d644 3530 */
401a8d04 3531bool cancel_work_sync(struct work_struct *work)
6e84d644 3532{
36e227d2 3533 return __cancel_work_timer(work, false);
b89deed3 3534}
28e53bdd 3535EXPORT_SYMBOL_GPL(cancel_work_sync);
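
/*
 * Typical teardown sketch (hypothetical "dev"): after cancel_work_sync()
 * the item is neither pending nor running anywhere, so the memory backing
 * it may be freed safely.
 *
 *	cancel_work_sync(&dev->event_work);
 *	kfree(dev);
 */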
b89deed3 3536
6e84d644 3537/**
401a8d04
TH
3538 * flush_delayed_work - wait for a dwork to finish executing the last queueing
3539 * @dwork: the delayed work to flush
6e84d644 3540 *
401a8d04
TH
3541 * Delayed timer is cancelled and the pending work is queued for
3542 * immediate execution. Like flush_work(), this function only
3543 * considers the last queueing instance of @dwork.
1f1f642e 3544 *
d185af30 3545 * Return:
401a8d04
TH
3546 * %true if flush_work() waited for the work to finish execution,
3547 * %false if it was already idle.
6e84d644 3548 */
401a8d04
TH
3549bool flush_delayed_work(struct delayed_work *dwork)
3550{
8930caba 3551 local_irq_disable();
401a8d04 3552 if (del_timer_sync(&dwork->timer))
60c057bc 3553 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
8930caba 3554 local_irq_enable();
401a8d04
TH
3555 return flush_work(&dwork->work);
3556}
3557EXPORT_SYMBOL(flush_delayed_work);
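
/*
 * Sketch (hypothetical "dev->stats_dwork"): run a pending delayed work now
 * and wait for it instead of waiting out the remaining timer delay.
 *
 *	queue_delayed_work(system_wq, &dev->stats_dwork, 10 * HZ);
 *	...
 *	flush_delayed_work(&dev->stats_dwork);	// timer cancelled, work done
 */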
3558
05f0fe6b
TH
3559/**
3560 * flush_rcu_work - wait for a rwork to finish executing the last queueing
3561 * @rwork: the rcu work to flush
3562 *
3563 * Return:
3564 * %true if flush_rcu_work() waited for the work to finish execution,
3565 * %false if it was already idle.
3566 */
3567bool flush_rcu_work(struct rcu_work *rwork)
3568{
3569 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3570 rcu_barrier();
3571 flush_work(&rwork->work);
3572 return true;
3573 } else {
3574 return flush_work(&rwork->work);
3575 }
3576}
3577EXPORT_SYMBOL(flush_rcu_work);
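
/*
 * Sketch (hypothetical "obj->cleanup_rwork" and cleanup_fn()): an rcu_work
 * runs only after an RCU grace period, so the flush covers both the
 * rcu_barrier() and the execution.
 *
 *	INIT_RCU_WORK(&obj->cleanup_rwork, cleanup_fn);
 *	queue_rcu_work(system_wq, &obj->cleanup_rwork);
 *	...
 *	flush_rcu_work(&obj->cleanup_rwork);	// grace period elapsed and
 *						// cleanup_fn() has finished
 */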
3578
f72b8792
JA
3579static bool __cancel_work(struct work_struct *work, bool is_dwork)
3580{
3581 unsigned long flags;
3582 int ret;
3583
3584 do {
3585 ret = try_to_grab_pending(work, is_dwork, &flags);
3586 } while (unlikely(ret == -EAGAIN));
3587
3588 if (unlikely(ret < 0))
3589 return false;
3590
3591 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3592 local_irq_restore(flags);
3593 return ret;
3594}
3595
73b4b532
AG
3596/*
3597 * See cancel_delayed_work()
3598 */
3599bool cancel_work(struct work_struct *work)
3600{
3601 return __cancel_work(work, false);
3602}
3603EXPORT_SYMBOL(cancel_work);
3604
09383498 3605/**
57b30ae7
TH
3606 * cancel_delayed_work - cancel a delayed work
3607 * @dwork: delayed_work to cancel
09383498 3608 *
d185af30
YB
3609 * Kill off a pending delayed_work.
3610 *
3611 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3612 * pending.
3613 *
3614 * Note:
3615 * The work callback function may still be running on return, unless
3616 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3617 * use cancel_delayed_work_sync() to wait on it.
09383498 3618 *
57b30ae7 3619 * This function is safe to call from any context including IRQ handler.
09383498 3620 */
57b30ae7 3621bool cancel_delayed_work(struct delayed_work *dwork)
09383498 3622{
f72b8792 3623 return __cancel_work(&dwork->work, true);
09383498 3624}
57b30ae7 3625EXPORT_SYMBOL(cancel_delayed_work);
09383498 3626
401a8d04
TH
3627/**
3628 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3629 * @dwork: the delayed work to cancel
3630 *
3631 * This is cancel_work_sync() for delayed works.
3632 *
d185af30 3633 * Return:
401a8d04
TH
3634 * %true if @dwork was pending, %false otherwise.
3635 */
3636bool cancel_delayed_work_sync(struct delayed_work *dwork)
6e84d644 3637{
36e227d2 3638 return __cancel_work_timer(&dwork->work, true);
6e84d644 3639}
f5a421a4 3640EXPORT_SYMBOL(cancel_delayed_work_sync);
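
/*
 * Teardown sketch for a self-rearming poller (hypothetical names): the
 * sync variant is required because the callback may requeue itself.
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		...					// poll the hardware
 *		queue_delayed_work(system_wq, dwork, HZ);	// re-arm
 *	}
 *	...
 *	cancel_delayed_work_sync(&dev->poll_dwork);	// neither pending nor
 *							// running on return
 */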
1da177e4 3641
b6136773 3642/**
31ddd871 3643 * schedule_on_each_cpu - execute a function synchronously on each online CPU
b6136773 3644 * @func: the function to call
b6136773 3645 *
31ddd871
TH
3646 * schedule_on_each_cpu() executes @func on each online CPU using the
3647 * system workqueue and blocks until all CPUs have completed.
b6136773 3648 * schedule_on_each_cpu() is very slow.
31ddd871 3649 *
d185af30 3650 * Return:
31ddd871 3651 * 0 on success, -errno on failure.
b6136773 3652 */
65f27f38 3653int schedule_on_each_cpu(work_func_t func)
15316ba8
CL
3654{
3655 int cpu;
38f51568 3656 struct work_struct __percpu *works;
15316ba8 3657
b6136773
AM
3658 works = alloc_percpu(struct work_struct);
3659 if (!works)
15316ba8 3660 return -ENOMEM;
b6136773 3661
ffd8bea8 3662 cpus_read_lock();
93981800 3663
15316ba8 3664 for_each_online_cpu(cpu) {
9bfb1839
IM
3665 struct work_struct *work = per_cpu_ptr(works, cpu);
3666
3667 INIT_WORK(work, func);
b71ab8c2 3668 schedule_work_on(cpu, work);
65a64464 3669 }
93981800
TH
3670
3671 for_each_online_cpu(cpu)
3672 flush_work(per_cpu_ptr(works, cpu));
3673
ffd8bea8 3674 cpus_read_unlock();
b6136773 3675 free_percpu(works);
15316ba8
CL
3676 return 0;
3677}
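
/*
 * Sketch (flush_local_cache() is hypothetical): run a function once on every
 * online CPU and wait for all of them; the -errno return should be checked
 * since the per-CPU work allocation can fail.
 *
 *	static void flush_local_cache(struct work_struct *unused)
 *	{
 *		...			// runs once on each online CPU
 *	}
 *	...
 *	ret = schedule_on_each_cpu(flush_local_cache);
 */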
3678
1fa44eca
JB
3679/**
3680 * execute_in_process_context - reliably execute the routine with user context
3681 * @fn: the function to execute
1fa44eca
JB
3682 * @ew: guaranteed storage for the execute work structure (must
3683 * be available when the work executes)
3684 *
3685 * Executes the function immediately if process context is available,
3686 * otherwise schedules the function for delayed execution.
3687 *
d185af30 3688 * Return: 0 - function was executed
1fa44eca
JB
3689 * 1 - function was scheduled for execution
3690 */
65f27f38 3691int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1fa44eca
JB
3692{
3693 if (!in_interrupt()) {
65f27f38 3694 fn(&ew->work);
1fa44eca
JB
3695 return 0;
3696 }
3697
65f27f38 3698 INIT_WORK(&ew->work, fn);
1fa44eca
JB
3699 schedule_work(&ew->work);
3700
3701 return 1;
3702}
3703EXPORT_SYMBOL_GPL(execute_in_process_context);
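
/*
 * Sketch (release_fn() and release_ew are hypothetical): @ew must stay
 * valid until the function has run, since the deferred path queues
 * &ew->work on the system workqueue.
 *
 *	static struct execute_work release_ew;
 *
 *	execute_in_process_context(release_fn, &release_ew);
 *	// ran release_fn() inline if not in interrupt context,
 *	// otherwise it has been scheduled and will run later
 */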
3704
6ba94429
FW
3705/**
3706 * free_workqueue_attrs - free a workqueue_attrs
3707 * @attrs: workqueue_attrs to free
226223ab 3708 *
6ba94429 3709 * Undo alloc_workqueue_attrs().
226223ab 3710 */
513c98d0 3711void free_workqueue_attrs(struct workqueue_attrs *attrs)
226223ab 3712{
6ba94429
FW
3713 if (attrs) {
3714 free_cpumask_var(attrs->cpumask);
9546b29e 3715 free_cpumask_var(attrs->__pod_cpumask);
6ba94429
FW
3716 kfree(attrs);
3717 }
226223ab
TH
3718}
3719
6ba94429
FW
3720/**
3721 * alloc_workqueue_attrs - allocate a workqueue_attrs
6ba94429
FW
3722 *
3723 * Allocate a new workqueue_attrs, initialize with default settings and
3724 * return it.
3725 *
3726 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3727 */
513c98d0 3728struct workqueue_attrs *alloc_workqueue_attrs(void)
226223ab 3729{
6ba94429 3730 struct workqueue_attrs *attrs;
226223ab 3731
be69d00d 3732 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
6ba94429
FW
3733 if (!attrs)
3734 goto fail;
be69d00d 3735 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
6ba94429 3736 goto fail;
9546b29e
TH
3737 if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
3738 goto fail;
6ba94429
FW
3739
3740 cpumask_copy(attrs->cpumask, cpu_possible_mask);
523a301e 3741 attrs->affn_scope = WQ_AFFN_DFL;
6ba94429
FW
3742 return attrs;
3743fail:
3744 free_workqueue_attrs(attrs);
3745 return NULL;
226223ab
TH
3746}
3747
6ba94429
FW
3748static void copy_workqueue_attrs(struct workqueue_attrs *to,
3749 const struct workqueue_attrs *from)
226223ab 3750{
6ba94429
FW
3751 to->nice = from->nice;
3752 cpumask_copy(to->cpumask, from->cpumask);
9546b29e 3753 cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
8639eceb 3754 to->affn_strict = from->affn_strict;
84193c07 3755
6ba94429 3756 /*
84193c07
TH
3757 * Unlike hash and equality test, copying shouldn't ignore wq-only
3758 * fields as copying is used for both pool and wq attrs. Instead,
3759 * get_unbound_pool() explicitly clears the fields.
6ba94429 3760 */
84193c07 3761 to->affn_scope = from->affn_scope;
af73f5c9 3762 to->ordered = from->ordered;
226223ab
TH
3763}
3764
5de7a03c
TH
3765/*
3766 * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
3767 * comments in 'struct workqueue_attrs' definition.
3768 */
3769static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
3770{
84193c07 3771 attrs->affn_scope = WQ_AFFN_NR_TYPES;
5de7a03c
TH
3772 attrs->ordered = false;
3773}
3774
6ba94429
FW
3775/* hash value of the content of @attr */
3776static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
226223ab 3777{
6ba94429 3778 u32 hash = 0;
226223ab 3779
6ba94429
FW
3780 hash = jhash_1word(attrs->nice, hash);
3781 hash = jhash(cpumask_bits(attrs->cpumask),
3782 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
9546b29e
TH
3783 hash = jhash(cpumask_bits(attrs->__pod_cpumask),
3784 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
8639eceb 3785 hash = jhash_1word(attrs->affn_strict, hash);
6ba94429 3786 return hash;
226223ab 3787}
226223ab 3788
6ba94429
FW
3789/* content equality test */
3790static bool wqattrs_equal(const struct workqueue_attrs *a,
3791 const struct workqueue_attrs *b)
226223ab 3792{
6ba94429
FW
3793 if (a->nice != b->nice)
3794 return false;
3795 if (!cpumask_equal(a->cpumask, b->cpumask))
3796 return false;
9546b29e
TH
3797 if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
3798 return false;
8639eceb
TH
3799 if (a->affn_strict != b->affn_strict)
3800 return false;
6ba94429 3801 return true;
226223ab
TH
3802}
3803
0f36ee24
TH
3804/* Update @attrs with actually available CPUs */
3805static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
3806 const cpumask_t *unbound_cpumask)
3807{
3808 /*
3809 * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
3810 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
3811 * @unbound_cpumask.
3812 */
3813 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
3814 if (unlikely(cpumask_empty(attrs->cpumask)))
3815 cpumask_copy(attrs->cpumask, unbound_cpumask);
3816}
3817
84193c07
TH
3818/* find wq_pod_type to use for @attrs */
3819static const struct wq_pod_type *
3820wqattrs_pod_type(const struct workqueue_attrs *attrs)
3821{
523a301e
TH
3822 enum wq_affn_scope scope;
3823 struct wq_pod_type *pt;
3824
3825 /* to synchronize access to wq_affn_dfl */
3826 lockdep_assert_held(&wq_pool_mutex);
3827
3828 if (attrs->affn_scope == WQ_AFFN_DFL)
3829 scope = wq_affn_dfl;
3830 else
3831 scope = attrs->affn_scope;
3832
3833 pt = &wq_pod_types[scope];
84193c07
TH
3834
3835 if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
3836 likely(pt->nr_pods))
3837 return pt;
3838
3839 /*
3840 * Before workqueue_init_topology(), only SYSTEM is available which is
3841 * initialized in workqueue_init_early().
3842 */
3843 pt = &wq_pod_types[WQ_AFFN_SYSTEM];
3844 BUG_ON(!pt->nr_pods);
3845 return pt;
3846}
3847
6ba94429
FW
3848/**
3849 * init_worker_pool - initialize a newly zalloc'd worker_pool
3850 * @pool: worker_pool to initialize
3851 *
402dd89d 3852 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
6ba94429
FW
3853 *
3854 * Return: 0 on success, -errno on failure. Even on failure, all fields
3855 * inside @pool proper are initialized and put_unbound_pool() can be called
3856 * on @pool safely to release it.
3857 */
3858static int init_worker_pool(struct worker_pool *pool)
226223ab 3859{
a9b8a985 3860 raw_spin_lock_init(&pool->lock);
6ba94429
FW
3861 pool->id = -1;
3862 pool->cpu = -1;
3863 pool->node = NUMA_NO_NODE;
3864 pool->flags |= POOL_DISASSOCIATED;
82607adc 3865 pool->watchdog_ts = jiffies;
6ba94429
FW
3866 INIT_LIST_HEAD(&pool->worklist);
3867 INIT_LIST_HEAD(&pool->idle_list);
3868 hash_init(pool->busy_hash);
226223ab 3869
32a6c723 3870 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3f959aa3 3871 INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
226223ab 3872
32a6c723 3873 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
226223ab 3874
6ba94429 3875 INIT_LIST_HEAD(&pool->workers);
e02b9312 3876 INIT_LIST_HEAD(&pool->dying_workers);
226223ab 3877
6ba94429
FW
3878 ida_init(&pool->worker_ida);
3879 INIT_HLIST_NODE(&pool->hash_node);
3880 pool->refcnt = 1;
226223ab 3881
6ba94429 3882 /* shouldn't fail above this point */
be69d00d 3883 pool->attrs = alloc_workqueue_attrs();
6ba94429
FW
3884 if (!pool->attrs)
3885 return -ENOMEM;
5de7a03c
TH
3886
3887 wqattrs_clear_for_pool(pool->attrs);
3888
6ba94429 3889 return 0;
226223ab
TH
3890}
3891
669de8bd
BVA
3892#ifdef CONFIG_LOCKDEP
3893static void wq_init_lockdep(struct workqueue_struct *wq)
3894{
3895 char *lock_name;
3896
3897 lockdep_register_key(&wq->key);
3898 lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3899 if (!lock_name)
3900 lock_name = wq->name;
69a106c0
QC
3901
3902 wq->lock_name = lock_name;
669de8bd
BVA
3903 lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3904}
3905
3906static void wq_unregister_lockdep(struct workqueue_struct *wq)
3907{
3908 lockdep_unregister_key(&wq->key);
3909}
3910
3911static void wq_free_lockdep(struct workqueue_struct *wq)
3912{
3913 if (wq->lock_name != wq->name)
3914 kfree(wq->lock_name);
3915}
3916#else
3917static void wq_init_lockdep(struct workqueue_struct *wq)
3918{
3919}
3920
3921static void wq_unregister_lockdep(struct workqueue_struct *wq)
3922{
3923}
3924
3925static void wq_free_lockdep(struct workqueue_struct *wq)
3926{
3927}
3928#endif
3929
6ba94429 3930static void rcu_free_wq(struct rcu_head *rcu)
226223ab 3931{
6ba94429
FW
3932 struct workqueue_struct *wq =
3933 container_of(rcu, struct workqueue_struct, rcu);
226223ab 3934
669de8bd 3935 wq_free_lockdep(wq);
636b927e
TH
3936 free_percpu(wq->cpu_pwq);
3937 free_workqueue_attrs(wq->unbound_attrs);
6ba94429 3938 kfree(wq);
226223ab
TH
3939}
3940
6ba94429 3941static void rcu_free_pool(struct rcu_head *rcu)
226223ab 3942{
6ba94429 3943 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
226223ab 3944
6ba94429
FW
3945 ida_destroy(&pool->worker_ida);
3946 free_workqueue_attrs(pool->attrs);
3947 kfree(pool);
226223ab
TH
3948}
3949
6ba94429
FW
3950/**
3951 * put_unbound_pool - put a worker_pool
3952 * @pool: worker_pool to put
3953 *
24acfb71 3954 * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
6ba94429
FW
3955 * safe manner. get_unbound_pool() calls this function on its failure path
3956 * and this function should be able to release pools which went through,
3957 * successfully or not, init_worker_pool().
3958 *
3959 * Should be called with wq_pool_mutex held.
3960 */
3961static void put_unbound_pool(struct worker_pool *pool)
226223ab 3962{
6ba94429
FW
3963 DECLARE_COMPLETION_ONSTACK(detach_completion);
3964 struct worker *worker;
9680540c 3965 LIST_HEAD(cull_list);
e02b9312 3966
6ba94429 3967 lockdep_assert_held(&wq_pool_mutex);
226223ab 3968
6ba94429
FW
3969 if (--pool->refcnt)
3970 return;
226223ab 3971
6ba94429
FW
3972 /* sanity checks */
3973 if (WARN_ON(!(pool->cpu < 0)) ||
3974 WARN_ON(!list_empty(&pool->worklist)))
3975 return;
226223ab 3976
6ba94429
FW
3977 /* release id and unhash */
3978 if (pool->id >= 0)
3979 idr_remove(&worker_pool_idr, pool->id);
3980 hash_del(&pool->hash_node);
d55262c4 3981
6ba94429 3982 /*
692b4825
TH
3983 * Become the manager and destroy all workers. This prevents
3984 * @pool's workers from blocking on attach_mutex. We're the last
3985 * manager and @pool gets freed with the flag set.
9ab03be4
VS
3986 *
3987 * Having a concurrent manager is quite unlikely to happen as we can
3988 * only get here with
3989 * pwq->refcnt == pool->refcnt == 0
3990 * which implies no work queued to the pool, which implies no worker can
3991 * become the manager. However a worker could have taken the role of
3992 * manager before the refcnts dropped to 0, since maybe_create_worker()
3993 * drops pool->lock
6ba94429 3994 */
9ab03be4
VS
3995 while (true) {
3996 rcuwait_wait_event(&manager_wait,
3997 !(pool->flags & POOL_MANAGER_ACTIVE),
3998 TASK_UNINTERRUPTIBLE);
e02b9312
VS
3999
4000 mutex_lock(&wq_pool_attach_mutex);
9ab03be4
VS
4001 raw_spin_lock_irq(&pool->lock);
4002 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4003 pool->flags |= POOL_MANAGER_ACTIVE;
4004 break;
4005 }
4006 raw_spin_unlock_irq(&pool->lock);
e02b9312 4007 mutex_unlock(&wq_pool_attach_mutex);
9ab03be4 4008 }
692b4825 4009
6ba94429 4010 while ((worker = first_idle_worker(pool)))
e02b9312 4011 set_worker_dying(worker, &cull_list);
6ba94429 4012 WARN_ON(pool->nr_workers || pool->nr_idle);
a9b8a985 4013 raw_spin_unlock_irq(&pool->lock);
d55262c4 4014
e02b9312
VS
4015 wake_dying_workers(&cull_list);
4016
4017 if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
6ba94429 4018 pool->detach_completion = &detach_completion;
1258fae7 4019 mutex_unlock(&wq_pool_attach_mutex);
226223ab 4020
6ba94429
FW
4021 if (pool->detach_completion)
4022 wait_for_completion(pool->detach_completion);
226223ab 4023
6ba94429
FW
4024 /* shut down the timers */
4025 del_timer_sync(&pool->idle_timer);
3f959aa3 4026 cancel_work_sync(&pool->idle_cull_work);
6ba94429 4027 del_timer_sync(&pool->mayday_timer);
226223ab 4028
24acfb71 4029 /* RCU protected to allow dereferences from get_work_pool() */
25b00775 4030 call_rcu(&pool->rcu, rcu_free_pool);
226223ab
TH
4031}
4032
4033/**
6ba94429
FW
4034 * get_unbound_pool - get a worker_pool with the specified attributes
4035 * @attrs: the attributes of the worker_pool to get
226223ab 4036 *
6ba94429
FW
4037 * Obtain a worker_pool which has the same attributes as @attrs, bump the
4038 * reference count and return it. If there already is a matching
4039 * worker_pool, it will be used; otherwise, this function attempts to
4040 * create a new one.
226223ab 4041 *
6ba94429 4042 * Should be called with wq_pool_mutex held.
226223ab 4043 *
6ba94429
FW
4044 * Return: On success, a worker_pool with the same attributes as @attrs.
4045 * On failure, %NULL.
226223ab 4046 */
6ba94429 4047static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
226223ab 4048{
84193c07 4049 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
6ba94429
FW
4050 u32 hash = wqattrs_hash(attrs);
4051 struct worker_pool *pool;
84193c07 4052 int pod, node = NUMA_NO_NODE;
226223ab 4053
6ba94429 4054 lockdep_assert_held(&wq_pool_mutex);
226223ab 4055
6ba94429
FW
4056 /* do we already have a matching pool? */
4057 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4058 if (wqattrs_equal(pool->attrs, attrs)) {
4059 pool->refcnt++;
4060 return pool;
4061 }
4062 }
226223ab 4063
9546b29e 4064 /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
84193c07 4065 for (pod = 0; pod < pt->nr_pods; pod++) {
9546b29e 4066 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
84193c07
TH
4067 node = pt->pod_node[pod];
4068 break;
e2273584
XP
4069 }
4070 }
4071
6ba94429 4072 /* nope, create a new one */
84193c07 4073 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
6ba94429
FW
4074 if (!pool || init_worker_pool(pool) < 0)
4075 goto fail;
4076
84193c07 4077 pool->node = node;
5de7a03c
TH
4078 copy_workqueue_attrs(pool->attrs, attrs);
4079 wqattrs_clear_for_pool(pool->attrs);
226223ab 4080
6ba94429
FW
4081 if (worker_pool_assign_id(pool) < 0)
4082 goto fail;
226223ab 4083
6ba94429 4084 /* create and start the initial worker */
3347fa09 4085 if (wq_online && !create_worker(pool))
6ba94429 4086 goto fail;
226223ab 4087
6ba94429
FW
4088 /* install */
4089 hash_add(unbound_pool_hash, &pool->hash_node, hash);
226223ab 4090
6ba94429
FW
4091 return pool;
4092fail:
4093 if (pool)
4094 put_unbound_pool(pool);
4095 return NULL;
226223ab 4096}
226223ab 4097
6ba94429 4098static void rcu_free_pwq(struct rcu_head *rcu)
7a4e344c 4099{
6ba94429
FW
4100 kmem_cache_free(pwq_cache,
4101 container_of(rcu, struct pool_workqueue, rcu));
7a4e344c
TH
4102}
4103
6ba94429 4104/*
967b494e
TH
4105 * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
4106 * refcnt and needs to be destroyed.
7a4e344c 4107 */
687a9aa5 4108static void pwq_release_workfn(struct kthread_work *work)
7a4e344c 4109{
6ba94429 4110 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
687a9aa5 4111 release_work);
6ba94429
FW
4112 struct workqueue_struct *wq = pwq->wq;
4113 struct worker_pool *pool = pwq->pool;
b42b0bdd 4114 bool is_last = false;
7a4e344c 4115
b42b0bdd 4116 /*
687a9aa5 4117 * When @pwq is not linked, it doesn't hold any reference to the
b42b0bdd
YY
4118 * @wq, and @wq is invalid to access.
4119 */
4120 if (!list_empty(&pwq->pwqs_node)) {
b42b0bdd
YY
4121 mutex_lock(&wq->mutex);
4122 list_del_rcu(&pwq->pwqs_node);
4123 is_last = list_empty(&wq->pwqs);
4124 mutex_unlock(&wq->mutex);
4125 }
6ba94429 4126
687a9aa5
TH
4127 if (wq->flags & WQ_UNBOUND) {
4128 mutex_lock(&wq_pool_mutex);
4129 put_unbound_pool(pool);
4130 mutex_unlock(&wq_pool_mutex);
4131 }
6ba94429 4132
25b00775 4133 call_rcu(&pwq->rcu, rcu_free_pwq);
7a4e344c 4134
2865a8fb 4135 /*
6ba94429
FW
4136 * If we're the last pwq going away, @wq is already dead and no one
4137 * is gonna access it anymore. Schedule RCU free.
2865a8fb 4138 */
669de8bd
BVA
4139 if (is_last) {
4140 wq_unregister_lockdep(wq);
25b00775 4141 call_rcu(&wq->rcu, rcu_free_wq);
669de8bd 4142 }
29c91e99
TH
4143}
4144
7a4e344c 4145/**
6ba94429
FW
4146 * pwq_adjust_max_active - update a pwq's max_active to the current setting
4147 * @pwq: target pool_workqueue
d185af30 4148 *
6ba94429 4149 * If @pwq isn't freezing, set @pwq->max_active to the associated
f97a4a1a 4150 * workqueue's saved_max_active and activate inactive work items
6ba94429 4151 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
7a4e344c 4152 */
6ba94429 4153static void pwq_adjust_max_active(struct pool_workqueue *pwq)
4e1a1f9a 4154{
6ba94429
FW
4155 struct workqueue_struct *wq = pwq->wq;
4156 bool freezable = wq->flags & WQ_FREEZABLE;
3347fa09 4157 unsigned long flags;
4e1a1f9a 4158
6ba94429
FW
4159 /* for @wq->saved_max_active */
4160 lockdep_assert_held(&wq->mutex);
4e1a1f9a 4161
6ba94429
FW
4162 /* fast exit for non-freezable wqs */
4163 if (!freezable && pwq->max_active == wq->saved_max_active)
4164 return;
7a4e344c 4165
3347fa09 4166 /* this function can be called during early boot w/ irq disabled */
a9b8a985 4167 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
29c91e99 4168
6ba94429
FW
4169 /*
4170 * During [un]freezing, the caller is responsible for ensuring that
4171 * this function is called at least once after @workqueue_freezing
4172 * is updated and visible.
4173 */
4174 if (!freezable || !workqueue_freezing) {
4175 pwq->max_active = wq->saved_max_active;
4e1a1f9a 4176
f97a4a1a 4177 while (!list_empty(&pwq->inactive_works) &&
0219a352 4178 pwq->nr_active < pwq->max_active)
f97a4a1a 4179 pwq_activate_first_inactive(pwq);
e2dca7ad 4180
0219a352 4181 kick_pool(pwq->pool);
6ba94429
FW
4182 } else {
4183 pwq->max_active = 0;
4184 }
e2dca7ad 4185
a9b8a985 4186 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
e2dca7ad
TH
4187}
4188
67dc8325 4189/* initialize newly allocated @pwq which is associated with @wq and @pool */
6ba94429
FW
4190static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
4191 struct worker_pool *pool)
29c91e99 4192{
6ba94429 4193 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
29c91e99 4194
6ba94429
FW
4195 memset(pwq, 0, sizeof(*pwq));
4196
4197 pwq->pool = pool;
4198 pwq->wq = wq;
4199 pwq->flush_color = -1;
4200 pwq->refcnt = 1;
f97a4a1a 4201 INIT_LIST_HEAD(&pwq->inactive_works);
6ba94429
FW
4202 INIT_LIST_HEAD(&pwq->pwqs_node);
4203 INIT_LIST_HEAD(&pwq->mayday_node);
687a9aa5 4204 kthread_init_work(&pwq->release_work, pwq_release_workfn);
29c91e99
TH
4205}
4206
6ba94429
FW
4207/* sync @pwq with the current state of its associated wq and link it */
4208static void link_pwq(struct pool_workqueue *pwq)
29c91e99 4209{
6ba94429 4210 struct workqueue_struct *wq = pwq->wq;
29c91e99 4211
6ba94429 4212 lockdep_assert_held(&wq->mutex);
a892cacc 4213
6ba94429
FW
4214 /* may be called multiple times, ignore if already linked */
4215 if (!list_empty(&pwq->pwqs_node))
29c91e99 4216 return;
29c91e99 4217
6ba94429
FW
4218 /* set the matching work_color */
4219 pwq->work_color = wq->work_color;
29c91e99 4220
6ba94429
FW
4221 /* sync max_active to the current setting */
4222 pwq_adjust_max_active(pwq);
29c91e99 4223
6ba94429
FW
4224 /* link in @pwq */
4225 list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
4226}
29c91e99 4227
6ba94429
FW
4228/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
4229static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
4230 const struct workqueue_attrs *attrs)
4231{
4232 struct worker_pool *pool;
4233 struct pool_workqueue *pwq;
60f5a4bc 4234
6ba94429 4235 lockdep_assert_held(&wq_pool_mutex);
60f5a4bc 4236
6ba94429
FW
4237 pool = get_unbound_pool(attrs);
4238 if (!pool)
4239 return NULL;
60f5a4bc 4240
6ba94429
FW
4241 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
4242 if (!pwq) {
4243 put_unbound_pool(pool);
4244 return NULL;
4245 }
29c91e99 4246
6ba94429
FW
4247 init_pwq(pwq, wq, pool);
4248 return pwq;
4249}
29c91e99 4250
29c91e99 4251/**
fef59c9c 4252 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
042f7df1 4253 * @attrs: the wq_attrs of the default pwq of the target workqueue
84193c07 4254 * @cpu: the target CPU
6ba94429 4255 * @cpu_going_down: if >= 0, the CPU to consider as offline
29c91e99 4256 *
fef59c9c
TH
4257 * Calculate the cpumask a workqueue with @attrs should use on @pod. If
4258 * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
9546b29e 4259 * The result is stored in @attrs->__pod_cpumask.
a892cacc 4260 *
fef59c9c
TH
4261 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
4262 * and @pod has online CPUs requested by @attrs, the returned cpumask is the
4263 * intersection of the possible CPUs of @pod and @attrs->cpumask.
d185af30 4264 *
fef59c9c 4265 * The caller is responsible for ensuring that the cpumask of @pod stays stable.
29c91e99 4266 */
9546b29e
TH
4267static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
4268 int cpu_going_down)
29c91e99 4269{
84193c07
TH
4270 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
4271 int pod = pt->cpu_pod[cpu];
29c91e99 4272
fef59c9c 4273 /* does @pod have any online CPUs @attrs wants? */
9546b29e
TH
4274 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
4275 cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
6ba94429 4276 if (cpu_going_down >= 0)
9546b29e 4277 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
29c91e99 4278
9546b29e
TH
4279 if (cpumask_empty(attrs->__pod_cpumask)) {
4280 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
84193c07
TH
4281 return;
4282 }
4c16bd32 4283
fef59c9c 4284 /* yeap, return possible CPUs in @pod that @attrs wants */
9546b29e 4285 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
1ad0f0a7 4286
9546b29e 4287 if (cpumask_empty(attrs->__pod_cpumask))
1ad0f0a7
MB
4288 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
4289 "possible intersect\n");
4c16bd32
TH
4290}
4291
636b927e
TH
4292/* install @pwq into @wq's cpu_pwq and return the old pwq */
4293static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
4294 int cpu, struct pool_workqueue *pwq)
1befcf30
TH
4295{
4296 struct pool_workqueue *old_pwq;
4297
5b95e1af 4298 lockdep_assert_held(&wq_pool_mutex);
1befcf30
TH
4299 lockdep_assert_held(&wq->mutex);
4300
4301 /* link_pwq() can handle duplicate calls */
4302 link_pwq(pwq);
4303
636b927e
TH
4304 old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4305 rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
1befcf30
TH
4306 return old_pwq;
4307}
4308
2d5f0764
LJ
4309/* context to store the prepared attrs & pwqs before applying */
4310struct apply_wqattrs_ctx {
4311 struct workqueue_struct *wq; /* target workqueue */
4312 struct workqueue_attrs *attrs; /* attrs to apply */
042f7df1 4313 struct list_head list; /* queued for batching commit */
2d5f0764
LJ
4314 struct pool_workqueue *dfl_pwq;
4315 struct pool_workqueue *pwq_tbl[];
4316};
4317
4318/* free the resources after success or abort */
4319static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
4320{
4321 if (ctx) {
636b927e 4322 int cpu;
2d5f0764 4323
636b927e
TH
4324 for_each_possible_cpu(cpu)
4325 put_pwq_unlocked(ctx->pwq_tbl[cpu]);
2d5f0764
LJ
4326 put_pwq_unlocked(ctx->dfl_pwq);
4327
4328 free_workqueue_attrs(ctx->attrs);
4329
4330 kfree(ctx);
4331 }
4332}
4333
4334/* allocate the attrs and pwqs for later installation */
4335static struct apply_wqattrs_ctx *
4336apply_wqattrs_prepare(struct workqueue_struct *wq,
99c621ef
LJ
4337 const struct workqueue_attrs *attrs,
4338 const cpumask_var_t unbound_cpumask)
9e8cd2f5 4339{
2d5f0764 4340 struct apply_wqattrs_ctx *ctx;
9546b29e 4341 struct workqueue_attrs *new_attrs;
636b927e 4342 int cpu;
9e8cd2f5 4343
2d5f0764 4344 lockdep_assert_held(&wq_pool_mutex);
9e8cd2f5 4345
84193c07
TH
4346 if (WARN_ON(attrs->affn_scope < 0 ||
4347 attrs->affn_scope >= WQ_AFFN_NR_TYPES))
4348 return ERR_PTR(-EINVAL);
4349
636b927e 4350 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
8719dcea 4351
be69d00d 4352 new_attrs = alloc_workqueue_attrs();
9546b29e 4353 if (!ctx || !new_attrs)
2d5f0764 4354 goto out_free;
13e2e556 4355
4c16bd32
TH
4356 /*
4357 * If something goes wrong during CPU up/down, we'll fall back to
4358 * the default pwq covering whole @attrs->cpumask. Always create
4359 * it even if we don't use it immediately.
4360 */
0f36ee24
TH
4361 copy_workqueue_attrs(new_attrs, attrs);
4362 wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
9546b29e 4363 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
2d5f0764
LJ
4364 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
4365 if (!ctx->dfl_pwq)
4366 goto out_free;
4c16bd32 4367
636b927e 4368 for_each_possible_cpu(cpu) {
af73f5c9 4369 if (new_attrs->ordered) {
2d5f0764 4370 ctx->dfl_pwq->refcnt++;
636b927e
TH
4371 ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
4372 } else {
9546b29e
TH
4373 wq_calc_pod_cpumask(new_attrs, cpu, -1);
4374 ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
636b927e
TH
4375 if (!ctx->pwq_tbl[cpu])
4376 goto out_free;
4c16bd32
TH
4377 }
4378 }
4379
042f7df1
LJ
4380 /* save the user configured attrs and sanitize it. */
4381 copy_workqueue_attrs(new_attrs, attrs);
4382 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
9546b29e 4383 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
2d5f0764 4384 ctx->attrs = new_attrs;
042f7df1 4385
2d5f0764 4386 ctx->wq = wq;
2d5f0764
LJ
4387 return ctx;
4388
4389out_free:
2d5f0764
LJ
4390 free_workqueue_attrs(new_attrs);
4391 apply_wqattrs_cleanup(ctx);
84193c07 4392 return ERR_PTR(-ENOMEM);
2d5f0764
LJ
4393}
4394
4395/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
4396static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
4397{
636b927e 4398 int cpu;
9e8cd2f5 4399
4c16bd32 4400 /* all pwqs have been created successfully, let's install'em */
2d5f0764 4401 mutex_lock(&ctx->wq->mutex);
a892cacc 4402
2d5f0764 4403 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
4c16bd32
TH
4404
4405 /* save the previous pwq and install the new one */
636b927e
TH
4406 for_each_possible_cpu(cpu)
4407 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
4408 ctx->pwq_tbl[cpu]);
4c16bd32
TH
4409
4410 /* @dfl_pwq might not have been used, ensure it's linked */
2d5f0764
LJ
4411 link_pwq(ctx->dfl_pwq);
4412 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
f147f29e 4413
2d5f0764
LJ
4414 mutex_unlock(&ctx->wq->mutex);
4415}
9e8cd2f5 4416
a0111cf6
LJ
4417static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
4418 const struct workqueue_attrs *attrs)
2d5f0764
LJ
4419{
4420 struct apply_wqattrs_ctx *ctx;
4c16bd32 4421
2d5f0764
LJ
4422 /* only unbound workqueues can change attributes */
4423 if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
4424 return -EINVAL;
13e2e556 4425
2d5f0764 4426 /* creating multiple pwqs breaks ordering guarantee */
0a94efb5
TH
4427 if (!list_empty(&wq->pwqs)) {
4428 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4429 return -EINVAL;
4430
4431 wq->flags &= ~__WQ_ORDERED;
4432 }
2d5f0764 4433
99c621ef 4434 ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
84193c07
TH
4435 if (IS_ERR(ctx))
4436 return PTR_ERR(ctx);
2d5f0764
LJ
4437
4438 /* the ctx has been prepared successfully, let's commit it */
6201171e 4439 apply_wqattrs_commit(ctx);
2d5f0764
LJ
4440 apply_wqattrs_cleanup(ctx);
4441
6201171e 4442 return 0;
9e8cd2f5
TH
4443}
4444
a0111cf6
LJ
4445/**
4446 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4447 * @wq: the target workqueue
4448 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4449 *
fef59c9c
TH
4450 * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
4451 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
4452 * work items are affine to the pod they were issued on. Older pwqs are released as
4453 * in-flight work items finish. Note that a work item which repeatedly requeues
4454 * itself back-to-back will stay on its current pwq.
a0111cf6
LJ
4455 *
4456 * Performs GFP_KERNEL allocations.
4457 *
ffd8bea8 4458 * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
509b3204 4459 *
a0111cf6
LJ
4460 * Return: 0 on success and -errno on failure.
4461 */
513c98d0 4462int apply_workqueue_attrs(struct workqueue_struct *wq,
a0111cf6
LJ
4463 const struct workqueue_attrs *attrs)
4464{
4465 int ret;
4466
509b3204
DJ
4467 lockdep_assert_cpus_held();
4468
4469 mutex_lock(&wq_pool_mutex);
a0111cf6 4470 ret = apply_workqueue_attrs_locked(wq, attrs);
509b3204 4471 mutex_unlock(&wq_pool_mutex);
a0111cf6
LJ
4472
4473 return ret;
4474}
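
/*
 * Usage sketch, assuming an unbound workqueue "crypt_wq" and a caller-chosen
 * "housekeeping_mask" (both hypothetical): restrict where the workqueue's
 * workers may run. CPU hotplug must be read-locked as noted above.
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, housekeeping_mask);
 *		cpus_read_lock();
 *		ret = apply_workqueue_attrs(crypt_wq, attrs);
 *		cpus_read_unlock();
 *		free_workqueue_attrs(attrs);
 *	}
 */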
4475
4c16bd32 4476/**
fef59c9c 4477 * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
4c16bd32 4478 * @wq: the target workqueue
4cbfd3de
TH
4479 * @cpu: the CPU to update pool association for
4480 * @hotplug_cpu: the CPU coming up or going down
4c16bd32
TH
4481 * @online: whether @cpu is coming up or going down
4482 *
4483 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
fef59c9c 4484 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
4c16bd32
TH
4485 * @wq accordingly.
4486 *
fef59c9c
TH
4487 *
4488 * If pod affinity can't be adjusted due to memory allocation failure, it falls
4489 * back to @wq->dfl_pwq which may not be optimal but is always correct.
4490 *
4491 * Note that when the last allowed CPU of a pod goes offline for a workqueue
4492 * with a cpumask spanning multiple pods, the workers which were already
4493 * executing the work items for the workqueue will lose their CPU affinity and
4494 * may execute on any CPU. This is similar to how per-cpu workqueues behave on
4495 * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
4496 * responsibility to flush the work item from CPU_DOWN_PREPARE.
4c16bd32 4497 */
fef59c9c
TH
4498static void wq_update_pod(struct workqueue_struct *wq, int cpu,
4499 int hotplug_cpu, bool online)
4c16bd32 4500{
4cbfd3de 4501 int off_cpu = online ? -1 : hotplug_cpu;
4c16bd32
TH
4502 struct pool_workqueue *old_pwq = NULL, *pwq;
4503 struct workqueue_attrs *target_attrs;
4c16bd32
TH
4504
4505 lockdep_assert_held(&wq_pool_mutex);
4506
84193c07 4507 if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
4c16bd32
TH
4508 return;
4509
4510 /*
4511 * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4512 * Let's use a preallocated one. The following buf is protected by
4513 * CPU hotplug exclusion.
4514 */
fef59c9c 4515 target_attrs = wq_update_pod_attrs_buf;
4c16bd32 4516
4c16bd32 4517 copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
0f36ee24 4518 wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
4c16bd32 4519
636b927e 4520 /* nothing to do if the target cpumask matches the current pwq */
9546b29e 4521 wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
636b927e
TH
4522 pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
4523 lockdep_is_held(&wq_pool_mutex));
9546b29e 4524 if (wqattrs_equal(target_attrs, pwq->pool->attrs))
636b927e 4525 return;
4c16bd32 4526
4c16bd32
TH
4527 /* create a new pwq */
4528 pwq = alloc_unbound_pwq(wq, target_attrs);
4529 if (!pwq) {
fef59c9c 4530 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
2d916033 4531 wq->name);
77f300b1 4532 goto use_dfl_pwq;
4c16bd32
TH
4533 }
4534
f7142ed4 4535 /* Install the new pwq. */
4c16bd32 4536 mutex_lock(&wq->mutex);
636b927e 4537 old_pwq = install_unbound_pwq(wq, cpu, pwq);
4c16bd32
TH
4538 goto out_unlock;
4539
4540use_dfl_pwq:
f7142ed4 4541 mutex_lock(&wq->mutex);
a9b8a985 4542 raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
4c16bd32 4543 get_pwq(wq->dfl_pwq);
a9b8a985 4544 raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
636b927e 4545 old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
4c16bd32
TH
4546out_unlock:
4547 mutex_unlock(&wq->mutex);
4548 put_pwq_unlocked(old_pwq);
4549}
4550
30cdf249 4551static int alloc_and_link_pwqs(struct workqueue_struct *wq)
0f900049 4552{
49e3cf44 4553 bool highpri = wq->flags & WQ_HIGHPRI;
8a2b7538 4554 int cpu, ret;
30cdf249 4555
636b927e
TH
4556 wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
4557 if (!wq->cpu_pwq)
4558 goto enomem;
30cdf249 4559
636b927e 4560 if (!(wq->flags & WQ_UNBOUND)) {
30cdf249 4561 for_each_possible_cpu(cpu) {
687a9aa5 4562 struct pool_workqueue **pwq_p =
ee1ceef7 4563 per_cpu_ptr(wq->cpu_pwq, cpu);
687a9aa5
TH
4564 struct worker_pool *pool =
4565 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
4566
4567 *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
4568 pool->node);
4569 if (!*pwq_p)
4570 goto enomem;
f3421797 4571
687a9aa5 4572 init_pwq(*pwq_p, wq, pool);
f147f29e
TH
4573
4574 mutex_lock(&wq->mutex);
687a9aa5 4575 link_pwq(*pwq_p);
f147f29e 4576 mutex_unlock(&wq->mutex);
30cdf249 4577 }
9e8cd2f5 4578 return 0;
509b3204
DJ
4579 }
4580
ffd8bea8 4581 cpus_read_lock();
509b3204 4582 if (wq->flags & __WQ_ORDERED) {
8a2b7538
TH
4583 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4584 /* there should only be single pwq for ordering guarantee */
4585 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4586 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4587 "ordering guarantee broken for workqueue %s\n", wq->name);
30cdf249 4588 } else {
509b3204 4589 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
30cdf249 4590 }
ffd8bea8 4591 cpus_read_unlock();
509b3204 4592
64344553
Z
4593 /* For unbound pwqs, flushing pwq_release_worker ensures that
4594 * pwq_release_workfn() completes before kfree(wq) is called.
4595 */
4596 if (ret)
4597 kthread_flush_worker(pwq_release_worker);
4598
509b3204 4599 return ret;
687a9aa5
TH
4600
4601enomem:
4602 if (wq->cpu_pwq) {
7b42f401
Z
4603 for_each_possible_cpu(cpu) {
4604 struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
4605
4606 if (pwq)
4607 kmem_cache_free(pwq_cache, pwq);
4608 }
687a9aa5
TH
4609 free_percpu(wq->cpu_pwq);
4610 wq->cpu_pwq = NULL;
4611 }
4612 return -ENOMEM;
0f900049
TH
4613}
4614
f3421797
TH
4615static int wq_clamp_max_active(int max_active, unsigned int flags,
4616 const char *name)
b71ab8c2 4617{
636b927e 4618 if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
044c782c 4619 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
636b927e 4620 max_active, name, 1, WQ_MAX_ACTIVE);
b71ab8c2 4621
636b927e 4622 return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
b71ab8c2
TH
4623}
4624
983c7515
TH
4625/*
4626 * Workqueues which may be used during memory reclaim should have a rescuer
4627 * to guarantee forward progress.
4628 */
4629static int init_rescuer(struct workqueue_struct *wq)
4630{
4631 struct worker *rescuer;
b92b36ea 4632 int ret;
983c7515
TH
4633
4634 if (!(wq->flags & WQ_MEM_RECLAIM))
4635 return 0;
4636
4637 rescuer = alloc_worker(NUMA_NO_NODE);
4c0736a7
PM
4638 if (!rescuer) {
4639 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
4640 wq->name);
983c7515 4641 return -ENOMEM;
4c0736a7 4642 }
983c7515
TH
4643
4644 rescuer->rescue_wq = wq;
b6a46f72 4645 rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
f187b697 4646 if (IS_ERR(rescuer->task)) {
b92b36ea 4647 ret = PTR_ERR(rescuer->task);
4c0736a7
PM
4648 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
4649 wq->name, ERR_PTR(ret));
983c7515 4650 kfree(rescuer);
b92b36ea 4651 return ret;
983c7515
TH
4652 }
4653
4654 wq->rescuer = rescuer;
4655 kthread_bind_mask(rescuer->task, cpu_possible_mask);
4656 wake_up_process(rescuer->task);
4657
4658 return 0;
4659}
4660
a2775bbc 4661__printf(1, 4)
669de8bd
BVA
4662struct workqueue_struct *alloc_workqueue(const char *fmt,
4663 unsigned int flags,
4664 int max_active, ...)
1da177e4 4665{
ecf6881f 4666 va_list args;
1da177e4 4667 struct workqueue_struct *wq;
49e3cf44 4668 struct pool_workqueue *pwq;
b196be89 4669
5c0338c6 4670 /*
fef59c9c
TH
4671 * Unbound && max_active == 1 used to imply ordered, which is no longer
4672 * the case on many machines due to per-pod pools. While
5c0338c6 4673 * alloc_ordered_workqueue() is the right way to create an ordered
fef59c9c 4674 * workqueue, keep the previous behavior to avoid subtle breakages.
5c0338c6
TH
4675 */
4676 if ((flags & WQ_UNBOUND) && max_active == 1)
4677 flags |= __WQ_ORDERED;
4678
cee22a15
VK
4679 /* see the comment above the definition of WQ_POWER_EFFICIENT */
4680 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4681 flags |= WQ_UNBOUND;
4682
ecf6881f 4683 /* allocate wq and format name */
636b927e 4684 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
b196be89 4685 if (!wq)
d2c1d404 4686 return NULL;
b196be89 4687
6029a918 4688 if (flags & WQ_UNBOUND) {
be69d00d 4689 wq->unbound_attrs = alloc_workqueue_attrs();
6029a918
TH
4690 if (!wq->unbound_attrs)
4691 goto err_free_wq;
4692 }
4693
669de8bd 4694 va_start(args, max_active);
ecf6881f 4695 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
b196be89 4696 va_end(args);
1da177e4 4697
d320c038 4698 max_active = max_active ?: WQ_DFL_ACTIVE;
b196be89 4699 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3af24433 4700
b196be89 4701 /* init wq */
97e37d7b 4702 wq->flags = flags;
a0a1a5fd 4703 wq->saved_max_active = max_active;
3c25a55d 4704 mutex_init(&wq->mutex);
112202d9 4705 atomic_set(&wq->nr_pwqs_to_flush, 0);
30cdf249 4706 INIT_LIST_HEAD(&wq->pwqs);
73f53c4a
TH
4707 INIT_LIST_HEAD(&wq->flusher_queue);
4708 INIT_LIST_HEAD(&wq->flusher_overflow);
493a1724 4709 INIT_LIST_HEAD(&wq->maydays);
502ca9d8 4710
669de8bd 4711 wq_init_lockdep(wq);
cce1a165 4712 INIT_LIST_HEAD(&wq->list);
3af24433 4713
30cdf249 4714 if (alloc_and_link_pwqs(wq) < 0)
82efcab3 4715 goto err_unreg_lockdep;
1537663f 4716
40c17f75 4717 if (wq_online && init_rescuer(wq) < 0)
983c7515 4718 goto err_destroy;
3af24433 4719
226223ab
TH
4720 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4721 goto err_destroy;
4722
a0a1a5fd 4723 /*
68e13a67
LJ
4724 * wq_pool_mutex protects global freeze state and workqueues list.
4725 * Grab it, adjust max_active and add the new @wq to workqueues
4726 * list.
a0a1a5fd 4727 */
68e13a67 4728 mutex_lock(&wq_pool_mutex);
a0a1a5fd 4729
a357fc03 4730 mutex_lock(&wq->mutex);
699ce097
TH
4731 for_each_pwq(pwq, wq)
4732 pwq_adjust_max_active(pwq);
a357fc03 4733 mutex_unlock(&wq->mutex);
a0a1a5fd 4734
e2dca7ad 4735 list_add_tail_rcu(&wq->list, &workqueues);
a0a1a5fd 4736
68e13a67 4737 mutex_unlock(&wq_pool_mutex);
1537663f 4738
3af24433 4739 return wq;
d2c1d404 4740
82efcab3 4741err_unreg_lockdep:
009bb421
BVA
4742 wq_unregister_lockdep(wq);
4743 wq_free_lockdep(wq);
82efcab3 4744err_free_wq:
6029a918 4745 free_workqueue_attrs(wq->unbound_attrs);
d2c1d404
TH
4746 kfree(wq);
4747 return NULL;
4748err_destroy:
4749 destroy_workqueue(wq);
4690c4ab 4750 return NULL;
3af24433 4751}
669de8bd 4752EXPORT_SYMBOL_GPL(alloc_workqueue);
1da177e4 4753
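/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical): a
 * typical module allocates its workqueue once at init time:
 *
 *	static struct workqueue_struct *foo_wq;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_wq = alloc_workqueue("foo", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *		if (!foo_wq)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Passing 0 for @max_active selects WQ_DFL_ACTIVE.  Work items are then
 * queued with queue_work(foo_wq, &work) and the queue is released with
 * destroy_workqueue() on the exit path.
 */
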
c29eb853
TH
4754static bool pwq_busy(struct pool_workqueue *pwq)
4755{
4756 int i;
4757
4758 for (i = 0; i < WORK_NR_COLORS; i++)
4759 if (pwq->nr_in_flight[i])
4760 return true;
4761
4762 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
4763 return true;
f97a4a1a 4764 if (pwq->nr_active || !list_empty(&pwq->inactive_works))
c29eb853
TH
4765 return true;
4766
4767 return false;
4768}
4769
3af24433
ON
4770/**
4771 * destroy_workqueue - safely terminate a workqueue
4772 * @wq: target workqueue
4773 *
4774 * Safely destroy a workqueue. All work currently pending will be done first.
4775 */
4776void destroy_workqueue(struct workqueue_struct *wq)
4777{
49e3cf44 4778 struct pool_workqueue *pwq;
636b927e 4779 int cpu;
3af24433 4780
def98c84
TH
4781 /*
4782 * Remove it from sysfs first so that sanity check failure doesn't
4783 * lead to sysfs name conflicts.
4784 */
4785 workqueue_sysfs_unregister(wq);
4786
33e3f0a3
RC
4787 /* mark that workqueue destruction is in progress */
4788 mutex_lock(&wq->mutex);
4789 wq->flags |= __WQ_DESTROYING;
4790 mutex_unlock(&wq->mutex);
4791
9c5a2ba7
TH
4792 /* drain it before proceeding with destruction */
4793 drain_workqueue(wq);
c8efcc25 4794
def98c84
TH
4795 /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
4796 if (wq->rescuer) {
4797 struct worker *rescuer = wq->rescuer;
4798
4799 /* this prevents new queueing */
a9b8a985 4800 raw_spin_lock_irq(&wq_mayday_lock);
def98c84 4801 wq->rescuer = NULL;
a9b8a985 4802 raw_spin_unlock_irq(&wq_mayday_lock);
def98c84
TH
4803
4804 /* rescuer will empty maydays list before exiting */
4805 kthread_stop(rescuer->task);
8efe1223 4806 kfree(rescuer);
def98c84
TH
4807 }
4808
c29eb853
TH
4809 /*
4810 * Sanity checks - grab all the locks so that we wait for all
4811 * in-flight operations which may do put_pwq().
4812 */
4813 mutex_lock(&wq_pool_mutex);
b09f4fd3 4814 mutex_lock(&wq->mutex);
49e3cf44 4815 for_each_pwq(pwq, wq) {
a9b8a985 4816 raw_spin_lock_irq(&pwq->pool->lock);
c29eb853 4817 if (WARN_ON(pwq_busy(pwq))) {
1d9a6159
KW
4818 pr_warn("%s: %s has the following busy pwq\n",
4819 __func__, wq->name);
c29eb853 4820 show_pwq(pwq);
a9b8a985 4821 raw_spin_unlock_irq(&pwq->pool->lock);
b09f4fd3 4822 mutex_unlock(&wq->mutex);
c29eb853 4823 mutex_unlock(&wq_pool_mutex);
55df0933 4824 show_one_workqueue(wq);
6183c009 4825 return;
76af4d93 4826 }
a9b8a985 4827 raw_spin_unlock_irq(&pwq->pool->lock);
6183c009 4828 }
b09f4fd3 4829 mutex_unlock(&wq->mutex);
6183c009 4830
a0a1a5fd
TH
4831 /*
4832 * wq list is used to freeze wq, remove from list after
4833 * flushing is complete in case freeze races us.
4834 */
e2dca7ad 4835 list_del_rcu(&wq->list);
68e13a67 4836 mutex_unlock(&wq_pool_mutex);
3af24433 4837
636b927e
TH
4838 /*
4839 * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
4840 * to put the base refs. @wq will be auto-destroyed from the last
4841 * pwq_put. RCU read lock prevents @wq from going away from under us.
4842 */
4843 rcu_read_lock();
4c16bd32 4844
636b927e
TH
4845 for_each_possible_cpu(cpu) {
4846 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
4847 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
dce90d47 4848 put_pwq_unlocked(pwq);
29c91e99 4849 }
636b927e
TH
4850
4851 put_pwq_unlocked(wq->dfl_pwq);
4852 wq->dfl_pwq = NULL;
4853
4854 rcu_read_unlock();
3af24433
ON
4855}
4856EXPORT_SYMBOL_GPL(destroy_workqueue);
4857
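/*
 * Usage sketch (illustrative only; struct foo and its members are
 * hypothetical): since destroy_workqueue() drains @wq first, callers must
 * stop everything that keeps requeueing work before calling it:
 *
 *	static void foo_teardown(struct foo *foo)
 *	{
 *		foo->stopping = true;			/* no more rearming */
 *		cancel_delayed_work_sync(&foo->dwork);
 *		destroy_workqueue(foo->wq);
 *	}
 *
 * A work item that keeps requeueing itself would otherwise make the
 * implicit drain_workqueue() spin and complain.
 */
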
dcd989cb
TH
4858/**
4859 * workqueue_set_max_active - adjust max_active of a workqueue
4860 * @wq: target workqueue
4861 * @max_active: new max_active value.
4862 *
4863 * Set max_active of @wq to @max_active.
4864 *
4865 * CONTEXT:
4866 * Don't call from IRQ context.
4867 */
4868void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4869{
49e3cf44 4870 struct pool_workqueue *pwq;
dcd989cb 4871
8719dcea 4872 /* disallow meddling with max_active for ordered workqueues */
0a94efb5 4873 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
8719dcea
TH
4874 return;
4875
f3421797 4876 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
dcd989cb 4877
a357fc03 4878 mutex_lock(&wq->mutex);
dcd989cb 4879
0a94efb5 4880 wq->flags &= ~__WQ_ORDERED;
dcd989cb
TH
4881 wq->saved_max_active = max_active;
4882
699ce097
TH
4883 for_each_pwq(pwq, wq)
4884 pwq_adjust_max_active(pwq);
93981800 4885
a357fc03 4886 mutex_unlock(&wq->mutex);
15316ba8 4887}
dcd989cb 4888EXPORT_SYMBOL_GPL(workqueue_set_max_active);
15316ba8 4889
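/*
 * Usage sketch (illustrative only; foo_wq and nr_devices are
 * hypothetical): a driver that learns its parallelism only after probing
 * can start small and widen the limit later:
 *
 *	foo_wq = alloc_workqueue("foo", 0, 1);
 *	...
 *	workqueue_set_max_active(foo_wq, min_t(int, nr_devices, WQ_MAX_ACTIVE));
 *
 * Ordered workqueues created with alloc_ordered_workqueue() must not be
 * resized; the WARN_ON() above enforces that.
 */
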
27d4ee03
LW
4890/**
4891 * current_work - retrieve %current task's work struct
4892 *
4893 * Determine if %current task is a workqueue worker and what it's working on.
4894 * Useful to find out the context that the %current task is running in.
4895 *
4896 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4897 */
4898struct work_struct *current_work(void)
4899{
4900 struct worker *worker = current_wq_worker();
4901
4902 return worker ? worker->current_work : NULL;
4903}
4904EXPORT_SYMBOL(current_work);
4905
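/*
 * Usage sketch (illustrative only; struct foo is hypothetical): a helper
 * can tell whether it was invoked from its own work item, e.g. to avoid
 * flushing the work it is currently running from:
 *
 *	static bool foo_called_from_own_work(struct foo *foo)
 *	{
 *		return current_work() == &foo->work;
 *	}
 */
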
e6267616
TH
4906/**
4907 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4908 *
4909 * Determine whether %current is a workqueue rescuer. Can be used from
4910 * work functions to determine whether it's being run off the rescuer task.
d185af30
YB
4911 *
4912 * Return: %true if %current is a workqueue rescuer. %false otherwise.
e6267616
TH
4913 */
4914bool current_is_workqueue_rescuer(void)
4915{
4916 struct worker *worker = current_wq_worker();
4917
6a092dfd 4918 return worker && worker->rescue_wq;
e6267616
TH
4919}
4920
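/*
 * Usage sketch (illustrative only; the GFP policy shown is just one
 * possibility): when running off the rescuer the work item is part of a
 * memory reclaim path, so it may want to avoid optional blocking
 * allocations:
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		gfp_t gfp = current_is_workqueue_rescuer() ?
 *			    GFP_NOWAIT : GFP_KERNEL;
 *		...
 *	}
 */
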
eef6a7d5 4921/**
dcd989cb
TH
4922 * workqueue_congested - test whether a workqueue is congested
4923 * @cpu: CPU in question
4924 * @wq: target workqueue
eef6a7d5 4925 *
dcd989cb
TH
4926 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4927 * no synchronization around this function and the test result is
4928 * unreliable and only useful as advisory hints or for debugging.
eef6a7d5 4929 *
d3251859 4930 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
636b927e
TH
4931 *
4932 * With the exception of ordered workqueues, all workqueues have per-cpu
4933 * pool_workqueues, each with its own congested state. A workqueue being
4934 * congested on one CPU doesn't mean that the workqueue is congested on any
4935 * other CPUs.
d3251859 4936 *
d185af30 4937 * Return:
dcd989cb 4938 * %true if congested, %false otherwise.
eef6a7d5 4939 */
d84ff051 4940bool workqueue_congested(int cpu, struct workqueue_struct *wq)
1da177e4 4941{
7fb98ea7 4942 struct pool_workqueue *pwq;
76af4d93
TH
4943 bool ret;
4944
24acfb71
TG
4945 rcu_read_lock();
4946 preempt_disable();
7fb98ea7 4947
d3251859
TH
4948 if (cpu == WORK_CPU_UNBOUND)
4949 cpu = smp_processor_id();
4950
636b927e 4951 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
f97a4a1a 4952 ret = !list_empty(&pwq->inactive_works);
636b927e 4953
24acfb71
TG
4954 preempt_enable();
4955 rcu_read_unlock();
76af4d93
TH
4956
4957 return ret;
1da177e4 4958}
dcd989cb 4959EXPORT_SYMBOL_GPL(workqueue_congested);
1da177e4 4960
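/*
 * Usage sketch (illustrative only; foo_wq and the request handling are
 * hypothetical): since the result is only advisory, it is best used to
 * shed optional load rather than for correctness decisions:
 *
 *	if (workqueue_congested(WORK_CPU_UNBOUND, foo_wq))
 *		foo_drop_optional_request(req);
 *	else
 *		queue_work(foo_wq, &req->work);
 */
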
dcd989cb
TH
4961/**
4962 * work_busy - test whether a work is currently pending or running
4963 * @work: the work to be tested
4964 *
4965 * Test whether @work is currently pending or running. There is no
4966 * synchronization around this function and the test result is
4967 * unreliable and only useful as advisory hints or for debugging.
dcd989cb 4968 *
d185af30 4969 * Return:
dcd989cb
TH
4970 * OR'd bitmask of WORK_BUSY_* bits.
4971 */
4972unsigned int work_busy(struct work_struct *work)
1da177e4 4973{
fa1b54e6 4974 struct worker_pool *pool;
dcd989cb
TH
4975 unsigned long flags;
4976 unsigned int ret = 0;
1da177e4 4977
dcd989cb
TH
4978 if (work_pending(work))
4979 ret |= WORK_BUSY_PENDING;
1da177e4 4980
24acfb71 4981 rcu_read_lock();
fa1b54e6 4982 pool = get_work_pool(work);
038366c5 4983 if (pool) {
a9b8a985 4984 raw_spin_lock_irqsave(&pool->lock, flags);
038366c5
LJ
4985 if (find_worker_executing_work(pool, work))
4986 ret |= WORK_BUSY_RUNNING;
a9b8a985 4987 raw_spin_unlock_irqrestore(&pool->lock, flags);
038366c5 4988 }
24acfb71 4989 rcu_read_unlock();
1da177e4 4990
dcd989cb 4991 return ret;
1da177e4 4992}
dcd989cb 4993EXPORT_SYMBOL_GPL(work_busy);
1da177e4 4994
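/*
 * Usage sketch (illustrative only; foo is hypothetical): the result is
 * only suitable for debug output, e.g.:
 *
 *	unsigned int busy = work_busy(&foo->work);
 *
 *	pr_debug("foo work:%s%s\n",
 *		 (busy & WORK_BUSY_PENDING) ? " pending" : "",
 *		 (busy & WORK_BUSY_RUNNING) ? " running" : "");
 */
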
3d1cb205
TH
4995/**
4996 * set_worker_desc - set description for the current work item
4997 * @fmt: printf-style format string
4998 * @...: arguments for the format string
4999 *
5000 * This function can be called by a running work function to describe what
5001 * the work item is about. If the worker task gets dumped, this
5002 * information will be printed out together to help debugging. The
5003 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
5004 */
5005void set_worker_desc(const char *fmt, ...)
5006{
5007 struct worker *worker = current_wq_worker();
5008 va_list args;
5009
5010 if (worker) {
5011 va_start(args, fmt);
5012 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
5013 va_end(args);
3d1cb205
TH
5014 }
5015}
5c750d58 5016EXPORT_SYMBOL_GPL(set_worker_desc);
3d1cb205
TH
5017
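/*
 * Usage sketch (illustrative only; struct foo_dev is hypothetical): a work
 * function servicing per-device requests can tag the worker so that a
 * dump of a stuck worker identifies the device:
 *
 *	static void foo_dev_work_fn(struct work_struct *work)
 *	{
 *		struct foo_dev *fdev = container_of(work, struct foo_dev, work);
 *
 *		set_worker_desc("foo %s", dev_name(&fdev->dev));
 *		...
 *	}
 */
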
5018/**
5019 * print_worker_info - print out worker information and description
5020 * @log_lvl: the log level to use when printing
5021 * @task: target task
5022 *
5023 * If @task is a worker and currently executing a work item, print out the
5024 * name of the workqueue being serviced and worker description set with
5025 * set_worker_desc() by the currently executing work item.
5026 *
5027 * This function can be safely called on any task as long as the
5028 * task_struct itself is accessible. While safe, this function isn't
5029 * synchronized and may print out mixed-up or garbled output of limited length.
5030 */
5031void print_worker_info(const char *log_lvl, struct task_struct *task)
5032{
5033 work_func_t *fn = NULL;
5034 char name[WQ_NAME_LEN] = { };
5035 char desc[WORKER_DESC_LEN] = { };
5036 struct pool_workqueue *pwq = NULL;
5037 struct workqueue_struct *wq = NULL;
3d1cb205
TH
5038 struct worker *worker;
5039
5040 if (!(task->flags & PF_WQ_WORKER))
5041 return;
5042
5043 /*
5044 * This function is called without any synchronization and @task
5045 * could be in any state. Be careful with dereferences.
5046 */
e700591a 5047 worker = kthread_probe_data(task);
3d1cb205
TH
5048
5049 /*
8bf89593
TH
5050 * Carefully copy the associated workqueue's workfn, name and desc.
5051 * Keep the original last '\0' in case the original is garbage.
3d1cb205 5052 */
fe557319
CH
5053 copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
5054 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
5055 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
5056 copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
5057 copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
3d1cb205
TH
5058
5059 if (fn || name[0] || desc[0]) {
d75f773c 5060 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
8bf89593 5061 if (strcmp(name, desc))
3d1cb205
TH
5062 pr_cont(" (%s)", desc);
5063 pr_cont("\n");
5064 }
5065}
5066
3494fc30
TH
5067static void pr_cont_pool_info(struct worker_pool *pool)
5068{
5069 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
5070 if (pool->node != NUMA_NO_NODE)
5071 pr_cont(" node=%d", pool->node);
5072 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
5073}
5074
c76feb0d
PM
5075struct pr_cont_work_struct {
5076 bool comma;
5077 work_func_t func;
5078 long ctr;
5079};
5080
5081static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
5082{
5083 if (!pcwsp->ctr)
5084 goto out_record;
5085 if (func == pcwsp->func) {
5086 pcwsp->ctr++;
5087 return;
5088 }
5089 if (pcwsp->ctr == 1)
5090 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
5091 else
5092 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
5093 pcwsp->ctr = 0;
5094out_record:
5095 if ((long)func == -1L)
5096 return;
5097 pcwsp->comma = comma;
5098 pcwsp->func = func;
5099 pcwsp->ctr = 1;
5100}
5101
5102static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
3494fc30
TH
5103{
5104 if (work->func == wq_barrier_func) {
5105 struct wq_barrier *barr;
5106
5107 barr = container_of(work, struct wq_barrier, work);
5108
c76feb0d 5109 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
3494fc30
TH
5110 pr_cont("%s BAR(%d)", comma ? "," : "",
5111 task_pid_nr(barr->task));
5112 } else {
c76feb0d
PM
5113 if (!comma)
5114 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
5115 pr_cont_work_flush(comma, work->func, pcwsp);
3494fc30
TH
5116 }
5117}
5118
5119static void show_pwq(struct pool_workqueue *pwq)
5120{
c76feb0d 5121 struct pr_cont_work_struct pcws = { .ctr = 0, };
3494fc30
TH
5122 struct worker_pool *pool = pwq->pool;
5123 struct work_struct *work;
5124 struct worker *worker;
5125 bool has_in_flight = false, has_pending = false;
5126 int bkt;
5127
5128 pr_info(" pwq %d:", pool->id);
5129 pr_cont_pool_info(pool);
5130
e66b39af
TH
5131 pr_cont(" active=%d/%d refcnt=%d%s\n",
5132 pwq->nr_active, pwq->max_active, pwq->refcnt,
3494fc30
TH
5133 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
5134
5135 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5136 if (worker->current_pwq == pwq) {
5137 has_in_flight = true;
5138 break;
5139 }
5140 }
5141 if (has_in_flight) {
5142 bool comma = false;
5143
5144 pr_info(" in-flight:");
5145 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
5146 if (worker->current_pwq != pwq)
5147 continue;
5148
d75f773c 5149 pr_cont("%s %d%s:%ps", comma ? "," : "",
3494fc30 5150 task_pid_nr(worker->task),
30ae2fc0 5151 worker->rescue_wq ? "(RESCUER)" : "",
3494fc30
TH
5152 worker->current_func);
5153 list_for_each_entry(work, &worker->scheduled, entry)
c76feb0d
PM
5154 pr_cont_work(false, work, &pcws);
5155 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5156 comma = true;
5157 }
5158 pr_cont("\n");
5159 }
5160
5161 list_for_each_entry(work, &pool->worklist, entry) {
5162 if (get_work_pwq(work) == pwq) {
5163 has_pending = true;
5164 break;
5165 }
5166 }
5167 if (has_pending) {
5168 bool comma = false;
5169
5170 pr_info(" pending:");
5171 list_for_each_entry(work, &pool->worklist, entry) {
5172 if (get_work_pwq(work) != pwq)
5173 continue;
5174
c76feb0d 5175 pr_cont_work(comma, work, &pcws);
3494fc30
TH
5176 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5177 }
c76feb0d 5178 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5179 pr_cont("\n");
5180 }
5181
f97a4a1a 5182 if (!list_empty(&pwq->inactive_works)) {
3494fc30
TH
5183 bool comma = false;
5184
f97a4a1a
LJ
5185 pr_info(" inactive:");
5186 list_for_each_entry(work, &pwq->inactive_works, entry) {
c76feb0d 5187 pr_cont_work(comma, work, &pcws);
3494fc30
TH
5188 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
5189 }
c76feb0d 5190 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
3494fc30
TH
5191 pr_cont("\n");
5192 }
5193}
5194
5195/**
55df0933
IK
5196 * show_one_workqueue - dump state of specified workqueue
5197 * @wq: workqueue whose state will be printed
3494fc30 5198 */
55df0933 5199void show_one_workqueue(struct workqueue_struct *wq)
3494fc30 5200{
55df0933
IK
5201 struct pool_workqueue *pwq;
5202 bool idle = true;
3494fc30 5203 unsigned long flags;
3494fc30 5204
55df0933
IK
5205 for_each_pwq(pwq, wq) {
5206 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
5207 idle = false;
5208 break;
3494fc30 5209 }
55df0933
IK
5210 }
5211 if (idle) /* Nothing to print for idle workqueue */
5212 return;
3494fc30 5213
55df0933 5214 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
3494fc30 5215
55df0933
IK
5216 for_each_pwq(pwq, wq) {
5217 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
5218 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
62635ea8 5219 /*
55df0933
IK
5220 * Defer printing to avoid deadlocks in console
5221 * drivers that queue work while holding locks
5222 * also taken in their write paths.
62635ea8 5223 */
55df0933
IK
5224 printk_deferred_enter();
5225 show_pwq(pwq);
5226 printk_deferred_exit();
3494fc30 5227 }
55df0933 5228 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
62635ea8
SS
5229 /*
5230 * We could be printing a lot from atomic context, e.g.
55df0933 5231 * sysrq-t -> show_all_workqueues(). Avoid triggering
62635ea8
SS
5232 * hard lockup.
5233 */
5234 touch_nmi_watchdog();
3494fc30
TH
5235 }
5236
55df0933
IK
5237}
5238
5239/**
5240 * show_one_worker_pool - dump state of specified worker pool
5241 * @pool: worker pool whose state will be printed
5242 */
5243static void show_one_worker_pool(struct worker_pool *pool)
5244{
5245 struct worker *worker;
5246 bool first = true;
5247 unsigned long flags;
335a42eb 5248 unsigned long hung = 0;
55df0933
IK
5249
5250 raw_spin_lock_irqsave(&pool->lock, flags);
5251 if (pool->nr_workers == pool->nr_idle)
5252 goto next_pool;
335a42eb
PM
5253
5254 /* How long the first pending work is waiting for a worker. */
5255 if (!list_empty(&pool->worklist))
5256 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
5257
55df0933
IK
5258 /*
5259 * Defer printing to avoid deadlocks in console drivers that
5260 * queue work while holding locks also taken in their write
5261 * paths.
5262 */
5263 printk_deferred_enter();
5264 pr_info("pool %d:", pool->id);
5265 pr_cont_pool_info(pool);
335a42eb 5266 pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
55df0933
IK
5267 if (pool->manager)
5268 pr_cont(" manager: %d",
5269 task_pid_nr(pool->manager->task));
5270 list_for_each_entry(worker, &pool->idle_list, entry) {
5271 pr_cont(" %s%d", first ? "idle: " : "",
5272 task_pid_nr(worker->task));
5273 first = false;
5274 }
5275 pr_cont("\n");
5276 printk_deferred_exit();
5277next_pool:
5278 raw_spin_unlock_irqrestore(&pool->lock, flags);
5279 /*
5280 * We could be printing a lot from atomic context, e.g.
5281 * sysrq-t -> show_all_workqueues(). Avoid triggering
5282 * hard lockup.
5283 */
5284 touch_nmi_watchdog();
5285
5286}
5287
5288/**
5289 * show_all_workqueues - dump workqueue state
5290 *
704bc669 5291 * Called from a sysrq handler and prints out all busy workqueues and pools.
55df0933
IK
5292 */
5293void show_all_workqueues(void)
5294{
5295 struct workqueue_struct *wq;
5296 struct worker_pool *pool;
5297 int pi;
5298
5299 rcu_read_lock();
5300
5301 pr_info("Showing busy workqueues and worker pools:\n");
5302
5303 list_for_each_entry_rcu(wq, &workqueues, list)
5304 show_one_workqueue(wq);
5305
5306 for_each_pool(pool, pi)
5307 show_one_worker_pool(pool);
5308
24acfb71 5309 rcu_read_unlock();
3494fc30
TH
5310}
5311
704bc669
JL
5312/**
5313 * show_freezable_workqueues - dump freezable workqueue state
5314 *
5315 * Called from try_to_freeze_tasks() and prints out all freezable workqueues
5316 * still busy.
5317 */
5318void show_freezable_workqueues(void)
5319{
5320 struct workqueue_struct *wq;
5321
5322 rcu_read_lock();
5323
5324 pr_info("Showing freezable workqueues that are still busy:\n");
5325
5326 list_for_each_entry_rcu(wq, &workqueues, list) {
5327 if (!(wq->flags & WQ_FREEZABLE))
5328 continue;
5329 show_one_workqueue(wq);
5330 }
5331
5332 rcu_read_unlock();
5333}
5334
6b59808b
TH
5335/* used to show worker information through /proc/PID/{comm,stat,status} */
5336void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
5337{
6b59808b
TH
5338 int off;
5339
5340 /* always show the actual comm */
5341 off = strscpy(buf, task->comm, size);
5342 if (off < 0)
5343 return;
5344
197f6acc 5345 /* stabilize PF_WQ_WORKER and worker pool association */
6b59808b
TH
5346 mutex_lock(&wq_pool_attach_mutex);
5347
197f6acc
TH
5348 if (task->flags & PF_WQ_WORKER) {
5349 struct worker *worker = kthread_data(task);
5350 struct worker_pool *pool = worker->pool;
6b59808b 5351
197f6acc 5352 if (pool) {
a9b8a985 5353 raw_spin_lock_irq(&pool->lock);
197f6acc
TH
5354 /*
5355 * ->desc tracks information (wq name or
5356 * set_worker_desc()) for the latest execution. If
5357 * current, prepend '+', otherwise '-'.
5358 */
5359 if (worker->desc[0] != '\0') {
5360 if (worker->current_work)
5361 scnprintf(buf + off, size - off, "+%s",
5362 worker->desc);
5363 else
5364 scnprintf(buf + off, size - off, "-%s",
5365 worker->desc);
5366 }
a9b8a985 5367 raw_spin_unlock_irq(&pool->lock);
6b59808b 5368 }
6b59808b
TH
5369 }
5370
5371 mutex_unlock(&wq_pool_attach_mutex);
5372}
5373
66448bc2
MM
5374#ifdef CONFIG_SMP
5375
db7bccf4
TH
5376/*
5377 * CPU hotplug.
5378 *
e22bee78 5379 * There are two challenges in supporting CPU hotplug. Firstly, there
112202d9 5380 * are a lot of assumptions on strong associations among work, pwq and
706026c2 5381 * pool which make migrating pending and scheduled works very
e22bee78 5382 * difficult to implement without impacting hot paths. Secondly,
94cf58bb 5383 * worker pools serve a mix of short, long and very long running works, making
e22bee78
TH
5384 * blocked draining impractical.
5385 *
24647570 5386 * This is solved by allowing the pools to be disassociated from the CPU
628c78e7
TH
5387 * and run as unbound ones, and allowing them to be reattached later if the
5388 * CPU comes back online.
db7bccf4 5389 */
1da177e4 5390
e8b3f8db 5391static void unbind_workers(int cpu)
3af24433 5392{
4ce62e9e 5393 struct worker_pool *pool;
db7bccf4 5394 struct worker *worker;
3af24433 5395
f02ae73a 5396 for_each_cpu_worker_pool(pool, cpu) {
1258fae7 5397 mutex_lock(&wq_pool_attach_mutex);
a9b8a985 5398 raw_spin_lock_irq(&pool->lock);
3af24433 5399
94cf58bb 5400 /*
92f9c5c4 5401 * We've blocked all attach/detach operations. Make all workers
94cf58bb 5402 * unbound and set DISASSOCIATED. Before this, all workers
11b45b0b 5403 * must be on the cpu. After this, they may become diasporas.
b4ac9384
LJ
5404 * And the preemption disabled section in their sched callbacks
5405 * are guaranteed to see WORKER_UNBOUND since the code here
5406 * is on the same cpu.
94cf58bb 5407 */
da028469 5408 for_each_pool_worker(worker, pool)
c9e7cf27 5409 worker->flags |= WORKER_UNBOUND;
06ba38a9 5410
24647570 5411 pool->flags |= POOL_DISASSOCIATED;
f2d5a0ee 5412
eb283428 5413 /*
989442d7
LJ
5414 * The handling of nr_running in sched callbacks is disabled
5415 * now. Zap nr_running. After this, nr_running stays zero and
5416 * need_more_worker() and keep_working() are always true as
5417 * long as the worklist is not empty. This pool now behaves as
5418 * an unbound (in terms of concurrency management) pool which
eb283428
LJ
5419 * is served by workers tied to the pool.
5420 */
bc35f7ef 5421 pool->nr_running = 0;
eb283428
LJ
5422
5423 /*
5424 * With concurrency management just turned off, a busy
5425 * worker blocking could lead to lengthy stalls. Kick off
5426 * unbound chain execution of currently pending work items.
5427 */
0219a352 5428 kick_pool(pool);
989442d7 5429
a9b8a985 5430 raw_spin_unlock_irq(&pool->lock);
989442d7 5431
793777bc
VS
5432 for_each_pool_worker(worker, pool)
5433 unbind_worker(worker);
989442d7
LJ
5434
5435 mutex_unlock(&wq_pool_attach_mutex);
eb283428 5436 }
3af24433 5437}
3af24433 5438
bd7c089e
TH
5439/**
5440 * rebind_workers - rebind all workers of a pool to the associated CPU
5441 * @pool: pool of interest
5442 *
a9ab775b 5443 * @pool->cpu is coming online. Rebind all workers to the CPU.
bd7c089e
TH
5444 */
5445static void rebind_workers(struct worker_pool *pool)
5446{
a9ab775b 5447 struct worker *worker;
bd7c089e 5448
1258fae7 5449 lockdep_assert_held(&wq_pool_attach_mutex);
bd7c089e 5450
a9ab775b
TH
5451 /*
5452 * Restore CPU affinity of all workers. As all idle workers should
5453 * be on the run-queue of the associated CPU before any local
402dd89d 5454 * wake-ups for concurrency management happen, restore CPU affinity
a9ab775b
TH
5455 * of all workers first and then clear UNBOUND. As we're called
5456 * from CPU_ONLINE, the following shouldn't fail.
5457 */
c63a2e52
VS
5458 for_each_pool_worker(worker, pool) {
5459 kthread_set_per_cpu(worker->task, pool->cpu);
5460 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
9546b29e 5461 pool_allowed_cpus(pool)) < 0);
c63a2e52 5462 }
bd7c089e 5463
a9b8a985 5464 raw_spin_lock_irq(&pool->lock);
f7c17d26 5465
3de5e884 5466 pool->flags &= ~POOL_DISASSOCIATED;
bd7c089e 5467
da028469 5468 for_each_pool_worker(worker, pool) {
a9ab775b 5469 unsigned int worker_flags = worker->flags;
bd7c089e 5470
a9ab775b
TH
5471 /*
5472 * We want to clear UNBOUND but can't directly call
5473 * worker_clr_flags() or adjust nr_running. Atomically
5474 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
5475 * @worker will clear REBOUND using worker_clr_flags() when
5476 * it initiates the next execution cycle thus restoring
5477 * concurrency management. Note that when or whether
5478 * @worker clears REBOUND doesn't affect correctness.
5479 *
c95491ed 5480 * WRITE_ONCE() is necessary because @worker->flags may be
a9ab775b 5481 * tested without holding any lock in
6d25be57 5482 * wq_worker_running(). Without it, NOT_RUNNING test may
a9ab775b
TH
5483 * fail incorrectly leading to premature concurrency
5484 * management operations.
5485 */
5486 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
5487 worker_flags |= WORKER_REBOUND;
5488 worker_flags &= ~WORKER_UNBOUND;
c95491ed 5489 WRITE_ONCE(worker->flags, worker_flags);
bd7c089e 5490 }
a9ab775b 5491
a9b8a985 5492 raw_spin_unlock_irq(&pool->lock);
bd7c089e
TH
5493}
5494
7dbc725e
TH
5495/**
5496 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
5497 * @pool: unbound pool of interest
5498 * @cpu: the CPU which is coming up
5499 *
5500 * An unbound pool may end up with a cpumask which doesn't have any online
5501 * CPUs. When a worker of such a pool gets scheduled, the scheduler resets
5502 * its cpus_allowed. If @cpu is in @pool's cpumask, which previously had no
5503 * online CPUs, the cpus_allowed of all its workers should be restored.
5504 */
5505static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
5506{
5507 static cpumask_t cpumask;
5508 struct worker *worker;
7dbc725e 5509
1258fae7 5510 lockdep_assert_held(&wq_pool_attach_mutex);
7dbc725e
TH
5511
5512 /* is @cpu allowed for @pool? */
5513 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
5514 return;
5515
7dbc725e 5516 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
7dbc725e
TH
5517
5518 /* as we're called from CPU_ONLINE, the following shouldn't fail */
da028469 5519 for_each_pool_worker(worker, pool)
d945b5e9 5520 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
7dbc725e
TH
5521}
5522
7ee681b2
TG
5523int workqueue_prepare_cpu(unsigned int cpu)
5524{
5525 struct worker_pool *pool;
5526
5527 for_each_cpu_worker_pool(pool, cpu) {
5528 if (pool->nr_workers)
5529 continue;
5530 if (!create_worker(pool))
5531 return -ENOMEM;
5532 }
5533 return 0;
5534}
5535
5536int workqueue_online_cpu(unsigned int cpu)
3af24433 5537{
4ce62e9e 5538 struct worker_pool *pool;
4c16bd32 5539 struct workqueue_struct *wq;
7dbc725e 5540 int pi;
3ce63377 5541
7ee681b2 5542 mutex_lock(&wq_pool_mutex);
7dbc725e 5543
7ee681b2 5544 for_each_pool(pool, pi) {
1258fae7 5545 mutex_lock(&wq_pool_attach_mutex);
94cf58bb 5546
7ee681b2
TG
5547 if (pool->cpu == cpu)
5548 rebind_workers(pool);
5549 else if (pool->cpu < 0)
5550 restore_unbound_workers_cpumask(pool, cpu);
94cf58bb 5551
1258fae7 5552 mutex_unlock(&wq_pool_attach_mutex);
7ee681b2 5553 }
6ba94429 5554
fef59c9c 5555 /* update pod affinity of unbound workqueues */
4cbfd3de 5556 list_for_each_entry(wq, &workqueues, list) {
84193c07
TH
5557 struct workqueue_attrs *attrs = wq->unbound_attrs;
5558
5559 if (attrs) {
5560 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5561 int tcpu;
4cbfd3de 5562
84193c07 5563 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
fef59c9c 5564 wq_update_pod(wq, tcpu, cpu, true);
4cbfd3de
TH
5565 }
5566 }
6ba94429 5567
7ee681b2
TG
5568 mutex_unlock(&wq_pool_mutex);
5569 return 0;
6ba94429
FW
5570}
5571
7ee681b2 5572int workqueue_offline_cpu(unsigned int cpu)
6ba94429 5573{
6ba94429
FW
5574 struct workqueue_struct *wq;
5575
7ee681b2 5576 /* unbinding per-cpu workers should happen on the local CPU */
e8b3f8db
LJ
5577 if (WARN_ON(cpu != smp_processor_id()))
5578 return -1;
5579
5580 unbind_workers(cpu);
7ee681b2 5581
fef59c9c 5582 /* update pod affinity of unbound workqueues */
7ee681b2 5583 mutex_lock(&wq_pool_mutex);
4cbfd3de 5584 list_for_each_entry(wq, &workqueues, list) {
84193c07
TH
5585 struct workqueue_attrs *attrs = wq->unbound_attrs;
5586
5587 if (attrs) {
5588 const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5589 int tcpu;
4cbfd3de 5590
84193c07 5591 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
fef59c9c 5592 wq_update_pod(wq, tcpu, cpu, false);
4cbfd3de
TH
5593 }
5594 }
7ee681b2
TG
5595 mutex_unlock(&wq_pool_mutex);
5596
7ee681b2 5597 return 0;
6ba94429
FW
5598}
5599
6ba94429
FW
5600struct work_for_cpu {
5601 struct work_struct work;
5602 long (*fn)(void *);
5603 void *arg;
5604 long ret;
5605};
5606
5607static void work_for_cpu_fn(struct work_struct *work)
5608{
5609 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5610
5611 wfc->ret = wfc->fn(wfc->arg);
5612}
5613
5614/**
265f3ed0 5615 * work_on_cpu_key - run a function in thread context on a particular cpu
6ba94429
FW
5616 * @cpu: the cpu to run on
5617 * @fn: the function to run
5618 * @arg: the function arg
265f3ed0 5619 * @key: The lock class key for lock debugging purposes
6ba94429
FW
5620 *
5621 * It is up to the caller to ensure that the cpu doesn't go offline.
5622 * The caller must not hold any locks which would prevent @fn from completing.
5623 *
5624 * Return: The value @fn returns.
5625 */
265f3ed0
FW
5626long work_on_cpu_key(int cpu, long (*fn)(void *),
5627 void *arg, struct lock_class_key *key)
6ba94429
FW
5628{
5629 struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5630
265f3ed0 5631 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
6ba94429
FW
5632 schedule_work_on(cpu, &wfc.work);
5633 flush_work(&wfc.work);
5634 destroy_work_on_stack(&wfc.work);
5635 return wfc.ret;
5636}
265f3ed0 5637EXPORT_SYMBOL_GPL(work_on_cpu_key);
0e8d6a93
TG
5638
5639/**
265f3ed0 5640 * work_on_cpu_safe_key - run a function in thread context on a particular cpu
0e8d6a93
TG
5641 * @cpu: the cpu to run on
5642 * @fn: the function to run
5643 * @arg: the function argument
265f3ed0 5644 * @key: The lock class key for lock debugging purposes
0e8d6a93
TG
5645 *
5646 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5647 * any locks which would prevent @fn from completing.
5648 *
5649 * Return: The value @fn returns.
5650 */
265f3ed0
FW
5651long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
5652 void *arg, struct lock_class_key *key)
0e8d6a93
TG
5653{
5654 long ret = -ENODEV;
5655
ffd8bea8 5656 cpus_read_lock();
0e8d6a93 5657 if (cpu_online(cpu))
265f3ed0 5658 ret = work_on_cpu_key(cpu, fn, arg, key);
ffd8bea8 5659 cpus_read_unlock();
0e8d6a93
TG
5660 return ret;
5661}
265f3ed0 5662EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
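
/*
 * Usage sketch (illustrative only; foo_state and struct foo_query are
 * hypothetical): callers normally go through the work_on_cpu() and
 * work_on_cpu_safe() wrappers in <linux/workqueue.h>, which supply the
 * lock class key:
 *
 *	static long foo_read_state(void *arg)
 *	{
 *		struct foo_query *q = arg;
 *
 *		q->val = this_cpu_read(foo_state);
 *		return 0;
 *	}
 *
 *	struct foo_query q;
 *	long ret;
 *
 *	ret = work_on_cpu_safe(cpu, foo_read_state, &q);
 */
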
6ba94429
FW
5663#endif /* CONFIG_SMP */
5664
5665#ifdef CONFIG_FREEZER
5666
5667/**
5668 * freeze_workqueues_begin - begin freezing workqueues
5669 *
5670 * Start freezing workqueues. After this function returns, all freezable
f97a4a1a 5671 * workqueues will queue new works to their inactive_works list instead of
6ba94429
FW
5672 * pool->worklist.
5673 *
5674 * CONTEXT:
5675 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5676 */
5677void freeze_workqueues_begin(void)
5678{
5679 struct workqueue_struct *wq;
5680 struct pool_workqueue *pwq;
5681
5682 mutex_lock(&wq_pool_mutex);
5683
5684 WARN_ON_ONCE(workqueue_freezing);
5685 workqueue_freezing = true;
5686
5687 list_for_each_entry(wq, &workqueues, list) {
5688 mutex_lock(&wq->mutex);
5689 for_each_pwq(pwq, wq)
5690 pwq_adjust_max_active(pwq);
5691 mutex_unlock(&wq->mutex);
5692 }
5693
5694 mutex_unlock(&wq_pool_mutex);
5695}
5696
5697/**
5698 * freeze_workqueues_busy - are freezable workqueues still busy?
5699 *
5700 * Check whether freezing is complete. This function must be called
5701 * between freeze_workqueues_begin() and thaw_workqueues().
5702 *
5703 * CONTEXT:
5704 * Grabs and releases wq_pool_mutex.
5705 *
5706 * Return:
5707 * %true if some freezable workqueues are still busy. %false if freezing
5708 * is complete.
5709 */
5710bool freeze_workqueues_busy(void)
5711{
5712 bool busy = false;
5713 struct workqueue_struct *wq;
5714 struct pool_workqueue *pwq;
5715
5716 mutex_lock(&wq_pool_mutex);
5717
5718 WARN_ON_ONCE(!workqueue_freezing);
5719
5720 list_for_each_entry(wq, &workqueues, list) {
5721 if (!(wq->flags & WQ_FREEZABLE))
5722 continue;
5723 /*
5724 * nr_active is monotonically decreasing. It's safe
5725 * to peek without lock.
5726 */
24acfb71 5727 rcu_read_lock();
6ba94429
FW
5728 for_each_pwq(pwq, wq) {
5729 WARN_ON_ONCE(pwq->nr_active < 0);
5730 if (pwq->nr_active) {
5731 busy = true;
24acfb71 5732 rcu_read_unlock();
6ba94429
FW
5733 goto out_unlock;
5734 }
5735 }
24acfb71 5736 rcu_read_unlock();
6ba94429
FW
5737 }
5738out_unlock:
5739 mutex_unlock(&wq_pool_mutex);
5740 return busy;
5741}
5742
5743/**
5744 * thaw_workqueues - thaw workqueues
5745 *
5746 * Thaw workqueues. Normal queueing is restored and all collected
5747 * frozen works are transferred to their respective pool worklists.
5748 *
5749 * CONTEXT:
5750 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5751 */
5752void thaw_workqueues(void)
5753{
5754 struct workqueue_struct *wq;
5755 struct pool_workqueue *pwq;
5756
5757 mutex_lock(&wq_pool_mutex);
5758
5759 if (!workqueue_freezing)
5760 goto out_unlock;
5761
5762 workqueue_freezing = false;
5763
5764 /* restore max_active and repopulate worklist */
5765 list_for_each_entry(wq, &workqueues, list) {
5766 mutex_lock(&wq->mutex);
5767 for_each_pwq(pwq, wq)
5768 pwq_adjust_max_active(pwq);
5769 mutex_unlock(&wq->mutex);
5770 }
5771
5772out_unlock:
5773 mutex_unlock(&wq_pool_mutex);
5774}
5775#endif /* CONFIG_FREEZER */
5776
99c621ef 5777static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
042f7df1
LJ
5778{
5779 LIST_HEAD(ctxs);
5780 int ret = 0;
5781 struct workqueue_struct *wq;
5782 struct apply_wqattrs_ctx *ctx, *n;
5783
5784 lockdep_assert_held(&wq_pool_mutex);
5785
5786 list_for_each_entry(wq, &workqueues, list) {
5787 if (!(wq->flags & WQ_UNBOUND))
5788 continue;
ca10d851 5789
042f7df1 5790 /* creating multiple pwqs breaks ordering guarantee */
ca10d851
WL
5791 if (!list_empty(&wq->pwqs)) {
5792 if (wq->flags & __WQ_ORDERED_EXPLICIT)
5793 continue;
5794 wq->flags &= ~__WQ_ORDERED;
5795 }
042f7df1 5796
99c621ef 5797 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
84193c07
TH
5798 if (IS_ERR(ctx)) {
5799 ret = PTR_ERR(ctx);
042f7df1
LJ
5800 break;
5801 }
5802
5803 list_add_tail(&ctx->list, &ctxs);
5804 }
5805
5806 list_for_each_entry_safe(ctx, n, &ctxs, list) {
5807 if (!ret)
5808 apply_wqattrs_commit(ctx);
5809 apply_wqattrs_cleanup(ctx);
5810 }
5811
99c621ef
LJ
5812 if (!ret) {
5813 mutex_lock(&wq_pool_attach_mutex);
5814 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
5815 mutex_unlock(&wq_pool_attach_mutex);
5816 }
042f7df1
LJ
5817 return ret;
5818}
5819
fe28f631
WL
5820/**
5821 * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
5822 * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
5823 *
5824 * This function can be called from cpuset code to provide a set of isolated
5825 * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold
5826 * either cpus_read_lock or cpus_write_lock.
5827 */
5828int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
5829{
5830 cpumask_var_t cpumask;
5831 int ret = 0;
5832
5833 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5834 return -ENOMEM;
5835
5836 lockdep_assert_cpus_held();
5837 mutex_lock(&wq_pool_mutex);
5838
5839 /* Save the current isolated cpumask & export it via sysfs */
5840 cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
5841
5842 /*
5843 * If the operation fails, it will fall back to
5844 * wq_requested_unbound_cpumask which is initially set to
5845 * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) housekeeping mask and rewritten
5846 * by any subsequent write to workqueue/cpumask sysfs file.
5847 */
5848 if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
5849 cpumask_copy(cpumask, wq_requested_unbound_cpumask);
5850 if (!cpumask_equal(cpumask, wq_unbound_cpumask))
5851 ret = workqueue_apply_unbound_cpumask(cpumask);
5852
5853 mutex_unlock(&wq_pool_mutex);
5854 free_cpumask_var(cpumask);
5855 return ret;
5856}
5857
63c5484e
TH
5858static int parse_affn_scope(const char *val)
5859{
5860 int i;
5861
5862 for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
5863 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
5864 return i;
5865 }
5866 return -EINVAL;
5867}
5868
5869static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
5870{
523a301e
TH
5871 struct workqueue_struct *wq;
5872 int affn, cpu;
63c5484e
TH
5873
5874 affn = parse_affn_scope(val);
5875 if (affn < 0)
5876 return affn;
523a301e
TH
5877 if (affn == WQ_AFFN_DFL)
5878 return -EINVAL;
5879
5880 cpus_read_lock();
5881 mutex_lock(&wq_pool_mutex);
63c5484e
TH
5882
5883 wq_affn_dfl = affn;
523a301e
TH
5884
5885 list_for_each_entry(wq, &workqueues, list) {
5886 for_each_online_cpu(cpu) {
5887 wq_update_pod(wq, cpu, cpu, true);
5888 }
5889 }
5890
5891 mutex_unlock(&wq_pool_mutex);
5892 cpus_read_unlock();
5893
63c5484e
TH
5894 return 0;
5895}
5896
5897static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
5898{
5899 return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
5900}
5901
5902static const struct kernel_param_ops wq_affn_dfl_ops = {
5903 .set = wq_affn_dfl_set,
5904 .get = wq_affn_dfl_get,
5905};
5906
5907module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
5908
6ba94429
FW
5909#ifdef CONFIG_SYSFS
5910/*
5911 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5912 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
5913 * following attributes.
5914 *
63c5484e
TH
5915 * per_cpu RO bool : whether the workqueue is per-cpu or unbound
5916 * max_active RW int : maximum number of in-flight work items
6ba94429
FW
5917 *
5918 * Unbound workqueues have the following extra attributes.
5919 *
63c5484e
TH
5920 * nice RW int : nice value of the workers
5921 * cpumask RW mask : bitmask of allowed CPUs for the workers
5922 * affinity_scope RW str : worker CPU affinity scope (cache, numa, none)
8639eceb 5923 * affinity_strict RW bool : worker CPU affinity is strict
6ba94429
FW
5924 */
5925struct wq_device {
5926 struct workqueue_struct *wq;
5927 struct device dev;
5928};
5929
5930static struct workqueue_struct *dev_to_wq(struct device *dev)
5931{
5932 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5933
5934 return wq_dev->wq;
5935}
5936
5937static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5938 char *buf)
5939{
5940 struct workqueue_struct *wq = dev_to_wq(dev);
5941
5942 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5943}
5944static DEVICE_ATTR_RO(per_cpu);
5945
5946static ssize_t max_active_show(struct device *dev,
5947 struct device_attribute *attr, char *buf)
5948{
5949 struct workqueue_struct *wq = dev_to_wq(dev);
5950
5951 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5952}
5953
5954static ssize_t max_active_store(struct device *dev,
5955 struct device_attribute *attr, const char *buf,
5956 size_t count)
5957{
5958 struct workqueue_struct *wq = dev_to_wq(dev);
5959 int val;
5960
5961 if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5962 return -EINVAL;
5963
5964 workqueue_set_max_active(wq, val);
5965 return count;
5966}
5967static DEVICE_ATTR_RW(max_active);
5968
5969static struct attribute *wq_sysfs_attrs[] = {
5970 &dev_attr_per_cpu.attr,
5971 &dev_attr_max_active.attr,
5972 NULL,
5973};
5974ATTRIBUTE_GROUPS(wq_sysfs);
5975
49277a5b
WL
5976static void apply_wqattrs_lock(void)
5977{
5978 /* CPUs should stay stable across pwq creations and installations */
5979 cpus_read_lock();
5980 mutex_lock(&wq_pool_mutex);
5981}
5982
5983static void apply_wqattrs_unlock(void)
5984{
5985 mutex_unlock(&wq_pool_mutex);
5986 cpus_read_unlock();
5987}
5988
6ba94429
FW
5989static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5990 char *buf)
5991{
5992 struct workqueue_struct *wq = dev_to_wq(dev);
5993 int written;
5994
5995 mutex_lock(&wq->mutex);
5996 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5997 mutex_unlock(&wq->mutex);
5998
5999 return written;
6000}
6001
6002/* prepare workqueue_attrs for sysfs store operations */
6003static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
6004{
6005 struct workqueue_attrs *attrs;
6006
899a94fe
LJ
6007 lockdep_assert_held(&wq_pool_mutex);
6008
be69d00d 6009 attrs = alloc_workqueue_attrs();
6ba94429
FW
6010 if (!attrs)
6011 return NULL;
6012
6ba94429 6013 copy_workqueue_attrs(attrs, wq->unbound_attrs);
6ba94429
FW
6014 return attrs;
6015}
6016
6017static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
6018 const char *buf, size_t count)
6019{
6020 struct workqueue_struct *wq = dev_to_wq(dev);
6021 struct workqueue_attrs *attrs;
d4d3e257
LJ
6022 int ret = -ENOMEM;
6023
6024 apply_wqattrs_lock();
6ba94429
FW
6025
6026 attrs = wq_sysfs_prep_attrs(wq);
6027 if (!attrs)
d4d3e257 6028 goto out_unlock;
6ba94429
FW
6029
6030 if (sscanf(buf, "%d", &attrs->nice) == 1 &&
6031 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
d4d3e257 6032 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429
FW
6033 else
6034 ret = -EINVAL;
6035
d4d3e257
LJ
6036out_unlock:
6037 apply_wqattrs_unlock();
6ba94429
FW
6038 free_workqueue_attrs(attrs);
6039 return ret ?: count;
6040}
6041
6042static ssize_t wq_cpumask_show(struct device *dev,
6043 struct device_attribute *attr, char *buf)
6044{
6045 struct workqueue_struct *wq = dev_to_wq(dev);
6046 int written;
6047
6048 mutex_lock(&wq->mutex);
6049 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
6050 cpumask_pr_args(wq->unbound_attrs->cpumask));
6051 mutex_unlock(&wq->mutex);
6052 return written;
6053}
6054
6055static ssize_t wq_cpumask_store(struct device *dev,
6056 struct device_attribute *attr,
6057 const char *buf, size_t count)
6058{
6059 struct workqueue_struct *wq = dev_to_wq(dev);
6060 struct workqueue_attrs *attrs;
d4d3e257
LJ
6061 int ret = -ENOMEM;
6062
6063 apply_wqattrs_lock();
6ba94429
FW
6064
6065 attrs = wq_sysfs_prep_attrs(wq);
6066 if (!attrs)
d4d3e257 6067 goto out_unlock;
6ba94429
FW
6068
6069 ret = cpumask_parse(buf, attrs->cpumask);
6070 if (!ret)
d4d3e257 6071 ret = apply_workqueue_attrs_locked(wq, attrs);
6ba94429 6072
d4d3e257
LJ
6073out_unlock:
6074 apply_wqattrs_unlock();
6ba94429
FW
6075 free_workqueue_attrs(attrs);
6076 return ret ?: count;
6077}
6078
63c5484e
TH
6079static ssize_t wq_affn_scope_show(struct device *dev,
6080 struct device_attribute *attr, char *buf)
6081{
6082 struct workqueue_struct *wq = dev_to_wq(dev);
6083 int written;
6084
6085 mutex_lock(&wq->mutex);
523a301e
TH
6086 if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
6087 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
6088 wq_affn_names[WQ_AFFN_DFL],
6089 wq_affn_names[wq_affn_dfl]);
6090 else
6091 written = scnprintf(buf, PAGE_SIZE, "%s\n",
6092 wq_affn_names[wq->unbound_attrs->affn_scope]);
63c5484e
TH
6093 mutex_unlock(&wq->mutex);
6094
6095 return written;
6096}
6097
6098static ssize_t wq_affn_scope_store(struct device *dev,
6099 struct device_attribute *attr,
6100 const char *buf, size_t count)
6101{
6102 struct workqueue_struct *wq = dev_to_wq(dev);
6103 struct workqueue_attrs *attrs;
6104 int affn, ret = -ENOMEM;
6105
6106 affn = parse_affn_scope(buf);
6107 if (affn < 0)
6108 return affn;
6109
6110 apply_wqattrs_lock();
6111 attrs = wq_sysfs_prep_attrs(wq);
6112 if (attrs) {
6113 attrs->affn_scope = affn;
6114 ret = apply_workqueue_attrs_locked(wq, attrs);
6115 }
6116 apply_wqattrs_unlock();
6117 free_workqueue_attrs(attrs);
6118 return ret ?: count;
6119}
6120
8639eceb
TH
6121static ssize_t wq_affinity_strict_show(struct device *dev,
6122 struct device_attribute *attr, char *buf)
6123{
6124 struct workqueue_struct *wq = dev_to_wq(dev);
6125
6126 return scnprintf(buf, PAGE_SIZE, "%d\n",
6127 wq->unbound_attrs->affn_strict);
6128}
6129
6130static ssize_t wq_affinity_strict_store(struct device *dev,
6131 struct device_attribute *attr,
6132 const char *buf, size_t count)
6133{
6134 struct workqueue_struct *wq = dev_to_wq(dev);
6135 struct workqueue_attrs *attrs;
6136 int v, ret = -ENOMEM;
6137
6138 if (sscanf(buf, "%d", &v) != 1)
6139 return -EINVAL;
6140
6141 apply_wqattrs_lock();
6142 attrs = wq_sysfs_prep_attrs(wq);
6143 if (attrs) {
6144 attrs->affn_strict = (bool)v;
6145 ret = apply_workqueue_attrs_locked(wq, attrs);
6146 }
6147 apply_wqattrs_unlock();
6148 free_workqueue_attrs(attrs);
6149 return ret ?: count;
6150}
6151
6ba94429 6152static struct device_attribute wq_sysfs_unbound_attrs[] = {
6ba94429
FW
6153 __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
6154 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
63c5484e 6155 __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
8639eceb 6156 __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
6ba94429
FW
6157 __ATTR_NULL,
6158};
8ccad40d 6159
6ba94429
FW
6160static struct bus_type wq_subsys = {
6161 .name = "workqueue",
6162 .dev_groups = wq_sysfs_groups,
2d3854a3
RR
6163};
6164
49277a5b
WL
6165/**
6166 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
6167 * @cpumask: the cpumask to set
6168 *
6169 * The low-level workqueues cpumask is a global cpumask that limits
6170 * the affinity of all unbound workqueues. This function checks @cpumask
6171 * and applies it to all unbound workqueues, updating all of their pwqs.
6172 *
6173 * Return: 0 - Success
6174 * -EINVAL - Invalid @cpumask
6175 * -ENOMEM - Failed to allocate memory for attrs or pwqs.
6176 */
6177static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
6178{
6179 int ret = -EINVAL;
6180
6181 /*
6182 * Not excluding isolated cpus on purpose.
6183 * If the user wishes to include them, we allow that.
6184 */
6185 cpumask_and(cpumask, cpumask, cpu_possible_mask);
6186 if (!cpumask_empty(cpumask)) {
6187 apply_wqattrs_lock();
6188 cpumask_copy(wq_requested_unbound_cpumask, cpumask);
6189 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
6190 ret = 0;
6191 goto out_unlock;
6192 }
6193
6194 ret = workqueue_apply_unbound_cpumask(cpumask);
6195
6196out_unlock:
6197 apply_wqattrs_unlock();
6198 }
6199
6200 return ret;
6201}
6202
fe28f631
WL
6203static ssize_t __wq_cpumask_show(struct device *dev,
6204 struct device_attribute *attr, char *buf, cpumask_var_t mask)
b05a7928
FW
6205{
6206 int written;
6207
042f7df1 6208 mutex_lock(&wq_pool_mutex);
fe28f631 6209 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
042f7df1 6210 mutex_unlock(&wq_pool_mutex);
b05a7928
FW
6211
6212 return written;
6213}
6214
fe28f631
WL
6215static ssize_t wq_unbound_cpumask_show(struct device *dev,
6216 struct device_attribute *attr, char *buf)
6217{
6218 return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
6219}
6220
6221static ssize_t wq_requested_cpumask_show(struct device *dev,
6222 struct device_attribute *attr, char *buf)
6223{
6224 return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
6225}
6226
6227static ssize_t wq_isolated_cpumask_show(struct device *dev,
6228 struct device_attribute *attr, char *buf)
6229{
6230 return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
6231}
6232
042f7df1
LJ
6233static ssize_t wq_unbound_cpumask_store(struct device *dev,
6234 struct device_attribute *attr, const char *buf, size_t count)
6235{
6236 cpumask_var_t cpumask;
6237 int ret;
6238
6239 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6240 return -ENOMEM;
6241
6242 ret = cpumask_parse(buf, cpumask);
6243 if (!ret)
6244 ret = workqueue_set_unbound_cpumask(cpumask);
6245
6246 free_cpumask_var(cpumask);
6247 return ret ? ret : count;
6248}
6249
fe28f631 6250static struct device_attribute wq_sysfs_cpumask_attrs[] = {
042f7df1 6251 __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
fe28f631
WL
6252 wq_unbound_cpumask_store),
6253 __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL),
6254 __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL),
6255 __ATTR_NULL,
6256};
b05a7928 6257
6ba94429 6258static int __init wq_sysfs_init(void)
2d3854a3 6259{
686f6697 6260 struct device *dev_root;
b05a7928
FW
6261 int err;
6262
6263 err = subsys_virtual_register(&wq_subsys, NULL);
6264 if (err)
6265 return err;
6266
686f6697
GKH
6267 dev_root = bus_get_dev_root(&wq_subsys);
6268 if (dev_root) {
fe28f631
WL
6269 struct device_attribute *attr;
6270
6271 for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) {
6272 err = device_create_file(dev_root, attr);
6273 if (err)
6274 break;
6275 }
686f6697
GKH
6276 put_device(dev_root);
6277 }
6278 return err;
2d3854a3 6279}
6ba94429 6280core_initcall(wq_sysfs_init);
2d3854a3 6281
6ba94429 6282static void wq_device_release(struct device *dev)
2d3854a3 6283{
6ba94429 6284 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6b44003e 6285
6ba94429 6286 kfree(wq_dev);
2d3854a3 6287}
a0a1a5fd
TH
6288
6289/**
6ba94429
FW
6290 * workqueue_sysfs_register - make a workqueue visible in sysfs
6291 * @wq: the workqueue to register
a0a1a5fd 6292 *
6ba94429
FW
6293 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
 6294 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
 6295 * which is the preferred method.
a0a1a5fd 6296 *
6ba94429
FW
6297 * Workqueue user should use this function directly iff it wants to apply
6298 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
6299 * apply_workqueue_attrs() may race against userland updating the
6300 * attributes.
6301 *
6302 * Return: 0 on success, -errno on failure.
a0a1a5fd 6303 */
6ba94429 6304int workqueue_sysfs_register(struct workqueue_struct *wq)
a0a1a5fd 6305{
6ba94429
FW
6306 struct wq_device *wq_dev;
6307 int ret;
a0a1a5fd 6308
6ba94429 6309 /*
402dd89d 6310 * Adjusting max_active or creating new pwqs by applying
6ba94429
FW
 6311 * attributes breaks the ordering guarantee. Disallow exposing ordered
6312 * workqueues.
6313 */
0a94efb5 6314 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
6ba94429 6315 return -EINVAL;
a0a1a5fd 6316
6ba94429
FW
6317 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
6318 if (!wq_dev)
6319 return -ENOMEM;
5bcab335 6320
6ba94429
FW
6321 wq_dev->wq = wq;
6322 wq_dev->dev.bus = &wq_subsys;
6ba94429 6323 wq_dev->dev.release = wq_device_release;
23217b44 6324 dev_set_name(&wq_dev->dev, "%s", wq->name);
a0a1a5fd 6325
6ba94429
FW
6326 /*
6327 * unbound_attrs are created separately. Suppress uevent until
6328 * everything is ready.
6329 */
6330 dev_set_uevent_suppress(&wq_dev->dev, true);
a0a1a5fd 6331
6ba94429
FW
6332 ret = device_register(&wq_dev->dev);
6333 if (ret) {
537f4146 6334 put_device(&wq_dev->dev);
6ba94429
FW
6335 wq->wq_dev = NULL;
6336 return ret;
6337 }
a0a1a5fd 6338
6ba94429
FW
6339 if (wq->flags & WQ_UNBOUND) {
6340 struct device_attribute *attr;
a0a1a5fd 6341
6ba94429
FW
6342 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
6343 ret = device_create_file(&wq_dev->dev, attr);
6344 if (ret) {
6345 device_unregister(&wq_dev->dev);
6346 wq->wq_dev = NULL;
6347 return ret;
a0a1a5fd
TH
6348 }
6349 }
6350 }
6ba94429
FW
6351
6352 dev_set_uevent_suppress(&wq_dev->dev, false);
6353 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
6354 return 0;
a0a1a5fd
TH
6355}
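/*
 * Illustrative sketch, not part of workqueue.c: the "attrs first, sysfs
 * second" pattern the comment above recommends.  All example_* names are
 * hypothetical, and this assumes apply_workqueue_attrs() and the
 * workqueue_attrs helpers are visible to the caller.
 */
static struct workqueue_struct *example_wq;

static int example_wq_setup(const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	/* allocate without WQ_SYSFS so userland cannot race the attr setup */
	example_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
	if (!example_wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		goto err;
	cpumask_copy(attrs->cpumask, mask);
	ret = apply_workqueue_attrs(example_wq, attrs);
	free_workqueue_attrs(attrs);
	if (ret)
		goto err;

	/* attributes are settled, only now expose the workqueue in sysfs */
	ret = workqueue_sysfs_register(example_wq);
	if (ret)
		goto err;
	return 0;
err:
	destroy_workqueue(example_wq);
	example_wq = NULL;
	return ret;
}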
6356
6357/**
6ba94429
FW
6358 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
6359 * @wq: the workqueue to unregister
a0a1a5fd 6360 *
6ba94429 6361 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
a0a1a5fd 6362 */
6ba94429 6363static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
a0a1a5fd 6364{
6ba94429 6365 struct wq_device *wq_dev = wq->wq_dev;
8b03ae3c 6366
6ba94429
FW
6367 if (!wq->wq_dev)
6368 return;
a0a1a5fd 6369
6ba94429
FW
6370 wq->wq_dev = NULL;
6371 device_unregister(&wq_dev->dev);
a0a1a5fd 6372}
6ba94429
FW
6373#else /* CONFIG_SYSFS */
6374static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
6375#endif /* CONFIG_SYSFS */
a0a1a5fd 6376
82607adc
TH
6377/*
6378 * Workqueue watchdog.
6379 *
 6380 * Stalls may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
6381 * flush dependency, a concurrency managed work item which stays RUNNING
6382 * indefinitely. Workqueue stalls can be very difficult to debug as the
6383 * usual warning mechanisms don't trigger and internal workqueue state is
6384 * largely opaque.
6385 *
6386 * Workqueue watchdog monitors all worker pools periodically and dumps
6387 * state if some pools failed to make forward progress for a while where
6388 * forward progress is defined as the first item on ->worklist changing.
6389 *
6390 * This mechanism is controlled through the kernel parameter
6391 * "workqueue.watchdog_thresh" which can be updated at runtime through the
6392 * corresponding sysfs parameter file.
6393 */
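/*
 * Illustrative note: the threshold is set at boot with
 * "workqueue.watchdog_thresh=<seconds>" and, assuming the usual built-in
 * module parameter layout, at runtime via
 * /sys/module/workqueue/parameters/watchdog_thresh.  A value of 0 disables
 * the watchdog; the default below is 30 seconds.
 */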
6394#ifdef CONFIG_WQ_WATCHDOG
6395
82607adc 6396static unsigned long wq_watchdog_thresh = 30;
5cd79d6a 6397static struct timer_list wq_watchdog_timer;
82607adc
TH
6398
6399static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
6400static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
6401
cd2440d6
PM
6402/*
6403 * Show workers that might prevent the processing of pending work items.
6404 * The only candidates are CPU-bound workers in the running state.
6405 * Pending work items should be handled by another idle worker
6406 * in all other situations.
6407 */
6408static void show_cpu_pool_hog(struct worker_pool *pool)
6409{
6410 struct worker *worker;
6411 unsigned long flags;
6412 int bkt;
6413
6414 raw_spin_lock_irqsave(&pool->lock, flags);
6415
6416 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6417 if (task_is_running(worker->task)) {
6418 /*
6419 * Defer printing to avoid deadlocks in console
6420 * drivers that queue work while holding locks
6421 * also taken in their write paths.
6422 */
6423 printk_deferred_enter();
6424
6425 pr_info("pool %d:\n", pool->id);
6426 sched_show_task(worker->task);
6427
6428 printk_deferred_exit();
6429 }
6430 }
6431
6432 raw_spin_unlock_irqrestore(&pool->lock, flags);
6433}
6434
6435static void show_cpu_pools_hogs(void)
6436{
6437 struct worker_pool *pool;
6438 int pi;
6439
6440 pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
6441
6442 rcu_read_lock();
6443
6444 for_each_pool(pool, pi) {
6445 if (pool->cpu_stall)
6446 show_cpu_pool_hog(pool);
6447
6448 }
6449
6450 rcu_read_unlock();
6451}
6452
82607adc
TH
6453static void wq_watchdog_reset_touched(void)
6454{
6455 int cpu;
6456
6457 wq_watchdog_touched = jiffies;
6458 for_each_possible_cpu(cpu)
6459 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
6460}
6461
5cd79d6a 6462static void wq_watchdog_timer_fn(struct timer_list *unused)
82607adc
TH
6463{
6464 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
6465 bool lockup_detected = false;
cd2440d6 6466 bool cpu_pool_stall = false;
940d71c6 6467 unsigned long now = jiffies;
82607adc
TH
6468 struct worker_pool *pool;
6469 int pi;
6470
6471 if (!thresh)
6472 return;
6473
6474 rcu_read_lock();
6475
6476 for_each_pool(pool, pi) {
6477 unsigned long pool_ts, touched, ts;
6478
cd2440d6 6479 pool->cpu_stall = false;
82607adc
TH
6480 if (list_empty(&pool->worklist))
6481 continue;
6482
940d71c6
SS
6483 /*
 6484 * If a virtual machine is stopped by the host, it can look to
6485 * the watchdog like a stall.
6486 */
6487 kvm_check_and_clear_guest_paused();
6488
82607adc 6489 /* get the latest of pool and touched timestamps */
89e28ce6
WQ
6490 if (pool->cpu >= 0)
6491 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
6492 else
6493 touched = READ_ONCE(wq_watchdog_touched);
82607adc 6494 pool_ts = READ_ONCE(pool->watchdog_ts);
82607adc
TH
6495
6496 if (time_after(pool_ts, touched))
6497 ts = pool_ts;
6498 else
6499 ts = touched;
6500
82607adc 6501 /* did we stall? */
940d71c6 6502 if (time_after(now, ts + thresh)) {
82607adc 6503 lockup_detected = true;
cd2440d6
PM
6504 if (pool->cpu >= 0) {
6505 pool->cpu_stall = true;
6506 cpu_pool_stall = true;
6507 }
82607adc
TH
6508 pr_emerg("BUG: workqueue lockup - pool");
6509 pr_cont_pool_info(pool);
6510 pr_cont(" stuck for %us!\n",
940d71c6 6511 jiffies_to_msecs(now - pool_ts) / 1000);
82607adc 6512 }
cd2440d6
PM
6513
6514
82607adc
TH
6515 }
6516
6517 rcu_read_unlock();
6518
6519 if (lockup_detected)
55df0933 6520 show_all_workqueues();
82607adc 6521
cd2440d6
PM
6522 if (cpu_pool_stall)
6523 show_cpu_pools_hogs();
6524
82607adc
TH
6525 wq_watchdog_reset_touched();
6526 mod_timer(&wq_watchdog_timer, jiffies + thresh);
6527}
6528
cb9d7fd5 6529notrace void wq_watchdog_touch(int cpu)
82607adc
TH
6530{
6531 if (cpu >= 0)
6532 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
89e28ce6
WQ
6533
6534 wq_watchdog_touched = jiffies;
82607adc
TH
6535}
6536
6537static void wq_watchdog_set_thresh(unsigned long thresh)
6538{
6539 wq_watchdog_thresh = 0;
6540 del_timer_sync(&wq_watchdog_timer);
6541
6542 if (thresh) {
6543 wq_watchdog_thresh = thresh;
6544 wq_watchdog_reset_touched();
6545 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
6546 }
6547}
6548
6549static int wq_watchdog_param_set_thresh(const char *val,
6550 const struct kernel_param *kp)
6551{
6552 unsigned long thresh;
6553 int ret;
6554
6555 ret = kstrtoul(val, 0, &thresh);
6556 if (ret)
6557 return ret;
6558
6559 if (system_wq)
6560 wq_watchdog_set_thresh(thresh);
6561 else
6562 wq_watchdog_thresh = thresh;
6563
6564 return 0;
6565}
6566
6567static const struct kernel_param_ops wq_watchdog_thresh_ops = {
6568 .set = wq_watchdog_param_set_thresh,
6569 .get = param_get_ulong,
6570};
6571
6572module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
6573 0644);
6574
6575static void wq_watchdog_init(void)
6576{
5cd79d6a 6577 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
82607adc
TH
6578 wq_watchdog_set_thresh(wq_watchdog_thresh);
6579}
6580
6581#else /* CONFIG_WQ_WATCHDOG */
6582
6583static inline void wq_watchdog_init(void) { }
6584
6585#endif /* CONFIG_WQ_WATCHDOG */
6586
4a6c5607
TH
6587static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
6588{
6589 if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
6590 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
6591 cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
6592 return;
6593 }
6594
6595 cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
6596}
6597
3347fa09
TH
6598/**
6599 * workqueue_init_early - early init for workqueue subsystem
6600 *
2930155b
TH
6601 * This is the first step of three-staged workqueue subsystem initialization and
6602 * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
6603 * up. It sets up all the data structures and system workqueues and allows early
6604 * boot code to create workqueues and queue/cancel work items. Actual work item
6605 * execution starts only after kthreads can be created and scheduled right
6606 * before early initcalls.
3347fa09 6607 */
2333e829 6608void __init workqueue_init_early(void)
1da177e4 6609{
84193c07 6610 struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
7a4e344c
TH
6611 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
6612 int i, cpu;
c34056a3 6613
10cdb157 6614 BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
e904e6c2 6615
b05a7928 6616 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
fe28f631
WL
6617 BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
6618 BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
b05a7928 6619
4a6c5607
TH
6620 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
6621 restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
6622 restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
ace3c549 6623 if (!cpumask_empty(&wq_cmdline_cpumask))
4a6c5607 6624 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
ace3c549 6625
fe28f631 6626 cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
ace3c549 6627
e904e6c2
TH
6628 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
6629
2930155b
TH
6630 wq_update_pod_attrs_buf = alloc_workqueue_attrs();
6631 BUG_ON(!wq_update_pod_attrs_buf);
6632
84193c07
TH
6633 /* initialize WQ_AFFN_SYSTEM pods */
6634 pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6635 pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
6636 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6637 BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
6638
6639 BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
6640
84193c07
TH
6641 pt->nr_pods = 1;
6642 cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
6643 pt->pod_node[0] = NUMA_NO_NODE;
6644 pt->cpu_pod[0] = 0;
6645
706026c2 6646 /* initialize CPU pools */
29c91e99 6647 for_each_possible_cpu(cpu) {
4ce62e9e 6648 struct worker_pool *pool;
8b03ae3c 6649
7a4e344c 6650 i = 0;
f02ae73a 6651 for_each_cpu_worker_pool(pool, cpu) {
7a4e344c 6652 BUG_ON(init_worker_pool(pool));
ec22ca5e 6653 pool->cpu = cpu;
29c91e99 6654 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
9546b29e 6655 cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
7a4e344c 6656 pool->attrs->nice = std_nice[i++];
8639eceb 6657 pool->attrs->affn_strict = true;
f3f90ad4 6658 pool->node = cpu_to_node(cpu);
7a4e344c 6659
9daf9e67 6660 /* alloc pool ID */
68e13a67 6661 mutex_lock(&wq_pool_mutex);
9daf9e67 6662 BUG_ON(worker_pool_assign_id(pool));
68e13a67 6663 mutex_unlock(&wq_pool_mutex);
4ce62e9e 6664 }
8b03ae3c
TH
6665 }
6666
8a2b7538 6667 /* create default unbound and ordered wq attrs */
29c91e99
TH
6668 for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
6669 struct workqueue_attrs *attrs;
6670
be69d00d 6671 BUG_ON(!(attrs = alloc_workqueue_attrs()));
29c91e99 6672 attrs->nice = std_nice[i];
29c91e99 6673 unbound_std_wq_attrs[i] = attrs;
8a2b7538
TH
6674
6675 /*
6676 * An ordered wq should have only one pwq as ordering is
6677 * guaranteed by max_active which is enforced by pwqs.
8a2b7538 6678 */
be69d00d 6679 BUG_ON(!(attrs = alloc_workqueue_attrs()));
8a2b7538 6680 attrs->nice = std_nice[i];
af73f5c9 6681 attrs->ordered = true;
8a2b7538 6682 ordered_wq_attrs[i] = attrs;
29c91e99
TH
6683 }
6684
d320c038 6685 system_wq = alloc_workqueue("events", 0, 0);
1aabe902 6686 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
d320c038 6687 system_long_wq = alloc_workqueue("events_long", 0, 0);
f3421797 6688 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
636b927e 6689 WQ_MAX_ACTIVE);
24d51add
TH
6690 system_freezable_wq = alloc_workqueue("events_freezable",
6691 WQ_FREEZABLE, 0);
0668106c
VK
6692 system_power_efficient_wq = alloc_workqueue("events_power_efficient",
6693 WQ_POWER_EFFICIENT, 0);
6694 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
6695 WQ_FREEZABLE | WQ_POWER_EFFICIENT,
6696 0);
1aabe902 6697 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
0668106c
VK
6698 !system_unbound_wq || !system_freezable_wq ||
6699 !system_power_efficient_wq ||
6700 !system_freezable_power_efficient_wq);
3347fa09
TH
6701}
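/*
 * Illustrative sketch (hypothetical example_* names): after
 * workqueue_init_early(), boot code may already create workqueues and queue
 * work items; nothing executes until workqueue_init() creates the kworkers.
 */
static void example_early_fn(struct work_struct *work)
{
	pr_info("workqueue example: runs once kworkers exist\n");
}
static DECLARE_WORK(example_early_work, example_early_fn);

static void __init example_early_boot(void)
{
	/* legal between workqueue_init_early() and workqueue_init() */
	queue_work(system_unbound_wq, &example_early_work);
}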
6702
aa6fde93
TH
6703static void __init wq_cpu_intensive_thresh_init(void)
6704{
6705 unsigned long thresh;
6706 unsigned long bogo;
6707
dd64c873
Z
6708 pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
6709 BUG_ON(IS_ERR(pwq_release_worker));
6710
aa6fde93
TH
6711 /* if the user set it to a specific value, keep it */
6712 if (wq_cpu_intensive_thresh_us != ULONG_MAX)
6713 return;
6714
6715 /*
6716 * The default of 10ms is derived from the fact that most modern (as of
6717 * 2023) processors can do a lot in 10ms and that it's just below what
6718 * most consider human-perceivable. However, the kernel also runs on a
6719 * lot slower CPUs including microcontrollers where the threshold is way
6720 * too low.
6721 *
 6722 * Let's scale the threshold up to 1 second if BogoMIPS is below 4000.
 6723 * This is by no means accurate but it doesn't have to be. The mechanism
 6724 * is still useful even when the threshold is fully scaled up. Also, as
 6725 * the reports would usually be applicable to everyone, the fact that some
 6726 * machines end up with longer thresholds won't significantly diminish
 6727 * their usefulness.
6728 */
6729 thresh = 10 * USEC_PER_MSEC;
6730
6731 /* see init/calibrate.c for lpj -> BogoMIPS calculation */
6732 bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
6733 if (bogo < 4000)
6734 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
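	/*
	 * Worked example (illustrative): a ~1000 BogoMIPS CPU scales the 10ms
	 * default to 10000 * 4000 / 1000 = 40000us (40ms); a very slow ~40
	 * BogoMIPS part hits the one second clamp.
	 */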
6735
6736 pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
6737 loops_per_jiffy, bogo, thresh);
6738
6739 wq_cpu_intensive_thresh_us = thresh;
6740}
6741
3347fa09
TH
6742/**
6743 * workqueue_init - bring workqueue subsystem fully online
6744 *
2930155b
TH
6745 * This is the second step of three-staged workqueue subsystem initialization
6746 * and invoked as soon as kthreads can be created and scheduled. Workqueues have
6747 * been created and work items queued on them, but there are no kworkers
6748 * executing the work items yet. Populate the worker pools with the initial
6749 * workers and enable future kworker creations.
3347fa09 6750 */
2333e829 6751void __init workqueue_init(void)
3347fa09 6752{
2186d9f9 6753 struct workqueue_struct *wq;
3347fa09
TH
6754 struct worker_pool *pool;
6755 int cpu, bkt;
6756
aa6fde93
TH
6757 wq_cpu_intensive_thresh_init();
6758
2186d9f9
TH
6759 mutex_lock(&wq_pool_mutex);
6760
2930155b
TH
6761 /*
6762 * Per-cpu pools created earlier could be missing node hint. Fix them
6763 * up. Also, create a rescuer for workqueues that requested it.
6764 */
2186d9f9
TH
6765 for_each_possible_cpu(cpu) {
6766 for_each_cpu_worker_pool(pool, cpu) {
6767 pool->node = cpu_to_node(cpu);
6768 }
6769 }
6770
40c17f75 6771 list_for_each_entry(wq, &workqueues, list) {
40c17f75
TH
6772 WARN(init_rescuer(wq),
6773 "workqueue: failed to create early rescuer for %s",
6774 wq->name);
6775 }
2186d9f9
TH
6776
6777 mutex_unlock(&wq_pool_mutex);
6778
3347fa09
TH
6779 /* create the initial workers */
6780 for_each_online_cpu(cpu) {
6781 for_each_cpu_worker_pool(pool, cpu) {
6782 pool->flags &= ~POOL_DISASSOCIATED;
6783 BUG_ON(!create_worker(pool));
6784 }
6785 }
6786
6787 hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
6788 BUG_ON(!create_worker(pool));
6789
6790 wq_online = true;
82607adc 6791 wq_watchdog_init();
1da177e4 6792}
c4f135d6 6793
025e1684
TH
6794/*
6795 * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
 6796 * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
6797 * and consecutive pod ID. The rest of @pt is initialized accordingly.
6798 */
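/*
 * Illustrative example: on a 4-CPU system where CPUs 0/1 and 2/3 each share
 * a pod, @pt->cpu_pod[] becomes {0, 0, 1, 1}, @pt->nr_pods ends up 2, and
 * @pt->pod_cpus[] holds the masks 0-1 and 2-3.
 */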
6799static void __init init_pod_type(struct wq_pod_type *pt,
6800 bool (*cpus_share_pod)(int, int))
6801{
6802 int cur, pre, cpu, pod;
6803
6804 pt->nr_pods = 0;
6805
6806 /* init @pt->cpu_pod[] according to @cpus_share_pod() */
6807 pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
6808 BUG_ON(!pt->cpu_pod);
6809
6810 for_each_possible_cpu(cur) {
6811 for_each_possible_cpu(pre) {
6812 if (pre >= cur) {
6813 pt->cpu_pod[cur] = pt->nr_pods++;
6814 break;
6815 }
6816 if (cpus_share_pod(cur, pre)) {
6817 pt->cpu_pod[cur] = pt->cpu_pod[pre];
6818 break;
6819 }
6820 }
6821 }
6822
6823 /* init the rest to match @pt->cpu_pod[] */
6824 pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
6825 pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
6826 BUG_ON(!pt->pod_cpus || !pt->pod_node);
6827
6828 for (pod = 0; pod < pt->nr_pods; pod++)
6829 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
6830
6831 for_each_possible_cpu(cpu) {
6832 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
6833 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
6834 }
6835}
6836
63c5484e
TH
6837static bool __init cpus_dont_share(int cpu0, int cpu1)
6838{
6839 return false;
6840}
6841
6842static bool __init cpus_share_smt(int cpu0, int cpu1)
6843{
6844#ifdef CONFIG_SCHED_SMT
6845 return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
6846#else
6847 return false;
6848#endif
6849}
6850
025e1684
TH
6851static bool __init cpus_share_numa(int cpu0, int cpu1)
6852{
6853 return cpu_to_node(cpu0) == cpu_to_node(cpu1);
6854}
6855
2930155b
TH
6856/**
6857 * workqueue_init_topology - initialize CPU pods for unbound workqueues
6858 *
 6859 * This is the third step of three-staged workqueue subsystem initialization and
6860 * invoked after SMP and topology information are fully initialized. It
6861 * initializes the unbound CPU pods accordingly.
6862 */
6863void __init workqueue_init_topology(void)
a86feae6 6864{
2930155b 6865 struct workqueue_struct *wq;
025e1684 6866 int cpu;
a86feae6 6867
63c5484e
TH
6868 init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
6869 init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
6870 init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
025e1684 6871 init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
a86feae6 6872
2930155b 6873 mutex_lock(&wq_pool_mutex);
a86feae6 6874
2930155b
TH
6875 /*
6876 * Workqueues allocated earlier would have all CPUs sharing the default
6877 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
6878 * combinations to apply per-pod sharing.
6879 */
6880 list_for_each_entry(wq, &workqueues, list) {
6881 for_each_online_cpu(cpu) {
6882 wq_update_pod(wq, cpu, cpu, true);
6883 }
6884 }
6885
6886 mutex_unlock(&wq_pool_mutex);
a86feae6
TH
6887}
6888
20bdedaf
TH
6889void __warn_flushing_systemwide_wq(void)
6890{
6891 pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
6892 dump_stack();
6893}
c4f135d6 6894EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
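/*
 * Illustrative note: callers that trip this warning (e.g. through the
 * flush_scheduled_work() wrapper) are generally expected to flush a
 * workqueue they allocated themselves instead, so they only wait on their
 * own work items.
 */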
ace3c549 6895
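/*
 * Illustrative note: booting with "workqueue.unbound_cpus=0-3" restricts
 * unbound workqueues to CPUs 0-3; the mask is intersected with the
 * housekeeping masks in workqueue_init_early() above.
 */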
6896static int __init workqueue_unbound_cpus_setup(char *str)
6897{
6898 if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
6899 cpumask_clear(&wq_cmdline_cpumask);
6900 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
6901 }
6902
6903 return 1;
6904}
6905__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);