// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	raw_spinlock_t lock;
	struct io_wqe_acct acct[IO_WQ_ACCT_NR];

	int node;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wqe *wqes[];
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
static void io_wqe_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
{
	return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}
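
/*
 * Undo the accounting and references taken when a worker-creation
 * task_work item is canceled before it can run: drop the running and
 * worker counts, the wq reference, and the creation state bit.
 */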
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&worker->wqe->lock);
	acct->nr_workers--;
	raw_spin_unlock(&worker->wqe->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}
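
/*
 * A worker is exiting: cancel any creation task_work still queued for
 * it, wait for the remaining references to drop, unlink it from the
 * free and all-worker lists, fix up the running accounting, and
 * finally free the worker and terminate the thread.
 */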
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wqe->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wqe->lock);
	io_wqe_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wqe->wq);
	do_exit(0);
}
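
/*
 * Returns true if this accounting class has pending work and is not
 * currently stalled on a hashed work item.
 */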
static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
					struct io_wqe_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wqe_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wqe->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wqe->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	return create_io_worker(wqe->wq, wqe, acct->index);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}
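
/*
 * task_work callback that performs the actual worker creation. If we
 * raced and the accounting class is already at its max worker count,
 * undo the accounting that was taken when the creation was queued.
 */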
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wqe *wqe;
	struct io_wqe_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wqe = worker->wqe;
	wq = wqe->wq;
	acct = &wqe->acct[worker->create_index];
	raw_spin_lock(&wqe->lock);
	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wqe->lock);
	if (do_create) {
		create_io_worker(wq, wqe, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}
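
/*
 * Queue worker creation as task_work on the original io_uring task.
 * The create_state bit serializes creation requests, so each worker
 * has at most one creation item in flight at a time.
 */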
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wqe_acct *acct,
				   task_work_func_t func)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}
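
/*
 * The last running worker for this accounting class is about to go to
 * sleep. If work is still pending, queue creation of a replacement
 * worker so forward progress is maintained.
 */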
static void io_wqe_dec_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wqe->wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist.
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wqe->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wqe->lock);
	}
}

/*
 * No work, worker going to sleep. Move to the freelist.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}
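
/*
 * Pick the next runnable work item: unhashed work can run right away,
 * hashed work only if no other worker currently holds the same hash.
 * If everything left is serialized behind in-flight hashes, mark the
 * accounting class stalled and wait for a hash bit to clear.
 */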
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wqe *wqe = worker->wqe;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wqe->wq->hash->wait))
				wake_up(&wqe->wq->hash->wait);
		}
	}

	return NULL;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wqe, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}
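
/*
 * Main loop for a worker thread: run all pending work for our
 * accounting class, then idle for up to WORKER_IDLE_TIMEOUT. A worker
 * that times out exits, unless it is the last worker for its class.
 */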
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	bool last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wqe->lock);
		/* timed out, exit unless we're the last worker */
		if (last_timeout && acct->nr_workers > 1) {
			acct->nr_workers--;
			raw_spin_unlock(&wqe->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wqe, worker);
		raw_spin_unlock(&wqe->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		last_timeout = !ret;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wqe_dec_running(worker);
}
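
/*
 * Hook a freshly created thread up to its io_worker: pin it to the
 * wqe's CPU mask, add it to the free and all-worker lists, and wake it
 * up to start running.
 */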
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wqe->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}
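
/*
 * task_work continuation of a failed create_io_thread() attempt. Retry
 * creation for transient errors; on a hard failure, undo the
 * accounting and, if this was the last worker for the class, cancel
 * all of the class's pending work.
 */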
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wqe *wqe;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wqe = worker->wqe;
	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wqe_acct *acct = io_wqe_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wqe->lock);
			while (io_acct_cancel_pending_work(wqe, acct, &match))
				;
		} else {
			raw_spin_unlock(&wqe->lock);
		}
		io_worker_ref_put(wqe->wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}
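
/*
 * Allocate and start a new worker thread. A transient fork failure is
 * retried from a workqueue; on a hard failure, the accounting taken by
 * the caller is undone.
 */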
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wqe->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wqe->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wqe = wqe;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wqe, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting.
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}
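
/*
 * Queue work on the wqe: insert it into the right accounting class's
 * list, then wake a free worker or create a new one as needed. If
 * creating the very first worker for the class fails, the work item
 * is canceled.
 */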
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wqe);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wqe_insert_work(wqe, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wqe->lock);
	rcu_read_lock();
	do_create = !io_wqe_activate_free_worker(wqe, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wqe->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wqe_create_worker(wqe, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wqe->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wqe->lock);
			return;
		}
		raw_spin_unlock(&wqe->lock);

		/* fatal condition, failed to create the first worker */
		match.fn = io_wq_work_match_item;
		match.data = work;
		match.cancel_all = false;

		io_acct_cancel_pending_work(wqe, acct, &match);
	}
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					struct io_wqe_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);

		if (io_acct_cancel_pending_work(wqe, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the appropriate lock (acct->lock
	 * for pending work, wqe->lock for running work), to ensure that
	 * we'll find a work item regardless of state.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;

		raw_spin_lock(&wqe->lock);
		io_wqe_cancel_running_work(wqe, &match);
		raw_spin_unlock(&wqe->lock);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
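
/*
 * Wait queue callback, run when a hash bit may have been cleared:
 * unstall any accounting class that was waiting on hashed work and
 * kick a free worker to resume processing.
 */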
static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wqe_acct *acct = &wqe->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wqe_activate_free_worker(wqe, acct);
	}
	rcu_read_unlock();
	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, node, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
			goto err;
		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wait.func = io_wqe_hash_wake;
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			struct io_wqe_acct *acct = &wqe->acct[i];

			acct->index = i;
			atomic_set(&acct->nr_running, 0);
			INIT_WQ_LIST(&acct->work_list);
			raw_spin_lock_init(&acct->lock);
		}
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node) {
		if (!wq->wqes[node])
			continue;
		free_cpumask_var(wq->wqes[node]->cpu_mask);
		kfree(wq->wqes[node]);
	}
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wqe->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		kfree(worker);
	}
}
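
/*
 * Tear down all workers: cancel queued creation task_work, wake every
 * worker so it notices IO_WQ_BIT_EXIT, then wait for the worker
 * references to drain before detaching from the hash wait queues.
 */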
static void io_wq_exit_workers(struct io_wq *wq)
{
	int node;

	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
	}
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	for_each_node(node) {
		spin_lock_irq(&wq->hash->wait.lock);
		list_del_init(&wq->wqes[node]->wait.entry);
		spin_unlock_irq(&wq->hash->wait.lock);
	}
	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_cb_cancel_data match = {
			.fn		= io_wq_work_match_all,
			.cancel_all	= true,
		};
		io_wqe_cancel_pending_work(wqe, &match);
		free_cpumask_var(wqe->cpu_mask);
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	int i;

	rcu_read_lock();
	for_each_node(i) {
		struct io_wqe *wqe = wq->wqes[i];

		if (mask)
			cpumask_copy(wqe->cpu_mask, mask);
		else
			cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
	}
	rcu_read_unlock();
	return 0;
}

/*
 * Set the max number of workers for each accounting class; the old
 * values are returned through new_count. A zero entry in new_count
 * leaves that class's limit unchanged and only reports the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	int prev[IO_WQ_ACCT_NR];
	bool first_node = true;
	int i, node;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		struct io_wqe_acct *acct;

		raw_spin_lock(&wqe->lock);
		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
			acct = &wqe->acct[i];
			if (first_node)
				prev[i] = max_t(int, acct->max_workers, prev[i]);
			if (new_count[i])
				acct->max_workers = new_count[i];
		}
		raw_spin_unlock(&wqe->lock);
		first_node = false;
	}
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);