// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

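/*
 * Release sqd->lock and let the SQPOLL thread run again, unless other
 * parkers are still pending, in which case IO_SQ_THREAD_SHOULD_PARK is
 * re-set and the thread stays parked for them.
 */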
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Clear the bit and then conditionally re-set it, rather than doing
	 * a conditional clear_bit(), as the latter would race with other
	 * threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

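/*
 * Park the SQPOLL thread: bump park_pending, set the PARK bit and take
 * sqd->lock. The thread is woken so it notices the bit and cedes the lock
 * to the parker. Callers must pair this with io_sq_thread_unpark().
 */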
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

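/*
 * Signal the SQPOLL thread to exit and wait for it to complete. May only
 * be called once per io_sq_data.
 */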
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

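/*
 * Drop a reference to the io_sq_data; the final put stops the thread and
 * frees the structure.
 */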
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

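/*
 * Recompute the idle timeout as the maximum of the sq_thread_idle values
 * of all rings sharing this SQPOLL thread.
 */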
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

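/*
 * Detach a ring from its SQPOLL thread: unlink the ctx while the thread
 * is parked, refresh the idle timeout, and drop the ctx's reference.
 */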
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

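/*
 * For IORING_SETUP_ATTACH_WQ: look up the io_sq_data of the ring behind
 * p->wq_fd and take a reference to it. Only rings owned by the same
 * thread group may attach.
 */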
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

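/*
 * Find or allocate the io_sq_data for a new ring. Attaching to an
 * existing thread is tried first when IORING_SETUP_ATTACH_WQ is set; on
 * -EPERM we fall back to creating a new one.
 */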
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

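/*
 * Run one submission pass for a single ring: reap iopoll completions and
 * submit pending SQ entries. Returns the number of SQEs submitted, or a
 * negative error.
 */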
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying. That is good for
		 * io_uring_register(), and io_ring_exit_work() relies on
		 * it as well.
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

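/*
 * Handle pending park/stop state and signals. Dropping sqd->lock here is
 * what lets a parked waiter make progress. Returns true if the thread
 * should exit.
 */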
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

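/*
 * Main loop of the SQPOLL kernel thread. Submits for every ring attached
 * to this io_sq_data, busy-polls while work keeps arriving, and goes to
 * sleep (after flagging IORING_SQ_NEED_WAKEUP) once the rings have been
 * idle for sq_thread_idle jiffies.
 */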
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	else
		set_cpus_allowed_ptr(current, cpu_online_mask);
	current->flags |= PF_NO_SETAFFINITY;

	audit_alloc_kernel(current);

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			cond_resched();
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	audit_free(current);

	complete(&sqd->exited);
	do_exit(0);
}

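/*
 * Used by the IORING_ENTER_SQ_WAIT path: block the submitter until the SQ
 * ring has free space again, or a signal arrives.
 */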
int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return 0;
}

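/*
 * Set up SQPOLL offload for a new ring at setup time: find or create the
 * io_sq_data, attach the ctx to it, and spawn the polling thread if one
 * doesn't exist yet.
 */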
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}