| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Shared application/kernel submission and completion ring pairs, for |
| 4 | * supporting fast/efficient IO. |
| 5 | * |
| 6 | * A note on the read/write ordering memory barriers that are matched between |
| 7 | * the application and kernel side. |
| 8 | * |
| 9 | * After the application reads the CQ ring tail, it must use an |
| 10 | * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses |
| 11 | * before writing the tail (using smp_load_acquire to read the tail will |
| 12 | * do). It also needs a smp_mb() before updating CQ head (ordering the |
| 13 | * entry load(s) with the head store), pairing with an implicit barrier |
| 14 | * through a control-dependency in io_get_cqe (smp_store_release to |
| 15 | * store head will do). Failure to do so could lead to reading invalid |
| 16 | * CQ entries. |
| 17 | * |
| 18 | * Likewise, the application must use an appropriate smp_wmb() before |
| 19 | * writing the SQ tail (ordering SQ entry stores with the tail store), |
| 20 | * which pairs with smp_load_acquire in io_get_sqring (smp_store_release |
| 21 | * to store the tail will do). And it needs a barrier ordering the SQ |
| 22 | * head load before writing new SQ entries (smp_load_acquire to read |
| 23 | * head will do). |
| 24 | * |
| 25 | * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application |
| 26 | * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after* |
| 27 | * updating the SQ tail; a full memory barrier smp_mb() is needed |
| 28 | * between. |
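|  | * |
|  | * For illustration only, a minimal sketch of the application-side CQ reaping |
|  | * described above (names such as cq_head, cq_tail, cqes, cq_mask and |
|  | * process() are placeholders here, not liburing symbols): |
|  | * |
|  | * unsigned head = *cq_head; // only the application writes the CQ head |
|  | * unsigned tail = smp_load_acquire(cq_tail); // pairs with the kernel's release |
|  | * while (head != tail) { |
|  | * struct io_uring_cqe *cqe = &cqes[head & cq_mask]; |
|  | * process(cqe); // placeholder consumer |
|  | * head++; |
|  | * } |
|  | * smp_store_release(cq_head, head); // pairs with the kernel's acquire |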
| 29 | * |
| 30 | * Also see the examples in the liburing library: |
| 31 | * |
| 32 | * git://git.kernel.dk/liburing |
| 33 | * |
| 34 | * io_uring also uses READ/WRITE_ONCE() for _any_ store to or load from data |
| 35 | * shared between the kernel and application. This is done both for ordering |
| 36 | * purposes and to ensure that once a value is loaded from data that the |
| 37 | * application could potentially modify, it remains stable. |
| 38 | * |
| 39 | * Copyright (C) 2018-2019 Jens Axboe |
| 40 | * Copyright (c) 2018-2019 Christoph Hellwig |
| 41 | */ |
| 42 | #include <linux/kernel.h> |
| 43 | #include <linux/init.h> |
| 44 | #include <linux/errno.h> |
| 45 | #include <linux/syscalls.h> |
| 46 | #include <net/compat.h> |
| 47 | #include <linux/refcount.h> |
| 48 | #include <linux/uio.h> |
| 49 | #include <linux/bits.h> |
| 50 | |
| 51 | #include <linux/sched/signal.h> |
| 52 | #include <linux/fs.h> |
| 53 | #include <linux/file.h> |
| 54 | #include <linux/fdtable.h> |
| 55 | #include <linux/mm.h> |
| 56 | #include <linux/mman.h> |
| 57 | #include <linux/percpu.h> |
| 58 | #include <linux/slab.h> |
| 59 | #include <linux/bvec.h> |
| 60 | #include <linux/net.h> |
| 61 | #include <net/sock.h> |
| 62 | #include <linux/anon_inodes.h> |
| 63 | #include <linux/sched/mm.h> |
| 64 | #include <linux/uaccess.h> |
| 65 | #include <linux/nospec.h> |
| 66 | #include <linux/fsnotify.h> |
| 67 | #include <linux/fadvise.h> |
| 68 | #include <linux/task_work.h> |
| 69 | #include <linux/io_uring.h> |
| 70 | #include <linux/io_uring/cmd.h> |
| 71 | #include <linux/audit.h> |
| 72 | #include <linux/security.h> |
| 73 | #include <asm/shmparam.h> |
| 74 | |
| 75 | #define CREATE_TRACE_POINTS |
| 76 | #include <trace/events/io_uring.h> |
| 77 | |
| 78 | #include <uapi/linux/io_uring.h> |
| 79 | |
| 80 | #include "io-wq.h" |
| 81 | |
| 82 | #include "io_uring.h" |
| 83 | #include "opdef.h" |
| 84 | #include "refs.h" |
| 85 | #include "tctx.h" |
| 86 | #include "register.h" |
| 87 | #include "sqpoll.h" |
| 88 | #include "fdinfo.h" |
| 89 | #include "kbuf.h" |
| 90 | #include "rsrc.h" |
| 91 | #include "cancel.h" |
| 92 | #include "net.h" |
| 93 | #include "notif.h" |
| 94 | #include "waitid.h" |
| 95 | #include "futex.h" |
| 96 | #include "napi.h" |
| 97 | #include "uring_cmd.h" |
| 98 | #include "msg_ring.h" |
| 99 | #include "memmap.h" |
| 100 | |
| 101 | #include "timeout.h" |
| 102 | #include "poll.h" |
| 103 | #include "rw.h" |
| 104 | #include "alloc_cache.h" |
| 105 | #include "eventfd.h" |
| 106 | |
| 107 | #define IORING_MAX_ENTRIES 32768 |
| 108 | #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) |
| 109 | |
| 110 | #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \ |
| 111 | IOSQE_IO_HARDLINK | IOSQE_ASYNC) |
| 112 | |
| 113 | #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \ |
| 114 | IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS) |
| 115 | |
| 116 | #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ |
| 117 | REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \ |
| 118 | REQ_F_ASYNC_DATA) |
| 119 | |
| 120 | #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\ |
| 121 | IO_REQ_CLEAN_FLAGS) |
| 122 | |
| 123 | #define IO_TCTX_REFS_CACHE_NR (1U << 10) |
| 124 | |
| 125 | #define IO_COMPL_BATCH 32 |
| 126 | #define IO_REQ_ALLOC_BATCH 8 |
| 127 | |
| 128 | struct io_defer_entry { |
| 129 | struct list_head list; |
| 130 | struct io_kiocb *req; |
| 131 | u32 seq; |
| 132 | }; |
| 133 | |
| 134 | /* requests with any of those set should undergo io_disarm_next() */ |
| 135 | #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) |
| 136 | #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) |
| 137 | |
| 138 | /* |
| 139 | * No waiters. It's larger than any valid value of the tw counter |
| 140 | * so that tests against ->cq_wait_nr would fail and skip wake_up(). |
| 141 | */ |
| 142 | #define IO_CQ_WAKE_INIT (-1U) |
| 143 | /* Forced wake up if there is a waiter regardless of ->cq_wait_nr */ |
| 144 | #define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1) |
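|  | /* |
|  | * Illustrative note (see io_req_local_work_add()): with no waiters, |
|  | * ->cq_wait_nr holds IO_CQ_WAKE_INIT, so any nr_tw, even IO_CQ_WAKE_FORCE, |
|  | * compares below it and the wake is skipped; a non-lazy add stores |
|  | * IO_CQ_WAKE_FORCE, which exceeds any legitimate waiter threshold and thus |
|  | * forces the wake when someone is waiting. |
|  | */ |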
| 145 | |
| 146 | static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, |
| 147 | struct task_struct *task, |
| 148 | bool cancel_all); |
| 149 | |
| 150 | static void io_queue_sqe(struct io_kiocb *req); |
| 151 | |
| 152 | struct kmem_cache *req_cachep; |
| 153 | static struct workqueue_struct *iou_wq __ro_after_init; |
| 154 | |
| 155 | static int __read_mostly sysctl_io_uring_disabled; |
| 156 | static int __read_mostly sysctl_io_uring_group = -1; |
| 157 | |
| 158 | #ifdef CONFIG_SYSCTL |
| 159 | static struct ctl_table kernel_io_uring_disabled_table[] = { |
| 160 | { |
| 161 | .procname = "io_uring_disabled", |
| 162 | .data = &sysctl_io_uring_disabled, |
| 163 | .maxlen = sizeof(sysctl_io_uring_disabled), |
| 164 | .mode = 0644, |
| 165 | .proc_handler = proc_dointvec_minmax, |
| 166 | .extra1 = SYSCTL_ZERO, |
| 167 | .extra2 = SYSCTL_TWO, |
| 168 | }, |
| 169 | { |
| 170 | .procname = "io_uring_group", |
| 171 | .data = &sysctl_io_uring_group, |
| 172 | .maxlen = sizeof(gid_t), |
| 173 | .mode = 0644, |
| 174 | .proc_handler = proc_dointvec, |
| 175 | }, |
| 176 | }; |
| 177 | #endif |
| 178 | |
| 179 | static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) |
| 180 | { |
| 181 | return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); |
| 182 | } |
| 183 | |
| 184 | static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx) |
| 185 | { |
| 186 | return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); |
| 187 | } |
| 188 | |
| 189 | static bool io_match_linked(struct io_kiocb *head) |
| 190 | { |
| 191 | struct io_kiocb *req; |
| 192 | |
| 193 | io_for_each_link(req, head) { |
| 194 | if (req->flags & REQ_F_INFLIGHT) |
| 195 | return true; |
| 196 | } |
| 197 | return false; |
| 198 | } |
| 199 | |
| 200 | /* |
| 201 | * As io_match_task() but protected against racing with linked timeouts. |
| 202 | * User must not hold timeout_lock. |
| 203 | */ |
| 204 | bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, |
| 205 | bool cancel_all) |
| 206 | { |
| 207 | bool matched; |
| 208 | |
| 209 | if (task && head->task != task) |
| 210 | return false; |
| 211 | if (cancel_all) |
| 212 | return true; |
| 213 | |
| 214 | if (head->flags & REQ_F_LINK_TIMEOUT) { |
| 215 | struct io_ring_ctx *ctx = head->ctx; |
| 216 | |
| 217 | /* protect against races with linked timeouts */ |
| 218 | spin_lock_irq(&ctx->timeout_lock); |
| 219 | matched = io_match_linked(head); |
| 220 | spin_unlock_irq(&ctx->timeout_lock); |
| 221 | } else { |
| 222 | matched = io_match_linked(head); |
| 223 | } |
| 224 | return matched; |
| 225 | } |
| 226 | |
| 227 | static inline void req_fail_link_node(struct io_kiocb *req, int res) |
| 228 | { |
| 229 | req_set_fail(req); |
| 230 | io_req_set_res(req, res, 0); |
| 231 | } |
| 232 | |
| 233 | static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx) |
| 234 | { |
| 235 | wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); |
| 236 | } |
| 237 | |
| 238 | static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref) |
| 239 | { |
| 240 | struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); |
| 241 | |
| 242 | complete(&ctx->ref_comp); |
| 243 | } |
| 244 | |
| 245 | static __cold void io_fallback_req_func(struct work_struct *work) |
| 246 | { |
| 247 | struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, |
| 248 | fallback_work.work); |
| 249 | struct llist_node *node = llist_del_all(&ctx->fallback_llist); |
| 250 | struct io_kiocb *req, *tmp; |
| 251 | struct io_tw_state ts = {}; |
| 252 | |
| 253 | percpu_ref_get(&ctx->refs); |
| 254 | mutex_lock(&ctx->uring_lock); |
| 255 | llist_for_each_entry_safe(req, tmp, node, io_task_work.node) |
| 256 | req->io_task_work.func(req, &ts); |
| 257 | io_submit_flush_completions(ctx); |
| 258 | mutex_unlock(&ctx->uring_lock); |
| 259 | percpu_ref_put(&ctx->refs); |
| 260 | } |
| 261 | |
| 262 | static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits) |
| 263 | { |
| 264 | unsigned hash_buckets = 1U << bits; |
| 265 | size_t hash_size = hash_buckets * sizeof(table->hbs[0]); |
| 266 | |
| 267 | table->hbs = kmalloc(hash_size, GFP_KERNEL); |
| 268 | if (!table->hbs) |
| 269 | return -ENOMEM; |
| 270 | |
| 271 | table->hash_bits = bits; |
| 272 | init_hash_table(table, hash_buckets); |
| 273 | return 0; |
| 274 | } |
| 275 | |
| 276 | static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) |
| 277 | { |
| 278 | struct io_ring_ctx *ctx; |
| 279 | int hash_bits; |
| 280 | bool ret; |
| 281 | |
| 282 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 283 | if (!ctx) |
| 284 | return NULL; |
| 285 | |
| 286 | xa_init(&ctx->io_bl_xa); |
| 287 | |
| 288 | /* |
| 289 | * Use 5 bits less than the max cq entries; that should give us around |
| 290 | * 32 entries per hash list if totally full and uniformly spread, but |
| 291 | * don't keep too many buckets so as not to overconsume memory. |
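|  | * |
|  | * For example (illustrative numbers): cq_entries == 4096 gives |
|  | * ilog2(4096) == 12, so hash_bits == 7 and 128 buckets, i.e. |
|  | * 4096 / 128 == 32 entries per bucket when completely full. |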
| 292 | */ |
| 293 | hash_bits = ilog2(p->cq_entries) - 5; |
| 294 | hash_bits = clamp(hash_bits, 1, 8); |
| 295 | if (io_alloc_hash_table(&ctx->cancel_table, hash_bits)) |
| 296 | goto err; |
| 297 | if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits)) |
| 298 | goto err; |
| 299 | if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, |
| 300 | 0, GFP_KERNEL)) |
| 301 | goto err; |
| 302 | |
| 303 | ctx->flags = p->flags; |
| 304 | atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT); |
| 305 | init_waitqueue_head(&ctx->sqo_sq_wait); |
| 306 | INIT_LIST_HEAD(&ctx->sqd_list); |
| 307 | INIT_LIST_HEAD(&ctx->cq_overflow_list); |
| 308 | INIT_LIST_HEAD(&ctx->io_buffers_cache); |
| 309 | ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX, |
| 310 | sizeof(struct io_rsrc_node)); |
| 311 | ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX, |
| 312 | sizeof(struct async_poll)); |
| 313 | ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX, |
| 314 | sizeof(struct io_async_msghdr)); |
| 315 | ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, |
| 316 | sizeof(struct io_async_rw)); |
| 317 | ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX, |
| 318 | sizeof(struct uring_cache)); |
| 319 | spin_lock_init(&ctx->msg_lock); |
| 320 | ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX, |
| 321 | sizeof(struct io_kiocb)); |
| 322 | ret |= io_futex_cache_init(ctx); |
| 323 | if (ret) |
| 324 | goto err; |
| 325 | init_completion(&ctx->ref_comp); |
| 326 | xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); |
| 327 | mutex_init(&ctx->uring_lock); |
| 328 | init_waitqueue_head(&ctx->cq_wait); |
| 329 | init_waitqueue_head(&ctx->poll_wq); |
| 330 | init_waitqueue_head(&ctx->rsrc_quiesce_wq); |
| 331 | spin_lock_init(&ctx->completion_lock); |
| 332 | spin_lock_init(&ctx->timeout_lock); |
| 333 | INIT_WQ_LIST(&ctx->iopoll_list); |
| 334 | INIT_LIST_HEAD(&ctx->io_buffers_comp); |
| 335 | INIT_LIST_HEAD(&ctx->defer_list); |
| 336 | INIT_LIST_HEAD(&ctx->timeout_list); |
| 337 | INIT_LIST_HEAD(&ctx->ltimeout_list); |
| 338 | INIT_LIST_HEAD(&ctx->rsrc_ref_list); |
| 339 | init_llist_head(&ctx->work_llist); |
| 340 | INIT_LIST_HEAD(&ctx->tctx_list); |
| 341 | ctx->submit_state.free_list.next = NULL; |
| 342 | INIT_HLIST_HEAD(&ctx->waitid_list); |
| 343 | #ifdef CONFIG_FUTEX |
| 344 | INIT_HLIST_HEAD(&ctx->futex_list); |
| 345 | #endif |
| 346 | INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); |
| 347 | INIT_WQ_LIST(&ctx->submit_state.compl_reqs); |
| 348 | INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd); |
| 349 | io_napi_init(ctx); |
| 350 | |
| 351 | return ctx; |
| 352 | err: |
| 353 | io_alloc_cache_free(&ctx->rsrc_node_cache, kfree); |
| 354 | io_alloc_cache_free(&ctx->apoll_cache, kfree); |
| 355 | io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); |
| 356 | io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); |
| 357 | io_alloc_cache_free(&ctx->uring_cache, kfree); |
| 358 | io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free); |
| 359 | io_futex_cache_free(ctx); |
| 360 | kfree(ctx->cancel_table.hbs); |
| 361 | kfree(ctx->cancel_table_locked.hbs); |
| 362 | xa_destroy(&ctx->io_bl_xa); |
| 363 | kfree(ctx); |
| 364 | return NULL; |
| 365 | } |
| 366 | |
| 367 | static void io_account_cq_overflow(struct io_ring_ctx *ctx) |
| 368 | { |
| 369 | struct io_rings *r = ctx->rings; |
| 370 | |
| 371 | WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1); |
| 372 | ctx->cq_extra--; |
| 373 | } |
| 374 | |
| 375 | static bool req_need_defer(struct io_kiocb *req, u32 seq) |
| 376 | { |
| 377 | if (unlikely(req->flags & REQ_F_IO_DRAIN)) { |
| 378 | struct io_ring_ctx *ctx = req->ctx; |
| 379 | |
| 380 | return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail; |
| 381 | } |
| 382 | |
| 383 | return false; |
| 384 | } |
| 385 | |
| 386 | static void io_clean_op(struct io_kiocb *req) |
| 387 | { |
| 388 | if (req->flags & REQ_F_BUFFER_SELECTED) { |
| 389 | spin_lock(&req->ctx->completion_lock); |
| 390 | io_kbuf_drop(req); |
| 391 | spin_unlock(&req->ctx->completion_lock); |
| 392 | } |
| 393 | |
| 394 | if (req->flags & REQ_F_NEED_CLEANUP) { |
| 395 | const struct io_cold_def *def = &io_cold_defs[req->opcode]; |
| 396 | |
| 397 | if (def->cleanup) |
| 398 | def->cleanup(req); |
| 399 | } |
| 400 | if ((req->flags & REQ_F_POLLED) && req->apoll) { |
| 401 | kfree(req->apoll->double_poll); |
| 402 | kfree(req->apoll); |
| 403 | req->apoll = NULL; |
| 404 | } |
| 405 | if (req->flags & REQ_F_INFLIGHT) { |
| 406 | struct io_uring_task *tctx = req->task->io_uring; |
| 407 | |
| 408 | atomic_dec(&tctx->inflight_tracked); |
| 409 | } |
| 410 | if (req->flags & REQ_F_CREDS) |
| 411 | put_cred(req->creds); |
| 412 | if (req->flags & REQ_F_ASYNC_DATA) { |
| 413 | kfree(req->async_data); |
| 414 | req->async_data = NULL; |
| 415 | } |
| 416 | req->flags &= ~IO_REQ_CLEAN_FLAGS; |
| 417 | } |
| 418 | |
| 419 | static inline void io_req_track_inflight(struct io_kiocb *req) |
| 420 | { |
| 421 | if (!(req->flags & REQ_F_INFLIGHT)) { |
| 422 | req->flags |= REQ_F_INFLIGHT; |
| 423 | atomic_inc(&req->task->io_uring->inflight_tracked); |
| 424 | } |
| 425 | } |
| 426 | |
| 427 | static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) |
| 428 | { |
| 429 | if (WARN_ON_ONCE(!req->link)) |
| 430 | return NULL; |
| 431 | |
| 432 | req->flags &= ~REQ_F_ARM_LTIMEOUT; |
| 433 | req->flags |= REQ_F_LINK_TIMEOUT; |
| 434 | |
| 435 | /* linked timeouts should have two refs once prep'ed */ |
| 436 | io_req_set_refcount(req); |
| 437 | __io_req_set_refcount(req->link, 2); |
| 438 | return req->link; |
| 439 | } |
| 440 | |
| 441 | static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) |
| 442 | { |
| 443 | if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) |
| 444 | return NULL; |
| 445 | return __io_prep_linked_timeout(req); |
| 446 | } |
| 447 | |
| 448 | static noinline void __io_arm_ltimeout(struct io_kiocb *req) |
| 449 | { |
| 450 | io_queue_linked_timeout(__io_prep_linked_timeout(req)); |
| 451 | } |
| 452 | |
| 453 | static inline void io_arm_ltimeout(struct io_kiocb *req) |
| 454 | { |
| 455 | if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT)) |
| 456 | __io_arm_ltimeout(req); |
| 457 | } |
| 458 | |
| 459 | static void io_prep_async_work(struct io_kiocb *req) |
| 460 | { |
| 461 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
| 462 | struct io_ring_ctx *ctx = req->ctx; |
| 463 | |
| 464 | if (!(req->flags & REQ_F_CREDS)) { |
| 465 | req->flags |= REQ_F_CREDS; |
| 466 | req->creds = get_current_cred(); |
| 467 | } |
| 468 | |
| 469 | req->work.list.next = NULL; |
| 470 | atomic_set(&req->work.flags, 0); |
| 471 | if (req->flags & REQ_F_FORCE_ASYNC) |
| 472 | atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags); |
| 473 | |
| 474 | if (req->file && !(req->flags & REQ_F_FIXED_FILE)) |
| 475 | req->flags |= io_file_get_flags(req->file); |
| 476 | |
| 477 | if (req->file && (req->flags & REQ_F_ISREG)) { |
| 478 | bool should_hash = def->hash_reg_file; |
| 479 | |
| 480 | /* don't serialize this request if the fs doesn't need it */ |
| 481 | if (should_hash && (req->file->f_flags & O_DIRECT) && |
| 482 | (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE)) |
| 483 | should_hash = false; |
| 484 | if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL)) |
| 485 | io_wq_hash_work(&req->work, file_inode(req->file)); |
| 486 | } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { |
| 487 | if (def->unbound_nonreg_file) |
| 488 | atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags); |
| 489 | } |
| 490 | } |
| 491 | |
| 492 | static void io_prep_async_link(struct io_kiocb *req) |
| 493 | { |
| 494 | struct io_kiocb *cur; |
| 495 | |
| 496 | if (req->flags & REQ_F_LINK_TIMEOUT) { |
| 497 | struct io_ring_ctx *ctx = req->ctx; |
| 498 | |
| 499 | spin_lock_irq(&ctx->timeout_lock); |
| 500 | io_for_each_link(cur, req) |
| 501 | io_prep_async_work(cur); |
| 502 | spin_unlock_irq(&ctx->timeout_lock); |
| 503 | } else { |
| 504 | io_for_each_link(cur, req) |
| 505 | io_prep_async_work(cur); |
| 506 | } |
| 507 | } |
| 508 | |
| 509 | static void io_queue_iowq(struct io_kiocb *req) |
| 510 | { |
| 511 | struct io_kiocb *link = io_prep_linked_timeout(req); |
| 512 | struct io_uring_task *tctx = req->task->io_uring; |
| 513 | |
| 514 | BUG_ON(!tctx); |
| 515 | BUG_ON(!tctx->io_wq); |
| 516 | |
| 517 | /* init ->work of the whole link before punting */ |
| 518 | io_prep_async_link(req); |
| 519 | |
| 520 | /* |
| 521 | * Not expected to happen, but if we do have a bug where this _can_ |
| 522 | * happen, catch it here and ensure the request is marked as |
| 523 | * canceled. That will make io-wq go through the usual work cancel |
| 524 | * procedure rather than attempt to run this request (or create a new |
| 525 | * worker for it). |
| 526 | */ |
| 527 | if (WARN_ON_ONCE(!same_thread_group(req->task, current))) |
| 528 | atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags); |
| 529 | |
| 530 | trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work)); |
| 531 | io_wq_enqueue(tctx->io_wq, &req->work); |
| 532 | if (link) |
| 533 | io_queue_linked_timeout(link); |
| 534 | } |
| 535 | |
| 536 | static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts) |
| 537 | { |
| 538 | io_queue_iowq(req); |
| 539 | } |
| 540 | |
| 541 | void io_req_queue_iowq(struct io_kiocb *req) |
| 542 | { |
| 543 | req->io_task_work.func = io_req_queue_iowq_tw; |
| 544 | io_req_task_work_add(req); |
| 545 | } |
| 546 | |
| 547 | static __cold void io_queue_deferred(struct io_ring_ctx *ctx) |
| 548 | { |
| 549 | while (!list_empty(&ctx->defer_list)) { |
| 550 | struct io_defer_entry *de = list_first_entry(&ctx->defer_list, |
| 551 | struct io_defer_entry, list); |
| 552 | |
| 553 | if (req_need_defer(de->req, de->seq)) |
| 554 | break; |
| 555 | list_del_init(&de->list); |
| 556 | io_req_task_queue(de->req); |
| 557 | kfree(de); |
| 558 | } |
| 559 | } |
| 560 | |
| 561 | void __io_commit_cqring_flush(struct io_ring_ctx *ctx) |
| 562 | { |
| 563 | if (ctx->poll_activated) |
| 564 | io_poll_wq_wake(ctx); |
| 565 | if (ctx->off_timeout_used) |
| 566 | io_flush_timeouts(ctx); |
| 567 | if (ctx->drain_active) { |
| 568 | spin_lock(&ctx->completion_lock); |
| 569 | io_queue_deferred(ctx); |
| 570 | spin_unlock(&ctx->completion_lock); |
| 571 | } |
| 572 | if (ctx->has_evfd) |
| 573 | io_eventfd_flush_signal(ctx); |
| 574 | } |
| 575 | |
| 576 | static inline void __io_cq_lock(struct io_ring_ctx *ctx) |
| 577 | { |
| 578 | if (!ctx->lockless_cq) |
| 579 | spin_lock(&ctx->completion_lock); |
| 580 | } |
| 581 | |
| 582 | static inline void io_cq_lock(struct io_ring_ctx *ctx) |
| 583 | __acquires(ctx->completion_lock) |
| 584 | { |
| 585 | spin_lock(&ctx->completion_lock); |
| 586 | } |
| 587 | |
| 588 | static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx) |
| 589 | { |
| 590 | io_commit_cqring(ctx); |
| 591 | if (!ctx->task_complete) { |
| 592 | if (!ctx->lockless_cq) |
| 593 | spin_unlock(&ctx->completion_lock); |
| 594 | /* IOPOLL rings only need to wake up if it's also SQPOLL */ |
| 595 | if (!ctx->syscall_iopoll) |
| 596 | io_cqring_wake(ctx); |
| 597 | } |
| 598 | io_commit_cqring_flush(ctx); |
| 599 | } |
| 600 | |
| 601 | static void io_cq_unlock_post(struct io_ring_ctx *ctx) |
| 602 | __releases(ctx->completion_lock) |
| 603 | { |
| 604 | io_commit_cqring(ctx); |
| 605 | spin_unlock(&ctx->completion_lock); |
| 606 | io_cqring_wake(ctx); |
| 607 | io_commit_cqring_flush(ctx); |
| 608 | } |
| 609 | |
| 610 | static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) |
| 611 | { |
| 612 | size_t cqe_size = sizeof(struct io_uring_cqe); |
| 613 | |
| 614 | lockdep_assert_held(&ctx->uring_lock); |
| 615 | |
| 616 | /* don't abort if we're dying, entries must get freed */ |
| 617 | if (!dying && __io_cqring_events(ctx) == ctx->cq_entries) |
| 618 | return; |
| 619 | |
| 620 | if (ctx->flags & IORING_SETUP_CQE32) |
| 621 | cqe_size <<= 1; |
| 622 | |
| 623 | io_cq_lock(ctx); |
| 624 | while (!list_empty(&ctx->cq_overflow_list)) { |
| 625 | struct io_uring_cqe *cqe; |
| 626 | struct io_overflow_cqe *ocqe; |
| 627 | |
| 628 | ocqe = list_first_entry(&ctx->cq_overflow_list, |
| 629 | struct io_overflow_cqe, list); |
| 630 | |
| 631 | if (!dying) { |
| 632 | if (!io_get_cqe_overflow(ctx, &cqe, true)) |
| 633 | break; |
| 634 | memcpy(cqe, &ocqe->cqe, cqe_size); |
| 635 | } |
| 636 | list_del(&ocqe->list); |
| 637 | kfree(ocqe); |
| 638 | |
| 639 | /* |
| 640 | * For silly syzbot cases that deliberately overflow by huge |
| 641 | * amounts, check if we need to resched and drop and |
| 642 | * reacquire the locks if so. Nothing real would ever hit this. |
| 643 | * Ideally we'd have a non-posting unlock for this, but hard |
| 644 | * to care for a non-real case. |
| 645 | */ |
| 646 | if (need_resched()) { |
| 647 | io_cq_unlock_post(ctx); |
| 648 | mutex_unlock(&ctx->uring_lock); |
| 649 | cond_resched(); |
| 650 | mutex_lock(&ctx->uring_lock); |
| 651 | io_cq_lock(ctx); |
| 652 | } |
| 653 | } |
| 654 | |
| 655 | if (list_empty(&ctx->cq_overflow_list)) { |
| 656 | clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq); |
| 657 | atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); |
| 658 | } |
| 659 | io_cq_unlock_post(ctx); |
| 660 | } |
| 661 | |
| 662 | static void io_cqring_overflow_kill(struct io_ring_ctx *ctx) |
| 663 | { |
| 664 | if (ctx->rings) |
| 665 | __io_cqring_overflow_flush(ctx, true); |
| 666 | } |
| 667 | |
| 668 | static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx) |
| 669 | { |
| 670 | mutex_lock(&ctx->uring_lock); |
| 671 | __io_cqring_overflow_flush(ctx, false); |
| 672 | mutex_unlock(&ctx->uring_lock); |
| 673 | } |
| 674 | |
| 675 | /* can be called by any task */ |
| 676 | static void io_put_task_remote(struct task_struct *task) |
| 677 | { |
| 678 | struct io_uring_task *tctx = task->io_uring; |
| 679 | |
| 680 | percpu_counter_sub(&tctx->inflight, 1); |
| 681 | if (unlikely(atomic_read(&tctx->in_cancel))) |
| 682 | wake_up(&tctx->wait); |
| 683 | put_task_struct(task); |
| 684 | } |
| 685 | |
| 686 | /* used by a task to put its own references */ |
| 687 | static void io_put_task_local(struct task_struct *task) |
| 688 | { |
| 689 | task->io_uring->cached_refs++; |
| 690 | } |
| 691 | |
| 692 | /* must be called somewhat shortly after putting a request */ |
| 693 | static inline void io_put_task(struct task_struct *task) |
| 694 | { |
| 695 | if (likely(task == current)) |
| 696 | io_put_task_local(task); |
| 697 | else |
| 698 | io_put_task_remote(task); |
| 699 | } |
| 700 | |
| 701 | void io_task_refs_refill(struct io_uring_task *tctx) |
| 702 | { |
| 703 | unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR; |
| 704 | |
| 705 | percpu_counter_add(&tctx->inflight, refill); |
| 706 | refcount_add(refill, ¤t->usage); |
| 707 | tctx->cached_refs += refill; |
| 708 | } |
| 709 | |
| 710 | static __cold void io_uring_drop_tctx_refs(struct task_struct *task) |
| 711 | { |
| 712 | struct io_uring_task *tctx = task->io_uring; |
| 713 | unsigned int refs = tctx->cached_refs; |
| 714 | |
| 715 | if (refs) { |
| 716 | tctx->cached_refs = 0; |
| 717 | percpu_counter_sub(&tctx->inflight, refs); |
| 718 | put_task_struct_many(task, refs); |
| 719 | } |
| 720 | } |
| 721 | |
| 722 | static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, |
| 723 | s32 res, u32 cflags, u64 extra1, u64 extra2) |
| 724 | { |
| 725 | struct io_overflow_cqe *ocqe; |
| 726 | size_t ocq_size = sizeof(struct io_overflow_cqe); |
| 727 | bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32); |
| 728 | |
| 729 | lockdep_assert_held(&ctx->completion_lock); |
| 730 | |
| 731 | if (is_cqe32) |
| 732 | ocq_size += sizeof(struct io_uring_cqe); |
| 733 | |
| 734 | ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT); |
| 735 | trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe); |
| 736 | if (!ocqe) { |
| 737 | /* |
| 738 | * If we're in ring overflow flush mode, or in task cancel mode, |
| 739 | * or cannot allocate an overflow entry, then we need to drop it |
| 740 | * on the floor. |
| 741 | */ |
| 742 | io_account_cq_overflow(ctx); |
| 743 | set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq); |
| 744 | return false; |
| 745 | } |
| 746 | if (list_empty(&ctx->cq_overflow_list)) { |
| 747 | set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq); |
| 748 | atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); |
| 749 | |
| 750 | } |
| 751 | ocqe->cqe.user_data = user_data; |
| 752 | ocqe->cqe.res = res; |
| 753 | ocqe->cqe.flags = cflags; |
| 754 | if (is_cqe32) { |
| 755 | ocqe->cqe.big_cqe[0] = extra1; |
| 756 | ocqe->cqe.big_cqe[1] = extra2; |
| 757 | } |
| 758 | list_add_tail(&ocqe->list, &ctx->cq_overflow_list); |
| 759 | return true; |
| 760 | } |
| 761 | |
| 762 | static void io_req_cqe_overflow(struct io_kiocb *req) |
| 763 | { |
| 764 | io_cqring_event_overflow(req->ctx, req->cqe.user_data, |
| 765 | req->cqe.res, req->cqe.flags, |
| 766 | req->big_cqe.extra1, req->big_cqe.extra2); |
| 767 | memset(&req->big_cqe, 0, sizeof(req->big_cqe)); |
| 768 | } |
| 769 | |
| 770 | /* |
| 771 | * writes to the cq entry need to come after reading head; the |
| 772 | * control dependency is enough as we're using WRITE_ONCE to |
| 773 | * fill the cq entry |
| 774 | */ |
| 775 | bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow) |
| 776 | { |
| 777 | struct io_rings *rings = ctx->rings; |
| 778 | unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1); |
| 779 | unsigned int free, queued, len; |
| 780 | |
| 781 | /* |
| 782 | * Posting into the CQ when there are pending overflowed CQEs may break |
| 783 | * ordering guarantees, which will affect links, F_MORE users and more. |
| 784 | * Force overflow the completion. |
| 785 | */ |
| 786 | if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))) |
| 787 | return false; |
| 788 | |
| 789 | /* userspace may cheat by modifying the tail; be safe and do min */ |
| 790 | queued = min(__io_cqring_events(ctx), ctx->cq_entries); |
| 791 | free = ctx->cq_entries - queued; |
| 792 | /* we need a contiguous range, limit based on the current array offset */ |
| 793 | len = min(free, ctx->cq_entries - off); |
| 794 | if (!len) |
| 795 | return false; |
| 796 | |
| 797 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 798 | off <<= 1; |
| 799 | len <<= 1; |
| 800 | } |
| 801 | |
| 802 | ctx->cqe_cached = &rings->cqes[off]; |
| 803 | ctx->cqe_sentinel = ctx->cqe_cached + len; |
| 804 | return true; |
| 805 | } |
| 806 | |
| 807 | static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, |
| 808 | u32 cflags) |
| 809 | { |
| 810 | struct io_uring_cqe *cqe; |
| 811 | |
| 812 | ctx->cq_extra++; |
| 813 | |
| 814 | /* |
| 815 | * If we can't get a cq entry, userspace overflowed the |
| 816 | * submission (by quite a lot). Increment the overflow count in |
| 817 | * the ring. |
| 818 | */ |
| 819 | if (likely(io_get_cqe(ctx, &cqe))) { |
| 820 | trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); |
| 821 | |
| 822 | WRITE_ONCE(cqe->user_data, user_data); |
| 823 | WRITE_ONCE(cqe->res, res); |
| 824 | WRITE_ONCE(cqe->flags, cflags); |
| 825 | |
| 826 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 827 | WRITE_ONCE(cqe->big_cqe[0], 0); |
| 828 | WRITE_ONCE(cqe->big_cqe[1], 0); |
| 829 | } |
| 830 | return true; |
| 831 | } |
| 832 | return false; |
| 833 | } |
| 834 | |
| 835 | static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, |
| 836 | u32 cflags) |
| 837 | { |
| 838 | bool filled; |
| 839 | |
| 840 | filled = io_fill_cqe_aux(ctx, user_data, res, cflags); |
| 841 | if (!filled) |
| 842 | filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); |
| 843 | |
| 844 | return filled; |
| 845 | } |
| 846 | |
| 847 | bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) |
| 848 | { |
| 849 | bool filled; |
| 850 | |
| 851 | io_cq_lock(ctx); |
| 852 | filled = __io_post_aux_cqe(ctx, user_data, res, cflags); |
| 853 | io_cq_unlock_post(ctx); |
| 854 | return filled; |
| 855 | } |
| 856 | |
| 857 | /* |
| 858 | * Must be called from inline task_work so we know a flush will happen later, |
| 859 | * and obviously with ctx->uring_lock held (tw always has that). |
| 860 | */ |
| 861 | void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) |
| 862 | { |
| 863 | if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) { |
| 864 | spin_lock(&ctx->completion_lock); |
| 865 | io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); |
| 866 | spin_unlock(&ctx->completion_lock); |
| 867 | } |
| 868 | ctx->submit_state.cq_flush = true; |
| 869 | } |
| 870 | |
| 871 | /* |
| 872 | * A helper for multishot requests posting additional CQEs. |
| 873 | * Should only be used from a task_work including IO_URING_F_MULTISHOT. |
| 874 | */ |
| 875 | bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) |
| 876 | { |
| 877 | struct io_ring_ctx *ctx = req->ctx; |
| 878 | bool posted; |
| 879 | |
| 880 | lockdep_assert(!io_wq_current_is_worker()); |
| 881 | lockdep_assert_held(&ctx->uring_lock); |
| 882 | |
| 883 | __io_cq_lock(ctx); |
| 884 | posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); |
| 885 | ctx->submit_state.cq_flush = true; |
| 886 | __io_cq_unlock_post(ctx); |
| 887 | return posted; |
| 888 | } |
| 889 | |
| 890 | static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags) |
| 891 | { |
| 892 | struct io_ring_ctx *ctx = req->ctx; |
| 893 | |
| 894 | /* |
| 895 | * All execution paths but io-wq use the deferred completions by |
| 896 | * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here. |
| 897 | */ |
| 898 | if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ))) |
| 899 | return; |
| 900 | |
| 901 | /* |
| 902 | * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires |
| 903 | * the submitter task context, IOPOLL protects with uring_lock. |
| 904 | */ |
| 905 | if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) { |
| 906 | req->io_task_work.func = io_req_task_complete; |
| 907 | io_req_task_work_add(req); |
| 908 | return; |
| 909 | } |
| 910 | |
| 911 | io_cq_lock(ctx); |
| 912 | if (!(req->flags & REQ_F_CQE_SKIP)) { |
| 913 | if (!io_fill_cqe_req(ctx, req)) |
| 914 | io_req_cqe_overflow(req); |
| 915 | } |
| 916 | io_cq_unlock_post(ctx); |
| 917 | |
| 918 | /* |
| 919 | * We don't free the request here because we know it's called from |
| 920 | * io-wq only, which holds a reference, so it cannot be the last put. |
| 921 | */ |
| 922 | req_ref_put(req); |
| 923 | } |
| 924 | |
| 925 | void io_req_defer_failed(struct io_kiocb *req, s32 res) |
| 926 | __must_hold(&ctx->uring_lock) |
| 927 | { |
| 928 | const struct io_cold_def *def = &io_cold_defs[req->opcode]; |
| 929 | |
| 930 | lockdep_assert_held(&req->ctx->uring_lock); |
| 931 | |
| 932 | req_set_fail(req); |
| 933 | io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED)); |
| 934 | if (def->fail) |
| 935 | def->fail(req); |
| 936 | io_req_complete_defer(req); |
| 937 | } |
| 938 | |
| 939 | /* |
| 940 | * Don't initialise the fields below on every allocation, but do that in |
| 941 | * advance and keep them valid across allocations. |
| 942 | */ |
| 943 | static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) |
| 944 | { |
| 945 | req->ctx = ctx; |
| 946 | req->link = NULL; |
| 947 | req->async_data = NULL; |
| 948 | /* not necessary, but safer to zero */ |
| 949 | memset(&req->cqe, 0, sizeof(req->cqe)); |
| 950 | memset(&req->big_cqe, 0, sizeof(req->big_cqe)); |
| 951 | } |
| 952 | |
| 953 | /* |
| 954 | * A request might get retired back into the request caches even before opcode |
| 955 | * handlers and io_issue_sqe() are done with it, e.g. inline completion path. |
| 956 | * Because of that, io_alloc_req() should be called only under ->uring_lock |
| 957 | * and with extra caution to not get a request that is still worked on. |
| 958 | */ |
| 959 | __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) |
| 960 | __must_hold(&ctx->uring_lock) |
| 961 | { |
| 962 | gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; |
| 963 | void *reqs[IO_REQ_ALLOC_BATCH]; |
| 964 | int ret; |
| 965 | |
| 966 | ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); |
| 967 | |
| 968 | /* |
| 969 | * Bulk alloc is all-or-nothing. If we fail to get a batch, |
| 970 | * retry single alloc to be on the safe side. |
| 971 | */ |
| 972 | if (unlikely(ret <= 0)) { |
| 973 | reqs[0] = kmem_cache_alloc(req_cachep, gfp); |
| 974 | if (!reqs[0]) |
| 975 | return false; |
| 976 | ret = 1; |
| 977 | } |
| 978 | |
| 979 | percpu_ref_get_many(&ctx->refs, ret); |
| 980 | while (ret--) { |
| 981 | struct io_kiocb *req = reqs[ret]; |
| 982 | |
| 983 | io_preinit_req(req, ctx); |
| 984 | io_req_add_to_cache(req, ctx); |
| 985 | } |
| 986 | return true; |
| 987 | } |
| 988 | |
| 989 | __cold void io_free_req(struct io_kiocb *req) |
| 990 | { |
| 991 | /* refs were already put, restore them for io_req_task_complete() */ |
| 992 | req->flags &= ~REQ_F_REFCOUNT; |
| 993 | /* we only want to free it, don't post CQEs */ |
| 994 | req->flags |= REQ_F_CQE_SKIP; |
| 995 | req->io_task_work.func = io_req_task_complete; |
| 996 | io_req_task_work_add(req); |
| 997 | } |
| 998 | |
| 999 | static void __io_req_find_next_prep(struct io_kiocb *req) |
| 1000 | { |
| 1001 | struct io_ring_ctx *ctx = req->ctx; |
| 1002 | |
| 1003 | spin_lock(&ctx->completion_lock); |
| 1004 | io_disarm_next(req); |
| 1005 | spin_unlock(&ctx->completion_lock); |
| 1006 | } |
| 1007 | |
| 1008 | static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) |
| 1009 | { |
| 1010 | struct io_kiocb *nxt; |
| 1011 | |
| 1012 | /* |
| 1013 | * If LINK is set, we have dependent requests in this chain. If we |
| 1014 | * didn't fail this request, queue the first one up, moving any other |
| 1015 | * dependencies to the next request. In case of failure, fail the rest |
| 1016 | * of the chain. |
| 1017 | */ |
| 1018 | if (unlikely(req->flags & IO_DISARM_MASK)) |
| 1019 | __io_req_find_next_prep(req); |
| 1020 | nxt = req->link; |
| 1021 | req->link = NULL; |
| 1022 | return nxt; |
| 1023 | } |
| 1024 | |
| 1025 | static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts) |
| 1026 | { |
| 1027 | if (!ctx) |
| 1028 | return; |
| 1029 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1030 | atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1031 | |
| 1032 | io_submit_flush_completions(ctx); |
| 1033 | mutex_unlock(&ctx->uring_lock); |
| 1034 | percpu_ref_put(&ctx->refs); |
| 1035 | } |
| 1036 | |
| 1037 | /* |
| 1038 | * Run queued task_work, returning the number of entries processed in *count. |
| 1039 | * If more entries than max_entries are available, stop processing once this |
| 1040 | * is reached and return the rest of the list. |
| 1041 | */ |
| 1042 | struct llist_node *io_handle_tw_list(struct llist_node *node, |
| 1043 | unsigned int *count, |
| 1044 | unsigned int max_entries) |
| 1045 | { |
| 1046 | struct io_ring_ctx *ctx = NULL; |
| 1047 | struct io_tw_state ts = { }; |
| 1048 | |
| 1049 | do { |
| 1050 | struct llist_node *next = node->next; |
| 1051 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1052 | io_task_work.node); |
| 1053 | |
| 1054 | if (req->ctx != ctx) { |
| 1055 | ctx_flush_and_put(ctx, &ts); |
| 1056 | ctx = req->ctx; |
| 1057 | mutex_lock(&ctx->uring_lock); |
| 1058 | percpu_ref_get(&ctx->refs); |
| 1059 | } |
| 1060 | INDIRECT_CALL_2(req->io_task_work.func, |
| 1061 | io_poll_task_func, io_req_rw_complete, |
| 1062 | req, &ts); |
| 1063 | node = next; |
| 1064 | (*count)++; |
| 1065 | if (unlikely(need_resched())) { |
| 1066 | ctx_flush_and_put(ctx, &ts); |
| 1067 | ctx = NULL; |
| 1068 | cond_resched(); |
| 1069 | } |
| 1070 | } while (node && *count < max_entries); |
| 1071 | |
| 1072 | ctx_flush_and_put(ctx, &ts); |
| 1073 | return node; |
| 1074 | } |
| 1075 | |
| 1076 | /** |
| 1077 | * io_llist_xchg - swap all entries in a lock-less list |
| 1078 | * @head: the head of the lock-less list whose entries are exchanged |
| 1079 | * @new: new entry as the head of the list |
| 1080 | * |
| 1081 | * If the list is empty, return NULL; otherwise return a pointer to the first entry. |
| 1082 | * The order of entries returned is from the newest to the oldest added one. |
| 1083 | */ |
| 1084 | static inline struct llist_node *io_llist_xchg(struct llist_head *head, |
| 1085 | struct llist_node *new) |
| 1086 | { |
| 1087 | return xchg(&head->first, new); |
| 1088 | } |
| 1089 | |
| 1090 | static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync) |
| 1091 | { |
| 1092 | struct llist_node *node = llist_del_all(&tctx->task_list); |
| 1093 | struct io_ring_ctx *last_ctx = NULL; |
| 1094 | struct io_kiocb *req; |
| 1095 | |
| 1096 | while (node) { |
| 1097 | req = container_of(node, struct io_kiocb, io_task_work.node); |
| 1098 | node = node->next; |
| 1099 | if (sync && last_ctx != req->ctx) { |
| 1100 | if (last_ctx) { |
| 1101 | flush_delayed_work(&last_ctx->fallback_work); |
| 1102 | percpu_ref_put(&last_ctx->refs); |
| 1103 | } |
| 1104 | last_ctx = req->ctx; |
| 1105 | percpu_ref_get(&last_ctx->refs); |
| 1106 | } |
| 1107 | if (llist_add(&req->io_task_work.node, |
| 1108 | &req->ctx->fallback_llist)) |
| 1109 | schedule_delayed_work(&req->ctx->fallback_work, 1); |
| 1110 | } |
| 1111 | |
| 1112 | if (last_ctx) { |
| 1113 | flush_delayed_work(&last_ctx->fallback_work); |
| 1114 | percpu_ref_put(&last_ctx->refs); |
| 1115 | } |
| 1116 | } |
| 1117 | |
| 1118 | struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, |
| 1119 | unsigned int max_entries, |
| 1120 | unsigned int *count) |
| 1121 | { |
| 1122 | struct llist_node *node; |
| 1123 | |
| 1124 | if (unlikely(current->flags & PF_EXITING)) { |
| 1125 | io_fallback_tw(tctx, true); |
| 1126 | return NULL; |
| 1127 | } |
| 1128 | |
| 1129 | node = llist_del_all(&tctx->task_list); |
| 1130 | if (node) { |
| 1131 | node = llist_reverse_order(node); |
| 1132 | node = io_handle_tw_list(node, count, max_entries); |
| 1133 | } |
| 1134 | |
| 1135 | /* relaxed read is enough as only the task itself sets ->in_cancel */ |
| 1136 | if (unlikely(atomic_read(&tctx->in_cancel))) |
| 1137 | io_uring_drop_tctx_refs(current); |
| 1138 | |
| 1139 | trace_io_uring_task_work_run(tctx, *count); |
| 1140 | return node; |
| 1141 | } |
| 1142 | |
| 1143 | void tctx_task_work(struct callback_head *cb) |
| 1144 | { |
| 1145 | struct io_uring_task *tctx; |
| 1146 | struct llist_node *ret; |
| 1147 | unsigned int count = 0; |
| 1148 | |
| 1149 | tctx = container_of(cb, struct io_uring_task, task_work); |
| 1150 | ret = tctx_task_work_run(tctx, UINT_MAX, &count); |
| 1151 | /* can't happen */ |
| 1152 | WARN_ON_ONCE(ret); |
| 1153 | } |
| 1154 | |
| 1155 | static inline void io_req_local_work_add(struct io_kiocb *req, |
| 1156 | struct io_ring_ctx *ctx, |
| 1157 | unsigned flags) |
| 1158 | { |
| 1159 | unsigned nr_wait, nr_tw, nr_tw_prev; |
| 1160 | struct llist_node *head; |
| 1161 | |
| 1162 | /* See comment above IO_CQ_WAKE_INIT */ |
| 1163 | BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES); |
| 1164 | |
| 1165 | /* |
| 1166 | * We don't know how many requests are in the link and whether they |
| 1167 | * can even be queued lazily, so fall back to non-lazy. |
| 1168 | */ |
| 1169 | if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) |
| 1170 | flags &= ~IOU_F_TWQ_LAZY_WAKE; |
| 1171 | |
| 1172 | guard(rcu)(); |
| 1173 | |
| 1174 | head = READ_ONCE(ctx->work_llist.first); |
| 1175 | do { |
| 1176 | nr_tw_prev = 0; |
| 1177 | if (head) { |
| 1178 | struct io_kiocb *first_req = container_of(head, |
| 1179 | struct io_kiocb, |
| 1180 | io_task_work.node); |
| 1181 | /* |
| 1182 | * Might be executed at any moment, rely on |
| 1183 | * SLAB_TYPESAFE_BY_RCU to keep it alive. |
| 1184 | */ |
| 1185 | nr_tw_prev = READ_ONCE(first_req->nr_tw); |
| 1186 | } |
| 1187 | |
| 1188 | /* |
| 1189 | * Theoretically, it can overflow, but that's fine as one of the |
| 1190 | * previous adds should've tried to wake the task. |
| 1191 | */ |
| 1192 | nr_tw = nr_tw_prev + 1; |
| 1193 | if (!(flags & IOU_F_TWQ_LAZY_WAKE)) |
| 1194 | nr_tw = IO_CQ_WAKE_FORCE; |
| 1195 | |
| 1196 | req->nr_tw = nr_tw; |
| 1197 | req->io_task_work.node.next = head; |
| 1198 | } while (!try_cmpxchg(&ctx->work_llist.first, &head, |
| 1199 | &req->io_task_work.node)); |
| 1200 | |
| 1201 | /* |
| 1202 | * cmpxchg implies a full barrier, which pairs with the barrier |
| 1203 | * in set_current_state() on the io_cqring_wait() side. It's used |
| 1204 | * to ensure that either we see updated ->cq_wait_nr, or waiters |
| 1205 | * going to sleep will observe the work added to the list, which |
| 1206 | * is similar to the wait/wake task state sync. |
| 1207 | */ |
| 1208 | |
| 1209 | if (!head) { |
| 1210 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1211 | atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1212 | if (ctx->has_evfd) |
| 1213 | io_eventfd_signal(ctx); |
| 1214 | } |
| 1215 | |
| 1216 | nr_wait = atomic_read(&ctx->cq_wait_nr); |
| 1217 | /* not enough or no one is waiting */ |
| 1218 | if (nr_tw < nr_wait) |
| 1219 | return; |
| 1220 | /* the previous add has already woken it up */ |
| 1221 | if (nr_tw_prev >= nr_wait) |
| 1222 | return; |
| 1223 | wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE); |
| 1224 | } |
| 1225 | |
| 1226 | static void io_req_normal_work_add(struct io_kiocb *req) |
| 1227 | { |
| 1228 | struct io_uring_task *tctx = req->task->io_uring; |
| 1229 | struct io_ring_ctx *ctx = req->ctx; |
| 1230 | |
| 1231 | /* task_work already pending, we're done */ |
| 1232 | if (!llist_add(&req->io_task_work.node, &tctx->task_list)) |
| 1233 | return; |
| 1234 | |
| 1235 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1236 | atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1237 | |
| 1238 | /* SQPOLL doesn't need the task_work added, it'll run it itself */ |
| 1239 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 1240 | struct io_sq_data *sqd = ctx->sq_data; |
| 1241 | |
| 1242 | if (sqd->thread) |
| 1243 | __set_notify_signal(sqd->thread); |
| 1244 | return; |
| 1245 | } |
| 1246 | |
| 1247 | if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method))) |
| 1248 | return; |
| 1249 | |
| 1250 | io_fallback_tw(tctx, false); |
| 1251 | } |
| 1252 | |
| 1253 | void __io_req_task_work_add(struct io_kiocb *req, unsigned flags) |
| 1254 | { |
| 1255 | if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
| 1256 | io_req_local_work_add(req, req->ctx, flags); |
| 1257 | else |
| 1258 | io_req_normal_work_add(req); |
| 1259 | } |
| 1260 | |
| 1261 | void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx, |
| 1262 | unsigned flags) |
| 1263 | { |
| 1264 | if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))) |
| 1265 | return; |
| 1266 | io_req_local_work_add(req, ctx, flags); |
| 1267 | } |
| 1268 | |
| 1269 | static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx) |
| 1270 | { |
| 1271 | struct llist_node *node; |
| 1272 | |
| 1273 | node = llist_del_all(&ctx->work_llist); |
| 1274 | while (node) { |
| 1275 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1276 | io_task_work.node); |
| 1277 | |
| 1278 | node = node->next; |
| 1279 | io_req_normal_work_add(req); |
| 1280 | } |
| 1281 | } |
| 1282 | |
| 1283 | static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events, |
| 1284 | int min_events) |
| 1285 | { |
| 1286 | if (llist_empty(&ctx->work_llist)) |
| 1287 | return false; |
| 1288 | if (events < min_events) |
| 1289 | return true; |
| 1290 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1291 | atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1292 | return false; |
| 1293 | } |
| 1294 | |
| 1295 | static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts, |
| 1296 | int min_events) |
| 1297 | { |
| 1298 | struct llist_node *node; |
| 1299 | unsigned int loops = 0; |
| 1300 | int ret = 0; |
| 1301 | |
| 1302 | if (WARN_ON_ONCE(ctx->submitter_task != current)) |
| 1303 | return -EEXIST; |
| 1304 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1305 | atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1306 | again: |
| 1307 | /* |
| 1308 | * llists are in reverse order, flip it back the right way before |
| 1309 | * running the pending items. |
| 1310 | */ |
| 1311 | node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL)); |
| 1312 | while (node) { |
| 1313 | struct llist_node *next = node->next; |
| 1314 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1315 | io_task_work.node); |
| 1316 | INDIRECT_CALL_2(req->io_task_work.func, |
| 1317 | io_poll_task_func, io_req_rw_complete, |
| 1318 | req, ts); |
| 1319 | ret++; |
| 1320 | node = next; |
| 1321 | } |
| 1322 | loops++; |
| 1323 | |
| 1324 | if (io_run_local_work_continue(ctx, ret, min_events)) |
| 1325 | goto again; |
| 1326 | io_submit_flush_completions(ctx); |
| 1327 | if (io_run_local_work_continue(ctx, ret, min_events)) |
| 1328 | goto again; |
| 1329 | |
| 1330 | trace_io_uring_local_work_run(ctx, ret, loops); |
| 1331 | return ret; |
| 1332 | } |
| 1333 | |
| 1334 | static inline int io_run_local_work_locked(struct io_ring_ctx *ctx, |
| 1335 | int min_events) |
| 1336 | { |
| 1337 | struct io_tw_state ts = {}; |
| 1338 | |
| 1339 | if (llist_empty(&ctx->work_llist)) |
| 1340 | return 0; |
| 1341 | return __io_run_local_work(ctx, &ts, min_events); |
| 1342 | } |
| 1343 | |
| 1344 | static int io_run_local_work(struct io_ring_ctx *ctx, int min_events) |
| 1345 | { |
| 1346 | struct io_tw_state ts = {}; |
| 1347 | int ret; |
| 1348 | |
| 1349 | mutex_lock(&ctx->uring_lock); |
| 1350 | ret = __io_run_local_work(ctx, &ts, min_events); |
| 1351 | mutex_unlock(&ctx->uring_lock); |
| 1352 | return ret; |
| 1353 | } |
| 1354 | |
| 1355 | static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts) |
| 1356 | { |
| 1357 | io_tw_lock(req->ctx, ts); |
| 1358 | io_req_defer_failed(req, req->cqe.res); |
| 1359 | } |
| 1360 | |
| 1361 | void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts) |
| 1362 | { |
| 1363 | io_tw_lock(req->ctx, ts); |
| 1364 | /* req->task == current here, checking PF_EXITING is safe */ |
| 1365 | if (unlikely(req->task->flags & PF_EXITING)) |
| 1366 | io_req_defer_failed(req, -EFAULT); |
| 1367 | else if (req->flags & REQ_F_FORCE_ASYNC) |
| 1368 | io_queue_iowq(req); |
| 1369 | else |
| 1370 | io_queue_sqe(req); |
| 1371 | } |
| 1372 | |
| 1373 | void io_req_task_queue_fail(struct io_kiocb *req, int ret) |
| 1374 | { |
| 1375 | io_req_set_res(req, ret, 0); |
| 1376 | req->io_task_work.func = io_req_task_cancel; |
| 1377 | io_req_task_work_add(req); |
| 1378 | } |
| 1379 | |
| 1380 | void io_req_task_queue(struct io_kiocb *req) |
| 1381 | { |
| 1382 | req->io_task_work.func = io_req_task_submit; |
| 1383 | io_req_task_work_add(req); |
| 1384 | } |
| 1385 | |
| 1386 | void io_queue_next(struct io_kiocb *req) |
| 1387 | { |
| 1388 | struct io_kiocb *nxt = io_req_find_next(req); |
| 1389 | |
| 1390 | if (nxt) |
| 1391 | io_req_task_queue(nxt); |
| 1392 | } |
| 1393 | |
| 1394 | static void io_free_batch_list(struct io_ring_ctx *ctx, |
| 1395 | struct io_wq_work_node *node) |
| 1396 | __must_hold(&ctx->uring_lock) |
| 1397 | { |
| 1398 | do { |
| 1399 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1400 | comp_list); |
| 1401 | |
| 1402 | if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) { |
| 1403 | if (req->flags & REQ_F_REFCOUNT) { |
| 1404 | node = req->comp_list.next; |
| 1405 | if (!req_ref_put_and_test(req)) |
| 1406 | continue; |
| 1407 | } |
| 1408 | if ((req->flags & REQ_F_POLLED) && req->apoll) { |
| 1409 | struct async_poll *apoll = req->apoll; |
| 1410 | |
| 1411 | if (apoll->double_poll) |
| 1412 | kfree(apoll->double_poll); |
| 1413 | if (!io_alloc_cache_put(&ctx->apoll_cache, apoll)) |
| 1414 | kfree(apoll); |
| 1415 | req->flags &= ~REQ_F_POLLED; |
| 1416 | } |
| 1417 | if (req->flags & IO_REQ_LINK_FLAGS) |
| 1418 | io_queue_next(req); |
| 1419 | if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS)) |
| 1420 | io_clean_op(req); |
| 1421 | } |
| 1422 | io_put_file(req); |
| 1423 | io_put_rsrc_node(ctx, req->rsrc_node); |
| 1424 | io_put_task(req->task); |
| 1425 | |
| 1426 | node = req->comp_list.next; |
| 1427 | io_req_add_to_cache(req, ctx); |
| 1428 | } while (node); |
| 1429 | } |
| 1430 | |
| 1431 | void __io_submit_flush_completions(struct io_ring_ctx *ctx) |
| 1432 | __must_hold(&ctx->uring_lock) |
| 1433 | { |
| 1434 | struct io_submit_state *state = &ctx->submit_state; |
| 1435 | struct io_wq_work_node *node; |
| 1436 | |
| 1437 | __io_cq_lock(ctx); |
| 1438 | __wq_list_for_each(node, &state->compl_reqs) { |
| 1439 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1440 | comp_list); |
| 1441 | |
| 1442 | if (!(req->flags & REQ_F_CQE_SKIP) && |
| 1443 | unlikely(!io_fill_cqe_req(ctx, req))) { |
| 1444 | if (ctx->lockless_cq) { |
| 1445 | spin_lock(&ctx->completion_lock); |
| 1446 | io_req_cqe_overflow(req); |
| 1447 | spin_unlock(&ctx->completion_lock); |
| 1448 | } else { |
| 1449 | io_req_cqe_overflow(req); |
| 1450 | } |
| 1451 | } |
| 1452 | } |
| 1453 | __io_cq_unlock_post(ctx); |
| 1454 | |
| 1455 | if (!wq_list_empty(&state->compl_reqs)) { |
| 1456 | io_free_batch_list(ctx, state->compl_reqs.first); |
| 1457 | INIT_WQ_LIST(&state->compl_reqs); |
| 1458 | } |
| 1459 | ctx->submit_state.cq_flush = false; |
| 1460 | } |
| 1461 | |
| 1462 | static unsigned io_cqring_events(struct io_ring_ctx *ctx) |
| 1463 | { |
| 1464 | /* See comment at the top of this file */ |
| 1465 | smp_rmb(); |
| 1466 | return __io_cqring_events(ctx); |
| 1467 | } |
| 1468 | |
| 1469 | /* |
| 1470 | * We can't just wait for polled events to come to us, we have to actively |
| 1471 | * find and complete them. |
| 1472 | */ |
| 1473 | static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) |
| 1474 | { |
| 1475 | if (!(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1476 | return; |
| 1477 | |
| 1478 | mutex_lock(&ctx->uring_lock); |
| 1479 | while (!wq_list_empty(&ctx->iopoll_list)) { |
| 1480 | /* let it sleep and repeat later if we can't complete a request */ |
| 1481 | if (io_do_iopoll(ctx, true) == 0) |
| 1482 | break; |
| 1483 | /* |
| 1484 | * Ensure we allow local-to-the-cpu processing to take place; |
| 1485 | * in this case we need to ensure that we reap all events. |
| 1486 | * Also let task_work, etc. progress by releasing the mutex. |
| 1487 | */ |
| 1488 | if (need_resched()) { |
| 1489 | mutex_unlock(&ctx->uring_lock); |
| 1490 | cond_resched(); |
| 1491 | mutex_lock(&ctx->uring_lock); |
| 1492 | } |
| 1493 | } |
| 1494 | mutex_unlock(&ctx->uring_lock); |
| 1495 | } |
| 1496 | |
| 1497 | static int io_iopoll_check(struct io_ring_ctx *ctx, long min) |
| 1498 | { |
| 1499 | unsigned int nr_events = 0; |
| 1500 | unsigned long check_cq; |
| 1501 | |
| 1502 | lockdep_assert_held(&ctx->uring_lock); |
| 1503 | |
| 1504 | if (!io_allowed_run_tw(ctx)) |
| 1505 | return -EEXIST; |
| 1506 | |
| 1507 | check_cq = READ_ONCE(ctx->check_cq); |
| 1508 | if (unlikely(check_cq)) { |
| 1509 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
| 1510 | __io_cqring_overflow_flush(ctx, false); |
| 1511 | /* |
| 1512 | * Similarly do not spin if we have not informed the user of any |
| 1513 | * dropped CQE. |
| 1514 | */ |
| 1515 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) |
| 1516 | return -EBADR; |
| 1517 | } |
| 1518 | /* |
| 1519 | * Don't enter poll loop if we already have events pending. |
| 1520 | * If we do, we can potentially be spinning for commands that |
| 1521 | * already triggered a CQE (eg in error). |
| 1522 | */ |
| 1523 | if (io_cqring_events(ctx)) |
| 1524 | return 0; |
| 1525 | |
| 1526 | do { |
| 1527 | int ret = 0; |
| 1528 | |
| 1529 | /* |
| 1530 | * If a submit got punted to a workqueue, we can have the |
| 1531 | * application entering polling for a command before it gets |
| 1532 | * issued. That app will hold the uring_lock for the duration |
| 1533 | * of the poll right here, so we need to take a breather every |
| 1534 | * now and then to ensure that the issue has a chance to add |
| 1535 | * the poll to the issued list. Otherwise we can spin here |
| 1536 | * forever, while the workqueue is stuck trying to acquire the |
| 1537 | * very same mutex. |
| 1538 | */ |
| 1539 | if (wq_list_empty(&ctx->iopoll_list) || |
| 1540 | io_task_work_pending(ctx)) { |
| 1541 | u32 tail = ctx->cached_cq_tail; |
| 1542 | |
| 1543 | (void) io_run_local_work_locked(ctx, min); |
| 1544 | |
| 1545 | if (task_work_pending(current) || |
| 1546 | wq_list_empty(&ctx->iopoll_list)) { |
| 1547 | mutex_unlock(&ctx->uring_lock); |
| 1548 | io_run_task_work(); |
| 1549 | mutex_lock(&ctx->uring_lock); |
| 1550 | } |
| 1551 | /* some requests don't go through iopoll_list */ |
| 1552 | if (tail != ctx->cached_cq_tail || |
| 1553 | wq_list_empty(&ctx->iopoll_list)) |
| 1554 | break; |
| 1555 | } |
| 1556 | ret = io_do_iopoll(ctx, !min); |
| 1557 | if (unlikely(ret < 0)) |
| 1558 | return ret; |
| 1559 | |
| 1560 | if (task_sigpending(current)) |
| 1561 | return -EINTR; |
| 1562 | if (need_resched()) |
| 1563 | break; |
| 1564 | |
| 1565 | nr_events += ret; |
| 1566 | } while (nr_events < min); |
| 1567 | |
| 1568 | return 0; |
| 1569 | } |
| 1570 | |
| 1571 | void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts) |
| 1572 | { |
| 1573 | io_req_complete_defer(req); |
| 1574 | } |
| 1575 | |
| 1576 | /* |
| 1577 | * After the iocb has been issued, it's safe to be found on the poll list. |
| 1578 | * Adding the kiocb to the list AFTER submission ensures that we don't |
| 1579 | * find it from an io_do_iopoll() thread before the issuer is done |
| 1580 | * accessing the kiocb cookie. |
| 1581 | */ |
| 1582 | static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) |
| 1583 | { |
| 1584 | struct io_ring_ctx *ctx = req->ctx; |
| 1585 | const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; |
| 1586 | |
| 1587 | /* workqueue context doesn't hold uring_lock, grab it now */ |
| 1588 | if (unlikely(needs_lock)) |
| 1589 | mutex_lock(&ctx->uring_lock); |
| 1590 | |
| 1591 | /* |
| 1592 | * Track whether we have multiple files in our lists. This will impact
| 1593 | * how we do polling eventually: we avoid spinning if we may be polling
| 1594 | * different devices.
| 1595 | */ |
| 1596 | if (wq_list_empty(&ctx->iopoll_list)) { |
| 1597 | ctx->poll_multi_queue = false; |
| 1598 | } else if (!ctx->poll_multi_queue) { |
| 1599 | struct io_kiocb *list_req; |
| 1600 | |
| 1601 | list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, |
| 1602 | comp_list); |
| 1603 | if (list_req->file != req->file) |
| 1604 | ctx->poll_multi_queue = true; |
| 1605 | } |
| 1606 | |
| 1607 | /* |
| 1608 | * For fast devices, IO may have already completed. If it has, add |
| 1609 | * it to the front so we find it first. |
| 1610 | */ |
| 1611 | if (READ_ONCE(req->iopoll_completed)) |
| 1612 | wq_list_add_head(&req->comp_list, &ctx->iopoll_list); |
| 1613 | else |
| 1614 | wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); |
| 1615 | |
| 1616 | if (unlikely(needs_lock)) { |
| 1617 | /* |
| 1618 | * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
| 1619 | * in sq thread task context or in io worker task context. If
| 1620 | * the current task context is the sq thread, we don't need to
| 1621 | * check whether we should wake up the sq thread.
| 1622 | */ |
| 1623 | if ((ctx->flags & IORING_SETUP_SQPOLL) && |
| 1624 | wq_has_sleeper(&ctx->sq_data->wait)) |
| 1625 | wake_up(&ctx->sq_data->wait); |
| 1626 | |
| 1627 | mutex_unlock(&ctx->uring_lock); |
| 1628 | } |
| 1629 | } |
| 1630 | |
| 1631 | io_req_flags_t io_file_get_flags(struct file *file) |
| 1632 | { |
| 1633 | io_req_flags_t res = 0; |
| 1634 | |
| 1635 | if (S_ISREG(file_inode(file)->i_mode)) |
| 1636 | res |= REQ_F_ISREG; |
| 1637 | if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT)) |
| 1638 | res |= REQ_F_SUPPORT_NOWAIT; |
| 1639 | return res; |
| 1640 | } |
| 1641 | |
| 1642 | bool io_alloc_async_data(struct io_kiocb *req) |
| 1643 | { |
| 1644 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
| 1645 | |
| 1646 | WARN_ON_ONCE(!def->async_size); |
| 1647 | req->async_data = kmalloc(def->async_size, GFP_KERNEL); |
| 1648 | if (req->async_data) { |
| 1649 | req->flags |= REQ_F_ASYNC_DATA; |
| 1650 | return false; |
| 1651 | } |
| 1652 | return true; |
| 1653 | } |
| 1654 | |
| 1655 | static u32 io_get_sequence(struct io_kiocb *req) |
| 1656 | { |
| 1657 | u32 seq = req->ctx->cached_sq_head; |
| 1658 | struct io_kiocb *cur; |
| 1659 | |
| 1660 | /* need original cached_sq_head, but it was increased for each req */ |
| 1661 | io_for_each_link(cur, req) |
| 1662 | seq--; |
| 1663 | return seq; |
| 1664 | } |
| 1665 | |
| 1666 | static __cold void io_drain_req(struct io_kiocb *req) |
| 1667 | __must_hold(&ctx->uring_lock) |
| 1668 | { |
| 1669 | struct io_ring_ctx *ctx = req->ctx; |
| 1670 | struct io_defer_entry *de; |
| 1671 | int ret; |
| 1672 | u32 seq = io_get_sequence(req); |
| 1673 | |
| 1674 | /* Still need to defer if there are pending reqs in the defer list. */
| 1675 | spin_lock(&ctx->completion_lock); |
| 1676 | if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { |
| 1677 | spin_unlock(&ctx->completion_lock); |
| 1678 | queue: |
| 1679 | ctx->drain_active = false; |
| 1680 | io_req_task_queue(req); |
| 1681 | return; |
| 1682 | } |
| 1683 | spin_unlock(&ctx->completion_lock); |
| 1684 | |
| 1685 | io_prep_async_link(req); |
| 1686 | de = kmalloc(sizeof(*de), GFP_KERNEL); |
| 1687 | if (!de) { |
| 1688 | ret = -ENOMEM; |
| 1689 | io_req_defer_failed(req, ret); |
| 1690 | return; |
| 1691 | } |
| 1692 | |
| 1693 | spin_lock(&ctx->completion_lock); |
| 1694 | if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { |
| 1695 | spin_unlock(&ctx->completion_lock); |
| 1696 | kfree(de); |
| 1697 | goto queue; |
| 1698 | } |
| 1699 | |
| 1700 | trace_io_uring_defer(req); |
| 1701 | de->req = req; |
| 1702 | de->seq = seq; |
| 1703 | list_add_tail(&de->list, &ctx->defer_list); |
| 1704 | spin_unlock(&ctx->completion_lock); |
| 1705 | } |
| 1706 | |
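/*
 * A rough userspace sketch (liburing) of what feeds io_drain_req() above:
 * marking an SQE with IOSQE_IO_DRAIN makes it wait until all previously
 * submitted requests have completed before it is issued, e.g. an fsync that
 * must not start before earlier writes finish.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);	- waits for the write
 *	io_uring_submit(&ring);
 */
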
| 1707 | static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, |
| 1708 | unsigned int issue_flags) |
| 1709 | { |
| 1710 | if (req->file || !def->needs_file) |
| 1711 | return true; |
| 1712 | |
| 1713 | if (req->flags & REQ_F_FIXED_FILE) |
| 1714 | req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags); |
| 1715 | else |
| 1716 | req->file = io_file_get_normal(req, req->cqe.fd); |
| 1717 | |
| 1718 | return !!req->file; |
| 1719 | } |
| 1720 | |
| 1721 | static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) |
| 1722 | { |
| 1723 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
| 1724 | const struct cred *creds = NULL; |
| 1725 | int ret; |
| 1726 | |
| 1727 | if (unlikely(!io_assign_file(req, def, issue_flags))) |
| 1728 | return -EBADF; |
| 1729 | |
| 1730 | if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) |
| 1731 | creds = override_creds(req->creds); |
| 1732 | |
| 1733 | if (!def->audit_skip) |
| 1734 | audit_uring_entry(req->opcode); |
| 1735 | |
| 1736 | ret = def->issue(req, issue_flags); |
| 1737 | |
| 1738 | if (!def->audit_skip) |
| 1739 | audit_uring_exit(!ret, ret); |
| 1740 | |
| 1741 | if (creds) |
| 1742 | revert_creds(creds); |
| 1743 | |
| 1744 | if (ret == IOU_OK) { |
| 1745 | if (issue_flags & IO_URING_F_COMPLETE_DEFER) |
| 1746 | io_req_complete_defer(req); |
| 1747 | else |
| 1748 | io_req_complete_post(req, issue_flags); |
| 1749 | |
| 1750 | return 0; |
| 1751 | } |
| 1752 | |
| 1753 | if (ret == IOU_ISSUE_SKIP_COMPLETE) { |
| 1754 | ret = 0; |
| 1755 | io_arm_ltimeout(req); |
| 1756 | |
| 1757 | /* If the op doesn't have a file, we're not polling for it */ |
| 1758 | if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue) |
| 1759 | io_iopoll_req_issued(req, issue_flags); |
| 1760 | } |
| 1761 | return ret; |
| 1762 | } |
| 1763 | |
| 1764 | int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts) |
| 1765 | { |
| 1766 | io_tw_lock(req->ctx, ts); |
| 1767 | return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT| |
| 1768 | IO_URING_F_COMPLETE_DEFER); |
| 1769 | } |
| 1770 | |
| 1771 | struct io_wq_work *io_wq_free_work(struct io_wq_work *work) |
| 1772 | { |
| 1773 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 1774 | struct io_kiocb *nxt = NULL; |
| 1775 | |
| 1776 | if (req_ref_put_and_test(req)) { |
| 1777 | if (req->flags & IO_REQ_LINK_FLAGS) |
| 1778 | nxt = io_req_find_next(req); |
| 1779 | io_free_req(req); |
| 1780 | } |
| 1781 | return nxt ? &nxt->work : NULL; |
| 1782 | } |
| 1783 | |
| 1784 | void io_wq_submit_work(struct io_wq_work *work) |
| 1785 | { |
| 1786 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 1787 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; |
| 1788 | unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ; |
| 1789 | bool needs_poll = false; |
| 1790 | int ret = 0, err = -ECANCELED; |
| 1791 | |
| 1792 | /* one will be dropped by ->io_wq_free_work() after returning to io-wq */ |
| 1793 | if (!(req->flags & REQ_F_REFCOUNT)) |
| 1794 | __io_req_set_refcount(req, 2); |
| 1795 | else |
| 1796 | req_ref_get(req); |
| 1797 | |
| 1798 | io_arm_ltimeout(req); |
| 1799 | |
| 1800 | /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ |
| 1801 | if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) { |
| 1802 | fail: |
| 1803 | io_req_task_queue_fail(req, err); |
| 1804 | return; |
| 1805 | } |
| 1806 | if (!io_assign_file(req, def, issue_flags)) { |
| 1807 | err = -EBADF; |
| 1808 | atomic_or(IO_WQ_WORK_CANCEL, &work->flags); |
| 1809 | goto fail; |
| 1810 | } |
| 1811 | |
| 1812 | /* |
| 1813 | * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the |
| 1814 | * submitter task context. Final request completions are handed to the
| 1815 | * right context, however this is not the case for auxiliary CQEs,
| 1816 | * which are the main means of operation for multishot requests.
| 1817 | * Don't allow any multishot execution from io-wq. It's more restrictive |
| 1818 | * than necessary and also cleaner. |
| 1819 | */ |
| 1820 | if (req->flags & REQ_F_APOLL_MULTISHOT) { |
| 1821 | err = -EBADFD; |
| 1822 | if (!io_file_can_poll(req)) |
| 1823 | goto fail; |
| 1824 | if (req->file->f_flags & O_NONBLOCK || |
| 1825 | req->file->f_mode & FMODE_NOWAIT) { |
| 1826 | err = -ECANCELED; |
| 1827 | if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK) |
| 1828 | goto fail; |
| 1829 | return; |
| 1830 | } else { |
| 1831 | req->flags &= ~REQ_F_APOLL_MULTISHOT; |
| 1832 | } |
| 1833 | } |
| 1834 | |
| 1835 | if (req->flags & REQ_F_FORCE_ASYNC) { |
| 1836 | bool opcode_poll = def->pollin || def->pollout; |
| 1837 | |
| 1838 | if (opcode_poll && io_file_can_poll(req)) { |
| 1839 | needs_poll = true; |
| 1840 | issue_flags |= IO_URING_F_NONBLOCK; |
| 1841 | } |
| 1842 | } |
| 1843 | |
| 1844 | do { |
| 1845 | ret = io_issue_sqe(req, issue_flags); |
| 1846 | if (ret != -EAGAIN) |
| 1847 | break; |
| 1848 | |
| 1849 | /* |
| 1850 | * If REQ_F_NOWAIT is set, then don't wait or retry with |
| 1851 | * poll. -EAGAIN is final for that case. |
| 1852 | */ |
| 1853 | if (req->flags & REQ_F_NOWAIT) |
| 1854 | break; |
| 1855 | |
| 1856 | /* |
| 1857 | * We can get EAGAIN for iopolled IO even though we're |
| 1858 | * forcing a sync submission from here, since we can't |
| 1859 | * wait for request slots on the block side. |
| 1860 | */ |
| 1861 | if (!needs_poll) { |
| 1862 | if (!(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 1863 | break; |
| 1864 | if (io_wq_worker_stopped()) |
| 1865 | break; |
| 1866 | cond_resched(); |
| 1867 | continue; |
| 1868 | } |
| 1869 | |
| 1870 | if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) |
| 1871 | return; |
| 1872 | /* aborted or ready, in either case retry blocking */ |
| 1873 | needs_poll = false; |
| 1874 | issue_flags &= ~IO_URING_F_NONBLOCK; |
| 1875 | } while (1); |
| 1876 | |
| 1877 | /* avoid locking problems by failing it from a clean context */ |
| 1878 | if (ret) |
| 1879 | io_req_task_queue_fail(req, ret); |
| 1880 | } |
| 1881 | |
| 1882 | inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, |
| 1883 | unsigned int issue_flags) |
| 1884 | { |
| 1885 | struct io_ring_ctx *ctx = req->ctx; |
| 1886 | struct io_fixed_file *slot; |
| 1887 | struct file *file = NULL; |
| 1888 | |
| 1889 | io_ring_submit_lock(ctx, issue_flags); |
| 1890 | |
| 1891 | if (unlikely((unsigned int)fd >= ctx->nr_user_files)) |
| 1892 | goto out; |
| 1893 | fd = array_index_nospec(fd, ctx->nr_user_files); |
| 1894 | slot = io_fixed_file_slot(&ctx->file_table, fd); |
| 1895 | if (!req->rsrc_node) |
| 1896 | __io_req_set_rsrc_node(req, ctx); |
| 1897 | req->flags |= io_slot_flags(slot); |
| 1898 | file = io_slot_file(slot); |
| 1899 | out: |
| 1900 | io_ring_submit_unlock(ctx, issue_flags); |
| 1901 | return file; |
| 1902 | } |
| 1903 | |
| 1904 | struct file *io_file_get_normal(struct io_kiocb *req, int fd) |
| 1905 | { |
| 1906 | struct file *file = fget(fd); |
| 1907 | |
| 1908 | trace_io_uring_file_get(req, fd); |
| 1909 | |
| 1910 | /* we don't allow fixed io_uring files */ |
| 1911 | if (file && io_is_uring_fops(file)) |
| 1912 | io_req_track_inflight(req); |
| 1913 | return file; |
| 1914 | } |
| 1915 | |
| 1916 | static void io_queue_async(struct io_kiocb *req, int ret) |
| 1917 | __must_hold(&req->ctx->uring_lock) |
| 1918 | { |
| 1919 | struct io_kiocb *linked_timeout; |
| 1920 | |
| 1921 | if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { |
| 1922 | io_req_defer_failed(req, ret); |
| 1923 | return; |
| 1924 | } |
| 1925 | |
| 1926 | linked_timeout = io_prep_linked_timeout(req); |
| 1927 | |
| 1928 | switch (io_arm_poll_handler(req, 0)) { |
| 1929 | case IO_APOLL_READY: |
| 1930 | io_kbuf_recycle(req, 0); |
| 1931 | io_req_task_queue(req); |
| 1932 | break; |
| 1933 | case IO_APOLL_ABORTED: |
| 1934 | io_kbuf_recycle(req, 0); |
| 1935 | io_queue_iowq(req); |
| 1936 | break; |
| 1937 | case IO_APOLL_OK: |
| 1938 | break; |
| 1939 | } |
| 1940 | |
| 1941 | if (linked_timeout) |
| 1942 | io_queue_linked_timeout(linked_timeout); |
| 1943 | } |
| 1944 | |
| 1945 | static inline void io_queue_sqe(struct io_kiocb *req) |
| 1946 | __must_hold(&req->ctx->uring_lock) |
| 1947 | { |
| 1948 | int ret; |
| 1949 | |
| 1950 | ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); |
| 1951 | |
| 1952 | /* |
| 1953 | * We async punt it if the file wasn't marked NOWAIT, or if the file |
| 1954 | * doesn't support non-blocking read/write attempts |
| 1955 | */ |
| 1956 | if (unlikely(ret)) |
| 1957 | io_queue_async(req, ret); |
| 1958 | } |
| 1959 | |
| 1960 | static void io_queue_sqe_fallback(struct io_kiocb *req) |
| 1961 | __must_hold(&req->ctx->uring_lock) |
| 1962 | { |
| 1963 | if (unlikely(req->flags & REQ_F_FAIL)) { |
| 1964 | /* |
| 1965 | * We don't submit, fail them all. For that, replace hardlinks
| 1966 | * with normal links. An extra REQ_F_LINK is tolerated.
| 1967 | */ |
| 1968 | req->flags &= ~REQ_F_HARDLINK; |
| 1969 | req->flags |= REQ_F_LINK; |
| 1970 | io_req_defer_failed(req, req->cqe.res); |
| 1971 | } else { |
| 1972 | if (unlikely(req->ctx->drain_active)) |
| 1973 | io_drain_req(req); |
| 1974 | else |
| 1975 | io_queue_iowq(req); |
| 1976 | } |
| 1977 | } |
| 1978 | |
| 1979 | /* |
| 1980 | * Check SQE restrictions (opcode and flags). |
| 1981 | * |
| 1982 | * Returns 'true' if SQE is allowed, 'false' otherwise. |
| 1983 | */ |
| 1984 | static inline bool io_check_restriction(struct io_ring_ctx *ctx, |
| 1985 | struct io_kiocb *req, |
| 1986 | unsigned int sqe_flags) |
| 1987 | { |
| 1988 | if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) |
| 1989 | return false; |
| 1990 | |
| 1991 | if ((sqe_flags & ctx->restrictions.sqe_flags_required) != |
| 1992 | ctx->restrictions.sqe_flags_required) |
| 1993 | return false; |
| 1994 | |
| 1995 | if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | |
| 1996 | ctx->restrictions.sqe_flags_required)) |
| 1997 | return false; |
| 1998 | |
| 1999 | return true; |
| 2000 | } |
| 2001 | |
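/*
 * A rough userspace sketch of how the restrictions checked by
 * io_check_restriction() above get registered, assuming liburing's
 * io_uring_register_restrictions() helper. The ring is created disabled,
 * restricted, and only then enabled:
 *
 *	struct io_uring_restriction res[2] = { };
 *
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READ;
 *	res[1].opcode = IORING_RESTRICTION_SQE_FLAGS_ALLOWED;
 *	res[1].sqe_flags = IOSQE_FIXED_FILE;
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_R_DISABLED);
 *	io_uring_register_restrictions(&ring, res, 2);
 *	io_uring_enable_rings(&ring);
 *
 * Any SQE that isn't an IORING_OP_READ, or that carries flags outside the
 * allowed set, is then failed with -EACCES in io_init_req().
 */
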
| 2002 | static void io_init_req_drain(struct io_kiocb *req) |
| 2003 | { |
| 2004 | struct io_ring_ctx *ctx = req->ctx; |
| 2005 | struct io_kiocb *head = ctx->submit_state.link.head; |
| 2006 | |
| 2007 | ctx->drain_active = true; |
| 2008 | if (head) { |
| 2009 | /* |
| 2010 | * If we need to drain a request in the middle of a link, drain |
| 2011 | * the head request and the next request/link after the current |
| 2012 | * link. Considering sequential execution of links, |
| 2013 | * REQ_F_IO_DRAIN will be maintained for every request of our |
| 2014 | * link. |
| 2015 | */ |
| 2016 | head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
| 2017 | ctx->drain_next = true; |
| 2018 | } |
| 2019 | } |
| 2020 | |
| 2021 | static __cold int io_init_fail_req(struct io_kiocb *req, int err) |
| 2022 | { |
| 2023 | /* ensure per-opcode data is cleared if we fail before prep */ |
| 2024 | memset(&req->cmd.data, 0, sizeof(req->cmd.data)); |
| 2025 | return err; |
| 2026 | } |
| 2027 | |
| 2028 | static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| 2029 | const struct io_uring_sqe *sqe) |
| 2030 | __must_hold(&ctx->uring_lock) |
| 2031 | { |
| 2032 | const struct io_issue_def *def; |
| 2033 | unsigned int sqe_flags; |
| 2034 | int personality; |
| 2035 | u8 opcode; |
| 2036 | |
| 2037 | /* req is partially pre-initialised, see io_preinit_req() */ |
| 2038 | req->opcode = opcode = READ_ONCE(sqe->opcode); |
| 2039 | /* same numerical values as the corresponding REQ_F_*, safe to copy */
| 2040 | sqe_flags = READ_ONCE(sqe->flags); |
| 2041 | req->flags = (io_req_flags_t) sqe_flags; |
| 2042 | req->cqe.user_data = READ_ONCE(sqe->user_data); |
| 2043 | req->file = NULL; |
| 2044 | req->rsrc_node = NULL; |
| 2045 | req->task = current; |
| 2046 | req->cancel_seq_set = false; |
| 2047 | |
| 2048 | if (unlikely(opcode >= IORING_OP_LAST)) { |
| 2049 | req->opcode = 0; |
| 2050 | return io_init_fail_req(req, -EINVAL); |
| 2051 | } |
| 2052 | def = &io_issue_defs[opcode]; |
| 2053 | if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { |
| 2054 | /* enforce forwards compatibility on users */ |
| 2055 | if (sqe_flags & ~SQE_VALID_FLAGS) |
| 2056 | return io_init_fail_req(req, -EINVAL); |
| 2057 | if (sqe_flags & IOSQE_BUFFER_SELECT) { |
| 2058 | if (!def->buffer_select) |
| 2059 | return io_init_fail_req(req, -EOPNOTSUPP); |
| 2060 | req->buf_index = READ_ONCE(sqe->buf_group); |
| 2061 | } |
| 2062 | if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) |
| 2063 | ctx->drain_disabled = true; |
| 2064 | if (sqe_flags & IOSQE_IO_DRAIN) { |
| 2065 | if (ctx->drain_disabled) |
| 2066 | return io_init_fail_req(req, -EOPNOTSUPP); |
| 2067 | io_init_req_drain(req); |
| 2068 | } |
| 2069 | } |
| 2070 | if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { |
| 2071 | if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) |
| 2072 | return io_init_fail_req(req, -EACCES); |
| 2073 | /* knock it to the slow queue path, will be drained there */ |
| 2074 | if (ctx->drain_active) |
| 2075 | req->flags |= REQ_F_FORCE_ASYNC; |
| 2076 | /* if there is no link, we're at "next" request and need to drain */ |
| 2077 | if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { |
| 2078 | ctx->drain_next = false; |
| 2079 | ctx->drain_active = true; |
| 2080 | req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
| 2081 | } |
| 2082 | } |
| 2083 | |
| 2084 | if (!def->ioprio && sqe->ioprio) |
| 2085 | return io_init_fail_req(req, -EINVAL); |
| 2086 | if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL)) |
| 2087 | return io_init_fail_req(req, -EINVAL); |
| 2088 | |
| 2089 | if (def->needs_file) { |
| 2090 | struct io_submit_state *state = &ctx->submit_state; |
| 2091 | |
| 2092 | req->cqe.fd = READ_ONCE(sqe->fd); |
| 2093 | |
| 2094 | /* |
| 2095 | * Plug now if we have more than 2 IOs left after this, and the
| 2096 | * target is potentially a read/write to block-based storage.
| 2097 | */ |
| 2098 | if (state->need_plug && def->plug) { |
| 2099 | state->plug_started = true; |
| 2100 | state->need_plug = false; |
| 2101 | blk_start_plug_nr_ios(&state->plug, state->submit_nr); |
| 2102 | } |
| 2103 | } |
| 2104 | |
| 2105 | personality = READ_ONCE(sqe->personality); |
| 2106 | if (personality) { |
| 2107 | int ret; |
| 2108 | |
| 2109 | req->creds = xa_load(&ctx->personalities, personality); |
| 2110 | if (!req->creds) |
| 2111 | return io_init_fail_req(req, -EINVAL); |
| 2112 | get_cred(req->creds); |
| 2113 | ret = security_uring_override_creds(req->creds); |
| 2114 | if (ret) { |
| 2115 | put_cred(req->creds); |
| 2116 | return io_init_fail_req(req, ret); |
| 2117 | } |
| 2118 | req->flags |= REQ_F_CREDS; |
| 2119 | } |
| 2120 | |
| 2121 | return def->prep(req, sqe); |
| 2122 | } |
| 2123 | |
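/*
 * A rough userspace sketch (liburing) of the personality handling in
 * io_init_req() above: credentials are registered up front, then referenced
 * per-SQE so an individual request is issued with those creds. The path and
 * privilege sequence below are purely illustrative.
 *
 *	int cred_id = io_uring_register_personality(&ring);	- e.g. as root
 *
 *	- ... drop privileges ...
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat(sqe, AT_FDCWD, "/root/secret", O_RDONLY, 0);
 *	sqe->personality = cred_id;	- issue with the registered creds
 *	io_uring_submit(&ring);
 */
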
| 2124 | static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, |
| 2125 | struct io_kiocb *req, int ret) |
| 2126 | { |
| 2127 | struct io_ring_ctx *ctx = req->ctx; |
| 2128 | struct io_submit_link *link = &ctx->submit_state.link; |
| 2129 | struct io_kiocb *head = link->head; |
| 2130 | |
| 2131 | trace_io_uring_req_failed(sqe, req, ret); |
| 2132 | |
| 2133 | /* |
| 2134 | * Avoid breaking links in the middle as it renders links with SQPOLL |
| 2135 | * unusable. Instead of failing eagerly, continue assembling the link if |
| 2136 | * applicable and mark the head with REQ_F_FAIL. The link flushing code |
| 2137 | * should find the flag and handle the rest. |
| 2138 | */ |
| 2139 | req_fail_link_node(req, ret); |
| 2140 | if (head && !(head->flags & REQ_F_FAIL)) |
| 2141 | req_fail_link_node(head, -ECANCELED); |
| 2142 | |
| 2143 | if (!(req->flags & IO_REQ_LINK_FLAGS)) { |
| 2144 | if (head) { |
| 2145 | link->last->link = req; |
| 2146 | link->head = NULL; |
| 2147 | req = head; |
| 2148 | } |
| 2149 | io_queue_sqe_fallback(req); |
| 2150 | return ret; |
| 2151 | } |
| 2152 | |
| 2153 | if (head) |
| 2154 | link->last->link = req; |
| 2155 | else |
| 2156 | link->head = req; |
| 2157 | link->last = req; |
| 2158 | return 0; |
| 2159 | } |
| 2160 | |
| 2161 | static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| 2162 | const struct io_uring_sqe *sqe) |
| 2163 | __must_hold(&ctx->uring_lock) |
| 2164 | { |
| 2165 | struct io_submit_link *link = &ctx->submit_state.link; |
| 2166 | int ret; |
| 2167 | |
| 2168 | ret = io_init_req(ctx, req, sqe); |
| 2169 | if (unlikely(ret)) |
| 2170 | return io_submit_fail_init(sqe, req, ret); |
| 2171 | |
| 2172 | trace_io_uring_submit_req(req); |
| 2173 | |
| 2174 | /* |
| 2175 | * If we already have a head request, queue this one for async |
| 2176 | * submittal once the head completes. If we don't have a head but |
| 2177 | * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be |
| 2178 | * submitted sync once the chain is complete. If none of those |
| 2179 | * conditions are true (normal request), then just queue it. |
| 2180 | */ |
| 2181 | if (unlikely(link->head)) { |
| 2182 | trace_io_uring_link(req, link->last); |
| 2183 | link->last->link = req; |
| 2184 | link->last = req; |
| 2185 | |
| 2186 | if (req->flags & IO_REQ_LINK_FLAGS) |
| 2187 | return 0; |
| 2188 | /* last request of the link, flush it */ |
| 2189 | req = link->head; |
| 2190 | link->head = NULL; |
| 2191 | if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) |
| 2192 | goto fallback; |
| 2193 | |
| 2194 | } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | |
| 2195 | REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { |
| 2196 | if (req->flags & IO_REQ_LINK_FLAGS) { |
| 2197 | link->head = req; |
| 2198 | link->last = req; |
| 2199 | } else { |
| 2200 | fallback: |
| 2201 | io_queue_sqe_fallback(req); |
| 2202 | } |
| 2203 | return 0; |
| 2204 | } |
| 2205 | |
| 2206 | io_queue_sqe(req); |
| 2207 | return 0; |
| 2208 | } |
| 2209 | |
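/*
 * A rough userspace sketch (liburing) of the link handling in io_submit_sqe()
 * above: IOSQE_IO_LINK chains SQEs so each one only starts after the previous
 * one completed successfully, e.g. a read followed by a dependent write.
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, in_fd, buf, len, 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, out_fd, buf, len, 0);	- runs after the read
 *	io_uring_submit(&ring);
 *
 * If the read fails or is short, the write is cancelled with -ECANCELED;
 * IOSQE_IO_HARDLINK keeps the chain going regardless of the result.
 */
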
| 2210 | /* |
| 2211 | * Batched submission is done; ensure local IO is flushed out.
| 2212 | */ |
| 2213 | static void io_submit_state_end(struct io_ring_ctx *ctx) |
| 2214 | { |
| 2215 | struct io_submit_state *state = &ctx->submit_state; |
| 2216 | |
| 2217 | if (unlikely(state->link.head)) |
| 2218 | io_queue_sqe_fallback(state->link.head); |
| 2219 | /* flush only after queuing links as they can generate completions */ |
| 2220 | io_submit_flush_completions(ctx); |
| 2221 | if (state->plug_started) |
| 2222 | blk_finish_plug(&state->plug); |
| 2223 | } |
| 2224 | |
| 2225 | /* |
| 2226 | * Start submission side cache. |
| 2227 | */ |
| 2228 | static void io_submit_state_start(struct io_submit_state *state, |
| 2229 | unsigned int max_ios) |
| 2230 | { |
| 2231 | state->plug_started = false; |
| 2232 | state->need_plug = max_ios > 2; |
| 2233 | state->submit_nr = max_ios; |
| 2234 | /* set only head, no need to init link_last in advance */ |
| 2235 | state->link.head = NULL; |
| 2236 | } |
| 2237 | |
| 2238 | static void io_commit_sqring(struct io_ring_ctx *ctx) |
| 2239 | { |
| 2240 | struct io_rings *rings = ctx->rings; |
| 2241 | |
| 2242 | /* |
| 2243 | * Ensure any loads from the SQEs are done at this point, |
| 2244 | * since once we write the new head, the application could |
| 2245 | * write new data to them. |
| 2246 | */ |
| 2247 | smp_store_release(&rings->sq.head, ctx->cached_sq_head); |
| 2248 | } |
| 2249 | |
| 2250 | /* |
| 2251 | * Fetch an sqe, if one is available. Note this returns a pointer to memory |
| 2252 | * that is mapped by userspace. This means that care needs to be taken to |
| 2253 | * ensure that reads are stable, as we cannot rely on userspace always |
| 2254 | * being a good citizen. If members of the sqe are validated and then later |
| 2255 | * used, it's important that those reads are done through READ_ONCE() to |
| 2256 | * prevent a re-load down the line. |
| 2257 | */ |
| 2258 | static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) |
| 2259 | { |
| 2260 | unsigned mask = ctx->sq_entries - 1; |
| 2261 | unsigned head = ctx->cached_sq_head++ & mask; |
| 2262 | |
| 2263 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) { |
| 2264 | head = READ_ONCE(ctx->sq_array[head]); |
| 2265 | if (unlikely(head >= ctx->sq_entries)) { |
| 2266 | /* drop invalid entries */ |
| 2267 | spin_lock(&ctx->completion_lock); |
| 2268 | ctx->cq_extra--; |
| 2269 | spin_unlock(&ctx->completion_lock); |
| 2270 | WRITE_ONCE(ctx->rings->sq_dropped, |
| 2271 | READ_ONCE(ctx->rings->sq_dropped) + 1); |
| 2272 | return false; |
| 2273 | } |
| 2274 | } |
| 2275 | |
| 2276 | /* |
| 2277 | * The cached sq head (or cq tail) serves two purposes: |
| 2278 | * |
| 2279 | * 1) allows us to batch the cost of updating the user visible
| 2280 | * head.
| 2281 | * 2) allows the kernel side to track the head on its own, even |
| 2282 | * though the application is the one updating it. |
| 2283 | */ |
| 2284 | |
| 2285 | /* double index for 128-byte SQEs, twice as long */ |
| 2286 | if (ctx->flags & IORING_SETUP_SQE128) |
| 2287 | head <<= 1; |
| 2288 | *sqe = &ctx->sq_sqes[head]; |
| 2289 | return true; |
| 2290 | } |
| 2291 | |
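/*
 * A rough sketch of the userspace producer side that io_get_sqe() above
 * consumes, with liburing stripped away. The pointers below come from the
 * IORING_OFF_SQ_RING/IORING_OFF_SQES mmap()s and struct io_sqring_offsets;
 * the release store on the tail pairs with the kernel's acquire/READ_ONCE
 * loads so a published SQE is fully visible:
 *
 *	unsigned tail = *sq_tail;		- only the app writes the tail
 *	unsigned index = tail & *sq_ring_mask;
 *
 *	fill_sqe(&sqes[index]);			- stand-in for writing the SQE fields
 *	sq_array[index] = index;		- skipped with IORING_SETUP_NO_SQARRAY
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 */
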
| 2292 | int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) |
| 2293 | __must_hold(&ctx->uring_lock) |
| 2294 | { |
| 2295 | unsigned int entries = io_sqring_entries(ctx); |
| 2296 | unsigned int left; |
| 2297 | int ret; |
| 2298 | |
| 2299 | if (unlikely(!entries)) |
| 2300 | return 0; |
| 2301 | /* make sure SQ entry isn't read before tail */ |
| 2302 | ret = left = min(nr, entries); |
| 2303 | io_get_task_refs(left); |
| 2304 | io_submit_state_start(&ctx->submit_state, left); |
| 2305 | |
| 2306 | do { |
| 2307 | const struct io_uring_sqe *sqe; |
| 2308 | struct io_kiocb *req; |
| 2309 | |
| 2310 | if (unlikely(!io_alloc_req(ctx, &req))) |
| 2311 | break; |
| 2312 | if (unlikely(!io_get_sqe(ctx, &sqe))) { |
| 2313 | io_req_add_to_cache(req, ctx); |
| 2314 | break; |
| 2315 | } |
| 2316 | |
| 2317 | /* |
| 2318 | * Continue submitting even for sqe failure if the |
| 2319 | * ring was setup with IORING_SETUP_SUBMIT_ALL |
| 2320 | */ |
| 2321 | if (unlikely(io_submit_sqe(ctx, req, sqe)) && |
| 2322 | !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { |
| 2323 | left--; |
| 2324 | break; |
| 2325 | } |
| 2326 | } while (--left); |
| 2327 | |
| 2328 | if (unlikely(left)) { |
| 2329 | ret -= left; |
| 2330 | /* try again if it submitted nothing and can't allocate a req */ |
| 2331 | if (!ret && io_req_cache_empty(ctx)) |
| 2332 | ret = -EAGAIN; |
| 2333 | current->io_uring->cached_refs += left; |
| 2334 | } |
| 2335 | |
| 2336 | io_submit_state_end(ctx); |
| 2337 | /* Commit SQ ring head once we've consumed and submitted all SQEs */ |
| 2338 | io_commit_sqring(ctx); |
| 2339 | return ret; |
| 2340 | } |
| 2341 | |
| 2342 | static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, |
| 2343 | int wake_flags, void *key) |
| 2344 | { |
| 2345 | struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq); |
| 2346 | |
| 2347 | /* |
| 2348 | * Cannot safely flush overflowed CQEs from here, ensure we wake up |
| 2349 | * the task, and the next invocation will do it. |
| 2350 | */ |
| 2351 | if (io_should_wake(iowq) || io_has_work(iowq->ctx)) |
| 2352 | return autoremove_wake_function(curr, mode, wake_flags, key); |
| 2353 | return -1; |
| 2354 | } |
| 2355 | |
| 2356 | int io_run_task_work_sig(struct io_ring_ctx *ctx) |
| 2357 | { |
| 2358 | if (!llist_empty(&ctx->work_llist)) { |
| 2359 | __set_current_state(TASK_RUNNING); |
| 2360 | if (io_run_local_work(ctx, INT_MAX) > 0) |
| 2361 | return 0; |
| 2362 | } |
| 2363 | if (io_run_task_work() > 0) |
| 2364 | return 0; |
| 2365 | if (task_sigpending(current)) |
| 2366 | return -EINTR; |
| 2367 | return 0; |
| 2368 | } |
| 2369 | |
| 2370 | static bool current_pending_io(void) |
| 2371 | { |
| 2372 | struct io_uring_task *tctx = current->io_uring; |
| 2373 | |
| 2374 | if (!tctx) |
| 2375 | return false; |
| 2376 | return percpu_counter_read_positive(&tctx->inflight); |
| 2377 | } |
| 2378 | |
| 2379 | static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer) |
| 2380 | { |
| 2381 | struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t); |
| 2382 | |
| 2383 | WRITE_ONCE(iowq->hit_timeout, 1); |
| 2384 | iowq->min_timeout = 0; |
| 2385 | wake_up_process(iowq->wq.private); |
| 2386 | return HRTIMER_NORESTART; |
| 2387 | } |
| 2388 | |
| 2389 | /* |
| 2390 | * Doing the min_timeout portion. If we saw any timeouts or events, or have
| 2391 | * work pending, wake up. If not, and we have a normal timeout, switch to
| 2392 | * that and keep sleeping.
| 2393 | */ |
| 2394 | static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer) |
| 2395 | { |
| 2396 | struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t); |
| 2397 | struct io_ring_ctx *ctx = iowq->ctx; |
| 2398 | |
| 2399 | /* no general timeout, or shorter (or equal), we are done */ |
| 2400 | if (iowq->timeout == KTIME_MAX || |
| 2401 | ktime_compare(iowq->min_timeout, iowq->timeout) >= 0) |
| 2402 | goto out_wake; |
| 2403 | /* work we may need to run, wake function will see if we need to wake */ |
| 2404 | if (io_has_work(ctx)) |
| 2405 | goto out_wake; |
| 2406 | /* got events since we started waiting, min timeout is done */ |
| 2407 | if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail)) |
| 2408 | goto out_wake; |
| 2409 | /* if we have any events and min timeout expired, we're done */ |
| 2410 | if (io_cqring_events(ctx)) |
| 2411 | goto out_wake; |
| 2412 | |
| 2413 | /* |
| 2414 | * If using deferred task_work running and application is waiting on |
| 2415 | * more than one request, ensure we reset it now where we are switching |
| 2416 | * to normal sleeps. Any request completion post min_wait should wake |
| 2417 | * the task and return. |
| 2418 | */ |
| 2419 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { |
| 2420 | atomic_set(&ctx->cq_wait_nr, 1); |
| 2421 | smp_mb(); |
| 2422 | if (!llist_empty(&ctx->work_llist)) |
| 2423 | goto out_wake; |
| 2424 | } |
| 2425 | |
| 2426 | iowq->t.function = io_cqring_timer_wakeup; |
| 2427 | hrtimer_set_expires(timer, iowq->timeout); |
| 2428 | return HRTIMER_RESTART; |
| 2429 | out_wake: |
| 2430 | return io_cqring_timer_wakeup(timer); |
| 2431 | } |
| 2432 | |
| 2433 | static int io_cqring_schedule_timeout(struct io_wait_queue *iowq, |
| 2434 | clockid_t clock_id, ktime_t start_time) |
| 2435 | { |
| 2436 | ktime_t timeout; |
| 2437 | |
| 2438 | hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS); |
| 2439 | if (iowq->min_timeout) { |
| 2440 | timeout = ktime_add_ns(iowq->min_timeout, start_time); |
| 2441 | iowq->t.function = io_cqring_min_timer_wakeup; |
| 2442 | } else { |
| 2443 | timeout = iowq->timeout; |
| 2444 | iowq->t.function = io_cqring_timer_wakeup; |
| 2445 | } |
| 2446 | |
| 2447 | hrtimer_set_expires_range_ns(&iowq->t, timeout, 0); |
| 2448 | hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS); |
| 2449 | |
| 2450 | if (!READ_ONCE(iowq->hit_timeout)) |
| 2451 | schedule(); |
| 2452 | |
| 2453 | hrtimer_cancel(&iowq->t); |
| 2454 | destroy_hrtimer_on_stack(&iowq->t); |
| 2455 | __set_current_state(TASK_RUNNING); |
| 2456 | |
| 2457 | return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0; |
| 2458 | } |
| 2459 | |
| 2460 | static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx, |
| 2461 | struct io_wait_queue *iowq, |
| 2462 | ktime_t start_time) |
| 2463 | { |
| 2464 | int ret = 0; |
| 2465 | |
| 2466 | /* |
| 2467 | * Mark us as being in io_wait if we have pending requests, so cpufreq |
| 2468 | * can take into account that the task is waiting for IO - turns out |
| 2469 | * to be important for low QD IO. |
| 2470 | */ |
| 2471 | if (current_pending_io()) |
| 2472 | current->in_iowait = 1; |
| 2473 | if (iowq->timeout != KTIME_MAX || iowq->min_timeout) |
| 2474 | ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time); |
| 2475 | else |
| 2476 | schedule(); |
| 2477 | current->in_iowait = 0; |
| 2478 | return ret; |
| 2479 | } |
| 2480 | |
| 2481 | /* If this returns > 0, the caller should retry */ |
| 2482 | static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, |
| 2483 | struct io_wait_queue *iowq, |
| 2484 | ktime_t start_time) |
| 2485 | { |
| 2486 | if (unlikely(READ_ONCE(ctx->check_cq))) |
| 2487 | return 1; |
| 2488 | if (unlikely(!llist_empty(&ctx->work_llist))) |
| 2489 | return 1; |
| 2490 | if (unlikely(task_work_pending(current))) |
| 2491 | return 1; |
| 2492 | if (unlikely(task_sigpending(current))) |
| 2493 | return -EINTR; |
| 2494 | if (unlikely(io_should_wake(iowq))) |
| 2495 | return 0; |
| 2496 | |
| 2497 | return __io_cqring_wait_schedule(ctx, iowq, start_time); |
| 2498 | } |
| 2499 | |
| 2500 | struct ext_arg { |
| 2501 | size_t argsz; |
| 2502 | struct __kernel_timespec __user *ts; |
| 2503 | const sigset_t __user *sig; |
| 2504 | ktime_t min_time; |
| 2505 | }; |
| 2506 | |
| 2507 | /* |
| 2508 | * Wait until events become available, if we don't already have some. The |
| 2509 | * application must reap them itself, as they reside on the shared cq ring. |
| 2510 | */ |
| 2511 | static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags, |
| 2512 | struct ext_arg *ext_arg) |
| 2513 | { |
| 2514 | struct io_wait_queue iowq; |
| 2515 | struct io_rings *rings = ctx->rings; |
| 2516 | ktime_t start_time; |
| 2517 | int ret; |
| 2518 | |
| 2519 | if (!io_allowed_run_tw(ctx)) |
| 2520 | return -EEXIST; |
| 2521 | if (!llist_empty(&ctx->work_llist)) |
| 2522 | io_run_local_work(ctx, min_events); |
| 2523 | io_run_task_work(); |
| 2524 | |
| 2525 | if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))) |
| 2526 | io_cqring_do_overflow_flush(ctx); |
| 2527 | if (__io_cqring_events_user(ctx) >= min_events) |
| 2528 | return 0; |
| 2529 | |
| 2530 | init_waitqueue_func_entry(&iowq.wq, io_wake_function); |
| 2531 | iowq.wq.private = current; |
| 2532 | INIT_LIST_HEAD(&iowq.wq.entry); |
| 2533 | iowq.ctx = ctx; |
| 2534 | iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; |
| 2535 | iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail); |
| 2536 | iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); |
| 2537 | iowq.hit_timeout = 0; |
| 2538 | iowq.min_timeout = ext_arg->min_time; |
| 2539 | iowq.timeout = KTIME_MAX; |
| 2540 | start_time = io_get_time(ctx); |
| 2541 | |
| 2542 | if (ext_arg->ts) { |
| 2543 | struct timespec64 ts; |
| 2544 | |
| 2545 | if (get_timespec64(&ts, ext_arg->ts)) |
| 2546 | return -EFAULT; |
| 2547 | |
| 2548 | iowq.timeout = timespec64_to_ktime(ts); |
| 2549 | if (!(flags & IORING_ENTER_ABS_TIMER)) |
| 2550 | iowq.timeout = ktime_add(iowq.timeout, start_time); |
| 2551 | } |
| 2552 | |
| 2553 | if (ext_arg->sig) { |
| 2554 | #ifdef CONFIG_COMPAT |
| 2555 | if (in_compat_syscall()) |
| 2556 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig, |
| 2557 | ext_arg->argsz); |
| 2558 | else |
| 2559 | #endif |
| 2560 | ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz); |
| 2561 | |
| 2562 | if (ret) |
| 2563 | return ret; |
| 2564 | } |
| 2565 | |
| 2566 | io_napi_busy_loop(ctx, &iowq); |
| 2567 | |
| 2568 | trace_io_uring_cqring_wait(ctx, min_events); |
| 2569 | do { |
| 2570 | unsigned long check_cq; |
| 2571 | int nr_wait; |
| 2572 | |
| 2573 | /* if min timeout has been hit, don't reset wait count */ |
| 2574 | if (!iowq.hit_timeout) |
| 2575 | nr_wait = (int) iowq.cq_tail - |
| 2576 | READ_ONCE(ctx->rings->cq.tail); |
| 2577 | else |
| 2578 | nr_wait = 1; |
| 2579 | |
| 2580 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { |
| 2581 | atomic_set(&ctx->cq_wait_nr, nr_wait); |
| 2582 | set_current_state(TASK_INTERRUPTIBLE); |
| 2583 | } else { |
| 2584 | prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, |
| 2585 | TASK_INTERRUPTIBLE); |
| 2586 | } |
| 2587 | |
| 2588 | ret = io_cqring_wait_schedule(ctx, &iowq, start_time); |
| 2589 | __set_current_state(TASK_RUNNING); |
| 2590 | atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT); |
| 2591 | |
| 2592 | /* |
| 2593 | * Run task_work after scheduling and before io_should_wake(). |
| 2594 | * If we got woken because of task_work being processed, run it |
| 2595 | * now rather than let the caller do another wait loop. |
| 2596 | */ |
| 2597 | if (!llist_empty(&ctx->work_llist)) |
| 2598 | io_run_local_work(ctx, nr_wait); |
| 2599 | io_run_task_work(); |
| 2600 | |
| 2601 | /* |
| 2602 | * Non-local task_work will be run on exit to userspace, but |
| 2603 | * if we're using DEFER_TASKRUN, then we could have waited |
| 2604 | * with a timeout for a number of requests. If the timeout |
| 2605 | * hits, we could have some requests ready to process. Ensure |
| 2606 | * this break is _after_ we have run task_work, to avoid |
| 2607 | * deferring running potentially pending requests until the |
| 2608 | * next time we wait for events. |
| 2609 | */ |
| 2610 | if (ret < 0) |
| 2611 | break; |
| 2612 | |
| 2613 | check_cq = READ_ONCE(ctx->check_cq); |
| 2614 | if (unlikely(check_cq)) { |
| 2615 | /* let the caller flush overflows, retry */ |
| 2616 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
| 2617 | io_cqring_do_overflow_flush(ctx); |
| 2618 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) { |
| 2619 | ret = -EBADR; |
| 2620 | break; |
| 2621 | } |
| 2622 | } |
| 2623 | |
| 2624 | if (io_should_wake(&iowq)) { |
| 2625 | ret = 0; |
| 2626 | break; |
| 2627 | } |
| 2628 | cond_resched(); |
| 2629 | } while (1); |
| 2630 | |
| 2631 | if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) |
| 2632 | finish_wait(&ctx->cq_wait, &iowq.wq); |
| 2633 | restore_saved_sigmask_unless(ret == -EINTR); |
| 2634 | |
| 2635 | return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; |
| 2636 | } |
| 2637 | |
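/*
 * A rough userspace sketch of the extended-argument wait handled above, using
 * the raw syscall (liburing wraps this in io_uring_wait_cqes() and friends).
 * With IORING_ENTER_EXT_ARG, the "sig" argument points at a
 * struct io_uring_getevents_arg instead of a bare sigset:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (unsigned long) &sigmask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (unsigned long) &ts,
 *	};
 *
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *		IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		&arg, sizeof(arg));
 *
 * The min_time handling above comes from the same struct's minimum-wait field
 * on kernels that support it, with IORING_ENTER_ABS_TIMER selecting an
 * absolute rather than relative ->ts.
 */
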
| 2638 | static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, |
| 2639 | size_t size) |
| 2640 | { |
| 2641 | return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr, |
| 2642 | size); |
| 2643 | } |
| 2644 | |
| 2645 | static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, |
| 2646 | size_t size) |
| 2647 | { |
| 2648 | return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr, |
| 2649 | size); |
| 2650 | } |
| 2651 | |
| 2652 | static void io_rings_free(struct io_ring_ctx *ctx) |
| 2653 | { |
| 2654 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { |
| 2655 | io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages, |
| 2656 | true); |
| 2657 | io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages, |
| 2658 | true); |
| 2659 | } else { |
| 2660 | io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); |
| 2661 | ctx->n_ring_pages = 0; |
| 2662 | io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages); |
| 2663 | ctx->n_sqe_pages = 0; |
| 2664 | vunmap(ctx->rings); |
| 2665 | vunmap(ctx->sq_sqes); |
| 2666 | } |
| 2667 | |
| 2668 | ctx->rings = NULL; |
| 2669 | ctx->sq_sqes = NULL; |
| 2670 | } |
| 2671 | |
| 2672 | static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, |
| 2673 | unsigned int cq_entries, size_t *sq_offset) |
| 2674 | { |
| 2675 | struct io_rings *rings; |
| 2676 | size_t off, sq_array_size; |
| 2677 | |
| 2678 | off = struct_size(rings, cqes, cq_entries); |
| 2679 | if (off == SIZE_MAX) |
| 2680 | return SIZE_MAX; |
| 2681 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 2682 | if (check_shl_overflow(off, 1, &off)) |
| 2683 | return SIZE_MAX; |
| 2684 | } |
| 2685 | |
| 2686 | #ifdef CONFIG_SMP |
| 2687 | off = ALIGN(off, SMP_CACHE_BYTES); |
| 2688 | if (off == 0) |
| 2689 | return SIZE_MAX; |
| 2690 | #endif |
| 2691 | |
| 2692 | if (ctx->flags & IORING_SETUP_NO_SQARRAY) { |
| 2693 | *sq_offset = SIZE_MAX; |
| 2694 | return off; |
| 2695 | } |
| 2696 | |
| 2697 | *sq_offset = off; |
| 2698 | |
| 2699 | sq_array_size = array_size(sizeof(u32), sq_entries); |
| 2700 | if (sq_array_size == SIZE_MAX) |
| 2701 | return SIZE_MAX; |
| 2702 | |
| 2703 | if (check_add_overflow(off, sq_array_size, &off)) |
| 2704 | return SIZE_MAX; |
| 2705 | |
| 2706 | return off; |
| 2707 | } |
| 2708 | |
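/*
 * A rough userspace sketch of how the layout computed by rings_size() above is
 * consumed: with the default (non-IORING_SETUP_NO_MMAP) setup, the application
 * mmap()s the rings and the SQE array using the offsets returned in
 * struct io_uring_params from io_uring_setup():
 *
 *	sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		       ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the CQ ring lives in the same mapping as the
 * SQ ring, so a separate IORING_OFF_CQ_RING mmap() isn't needed.
 */
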
| 2709 | static void io_req_caches_free(struct io_ring_ctx *ctx) |
| 2710 | { |
| 2711 | struct io_kiocb *req; |
| 2712 | int nr = 0; |
| 2713 | |
| 2714 | mutex_lock(&ctx->uring_lock); |
| 2715 | |
| 2716 | while (!io_req_cache_empty(ctx)) { |
| 2717 | req = io_extract_req(ctx); |
| 2718 | kmem_cache_free(req_cachep, req); |
| 2719 | nr++; |
| 2720 | } |
| 2721 | if (nr) |
| 2722 | percpu_ref_put_many(&ctx->refs, nr); |
| 2723 | mutex_unlock(&ctx->uring_lock); |
| 2724 | } |
| 2725 | |
| 2726 | static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) |
| 2727 | { |
| 2728 | io_sq_thread_finish(ctx); |
| 2729 | /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ |
| 2730 | if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list))) |
| 2731 | return; |
| 2732 | |
| 2733 | mutex_lock(&ctx->uring_lock); |
| 2734 | if (ctx->buf_data) |
| 2735 | __io_sqe_buffers_unregister(ctx); |
| 2736 | if (ctx->file_data) |
| 2737 | __io_sqe_files_unregister(ctx); |
| 2738 | io_cqring_overflow_kill(ctx); |
| 2739 | io_eventfd_unregister(ctx); |
| 2740 | io_alloc_cache_free(&ctx->apoll_cache, kfree); |
| 2741 | io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); |
| 2742 | io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); |
| 2743 | io_alloc_cache_free(&ctx->uring_cache, kfree); |
| 2744 | io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free); |
| 2745 | io_futex_cache_free(ctx); |
| 2746 | io_destroy_buffers(ctx); |
| 2747 | mutex_unlock(&ctx->uring_lock); |
| 2748 | if (ctx->sq_creds) |
| 2749 | put_cred(ctx->sq_creds); |
| 2750 | if (ctx->submitter_task) |
| 2751 | put_task_struct(ctx->submitter_task); |
| 2752 | |
| 2753 | /* there are no registered resources left, nobody uses it */ |
| 2754 | if (ctx->rsrc_node) |
| 2755 | io_rsrc_node_destroy(ctx, ctx->rsrc_node); |
| 2756 | |
| 2757 | WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); |
| 2758 | WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); |
| 2759 | |
| 2760 | io_alloc_cache_free(&ctx->rsrc_node_cache, kfree); |
| 2761 | if (ctx->mm_account) { |
| 2762 | mmdrop(ctx->mm_account); |
| 2763 | ctx->mm_account = NULL; |
| 2764 | } |
| 2765 | io_rings_free(ctx); |
| 2766 | |
| 2767 | percpu_ref_exit(&ctx->refs); |
| 2768 | free_uid(ctx->user); |
| 2769 | io_req_caches_free(ctx); |
| 2770 | if (ctx->hash_map) |
| 2771 | io_wq_put_hash(ctx->hash_map); |
| 2772 | io_napi_free(ctx); |
| 2773 | kfree(ctx->cancel_table.hbs); |
| 2774 | kfree(ctx->cancel_table_locked.hbs); |
| 2775 | xa_destroy(&ctx->io_bl_xa); |
| 2776 | kfree(ctx); |
| 2777 | } |
| 2778 | |
| 2779 | static __cold void io_activate_pollwq_cb(struct callback_head *cb) |
| 2780 | { |
| 2781 | struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, |
| 2782 | poll_wq_task_work); |
| 2783 | |
| 2784 | mutex_lock(&ctx->uring_lock); |
| 2785 | ctx->poll_activated = true; |
| 2786 | mutex_unlock(&ctx->uring_lock); |
| 2787 | |
| 2788 | /* |
| 2789 | * Wake ups for some events between start of polling and activation |
| 2790 | * might've been lost due to loose synchronisation. |
| 2791 | */ |
| 2792 | wake_up_all(&ctx->poll_wq); |
| 2793 | percpu_ref_put(&ctx->refs); |
| 2794 | } |
| 2795 | |
| 2796 | __cold void io_activate_pollwq(struct io_ring_ctx *ctx) |
| 2797 | { |
| 2798 | spin_lock(&ctx->completion_lock); |
| 2799 | /* already activated or in progress */ |
| 2800 | if (ctx->poll_activated || ctx->poll_wq_task_work.func) |
| 2801 | goto out; |
| 2802 | if (WARN_ON_ONCE(!ctx->task_complete)) |
| 2803 | goto out; |
| 2804 | if (!ctx->submitter_task) |
| 2805 | goto out; |
| 2806 | /* |
| 2807 | * With ->submitter_task only the submitter task completes requests, so
| 2808 | * we only need to sync with it, which is done by injecting a task_work.
| 2809 | */ |
| 2810 | init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb); |
| 2811 | percpu_ref_get(&ctx->refs); |
| 2812 | if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL)) |
| 2813 | percpu_ref_put(&ctx->refs); |
| 2814 | out: |
| 2815 | spin_unlock(&ctx->completion_lock); |
| 2816 | } |
| 2817 | |
| 2818 | static __poll_t io_uring_poll(struct file *file, poll_table *wait) |
| 2819 | { |
| 2820 | struct io_ring_ctx *ctx = file->private_data; |
| 2821 | __poll_t mask = 0; |
| 2822 | |
| 2823 | if (unlikely(!ctx->poll_activated)) |
| 2824 | io_activate_pollwq(ctx); |
| 2825 | |
| 2826 | poll_wait(file, &ctx->poll_wq, wait); |
| 2827 | /* |
| 2828 | * synchronizes with barrier from wq_has_sleeper call in |
| 2829 | * io_commit_cqring |
| 2830 | */ |
| 2831 | smp_rmb(); |
| 2832 | if (!io_sqring_full(ctx)) |
| 2833 | mask |= EPOLLOUT | EPOLLWRNORM; |
| 2834 | |
| 2835 | /* |
| 2836 | * Don't flush cqring overflow list here, just do a simple check. |
| 2837 | * Otherwise there could possibly be an ABBA deadlock:
| 2838 | * CPU0 CPU1 |
| 2839 | * ---- ---- |
| 2840 | * lock(&ctx->uring_lock); |
| 2841 | * lock(&ep->mtx); |
| 2842 | * lock(&ctx->uring_lock); |
| 2843 | * lock(&ep->mtx); |
| 2844 | * |
| 2845 | * Users may get EPOLLIN while seeing nothing in the cqring; this
| 2846 | * pushes them to do the flush.
| 2847 | */ |
| 2848 | |
| 2849 | if (__io_cqring_events_user(ctx) || io_has_work(ctx)) |
| 2850 | mask |= EPOLLIN | EPOLLRDNORM; |
| 2851 | |
| 2852 | return mask; |
| 2853 | } |
| 2854 | |
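/*
 * A rough userspace sketch of what io_uring_poll() above enables: the ring fd
 * can be fed to poll(2)/epoll(7), with POLLIN indicating CQEs (or overflow
 * work) may be pending and POLLOUT indicating SQ space:
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN) {
 *		- reap CQEs; as noted above, POLLIN may also be reported for
 *		  overflowed CQEs that still need an explicit flush
 *	}
 */
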
| 2855 | struct io_tctx_exit { |
| 2856 | struct callback_head task_work; |
| 2857 | struct completion completion; |
| 2858 | struct io_ring_ctx *ctx; |
| 2859 | }; |
| 2860 | |
| 2861 | static __cold void io_tctx_exit_cb(struct callback_head *cb) |
| 2862 | { |
| 2863 | struct io_uring_task *tctx = current->io_uring; |
| 2864 | struct io_tctx_exit *work; |
| 2865 | |
| 2866 | work = container_of(cb, struct io_tctx_exit, task_work); |
| 2867 | /* |
| 2868 | * When @in_cancel, we're in cancellation and it's racy to remove the |
| 2869 | * node. It'll be removed by the end of cancellation, just ignore it. |
| 2870 | * tctx can be NULL if the queueing of this task_work raced with |
| 2871 | * work cancelation off the exec path. |
| 2872 | */ |
| 2873 | if (tctx && !atomic_read(&tctx->in_cancel)) |
| 2874 | io_uring_del_tctx_node((unsigned long)work->ctx); |
| 2875 | complete(&work->completion); |
| 2876 | } |
| 2877 | |
| 2878 | static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) |
| 2879 | { |
| 2880 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 2881 | |
| 2882 | return req->ctx == data; |
| 2883 | } |
| 2884 | |
| 2885 | static __cold void io_ring_exit_work(struct work_struct *work) |
| 2886 | { |
| 2887 | struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); |
| 2888 | unsigned long timeout = jiffies + HZ * 60 * 5; |
| 2889 | unsigned long interval = HZ / 20; |
| 2890 | struct io_tctx_exit exit; |
| 2891 | struct io_tctx_node *node; |
| 2892 | int ret; |
| 2893 | |
| 2894 | /* |
| 2895 | * If we're doing polled IO and end up having requests being |
| 2896 | * submitted async (out-of-line), then completions can come in while |
| 2897 | * we're waiting for refs to drop. We need to reap these manually, |
| 2898 | * as nobody else will be looking for them. |
| 2899 | */ |
| 2900 | do { |
| 2901 | if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) { |
| 2902 | mutex_lock(&ctx->uring_lock); |
| 2903 | io_cqring_overflow_kill(ctx); |
| 2904 | mutex_unlock(&ctx->uring_lock); |
| 2905 | } |
| 2906 | |
| 2907 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
| 2908 | io_move_task_work_from_local(ctx); |
| 2909 | |
| 2910 | while (io_uring_try_cancel_requests(ctx, NULL, true)) |
| 2911 | cond_resched(); |
| 2912 | |
| 2913 | if (ctx->sq_data) { |
| 2914 | struct io_sq_data *sqd = ctx->sq_data; |
| 2915 | struct task_struct *tsk; |
| 2916 | |
| 2917 | io_sq_thread_park(sqd); |
| 2918 | tsk = sqd->thread; |
| 2919 | if (tsk && tsk->io_uring && tsk->io_uring->io_wq) |
| 2920 | io_wq_cancel_cb(tsk->io_uring->io_wq, |
| 2921 | io_cancel_ctx_cb, ctx, true); |
| 2922 | io_sq_thread_unpark(sqd); |
| 2923 | } |
| 2924 | |
| 2925 | io_req_caches_free(ctx); |
| 2926 | |
| 2927 | if (WARN_ON_ONCE(time_after(jiffies, timeout))) { |
| 2928 | /* there is little hope left, don't run it too often */ |
| 2929 | interval = HZ * 60; |
| 2930 | } |
| 2931 | /* |
| 2932 | * This is really an uninterruptible wait, as it has to run to
| 2933 | * completion. But it's also run from a kworker, which doesn't
| 2934 | * take signals, so it's fine to make it interruptible. This
| 2935 | * avoids scenarios where we knowingly can wait much longer |
| 2936 | * on completions, for example if someone does a SIGSTOP on |
| 2937 | * a task that needs to finish task_work to make this loop |
| 2938 | * complete. That's a synthetic situation that should not |
| 2939 | * cause a stuck task backtrace, and hence a potential panic |
| 2940 | * on stuck tasks if that is enabled. |
| 2941 | */ |
| 2942 | } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval)); |
| 2943 | |
| 2944 | init_completion(&exit.completion); |
| 2945 | init_task_work(&exit.task_work, io_tctx_exit_cb); |
| 2946 | exit.ctx = ctx; |
| 2947 | |
| 2948 | mutex_lock(&ctx->uring_lock); |
| 2949 | while (!list_empty(&ctx->tctx_list)) { |
| 2950 | WARN_ON_ONCE(time_after(jiffies, timeout)); |
| 2951 | |
| 2952 | node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, |
| 2953 | ctx_node); |
| 2954 | /* don't spin on a single task if cancellation failed */ |
| 2955 | list_rotate_left(&ctx->tctx_list); |
| 2956 | ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); |
| 2957 | if (WARN_ON_ONCE(ret)) |
| 2958 | continue; |
| 2959 | |
| 2960 | mutex_unlock(&ctx->uring_lock); |
| 2961 | /* |
| 2962 | * See comment above for |
| 2963 | * wait_for_completion_interruptible_timeout() on why this |
| 2964 | * wait is marked as interruptible. |
| 2965 | */ |
| 2966 | wait_for_completion_interruptible(&exit.completion); |
| 2967 | mutex_lock(&ctx->uring_lock); |
| 2968 | } |
| 2969 | mutex_unlock(&ctx->uring_lock); |
| 2970 | spin_lock(&ctx->completion_lock); |
| 2971 | spin_unlock(&ctx->completion_lock); |
| 2972 | |
| 2973 | /* pairs with RCU read section in io_req_local_work_add() */ |
| 2974 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
| 2975 | synchronize_rcu(); |
| 2976 | |
| 2977 | io_ring_ctx_free(ctx); |
| 2978 | } |
| 2979 | |
| 2980 | static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
| 2981 | { |
| 2982 | unsigned long index; |
| 2983 | struct creds *creds; |
| 2984 | |
| 2985 | mutex_lock(&ctx->uring_lock); |
| 2986 | percpu_ref_kill(&ctx->refs); |
| 2987 | xa_for_each(&ctx->personalities, index, creds) |
| 2988 | io_unregister_personality(ctx, index); |
| 2989 | mutex_unlock(&ctx->uring_lock); |
| 2990 | |
| 2991 | flush_delayed_work(&ctx->fallback_work); |
| 2992 | |
| 2993 | INIT_WORK(&ctx->exit_work, io_ring_exit_work); |
| 2994 | /* |
| 2995 | * Use the unbound iou_wq to avoid spawning tons of event kworkers
| 2996 | * if we're exiting a ton of rings at the same time. It just adds
| 2997 | * noise and overhead, and there's no discernible change in runtime
| 2998 | * over using system_wq.
| 2999 | */ |
| 3000 | queue_work(iou_wq, &ctx->exit_work); |
| 3001 | } |
| 3002 | |
| 3003 | static int io_uring_release(struct inode *inode, struct file *file) |
| 3004 | { |
| 3005 | struct io_ring_ctx *ctx = file->private_data; |
| 3006 | |
| 3007 | file->private_data = NULL; |
| 3008 | io_ring_ctx_wait_and_kill(ctx); |
| 3009 | return 0; |
| 3010 | } |
| 3011 | |
| 3012 | struct io_task_cancel { |
| 3013 | struct task_struct *task; |
| 3014 | bool all; |
| 3015 | }; |
| 3016 | |
| 3017 | static bool io_cancel_task_cb(struct io_wq_work *work, void *data) |
| 3018 | { |
| 3019 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 3020 | struct io_task_cancel *cancel = data; |
| 3021 | |
| 3022 | return io_match_task_safe(req, cancel->task, cancel->all); |
| 3023 | } |
| 3024 | |
| 3025 | static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, |
| 3026 | struct task_struct *task, |
| 3027 | bool cancel_all) |
| 3028 | { |
| 3029 | struct io_defer_entry *de; |
| 3030 | LIST_HEAD(list); |
| 3031 | |
| 3032 | spin_lock(&ctx->completion_lock); |
| 3033 | list_for_each_entry_reverse(de, &ctx->defer_list, list) { |
| 3034 | if (io_match_task_safe(de->req, task, cancel_all)) { |
| 3035 | list_cut_position(&list, &ctx->defer_list, &de->list); |
| 3036 | break; |
| 3037 | } |
| 3038 | } |
| 3039 | spin_unlock(&ctx->completion_lock); |
| 3040 | if (list_empty(&list)) |
| 3041 | return false; |
| 3042 | |
| 3043 | while (!list_empty(&list)) { |
| 3044 | de = list_first_entry(&list, struct io_defer_entry, list); |
| 3045 | list_del_init(&de->list); |
| 3046 | io_req_task_queue_fail(de->req, -ECANCELED); |
| 3047 | kfree(de); |
| 3048 | } |
| 3049 | return true; |
| 3050 | } |
| 3051 | |
| 3052 | static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) |
| 3053 | { |
| 3054 | struct io_tctx_node *node; |
| 3055 | enum io_wq_cancel cret; |
| 3056 | bool ret = false; |
| 3057 | |
| 3058 | mutex_lock(&ctx->uring_lock); |
| 3059 | list_for_each_entry(node, &ctx->tctx_list, ctx_node) { |
| 3060 | struct io_uring_task *tctx = node->task->io_uring; |
| 3061 | |
| 3062 | /* |
| 3063 | * io_wq will stay alive while we hold uring_lock, because it's |
| 3064 | * killed after ctx nodes, which requires taking the lock.
| 3065 | */ |
| 3066 | if (!tctx || !tctx->io_wq) |
| 3067 | continue; |
| 3068 | cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); |
| 3069 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
| 3070 | } |
| 3071 | mutex_unlock(&ctx->uring_lock); |
| 3072 | |
| 3073 | return ret; |
| 3074 | } |
| 3075 | |
| 3076 | static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, |
| 3077 | struct task_struct *task, |
| 3078 | bool cancel_all) |
| 3079 | { |
| 3080 | struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; |
| 3081 | struct io_uring_task *tctx = task ? task->io_uring : NULL; |
| 3082 | enum io_wq_cancel cret; |
| 3083 | bool ret = false; |
| 3084 | |
| 3085 | /* set it so io_req_local_work_add() would wake us up */ |
| 3086 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { |
| 3087 | atomic_set(&ctx->cq_wait_nr, 1); |
| 3088 | smp_mb(); |
| 3089 | } |
| 3090 | |
| 3091 | /* failed during ring init, it couldn't have issued any requests */ |
| 3092 | if (!ctx->rings) |
| 3093 | return false; |
| 3094 | |
| 3095 | if (!task) { |
| 3096 | ret |= io_uring_try_cancel_iowq(ctx); |
| 3097 | } else if (tctx && tctx->io_wq) { |
| 3098 | /* |
| 3099 | * Cancels requests of all rings, not only @ctx, but |
| 3100 | * it's fine as the task is in exit/exec. |
| 3101 | */ |
| 3102 | cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, |
| 3103 | &cancel, true); |
| 3104 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
| 3105 | } |
| 3106 | |
| 3107 | /* SQPOLL thread does its own polling */ |
| 3108 | if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || |
| 3109 | (ctx->sq_data && ctx->sq_data->thread == current)) { |
| 3110 | while (!wq_list_empty(&ctx->iopoll_list)) { |
| 3111 | io_iopoll_try_reap_events(ctx); |
| 3112 | ret = true; |
| 3113 | cond_resched(); |
| 3114 | } |
| 3115 | } |
| 3116 | |
| 3117 | if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && |
| 3118 | io_allowed_defer_tw_run(ctx)) |
| 3119 | ret |= io_run_local_work(ctx, INT_MAX) > 0; |
| 3120 | ret |= io_cancel_defer_files(ctx, task, cancel_all); |
| 3121 | mutex_lock(&ctx->uring_lock); |
| 3122 | ret |= io_poll_remove_all(ctx, task, cancel_all); |
| 3123 | ret |= io_waitid_remove_all(ctx, task, cancel_all); |
| 3124 | ret |= io_futex_remove_all(ctx, task, cancel_all); |
| 3125 | ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all); |
| 3126 | mutex_unlock(&ctx->uring_lock); |
| 3127 | ret |= io_kill_timeouts(ctx, task, cancel_all); |
| 3128 | if (task) |
| 3129 | ret |= io_run_task_work() > 0; |
| 3130 | else |
| 3131 | ret |= flush_delayed_work(&ctx->fallback_work); |
| 3132 | return ret; |
| 3133 | } |
| 3134 | |
| 3135 | static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) |
| 3136 | { |
| 3137 | if (tracked) |
| 3138 | return atomic_read(&tctx->inflight_tracked); |
| 3139 | return percpu_counter_sum(&tctx->inflight); |
| 3140 | } |
| 3141 | |
| 3142 | /* |
| 3143 | * Find any io_uring ctx that this task has registered or done IO on, and cancel |
| 3144 | * its requests. @sqd must be non-NULL if and only if this is an SQPOLL thread cancellation. |
| 3145 | */ |
| 3146 | __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) |
| 3147 | { |
| 3148 | struct io_uring_task *tctx = current->io_uring; |
| 3149 | struct io_ring_ctx *ctx; |
| 3150 | struct io_tctx_node *node; |
| 3151 | unsigned long index; |
| 3152 | s64 inflight; |
| 3153 | DEFINE_WAIT(wait); |
| 3154 | |
| 3155 | WARN_ON_ONCE(sqd && sqd->thread != current); |
| 3156 | |
| 3157 | if (!current->io_uring) |
| 3158 | return; |
| 3159 | if (tctx->io_wq) |
| 3160 | io_wq_exit_start(tctx->io_wq); |
| 3161 | |
| 3162 | atomic_inc(&tctx->in_cancel); |
| 3163 | do { |
| 3164 | bool loop = false; |
| 3165 | |
| 3166 | io_uring_drop_tctx_refs(current); |
| 3167 | if (!tctx_inflight(tctx, !cancel_all)) |
| 3168 | break; |
| 3169 | |
| 3170 | /* read completions before cancellations */ |
| 3171 | inflight = tctx_inflight(tctx, false); |
| 3172 | if (!inflight) |
| 3173 | break; |
| 3174 | |
| 3175 | if (!sqd) { |
| 3176 | xa_for_each(&tctx->xa, index, node) { |
| 3177 | /* sqpoll task will cancel all its requests */ |
| 3178 | if (node->ctx->sq_data) |
| 3179 | continue; |
| 3180 | loop |= io_uring_try_cancel_requests(node->ctx, |
| 3181 | current, cancel_all); |
| 3182 | } |
| 3183 | } else { |
| 3184 | list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) |
| 3185 | loop |= io_uring_try_cancel_requests(ctx, |
| 3186 | current, |
| 3187 | cancel_all); |
| 3188 | } |
| 3189 | |
| 3190 | if (loop) { |
| 3191 | cond_resched(); |
| 3192 | continue; |
| 3193 | } |
| 3194 | |
| 3195 | prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); |
| 3196 | io_run_task_work(); |
| 3197 | io_uring_drop_tctx_refs(current); |
| 3198 | xa_for_each(&tctx->xa, index, node) { |
| 3199 | if (!llist_empty(&node->ctx->work_llist)) { |
| 3200 | WARN_ON_ONCE(node->ctx->submitter_task && |
| 3201 | node->ctx->submitter_task != current); |
| 3202 | goto end_wait; |
| 3203 | } |
| 3204 | } |
| 3205 | /* |
| 3206 | * If we've seen completions, retry without waiting. This |
| 3207 | * avoids a race where a completion comes in before we did |
| 3208 | * prepare_to_wait(). |
| 3209 | */ |
| 3210 | if (inflight == tctx_inflight(tctx, !cancel_all)) |
| 3211 | schedule(); |
| 3212 | end_wait: |
| 3213 | finish_wait(&tctx->wait, &wait); |
| 3214 | } while (1); |
| 3215 | |
| 3216 | io_uring_clean_tctx(tctx); |
| 3217 | if (cancel_all) { |
| 3218 | /* |
| 3219 | * We shouldn't run task_works after cancel, so just leave |
| 3220 | * ->in_cancel set for normal exit. |
| 3221 | */ |
| 3222 | atomic_dec(&tctx->in_cancel); |
| 3223 | /* for exec all current's requests should be gone, kill tctx */ |
| 3224 | __io_uring_free(current); |
| 3225 | } |
| 3226 | } |
| 3227 | |
| 3228 | void __io_uring_cancel(bool cancel_all) |
| 3229 | { |
| 3230 | io_uring_cancel_generic(cancel_all, NULL); |
| 3231 | } |
| 3232 | |
| 3233 | static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) |
| 3234 | { |
| 3235 | if (flags & IORING_ENTER_EXT_ARG) { |
| 3236 | struct io_uring_getevents_arg arg; |
| 3237 | |
| 3238 | if (argsz != sizeof(arg)) |
| 3239 | return -EINVAL; |
| 3240 | if (copy_from_user(&arg, argp, sizeof(arg))) |
| 3241 | return -EFAULT; |
| 3242 | } |
| 3243 | return 0; |
| 3244 | } |
| 3245 | |
| 3246 | static int io_get_ext_arg(unsigned flags, const void __user *argp, |
| 3247 | struct ext_arg *ext_arg) |
| 3248 | { |
| 3249 | struct io_uring_getevents_arg arg; |
| 3250 | |
| 3251 | /* |
| 3252 | * If EXT_ARG isn't set, then we have no timespec and the argp pointer |
| 3253 | * is just a pointer to the sigset_t. |
| 3254 | */ |
| 3255 | if (!(flags & IORING_ENTER_EXT_ARG)) { |
| 3256 | ext_arg->sig = (const sigset_t __user *) argp; |
| 3257 | ext_arg->ts = NULL; |
| 3258 | return 0; |
| 3259 | } |
| 3260 | |
| 3261 | /* |
| 3262 | * EXT_ARG is set - ensure we agree on its size, and copy in our |
| 3263 | * timespec and sigset_t pointers if everything checks out. |
| 3264 | */ |
| 3265 | if (ext_arg->argsz != sizeof(arg)) |
| 3266 | return -EINVAL; |
| 3267 | if (copy_from_user(&arg, argp, sizeof(arg))) |
| 3268 | return -EFAULT; |
| 3269 | ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC; |
| 3270 | ext_arg->sig = u64_to_user_ptr(arg.sigmask); |
| 3271 | ext_arg->argsz = arg.sigmask_sz; |
| 3272 | ext_arg->ts = u64_to_user_ptr(arg.ts); |
| 3273 | return 0; |
| 3274 | } |
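| | /* |
| |  * For reference, a minimal userspace sketch of the EXT_ARG convention |
| |  * handled above; ring_fd, mask and ts are illustrative placeholders, not |
| |  * names defined anywhere in this file: |
| |  * |
| |  *	struct io_uring_getevents_arg arg = { |
| |  *		.sigmask	= (__u64)(unsigned long)&mask, |
| |  *		.sigmask_sz	= _NSIG / 8, |
| |  *		.min_wait_usec	= 0, |
| |  *		.ts		= (__u64)(unsigned long)&ts, |
| |  *	}; |
| |  * |
| |  *	syscall(__NR_io_uring_enter, ring_fd, 0, 1, |
| |  *		IORING_ENTER_GETEVENTS|IORING_ENTER_EXT_ARG, |
| |  *		&arg, sizeof(arg)); |
| |  * |
| |  * Without IORING_ENTER_EXT_ARG, the same final two arguments would be the |
| |  * sigset_t pointer and its size instead. |
| |  */ |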
| 3275 | |
| 3276 | SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, |
| 3277 | u32, min_complete, u32, flags, const void __user *, argp, |
| 3278 | size_t, argsz) |
| 3279 | { |
| 3280 | struct io_ring_ctx *ctx; |
| 3281 | struct file *file; |
| 3282 | long ret; |
| 3283 | |
| 3284 | if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | |
| 3285 | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | |
| 3286 | IORING_ENTER_REGISTERED_RING | |
| 3287 | IORING_ENTER_ABS_TIMER))) |
| 3288 | return -EINVAL; |
| 3289 | |
| 3290 | /* |
| 3291 | * The ring fd has been registered via IORING_REGISTER_RING_FDS, so we |
| 3292 | * need only dereference our task-private array to find it. |
| 3293 | */ |
| 3294 | if (flags & IORING_ENTER_REGISTERED_RING) { |
| 3295 | struct io_uring_task *tctx = current->io_uring; |
| 3296 | |
| 3297 | if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) |
| 3298 | return -EINVAL; |
| 3299 | fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); |
| 3300 | file = tctx->registered_rings[fd]; |
| 3301 | if (unlikely(!file)) |
| 3302 | return -EBADF; |
| 3303 | } else { |
| 3304 | file = fget(fd); |
| 3305 | if (unlikely(!file)) |
| 3306 | return -EBADF; |
| 3307 | ret = -EOPNOTSUPP; |
| 3308 | if (unlikely(!io_is_uring_fops(file))) |
| 3309 | goto out; |
| 3310 | } |
| 3311 | |
| 3312 | ctx = file->private_data; |
| 3313 | ret = -EBADFD; |
| 3314 | if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) |
| 3315 | goto out; |
| 3316 | |
| 3317 | /* |
| 3318 | * For SQ polling, the thread will do all submissions and completions. |
| 3319 | * Just return the requested submit count, and wake the thread if |
| 3320 | * we were asked to. |
| 3321 | */ |
| 3322 | ret = 0; |
| 3323 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 3324 | if (unlikely(ctx->sq_data->thread == NULL)) { |
| 3325 | ret = -EOWNERDEAD; |
| 3326 | goto out; |
| 3327 | } |
| 3328 | if (flags & IORING_ENTER_SQ_WAKEUP) |
| 3329 | wake_up(&ctx->sq_data->wait); |
| 3330 | if (flags & IORING_ENTER_SQ_WAIT) |
| 3331 | io_sqpoll_wait_sq(ctx); |
| 3332 | |
| 3333 | ret = to_submit; |
| 3334 | } else if (to_submit) { |
| 3335 | ret = io_uring_add_tctx_node(ctx); |
| 3336 | if (unlikely(ret)) |
| 3337 | goto out; |
| 3338 | |
| 3339 | mutex_lock(&ctx->uring_lock); |
| 3340 | ret = io_submit_sqes(ctx, to_submit); |
| 3341 | if (ret != to_submit) { |
| 3342 | mutex_unlock(&ctx->uring_lock); |
| 3343 | goto out; |
| 3344 | } |
| 3345 | if (flags & IORING_ENTER_GETEVENTS) { |
| 3346 | if (ctx->syscall_iopoll) |
| 3347 | goto iopoll_locked; |
| 3348 | /* |
| 3349 | * Ignore errors; we'll soon call io_cqring_wait() and |
| 3350 | * it should handle any ownership problems. |
| 3351 | */ |
| 3352 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) |
| 3353 | (void)io_run_local_work_locked(ctx, min_complete); |
| 3354 | } |
| 3355 | mutex_unlock(&ctx->uring_lock); |
| 3356 | } |
| 3357 | |
| 3358 | if (flags & IORING_ENTER_GETEVENTS) { |
| 3359 | int ret2; |
| 3360 | |
| 3361 | if (ctx->syscall_iopoll) { |
| 3362 | /* |
| 3363 | * We disallow the app entering submit/complete with |
| 3364 | * polling, but we still need to lock the ring to |
| 3365 | * prevent racing with polled issue that got punted to |
| 3366 | * a workqueue. |
| 3367 | */ |
| 3368 | mutex_lock(&ctx->uring_lock); |
| 3369 | iopoll_locked: |
| 3370 | ret2 = io_validate_ext_arg(flags, argp, argsz); |
| 3371 | if (likely(!ret2)) { |
| 3372 | min_complete = min(min_complete, |
| 3373 | ctx->cq_entries); |
| 3374 | ret2 = io_iopoll_check(ctx, min_complete); |
| 3375 | } |
| 3376 | mutex_unlock(&ctx->uring_lock); |
| 3377 | } else { |
| 3378 | struct ext_arg ext_arg = { .argsz = argsz }; |
| 3379 | |
| 3380 | ret2 = io_get_ext_arg(flags, argp, &ext_arg); |
| 3381 | if (likely(!ret2)) { |
| 3382 | min_complete = min(min_complete, |
| 3383 | ctx->cq_entries); |
| 3384 | ret2 = io_cqring_wait(ctx, min_complete, flags, |
| 3385 | &ext_arg); |
| 3386 | } |
| 3387 | } |
| 3388 | |
| 3389 | if (!ret) { |
| 3390 | ret = ret2; |
| 3391 | |
| 3392 | /* |
| 3393 | * EBADR indicates that one or more CQEs were dropped. |
| 3394 | * Once the user has been informed, we can clear the bit |
| 3395 | * as they have evidently accepted those drops. |
| 3396 | */ |
| 3397 | if (unlikely(ret2 == -EBADR)) |
| 3398 | clear_bit(IO_CHECK_CQ_DROPPED_BIT, |
| 3399 | &ctx->check_cq); |
| 3400 | } |
| 3401 | } |
| 3402 | out: |
| 3403 | if (!(flags & IORING_ENTER_REGISTERED_RING)) |
| 3404 | fput(file); |
| 3405 | return ret; |
| 3406 | } |
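| | /* |
| |  * A minimal userspace sketch of the common submit-and-wait pattern served |
| |  * by the syscall above (ring_fd is an illustrative placeholder): |
| |  * |
| |  *	int submitted = syscall(__NR_io_uring_enter, ring_fd, 1, 1, |
| |  *				IORING_ENTER_GETEVENTS, NULL, 0); |
| |  * |
| |  * This submits one SQE and waits for at least one completion. With |
| |  * IORING_SETUP_SQPOLL, submission happens from the poll thread instead, |
| |  * and the call is mainly used to wake it (IORING_ENTER_SQ_WAKEUP) or to |
| |  * wait for completions. |
| |  */ |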
| 3407 | |
| 3408 | static const struct file_operations io_uring_fops = { |
| 3409 | .release = io_uring_release, |
| 3410 | .mmap = io_uring_mmap, |
| 3411 | .get_unmapped_area = io_uring_get_unmapped_area, |
| 3412 | #ifndef CONFIG_MMU |
| 3413 | .mmap_capabilities = io_uring_nommu_mmap_capabilities, |
| 3414 | #endif |
| 3415 | .poll = io_uring_poll, |
| 3416 | #ifdef CONFIG_PROC_FS |
| 3417 | .show_fdinfo = io_uring_show_fdinfo, |
| 3418 | #endif |
| 3419 | }; |
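| | /* |
| |  * Note that there are no read/write methods here: all IO is driven |
| |  * through io_uring_enter() and the shared rings; the file itself is only |
| |  * used for mmap() of those rings, poll() support, fdinfo, and final |
| |  * release. |
| |  */ |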
| 3420 | |
| 3421 | bool io_is_uring_fops(struct file *file) |
| 3422 | { |
| 3423 | return file->f_op == &io_uring_fops; |
| 3424 | } |
| 3425 | |
| 3426 | static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, |
| 3427 | struct io_uring_params *p) |
| 3428 | { |
| 3429 | struct io_rings *rings; |
| 3430 | size_t size, sq_array_offset; |
| 3431 | void *ptr; |
| 3432 | |
| 3433 | /* make sure these are sane, as we already accounted them */ |
| 3434 | ctx->sq_entries = p->sq_entries; |
| 3435 | ctx->cq_entries = p->cq_entries; |
| 3436 | |
| 3437 | size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset); |
| 3438 | if (size == SIZE_MAX) |
| 3439 | return -EOVERFLOW; |
| 3440 | |
| 3441 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
| 3442 | rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size); |
| 3443 | else |
| 3444 | rings = io_rings_map(ctx, p->cq_off.user_addr, size); |
| 3445 | |
| 3446 | if (IS_ERR(rings)) |
| 3447 | return PTR_ERR(rings); |
| 3448 | |
| 3449 | ctx->rings = rings; |
| 3450 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) |
| 3451 | ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); |
| 3452 | rings->sq_ring_mask = p->sq_entries - 1; |
| 3453 | rings->cq_ring_mask = p->cq_entries - 1; |
| 3454 | rings->sq_ring_entries = p->sq_entries; |
| 3455 | rings->cq_ring_entries = p->cq_entries; |
| 3456 | |
| 3457 | if (p->flags & IORING_SETUP_SQE128) |
| 3458 | size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); |
| 3459 | else |
| 3460 | size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); |
| 3461 | if (size == SIZE_MAX) { |
| 3462 | io_rings_free(ctx); |
| 3463 | return -EOVERFLOW; |
| 3464 | } |
| 3465 | |
| 3466 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
| 3467 | ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size); |
| 3468 | else |
| 3469 | ptr = io_sqes_map(ctx, p->sq_off.user_addr, size); |
| 3470 | |
| 3471 | if (IS_ERR(ptr)) { |
| 3472 | io_rings_free(ctx); |
| 3473 | return PTR_ERR(ptr); |
| 3474 | } |
| 3475 | |
| 3476 | ctx->sq_sqes = ptr; |
| 3477 | return 0; |
| 3478 | } |
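| | /* |
| |  * The rings allocated above are exposed to userspace via mmap() on the |
| |  * ring fd (unless IORING_SETUP_NO_MMAP is used). A minimal sketch, where |
| |  * ring_fd and p are illustrative placeholders for the ring fd and the |
| |  * io_uring_params filled in by io_uring_setup(): |
| |  * |
| |  *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32); |
| |  *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ|PROT_WRITE, |
| |  *			     MAP_SHARED|MAP_POPULATE, ring_fd, |
| |  *			     IORING_OFF_SQ_RING); |
| |  *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe), |
| |  *			  PROT_READ|PROT_WRITE, MAP_SHARED|MAP_POPULATE, |
| |  *			  ring_fd, IORING_OFF_SQES); |
| |  * |
| |  * With IORING_FEAT_SINGLE_MMAP the CQ ring lives in the same mapping as |
| |  * the SQ ring; otherwise a third mmap() at IORING_OFF_CQ_RING is needed. |
| |  */ |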
| 3479 | |
| 3480 | static int io_uring_install_fd(struct file *file) |
| 3481 | { |
| 3482 | int fd; |
| 3483 | |
| 3484 | fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
| 3485 | if (fd < 0) |
| 3486 | return fd; |
| 3487 | fd_install(fd, file); |
| 3488 | return fd; |
| 3489 | } |
| 3490 | |
| 3491 | /* |
| 3492 | * Allocate an anonymous fd; this is what constitutes the application- |
| 3493 | * visible backing of an io_uring instance. The application mmaps this |
| 3494 | * fd to gain access to the SQ/CQ ring details. |
| 3495 | */ |
| 3496 | static struct file *io_uring_get_file(struct io_ring_ctx *ctx) |
| 3497 | { |
| 3498 | /* Create a new inode so that the LSM can block the creation. */ |
| 3499 | return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx, |
| 3500 | O_RDWR | O_CLOEXEC, NULL); |
| 3501 | } |
| 3502 | |
| 3503 | static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, |
| 3504 | struct io_uring_params __user *params) |
| 3505 | { |
| 3506 | struct io_ring_ctx *ctx; |
| 3507 | struct io_uring_task *tctx; |
| 3508 | struct file *file; |
| 3509 | int ret; |
| 3510 | |
| 3511 | if (!entries) |
| 3512 | return -EINVAL; |
| 3513 | if (entries > IORING_MAX_ENTRIES) { |
| 3514 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 3515 | return -EINVAL; |
| 3516 | entries = IORING_MAX_ENTRIES; |
| 3517 | } |
| 3518 | |
| 3519 | if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY) |
| 3520 | && !(p->flags & IORING_SETUP_NO_MMAP)) |
| 3521 | return -EINVAL; |
| 3522 | |
| 3523 | /* |
| 3524 | * Use twice as many entries for the CQ ring by default. The |
| 3525 | * application can drive a higher depth than the size of the SQ ring, |
| 3526 | * since the SQEs are only used at submission time, so the larger CQ |
| 3527 | * ring gives some room for overcommit. If the application has set |
| 3528 | * IORING_SETUP_CQSIZE, it will have passed in the desired number of |
| 3529 | * CQ ring entries explicitly. |
| 3530 | */ |
| 3531 | p->sq_entries = roundup_pow_of_two(entries); |
| 3532 | if (p->flags & IORING_SETUP_CQSIZE) { |
| 3533 | /* |
| 3534 | * If IORING_SETUP_CQSIZE is set, round the requested CQ size up to a |
| 3535 | * power-of-two if it isn't one already. Beyond requiring CQ >= SQ, |
| 3536 | * we do NOT impose any particular cq vs sq ring sizing. |
| 3537 | */ |
| 3538 | if (!p->cq_entries) |
| 3539 | return -EINVAL; |
| 3540 | if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { |
| 3541 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 3542 | return -EINVAL; |
| 3543 | p->cq_entries = IORING_MAX_CQ_ENTRIES; |
| 3544 | } |
| 3545 | p->cq_entries = roundup_pow_of_two(p->cq_entries); |
| 3546 | if (p->cq_entries < p->sq_entries) |
| 3547 | return -EINVAL; |
| 3548 | } else { |
| 3549 | p->cq_entries = 2 * p->sq_entries; |
| 3550 | } |
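| | /* |
| |  * For example: with entries == 100 and IORING_SETUP_CQSIZE unset, the |
| |  * block above yields sq_entries = roundup_pow_of_two(100) = 128 and |
| |  * cq_entries = 2 * 128 = 256; with IORING_SETUP_CQSIZE set and |
| |  * cq_entries == 300, the CQ ring is rounded up to 512, which is accepted |
| |  * since it is not smaller than sq_entries. |
| |  */ |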
| 3551 | |
| 3552 | ctx = io_ring_ctx_alloc(p); |
| 3553 | if (!ctx) |
| 3554 | return -ENOMEM; |
| 3555 | |
| 3556 | ctx->clockid = CLOCK_MONOTONIC; |
| 3557 | ctx->clock_offset = 0; |
| 3558 | |
| 3559 | if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) && |
| 3560 | !(ctx->flags & IORING_SETUP_IOPOLL) && |
| 3561 | !(ctx->flags & IORING_SETUP_SQPOLL)) |
| 3562 | ctx->task_complete = true; |
| 3563 | |
| 3564 | if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) |
| 3565 | ctx->lockless_cq = true; |
| 3566 | |
| 3567 | /* |
| 3568 | * lazy poll_wq activation relies on ->task_complete for synchronisation |
| 3569 | * purposes, see io_activate_pollwq() |
| 3570 | */ |
| 3571 | if (!ctx->task_complete) |
| 3572 | ctx->poll_activated = true; |
| 3573 | |
| 3574 | /* |
| 3575 | * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space |
| 3576 | * applications don't need to poll for IO completion events |
| 3577 | * themselves; they can rely on io_sq_thread to do that polling, |
| 3578 | * which reduces CPU usage and uring_lock contention. |
| 3579 | */ |
| 3580 | if (ctx->flags & IORING_SETUP_IOPOLL && |
| 3581 | !(ctx->flags & IORING_SETUP_SQPOLL)) |
| 3582 | ctx->syscall_iopoll = 1; |
| 3583 | |
| 3584 | ctx->compat = in_compat_syscall(); |
| 3585 | if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK)) |
| 3586 | ctx->user = get_uid(current_user()); |
| 3587 | |
| 3588 | /* |
| 3589 | * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if |
| 3590 | * COOP_TASKRUN is set, then IPIs are never needed by the app. |
| 3591 | */ |
| 3592 | ret = -EINVAL; |
| 3593 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 3594 | /* IPI related flags don't make sense with SQPOLL */ |
| 3595 | if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | |
| 3596 | IORING_SETUP_TASKRUN_FLAG | |
| 3597 | IORING_SETUP_DEFER_TASKRUN)) |
| 3598 | goto err; |
| 3599 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
| 3600 | } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { |
| 3601 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
| 3602 | } else { |
| 3603 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG && |
| 3604 | !(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) |
| 3605 | goto err; |
| 3606 | ctx->notify_method = TWA_SIGNAL; |
| 3607 | } |
| 3608 | |
| 3609 | /* |
| 3610 | * For DEFER_TASKRUN we require the completion task to be the same as the |
| 3611 | * submission task. This implies that there is only one submitter, so enforce |
| 3612 | * that. |
| 3613 | */ |
| 3614 | if (ctx->flags & IORING_SETUP_DEFER_TASKRUN && |
| 3615 | !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) { |
| 3616 | goto err; |
| 3617 | } |
| 3618 | |
| 3619 | /* |
| 3620 | * This is grabbed purely for accounting purposes. When a process exits, |
| 3621 | * the mm is exited and dropped before the files, hence we need to hang |
| 3622 | * on to this mm so we can still unaccount the memory (locked/pinned vm) |
| 3623 | * at that point. It's not used for anything else. |
| 3624 | */ |
| 3625 | mmgrab(current->mm); |
| 3626 | ctx->mm_account = current->mm; |
| 3627 | |
| 3628 | ret = io_allocate_scq_urings(ctx, p); |
| 3629 | if (ret) |
| 3630 | goto err; |
| 3631 | |
| 3632 | ret = io_sq_offload_create(ctx, p); |
| 3633 | if (ret) |
| 3634 | goto err; |
| 3635 | |
| 3636 | ret = io_rsrc_init(ctx); |
| 3637 | if (ret) |
| 3638 | goto err; |
| 3639 | |
| 3640 | p->sq_off.head = offsetof(struct io_rings, sq.head); |
| 3641 | p->sq_off.tail = offsetof(struct io_rings, sq.tail); |
| 3642 | p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); |
| 3643 | p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); |
| 3644 | p->sq_off.flags = offsetof(struct io_rings, sq_flags); |
| 3645 | p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); |
| 3646 | if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) |
| 3647 | p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; |
| 3648 | p->sq_off.resv1 = 0; |
| 3649 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
| 3650 | p->sq_off.user_addr = 0; |
| 3651 | |
| 3652 | p->cq_off.head = offsetof(struct io_rings, cq.head); |
| 3653 | p->cq_off.tail = offsetof(struct io_rings, cq.tail); |
| 3654 | p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); |
| 3655 | p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); |
| 3656 | p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); |
| 3657 | p->cq_off.cqes = offsetof(struct io_rings, cqes); |
| 3658 | p->cq_off.flags = offsetof(struct io_rings, cq_flags); |
| 3659 | p->cq_off.resv1 = 0; |
| 3660 | if (!(ctx->flags & IORING_SETUP_NO_MMAP)) |
| 3661 | p->cq_off.user_addr = 0; |
| 3662 | |
| 3663 | p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | |
| 3664 | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | |
| 3665 | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | |
| 3666 | IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | |
| 3667 | IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | |
| 3668 | IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP | |
| 3669 | IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING | |
| 3670 | IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT; |
| 3671 | |
| 3672 | if (copy_to_user(params, p, sizeof(*p))) { |
| 3673 | ret = -EFAULT; |
| 3674 | goto err; |
| 3675 | } |
| 3676 | |
| 3677 | if (ctx->flags & IORING_SETUP_SINGLE_ISSUER |
| 3678 | && !(ctx->flags & IORING_SETUP_R_DISABLED)) |
| 3679 | WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); |
| 3680 | |
| 3681 | file = io_uring_get_file(ctx); |
| 3682 | if (IS_ERR(file)) { |
| 3683 | ret = PTR_ERR(file); |
| 3684 | goto err; |
| 3685 | } |
| 3686 | |
| 3687 | ret = __io_uring_add_tctx_node(ctx); |
| 3688 | if (ret) |
| 3689 | goto err_fput; |
| 3690 | tctx = current->io_uring; |
| 3691 | |
| 3692 | /* |
| 3693 | * Install the ring fd as the very last thing, so we don't risk someone |
| 3694 | * having closed it before we finish setup. |
| 3695 | */ |
| 3696 | if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY) |
| 3697 | ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX); |
| 3698 | else |
| 3699 | ret = io_uring_install_fd(file); |
| 3700 | if (ret < 0) |
| 3701 | goto err_fput; |
| 3702 | |
| 3703 | trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); |
| 3704 | return ret; |
| 3705 | err: |
| 3706 | io_ring_ctx_wait_and_kill(ctx); |
| 3707 | return ret; |
| 3708 | err_fput: |
| 3709 | fput(file); |
| 3710 | return ret; |
| 3711 | } |
| 3712 | |
| 3713 | /* |
| 3714 | * Sets up an io_uring context, and returns the fd. The application asks for a |
| 3715 | * ring size; we return the actual sq/cq ring sizes (among other things) in the |
| 3716 | * params structure passed in. |
| 3717 | */ |
| 3718 | static long io_uring_setup(u32 entries, struct io_uring_params __user *params) |
| 3719 | { |
| 3720 | struct io_uring_params p; |
| 3721 | int i; |
| 3722 | |
| 3723 | if (copy_from_user(&p, params, sizeof(p))) |
| 3724 | return -EFAULT; |
| 3725 | for (i = 0; i < ARRAY_SIZE(p.resv); i++) { |
| 3726 | if (p.resv[i]) |
| 3727 | return -EINVAL; |
| 3728 | } |
| 3729 | |
| 3730 | if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | |
| 3731 | IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | |
| 3732 | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ | |
| 3733 | IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL | |
| 3734 | IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG | |
| 3735 | IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | |
| 3736 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | |
| 3737 | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | |
| 3738 | IORING_SETUP_NO_SQARRAY)) |
| 3739 | return -EINVAL; |
| 3740 | |
| 3741 | return io_uring_create(entries, &p, params); |
| 3742 | } |
| 3743 | |
| 3744 | static inline bool io_uring_allowed(void) |
| 3745 | { |
| 3746 | int disabled = READ_ONCE(sysctl_io_uring_disabled); |
| 3747 | kgid_t io_uring_group; |
| 3748 | |
| 3749 | if (disabled == 2) |
| 3750 | return false; |
| 3751 | |
| 3752 | if (disabled == 0 || capable(CAP_SYS_ADMIN)) |
| 3753 | return true; |
| 3754 | |
| 3755 | io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group); |
| 3756 | if (!gid_valid(io_uring_group)) |
| 3757 | return false; |
| 3758 | |
| 3759 | return in_group_p(io_uring_group); |
| 3760 | } |
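| | /* |
| |  * The policy above corresponds to the kernel.io_uring_disabled sysctl: |
| |  * 0 enables io_uring for everyone, 1 restricts ring creation to tasks |
| |  * with CAP_SYS_ADMIN or membership in kernel.io_uring_group, and 2 |
| |  * disables io_uring creation entirely (e.g. "sysctl -w |
| |  * kernel.io_uring_disabled=2" turns it off system-wide); see |
| |  * Documentation/admin-guide/sysctl/kernel.rst. |
| |  */ |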
| 3761 | |
| 3762 | SYSCALL_DEFINE2(io_uring_setup, u32, entries, |
| 3763 | struct io_uring_params __user *, params) |
| 3764 | { |
| 3765 | if (!io_uring_allowed()) |
| 3766 | return -EPERM; |
| 3767 | |
| 3768 | return io_uring_setup(entries, params); |
| 3769 | } |
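| | /* |
| |  * A minimal userspace sketch of ring creation against the syscall above |
| |  * (the variable names are illustrative placeholders): |
| |  * |
| |  *	struct io_uring_params p = { .flags = IORING_SETUP_CLAMP }; |
| |  *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p); |
| |  * |
| |  * On success, p.sq_entries/p.cq_entries hold the actual (power-of-two) |
| |  * ring sizes, p.features advertises IORING_FEAT_* bits, and p.sq_off/ |
| |  * p.cq_off give the mmap offsets for the SQ/CQ rings and SQE array. |
| |  */ |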
| 3770 | |
| 3771 | static int __init io_uring_init(void) |
| 3772 | { |
| 3773 | struct kmem_cache_args kmem_args = { |
| 3774 | .useroffset = offsetof(struct io_kiocb, cmd.data), |
| 3775 | .usersize = sizeof_field(struct io_kiocb, cmd.data), |
| 3776 | }; |
| 3777 | |
| 3778 | #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ |
| 3779 | BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ |
| 3780 | BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ |
| 3781 | } while (0) |
| 3782 | |
| 3783 | #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \ |
| 3784 | __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename) |
| 3785 | #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \ |
| 3786 | __BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename) |
| 3787 | BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64); |
| 3788 | BUILD_BUG_SQE_ELEM(0, __u8, opcode); |
| 3789 | BUILD_BUG_SQE_ELEM(1, __u8, flags); |
| 3790 | BUILD_BUG_SQE_ELEM(2, __u16, ioprio); |
| 3791 | BUILD_BUG_SQE_ELEM(4, __s32, fd); |
| 3792 | BUILD_BUG_SQE_ELEM(8, __u64, off); |
| 3793 | BUILD_BUG_SQE_ELEM(8, __u64, addr2); |
| 3794 | BUILD_BUG_SQE_ELEM(8, __u32, cmd_op); |
| 3795 | BUILD_BUG_SQE_ELEM(12, __u32, __pad1); |
| 3796 | BUILD_BUG_SQE_ELEM(16, __u64, addr); |
| 3797 | BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in); |
| 3798 | BUILD_BUG_SQE_ELEM(24, __u32, len); |
| 3799 | BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags); |
| 3800 | BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); |
| 3801 | BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); |
| 3802 | BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); |
| 3803 | BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); |
| 3804 | BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); |
| 3805 | BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); |
| 3806 | BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); |
| 3807 | BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); |
| 3808 | BUILD_BUG_SQE_ELEM(28, __u32, accept_flags); |
| 3809 | BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags); |
| 3810 | BUILD_BUG_SQE_ELEM(28, __u32, open_flags); |
| 3811 | BUILD_BUG_SQE_ELEM(28, __u32, statx_flags); |
| 3812 | BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice); |
| 3813 | BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); |
| 3814 | BUILD_BUG_SQE_ELEM(28, __u32, rename_flags); |
| 3815 | BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags); |
| 3816 | BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags); |
| 3817 | BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags); |
| 3818 | BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags); |
| 3819 | BUILD_BUG_SQE_ELEM(32, __u64, user_data); |
| 3820 | BUILD_BUG_SQE_ELEM(40, __u16, buf_index); |
| 3821 | BUILD_BUG_SQE_ELEM(40, __u16, buf_group); |
| 3822 | BUILD_BUG_SQE_ELEM(42, __u16, personality); |
| 3823 | BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); |
| 3824 | BUILD_BUG_SQE_ELEM(44, __u32, file_index); |
| 3825 | BUILD_BUG_SQE_ELEM(44, __u16, addr_len); |
| 3826 | BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]); |
| 3827 | BUILD_BUG_SQE_ELEM(48, __u64, addr3); |
| 3828 | BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd); |
| 3829 | BUILD_BUG_SQE_ELEM(56, __u64, __pad2); |
| 3830 | |
| 3831 | BUILD_BUG_ON(sizeof(struct io_uring_files_update) != |
| 3832 | sizeof(struct io_uring_rsrc_update)); |
| 3833 | BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) > |
| 3834 | sizeof(struct io_uring_rsrc_update2)); |
| 3835 | |
| 3836 | /* ->buf_index is u16 */ |
| 3837 | BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0); |
| 3838 | BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) != |
| 3839 | offsetof(struct io_uring_buf_ring, tail)); |
| 3840 | |
| 3841 | /* should fit into one byte */ |
| 3842 | BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); |
| 3843 | BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); |
| 3844 | BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); |
| 3845 | |
| 3846 | BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags)); |
| 3847 | |
| 3848 | BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32)); |
| 3849 | |
| 3850 | /* top 8 bits are for internal use */ |
| 3851 | BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0); |
| 3852 | |
| 3853 | io_uring_optable_init(); |
| 3854 | |
| 3855 | /* |
| 3856 | * Allow user copy in the per-command field, which starts after the |
| 3857 | * file in io_kiocb and extends up to the opcode field. The openat2 handling |
| 3858 | * requires copying in user memory into the io_kiocb object in that |
| 3859 | * range, and HARDENED_USERCOPY will complain if we haven't |
| 3860 | * correctly annotated this range. |
| 3861 | */ |
| 3862 | req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args, |
| 3863 | SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT | |
| 3864 | SLAB_TYPESAFE_BY_RCU); |
| 3865 | io_buf_cachep = KMEM_CACHE(io_buffer, |
| 3866 | SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT); |
| 3867 | |
| 3868 | iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); |
| 3869 | |
| 3870 | #ifdef CONFIG_SYSCTL |
| 3871 | register_sysctl_init("kernel", kernel_io_uring_disabled_table); |
| 3872 | #endif |
| 3873 | |
| 3874 | return 0; |
| 3875 | } |
| 3876 | __initcall(io_uring_init); |