#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_COMPLETE = 0,

        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * The request has more work to do and should be retried. io_uring will
         * attempt to wait on the file for eligible opcodes, but otherwise
         * it'll be handed to iowq for blocking execution. It works for normal
         * requests as well as for the multi shot mode.
         */
        IOU_RETRY = -EAGAIN,

        /*
         * Requeue the task_work to restart operations on this request. The
         * actual value isn't important, should just be not an otherwise
         * valid error code, yet less than -MAX_ERRNO and valid internally.
         */
        IOU_REQUEUE = -3072,
};

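/*
 * Illustrative sketch only, not part of this header: an opcode ->issue()
 * handler typically maps its result onto the IOU_* codes above, setting
 * the CQE result via io_req_set_res() (defined below) before returning
 * IOU_COMPLETE, or returning IOU_RETRY when it hits -EAGAIN and wants
 * io_uring to retry or punt to io-wq. Roughly (do_operation() is a
 * hypothetical helper):
 *
 *        ret = do_operation(req);
 *        if (ret == -EAGAIN)
 *                return IOU_RETRY;
 *        io_req_set_res(req, ret, 0);
 *        return IOU_COMPLETE;
 */
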
struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned cq_min_tail;
        unsigned nr_timeouts;
        int hit_timeout;
        ktime_t min_timeout;
        ktime_t timeout;
        struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ktime_t napi_busy_poll_dt;
        bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
        struct io_ring_ctx *ctx = iowq->ctx;
        int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
        return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)

unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
                         unsigned int cq_entries, size_t *sq_offset);
int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

void io_req_track_inflight(struct io_kiocb *req);
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
                                int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                        bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
        lockdep_assert(in_task());

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
                lockdep_assert_held(&ctx->uring_lock);

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                lockdep_assert_held(&ctx->uring_lock);
        } else if (!ctx->task_complete) {
                lockdep_assert_held(&ctx->completion_lock);
        } else if (ctx->submitter_task) {
                /*
                 * ->submitter_task may be NULL and we can still post a CQE,
                 * if the ring has been setup with IORING_SETUP_R_DISABLED.
                 * Not from an SQE, as those cannot be submitted, but via
                 * updating tagged resources.
                 */
                if (!percpu_ref_is_dying(&ctx->refs))
                        lockdep_assert(current == ctx->submitter_task);
        }
#endif
}

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
        return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
            ctx->submit_state.cq_flush)
                __io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)

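/*
 * Illustrative sketch only, not part of this header: io_for_each_link()
 * walks a request and every request linked behind it via ->link, e.g.
 * when checking whether anything in a link chain matches some condition
 * (is_interesting() is a hypothetical predicate):
 *
 *        struct io_kiocb *pos;
 *
 *        io_for_each_link(pos, head) {
 *                if (is_interesting(pos))
 *                        return true;
 *        }
 */
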
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                       struct io_uring_cqe **ret,
                                       bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
                if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
                        return false;
        }
        *ret = ctx->cqe_cached;
        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
                ctx->cqe_cached++;
        return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
        return io_get_cqe_overflow(ctx, ret, false);
}

static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
                                               struct io_uring_cqe **cqe_ret)
{
        io_lockdep_assert_cq_locked(ctx);

        ctx->submit_state.cq_flush = true;
        return io_get_cqe(ctx, cqe_ret);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }

        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, cqe);
        return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
        req->flags |= REQ_F_FAIL;
        if (req->flags & REQ_F_CQE_SKIP) {
                req->flags &= ~REQ_F_CQE_SKIP;
                req->flags |= REQ_F_SKIP_LINK_CQES;
        }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
        req->cqe.res = res;
        req->cqe.flags = cflags;
}

static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
                                              struct io_kiocb *req)
{
        if (cache) {
                req->async_data = io_cache_alloc(cache, GFP_KERNEL);
        } else {
                const struct io_issue_def *def = &io_issue_defs[req->opcode];

                WARN_ON_ONCE(!def->async_size);
                req->async_data = kmalloc(def->async_size, GFP_KERNEL);
        }
        if (req->async_data)
                req->flags |= REQ_F_ASYNC_DATA;
        return req->async_data;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
        return req->flags & REQ_F_ASYNC_DATA;
}

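/*
 * Illustrative sketch only, not part of this header: opcode code that
 * needs per-request state to survive an async punt typically allocates
 * it once, guarded by req_has_async_data(). With a per-ctx cache (here
 * a hypothetical ctx->foo_cache and struct foo_async_data) this looks
 * roughly like:
 *
 *        struct foo_async_data *ad;
 *
 *        if (req_has_async_data(req))
 *                ad = req->async_data;
 *        else
 *                ad = io_uring_alloc_async_data(&ctx->foo_cache, req);
 *        if (unlikely(!ad))
 *                return -ENOMEM;
 */
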
static inline void io_put_file(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
                fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. Same is true for the SQPOLL offload.
         * The only exception is when we've detached the request and issue it
         * from an async worker thread, grab the lock for that case.
         */
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}

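/*
 * Illustrative sketch only, not part of this header: the two helpers
 * above are used as a pair around code that must run under ->uring_lock
 * regardless of whether the issue path already holds it. A handler
 * touching ring-private state would typically do:
 *
 *        io_ring_submit_lock(ctx, issue_flags);
 *        ... access state protected by ctx->uring_lock ...
 *        io_ring_submit_unlock(ctx, issue_flags);
 *
 * so the mutex is only taken and released when IO_URING_F_UNLOCKED is
 * set, i.e. when running from an io-wq worker rather than the inline
 * submission path.
 */
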
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
        /* order cqe stores with ring update */
        smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
        if (wq_has_sleeper(&ctx->poll_wq))
                __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
                          poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
        /*
         * Trigger waitqueue handler on all waiters on our waitqueue. This
         * won't necessarily wake up all the tasks, io_should_wake() will make
         * that decision.
         *
         * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
         * set in the mask so that if we recurse back into our own poll
         * waitqueue handlers, we know we have a dependency between eventfd or
         * epoll and should terminate multishot poll at that point.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
                __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
                          poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

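/*
 * Illustrative sketch only, not part of this header: a completion post
 * sequence ties the helpers above together. With the CQ side locked
 * (see io_lockdep_assert_cq_locked()), CQEs are first written, then
 * published, then waiters are kicked:
 *
 *        io_fill_cqe_req(ctx, req);      // copy req->cqe into the ring
 *        io_commit_cqring(ctx);          // publish the new cq.tail
 *        io_cqring_wake(ctx);            // wake tasks waiting on cq_wait
 *
 * The smp_store_release() in io_commit_cqring() is what orders the CQE
 * stores against the tail update that userspace reads.
 */
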
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
        struct io_rings *r = ctx->rings;

        /*
         * SQPOLL must use the actual sqring head, as using the cached_sq_head
         * is race prone if the SQPOLL thread has grabbed entries but not yet
         * committed them to the ring. For !SQPOLL, this doesn't matter, but
         * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
         * just read the actual sqring head unconditionally.
         */
        return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned int entries;

        /* make sure SQ entry isn't read before tail */
        entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
        return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
        bool ret = false;

        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        /*
         * PF_IO_WORKER never returns to userspace, so check here if we have
         * notify work that needs processing.
         */
        if (current->flags & PF_IO_WORKER) {
                if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                        __set_current_state(TASK_RUNNING);
                        resume_user_mode_work(NULL);
                }
                if (current->io_uring) {
                        unsigned int count = 0;

                        __set_current_state(TASK_RUNNING);
                        tctx_task_work_run(current->io_uring, UINT_MAX, &count);
                        if (count)
                                ret = true;
                }
        }
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                ret = true;
        }

        return ret;
}

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
        return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
        lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
        __must_hold(&req->ctx->uring_lock)
{
        struct io_submit_state *state = &req->ctx->submit_state;

        lockdep_assert_held(&req->ctx->uring_lock);

        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

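/*
 * Illustrative note, not part of this header: requests queued on
 * submit_state.compl_reqs by io_req_complete_defer() are not visible to
 * userspace yet; they are turned into CQEs in one batch when
 * io_submit_flush_completions() (above) runs, typically at the end of a
 * submission loop or task_work run. Roughly:
 *
 *        io_req_set_res(req, res, cflags);
 *        io_req_complete_defer(req);
 *        ...
 *        io_submit_flush_completions(ctx);  // posts the batched CQEs
 */
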
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used ||
                     ctx->has_evfd || ctx->poll_activated))
                __io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
        struct io_uring_task *tctx = current->io_uring;

        tctx->cached_refs -= nr;
        if (unlikely(tctx->cached_refs < 0))
                io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
        return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
        wq_stack_extract(&ctx->submit_state.free_list);
        return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
        if (unlikely(io_req_cache_empty(ctx))) {
                if (!__io_alloc_req_refill(ctx))
                        return false;
        }
        *req = io_extract_req(ctx);
        return true;
}

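/*
 * Illustrative sketch only, not part of this header: the submission path
 * pulls request structs from the per-ctx free list with io_alloc_req(),
 * refilling from the slab cache (req_cachep) only when the list is
 * empty. A caller typically bails out of its submit loop on failure:
 *
 *        struct io_kiocb *req;
 *
 *        if (unlikely(!io_alloc_req(ctx, &req)))
 *                break;
 *        ... initialize req from the SQE and issue it ...
 */
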
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
        return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

/*
 * Terminate the request if either of these conditions are true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
        return current->flags & (PF_KTHREAD | PF_EXITING);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
        if (ctx->flags & IORING_SETUP_SQE128)
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
}

static inline bool io_file_can_poll(struct io_kiocb *req)
{
        if (req->flags & REQ_F_CAN_POLL)
                return true;
        if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
        return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
        if (ctx->clockid == CLOCK_MONOTONIC)
                return ktime_get();

        return ktime_get_with_offset(ctx->clock_offset);
}

enum {
        IO_CHECK_CQ_OVERFLOW_BIT,
        IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
        return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
               io_local_work_pending(ctx);
}
#endif