static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
- set_notify_signal(worker->task);
+ __set_notify_signal(worker->task);
wake_up_process(worker->task);
return false;
}
{
if (work && match->fn(work, match->data)) {
work->flags |= IO_WQ_WORK_CANCEL;
- set_notify_signal(worker->task);
+ __set_notify_signal(worker->task);
return true;
}
{
if (!ctx)
return;
+ if (ctx->flags & IORING_SETUP_TW_FLAG &&
+ ctx->rings->sq_flags & IORING_SQ_TW) {
+ spin_lock(&ctx->completion_lock);
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags & ~IORING_SQ_TW);
+ spin_unlock(&ctx->completion_lock);
+ }
if (*locked) {
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
static void io_req_task_work_add(struct io_kiocb *req, bool priority)
{
struct task_struct *tsk = req->task;
+ struct io_ring_ctx *ctx = req->ctx;
struct io_uring_task *tctx = tsk->io_uring;
struct io_task_work *head;
req->io_task_work.next = head;
} while (cmpxchg(&tctx->task_list, head, &req->io_task_work) != head);
+ if (ctx->flags & IORING_SETUP_TW_FLAG &&
+ !(ctx->rings->sq_flags & IORING_SQ_TW)) {
+ spin_lock(&ctx->completion_lock);
+ WRITE_ONCE(ctx->rings->sq_flags,
+ ctx->rings->sq_flags | IORING_SQ_TW);
+ spin_unlock(&ctx->completion_lock);
+ }
+
if (!head) {
/*
 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
 * processing task_work. There's no reliable way to tell if TWA_RESUME
 * will do the job.
 */
- if (req->ctx->flags & IORING_SETUP_SQPOLL)
+ if (ctx->flags & IORING_SETUP_SQPOLL)
wake_up_process(tsk);
+ else if (ctx->flags & IORING_SETUP_NOIPI)
+ task_work_notify(tsk, TWA_SIGNAL_NOIPI);
else
task_work_notify(tsk, TWA_SIGNAL);
}
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
- IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL))
+ IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
+ IORING_SETUP_NOIPI | IORING_SETUP_TW_FLAG))
return -EINVAL;
return io_uring_create(entries, &p, params);
smp_mb__after_atomic();
}
+/*
+ * Returns 'true' if kick_process() is needed to force a transition from
+ * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
+ */
+static inline bool __set_notify_signal(struct task_struct *task)
+{
+ return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE);
+}
+
/*
* Called to break out of interruptible wait loops, and enter the
* exit_to_user_mode_loop().
*/
static inline void set_notify_signal(struct task_struct *task)
{
- if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
- !wake_up_state(task, TASK_INTERRUPTIBLE))
+ if (__set_notify_signal(task))
kick_process(task);
}
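
For reference, the mapping between the two notification modes and the helpers above is the heart of the change: TWA_SIGNAL keeps using set_notify_signal() and may IPI a running task, while the new TWA_SIGNAL_NOIPI only needs __set_notify_signal(). The task_work_notify() calls in io_req_task_work_add() presumably reduce to something along these lines; the sketch below is illustrative (the function name and framing are assumptions, only the mode-to-helper mapping is implied by the series):

#include <linux/sched/signal.h>
#include <linux/task_work.h>

/* Illustrative sketch: how a notify path can dispatch the two modes. */
static void notify_task_work_mode(struct task_struct *task,
				  enum task_work_notify_mode notify)
{
	switch (notify) {
	case TWA_SIGNAL:
		/* set TIF_NOTIFY_SIGNAL; may kick_process() to IPI a running task */
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NOIPI:
		/* same flag and wakeup, but never an IPI */
		__set_notify_signal(task);
		break;
	default:
		break;
	}
}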
#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */
+#define IORING_SETUP_NOIPI (1U << 8) /* no IPI needed */
+#define IORING_SETUP_TW_FLAG (1U << 9) /* set IORING_SQ_TW */
enum {
IORING_OP_NOP,
*/
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
+#define IORING_SQ_TW (1U << 2) /* task_work is pending */
struct io_cqring_offsets {
__u32 head;
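
On the application side, IORING_SETUP_TW_FLAG turns IORING_SQ_TW in the mmap'd SQ ring flags into a cheap hint that task_work is pending, so a busy-polling submitter only needs to call io_uring_enter(2) when there is actually something to run. A minimal userspace sketch, assuming the ring was created with IORING_SETUP_NOIPI | IORING_SETUP_TW_FLAG and that ring_fd and sq_flags refer to an already set-up ring (the names below are illustrative, not part of the patch):

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch only: poll the SQ ring flags and enter the kernel on demand. */
static void reap_if_task_work_pending(int ring_fd, unsigned int *sq_flags)
{
	/* the kernel sets IORING_SQ_TW when task_work is queued for this ring */
	if (__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_TW) {
		/* entering the kernel runs the pending task_work and posts CQEs */
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_GETEVENTS, NULL, 0);
	}
}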