author    Jens Axboe <axboe@kernel.dk>    2021-08-10 15:14:18 -0600
committer Jens Axboe <axboe@kernel.dk>    2021-08-12 11:23:01 -0600
commit    cfee77f8ca0d8e70e59dae0b22db93c4c65b4680 (patch)
tree      fc0d6ab1b4d3c095fb2b367c6d422d454a557e7e
parent    48ecb6369f1f290ab3ff7412d14074b83123634b (diff)
io_uring: run linked timeouts from task_work
This is in preparation for making the completion lock work outside of hard/soft IRQ context.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
 fs/io_uring.c | 42 ++++++++++++++++++++++++++++++------------
 1 file changed, 30 insertions(+), 12 deletions(-)
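The core of the change: io_link_timeout_fn() no longer cancels the linked request or posts the completion directly from the hrtimer (hard IRQ) callback; it stashes the prev pointer under the new timeout_lock and punts the rest to task_work. Below is a standalone userspace analogue of that deferral pattern, a minimal sketch only; every name in it is illustrative, not an io_uring internal.

/* Userspace analogue of the pattern in this patch: the "timer" context
 * only records state and queues a work item; the actual completion work
 * runs later in "task" context.  Illustrative names throughout. */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	void (*func)(struct work_item *);
	struct work_item *next;
	int prev_result;	/* state stashed by the timer, like req->timeout.prev */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *queue_head;

/* analogue of io_req_task_work_add(): just queue, never run inline */
static void task_work_add(struct work_item *w)
{
	pthread_mutex_lock(&queue_lock);
	w->next = queue_head;
	queue_head = w;
	pthread_mutex_unlock(&queue_lock);
}

/* analogue of io_req_task_link_timeout(): runs in task context */
static void link_timeout_work(struct work_item *w)
{
	if (w->prev_result)
		printf("cancel linked request, result %d\n", w->prev_result);
	else
		printf("complete timeout with -ETIME\n");
}

/* analogue of io_link_timeout_fn(): "IRQ" context, so keep it short */
static void *timer_thread(void *arg)
{
	struct work_item *w = arg;

	w->func = link_timeout_work;
	w->prev_result = -62;	/* pretend we found a linked request; -62 == -ETIME */
	task_work_add(w);
	return NULL;
}

int main(void)
{
	struct work_item w = { 0 };
	pthread_t t;

	pthread_create(&t, NULL, timer_thread, &w);
	pthread_join(t, NULL);

	/* the "task" drains its work queue outside any IRQ-like context */
	pthread_mutex_lock(&queue_lock);
	while (queue_head) {
		struct work_item *cur = queue_head;

		queue_head = cur->next;
		pthread_mutex_unlock(&queue_lock);
		cur->func(cur);
		pthread_mutex_lock(&queue_lock);
	}
	pthread_mutex_unlock(&queue_lock);
	return 0;
}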
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cb7e11d93371..91863f9362f2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -538,6 +538,8 @@ struct io_timeout {
struct list_head list;
/* head of the link, used by linked timeouts only */
struct io_kiocb *head;
+ /* for linked completions */
+ struct io_kiocb *prev;
};
struct io_timeout_rem {
@@ -1846,6 +1848,7 @@ static inline void io_remove_next_linked(struct io_kiocb *req)
static bool io_kill_linked_timeout(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock)
+ __must_hold(&req->ctx->timeout_lock)
{
struct io_kiocb *link = req->link;
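For reference, __must_hold() is a Sparse lock-context annotation, not a runtime check: under a checker build it expands roughly as below (compare the kernel's compiler annotation headers) and compiles away otherwise, so the requirement that both completion_lock and timeout_lock be held here is verified statically.

/* Sparse context annotation: the function must be entered and exited
 * with lock x held; a no-op for regular compiles. */
#ifdef __CHECKER__
# define __must_hold(x)	__attribute__((context(x, 1, 1)))
#else
# define __must_hold(x)
#endif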
@@ -1890,8 +1893,13 @@ static bool io_disarm_next(struct io_kiocb *req)
{
bool posted = false;
- if (likely(req->flags & REQ_F_LINK_TIMEOUT))
+ if (likely(req->flags & REQ_F_LINK_TIMEOUT)) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->timeout_lock);
posted = io_kill_linked_timeout(req);
+ spin_unlock_irq(&ctx->timeout_lock);
+ }
if (unlikely((req->flags & REQ_F_FAIL) &&
!(req->flags & REQ_F_HARDLINK))) {
posted |= (req->link != NULL);
@@ -6357,6 +6365,20 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
return io_file_get_normal(ctx, req, fd);
}
+static void io_req_task_link_timeout(struct io_kiocb *req)
+{
+ struct io_kiocb *prev = req->timeout.prev;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (prev) {
+ io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ io_put_req(prev);
+ io_put_req(req);
+ } else {
+ io_req_complete_post(req, -ETIME, 0);
+ }
+}
+
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
struct io_timeout_data *data = container_of(timer,
@@ -6365,7 +6387,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->completion_lock, flags);
+ spin_lock_irqsave(&ctx->timeout_lock, flags);
prev = req->timeout.head;
req->timeout.head = NULL;
@@ -6378,15 +6400,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
if (!req_ref_inc_not_zero(prev))
prev = NULL;
}
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ req->timeout.prev = prev;
+ spin_unlock_irqrestore(&ctx->timeout_lock, flags);
- if (prev) {
- io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
- io_put_req_deferred(prev, 1);
- io_put_req_deferred(req, 1);
- } else {
- io_req_complete_post(req, -ETIME, 0);
- }
+ req->io_task_work.func = io_req_task_link_timeout;
+ io_req_task_work_add(req);
return HRTIMER_NORESTART;
}
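Note the req_ref_inc_not_zero(prev) in the hunk above: the timer can race with the linked request completing, so a reference is taken only if the refcount has not already dropped to zero, and the task_work callback later releases it via io_put_req(). A minimal userspace sketch of that inc-not-zero pattern, using C11 atomics with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the count is still non-zero; otherwise the
 * object may be mid-teardown and must not be touched, and the caller
 * proceeds with prev == NULL. */
static bool ref_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* got a reference */
	}
	return false;			/* already dead */
}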
@@ -6394,7 +6412,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
/*
* If the back reference is NULL, then our linked request finished
* before we got a chance to setup the timer
@@ -6406,7 +6424,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
}
- spin_unlock_irq(&ctx->completion_lock);
+ spin_unlock_irq(&ctx->timeout_lock);
/* drop submission reference */
io_put_req(req);
}
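From userspace, the path this patch reworks is exercised by linking a request to an IORING_OP_LINK_TIMEOUT SQE. A small example sketch using liburing, with error handling omitted for brevity: the read blocks forever, the linked timer fires, io_link_timeout_fn() above cancels the read, and the timeout itself completes with -ETIME.

#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1 };
	char buf[64];
	int fds[2];

	pipe(fds);			/* nothing is ever written, so the read blocks */
	io_uring_queue_init(4, &ring, 0);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;	/* the next SQE is this request's timeout */

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);

	/* expect two completions: read -> -ECANCELED, timeout -> -ETIME */
	for (int i = 0; i < 2; i++) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("cqe res = %d (%s)\n", cqe->res, strerror(-cqe->res));
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}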