io_uring: optimise io_prep_linked_timeout()
Linked timeout handling during issue is heavy: it adds extra
instructions and forces us to save the next linked timeout before
io_issue_sqe().

Following the same reasoning as in the refcounting patches, a request
can't be freed by the time io_issue_sqe() returns, so we no longer need
to do io_prep_linked_timeout() in advance; it can be delayed to the
colder paths, optimising the generic path.
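
To make the shape of the change concrete, here is a heavily simplified
sketch of the issue path before and after the patch (the *_before/*_after
names are invented for this illustration, most branches are elided, and
the real diff below is authoritative):

/* Before: the linked timeout was resolved up front for every issue. */
static void __io_queue_sqe_before(struct io_kiocb *req)
{
	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);

	/* ... io_issue_sqe(), inline completion / async punt / failure ... */

	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
}

/* After: the request is guaranteed to outlive io_issue_sqe(), so the
 * timeout is prepared and armed only on the colder paths that need it. */
static void __io_queue_sqe_after(struct io_kiocb *req)
{
	struct io_kiocb *linked_timeout;
	int ret = io_issue_sqe(req, IO_URING_F_NONBLOCK | IO_URING_F_COMPLETE_DEFER);

	if (likely(!ret)) {
		if (req->flags & REQ_F_COMPLETE_INLINE) {
			/* ... batch the completion ... */
			return;		/* hot path: no linked-timeout work at all */
		}
		linked_timeout = io_prep_linked_timeout(req);
		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
		linked_timeout = io_prep_linked_timeout(req);
		/* ... arm poll (possibly io_unprep_linked_timeout() + retry)
		 * or punt to async work ... */
		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else {
		io_req_complete_failed(req, ret);
	}
}

On the hot inline-completion path the function now returns before any
timeout bookkeeping, which is where the savings described in the next
paragraph come from.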

It should also save quite a lot for requests with linked timeouts that
complete inline: the timeout spinlocking, hrtimer_start(),
hrtimer_try_to_cancel() and so on are all skipped.
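
For reference, this is roughly how a request "with a linked timeout" is
set up from userspace with liburing (an illustrative sketch, not part of
this patch; submit_read_with_timeout() and the 1-second value are made up,
error handling is omitted):

#include <liburing.h>

/* Sketch: a read linked to a 1s timeout.  Assumes the ring is already
 * initialised and io_uring_get_sqe() does not return NULL. */
static int submit_read_with_timeout(struct io_uring *ring, int fd,
				    void *buf, unsigned nbytes)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fd, buf, nbytes, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* link the next SQE to this read */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);	/* IORING_OP_LINK_TIMEOUT */

	return io_uring_submit(ring);		/* read and timeout each post a CQE */
}

With this patch, if the read in such a pair completes during submission,
the linked timeout is never armed in the first place, so there is nothing
to cancel afterwards.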

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/19bfc9a0d26c5c5f1e359f7650afe807ca8ef879.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov authored and Jens Axboe committed Aug 23, 2021
1 parent 0756a86 commit 906c6ca
Showing 1 changed file with 22 additions and 3 deletions.
fs/io_uring.c
@@ -1306,8 +1306,16 @@ static void io_req_track_inflight(struct io_kiocb *req)
 	}
 }
 
+static inline void io_unprep_linked_timeout(struct io_kiocb *req)
+{
+	req->flags &= ~REQ_F_LINK_TIMEOUT;
+}
+
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
+	if (WARN_ON_ONCE(!req->link))
+		return NULL;
+
 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
 	req->flags |= REQ_F_LINK_TIMEOUT;
 
@@ -1932,6 +1940,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 	if (req->flags & REQ_F_ARM_LTIMEOUT) {
 		struct io_kiocb *link = req->link;
 
+		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
 			io_cqring_fill_event(link->ctx, link->user_data,
@@ -6485,7 +6494,7 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 static void __io_queue_sqe(struct io_kiocb *req)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+	struct io_kiocb *linked_timeout;
 	int ret;
 
 issue_sqe:
@@ -6503,10 +6512,19 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			state->compl_reqs[state->compl_nr++] = req;
 			if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
 				io_submit_flush_completions(ctx);
+			return;
 		}
+
+		linked_timeout = io_prep_linked_timeout(req);
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+		linked_timeout = io_prep_linked_timeout(req);
+
 		switch (io_arm_poll_handler(req)) {
 		case IO_APOLL_READY:
+			if (linked_timeout)
+				io_unprep_linked_timeout(req);
 			goto issue_sqe;
 		case IO_APOLL_ABORTED:
 			/*
@@ -6516,11 +6534,12 @@ static void __io_queue_sqe(struct io_kiocb *req)
 			io_queue_async_work(req);
 			break;
 		}
+
+		if (linked_timeout)
+			io_queue_linked_timeout(linked_timeout);
 	} else {
 		io_req_complete_failed(req, ret);
 	}
-	if (linked_timeout)
-		io_queue_linked_timeout(linked_timeout);
 }
 
 static inline void io_queue_sqe(struct io_kiocb *req)