io_uring: optimise putting task struct
We cache all the references to task + tctx, so if io_put_task() is
called by the corresponding task itself, we can save on atomics and
return the refs right back into the cache.
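
A minimal user-space model of the idea (illustrative only: task_model,
put_ref_cached() and drop_cached_refs() are hypothetical names, not kernel
APIs). The owning thread hands references back to a plain, non-atomic
per-task cache; only a flush touches the shared atomic counter, and it
does so in one batched operation:

```c
#include <stdatomic.h>

/* Model of task_struct::usage + io_uring_task::cached_refs (hypothetical). */
struct task_model {
	atomic_uint  refs;         /* shared, atomic refcount */
	unsigned int cached_refs;  /* private to the owning thread */
};

/* Fast path, owner only: return the ref to the cache, no atomics. */
static inline void put_ref_cached(struct task_model *t)
{
	t->cached_refs++;
}

/* Slow path, any thread: a real atomic decrement. */
static inline void put_ref(struct task_model *t)
{
	atomic_fetch_sub_explicit(&t->refs, 1, memory_order_release);
}

/* Flush: drop all cached refs with a single atomic operation. */
static inline void drop_cached_refs(struct task_model *t)
{
	unsigned int refs = t->cached_refs;

	if (refs) {
		t->cached_refs = 0;
		atomic_fetch_sub_explicit(&t->refs, refs, memory_order_release);
	}
}
```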

It's beneficial for all inline completions, and also for iopolling when
polling and submissions are done by the same task, including
SQPOLL|IOPOLL.

Note: io_uring_cancel_generic() can return refs to the cache as well,
so those should be flushed inside the loop for tctx_inflight() to work
correctly.
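
A condensed sketch of the ordering this establishes in
io_uring_cancel_generic() (names match the kernel; the cancellation work
itself is elided, see the diff below for the real code). Cached refs are
accounted in tctx->inflight, so a stale cache would keep tctx_inflight()
non-zero even when nothing is actually in flight:

```c
atomic_inc(&tctx->in_idle);
do {
	/* Flush first, so tctx_inflight() below reflects reality. */
	io_uring_drop_tctx_refs(current);
	inflight = tctx_inflight(tctx, !cancel_all);
	if (!inflight)
		break;

	/* ... cancel work; completions may recycle refs into the cache ... */

	prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
	/* Flush again before deciding to sleep, closing the window where
	 * refs were returned to the cache after the check above. */
	io_uring_drop_tctx_refs(current);
	if (inflight == tctx_inflight(tctx, !cancel_all))
		schedule();
	finish_wait(&tctx->wait, &wait);
} while (1);
```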

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6fe9646b3cb70e46aca1f58426776e368c8926b3.1628471125.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov authored and Jens Axboe committed Aug 23, 2021
1 parent af066f3 commit e9dbe22
fs/io_uring.c: 11 additions, 6 deletions

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2102,10 +2102,12 @@ static inline void io_init_req_batch(struct req_batch *rb)
 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 				     struct req_batch *rb)
 {
-	if (rb->task)
-		io_put_task(rb->task, rb->task_refs);
 	if (rb->ctx_refs)
 		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
+	if (rb->task == current)
+		current->io_uring->cached_refs += rb->task_refs;
+	else if (rb->task)
+		io_put_task(rb->task, rb->task_refs);
 }
 
 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
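
For context, the io_put_task() slow path that the new `rb->task == current`
branch bypasses looked roughly like this in trees of this vintage
(paraphrased, details may differ); it shows the atomics the fast path saves,
a percpu-counter update plus a batched task-struct put:

```c
/* Paraphrased; see fs/io_uring.c of this era for the exact code. */
static void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}
```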
@@ -9138,9 +9140,11 @@ static void io_uring_drop_tctx_refs(struct task_struct *task)
 	struct io_uring_task *tctx = task->io_uring;
 	unsigned int refs = tctx->cached_refs;
 
-	tctx->cached_refs = 0;
-	percpu_counter_sub(&tctx->inflight, refs);
-	put_task_struct_many(task, refs);
+	if (refs) {
+		tctx->cached_refs = 0;
+		percpu_counter_sub(&tctx->inflight, refs);
+		put_task_struct_many(task, refs);
+	}
 }
 
 /*
@@ -9161,9 +9165,9 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
 
-	io_uring_drop_tctx_refs(current);
 	atomic_inc(&tctx->in_idle);
 	do {
+		io_uring_drop_tctx_refs(current);
 		/* read completions before cancelations */
 		inflight = tctx_inflight(tctx, !cancel_all);
 		if (!inflight)
@@ -9187,6 +9191,7 @@ static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	}
 
 	prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+	io_uring_drop_tctx_refs(current);
 	/*
 	 * If we've seen completions, retry without waiting. This
 	 * avoids a race where a completion comes in before we did
