io_uring: don't recycle provided buffer if punted to async worker
We only really need to recycle the buffer when going async for a file
type that has an indefinite response time (e.g. non-file/bdev). For
files that need to arm poll, the async worker will arm poll anyway and
the buffer will get recycled there.

In that latter case, we're not holding ctx->uring_lock. Ensure we take
the issue_flags into account and acquire it if we need to.
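
For reference, a condensed sketch of the conditional locking pattern this
introduces (the complete function is in the diff below):

	static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	{
		struct io_ring_ctx *ctx = req->ctx;

		/* callers running without ctx->uring_lock set IO_URING_F_UNLOCKED */
		if (issue_flags & IO_URING_F_UNLOCKED)
			mutex_lock(&ctx->uring_lock);

		/* ... return req->kbuf to its buffer group's free list ... */

		if (issue_flags & IO_URING_F_UNLOCKED)
			mutex_unlock(&ctx->uring_lock);
	}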

Fixes: b1c6264 ("io_uring: recycle provided buffers if request goes async")
Reported-by: Stefan Roesch <shr@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe committed Mar 23, 2022
1 parent d89a4fa commit 4d55f23
Showing 1 changed file with 8 additions and 3 deletions.
11 changes: 8 additions & 3 deletions fs/io_uring.c
@@ -1383,7 +1383,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
@@ -1392,13 +1392,19 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return;
 
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+
 	lockdep_assert_held(&ctx->uring_lock);
 
 	buf = req->kbuf;
 	bl = io_buffer_get_list(ctx, buf->bgid);
 	list_add(&buf->list, &bl->buf_list);
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -6254,7 +6260,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
-	io_kbuf_recycle(req);
+	io_kbuf_recycle(req, issue_flags);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
 	if (ret || ipt.error)
@@ -7504,7 +7510,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
 		 */
-		io_kbuf_recycle(req);
 		io_queue_async_work(req, NULL);
 		break;
 	case IO_APOLL_OK:
