io_uring: fix stuck fallback reqs
When task_work_add() fails, we use ->exit_task_work to queue the work.
That will be run only in the cancellation path, which happens either
when the ctx is dying or when one of the tasks with inflight requests is
exiting or exec'ing. There is a good chance that such a request would
just get stuck in the list, potentially holding a file, holding up all
io_uring rsrc recycling, or pinning some other resources. Nothing
terrible; it'll go away at some point, but we don't want to lock them up
for longer than needed.

Replace that hand-made ->exit_task_work with delayed_work + llist,
inspired by fput_many().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov authored and Jens Axboe committed Jul 1, 2021
1 parent c288d9c commit 9011bf9
Showing 1 changed file with 20 additions and 41 deletions.
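
For context ahead of the diff, here is a minimal, hypothetical sketch of the fput_many()-style llist + delayed_work pattern the commit message describes (struct item, pending_items, and the drain_* names are illustrative, not taken from the patch):

#include <linux/llist.h>
#include <linux/workqueue.h>

struct item {
	struct llist_node node;
	void (*func)(struct item *);
};

static void drain_items(struct work_struct *work);

static LLIST_HEAD(pending_items);
static DECLARE_DELAYED_WORK(drain_work, drain_items);

/* Consumer: take the whole list in one atomic swap, then walk it. */
static void drain_items(struct work_struct *work)
{
	struct llist_node *node = llist_del_all(&pending_items);
	struct item *it, *tmp;

	/* _safe variant: an entry may be freed by its own callback */
	llist_for_each_entry_safe(it, tmp, node, node)
		it->func(it);
}

/*
 * Producer: lock-free push; only the push that finds the list empty
 * needs to schedule the worker, later pushes piggyback on it.
 */
static void queue_item(struct item *it)
{
	if (llist_add(&it->node, &pending_items))
		schedule_delayed_work(&drain_work, 1);
}

In the patch below, io_req_task_work_add_fallback() plays the producer role and io_fallback_req_func() the consumer.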
61 changes: 20 additions & 41 deletions fs/io_uring.c
@@ -465,7 +465,8 @@ struct io_ring_ctx {
 	struct mm_struct	*mm_account;
 
 	/* ctx exit and cancelation */
-	struct callback_head	*exit_task_work;
+	struct llist_head	fallback_llist;
+	struct delayed_work	fallback_work;
 	struct work_struct	exit_work;
 	struct list_head	tctx_list;
 	struct completion	ref_comp;
@@ -859,6 +860,8 @@ struct io_kiocb {
 	struct io_wq_work	work;
 	const struct cred	*creds;
 
+	struct llist_node	fallback_node;
+
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf	*imu;
 };
@@ -1071,6 +1074,8 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx);
 static bool io_poll_remove_waitqs(struct io_kiocb *req);
 static int io_req_prep_async(struct io_kiocb *req);
 
+static void io_fallback_req_func(struct work_struct *unused);
+
 static struct kmem_cache *req_cachep;
 
 static const struct file_operations io_uring_fops;
@@ -1202,6 +1207,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->tctx_list);
 	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
 	INIT_LIST_HEAD(&ctx->locked_free_list);
+	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	return ctx;
 err:
 	kfree(ctx->dummy_ubuf);
@@ -1999,44 +2005,12 @@ static int io_req_task_work_add(struct io_kiocb *req)
 	return ret;
 }
 
-static bool io_run_task_work_head(struct callback_head **work_head)
-{
-	struct callback_head *work, *next;
-	bool executed = false;
-
-	do {
-		work = xchg(work_head, NULL);
-		if (!work)
-			break;
-
-		do {
-			next = work->next;
-			work->func(work);
-			work = next;
-			cond_resched();
-		} while (work);
-		executed = true;
-	} while (1);
-
-	return executed;
-}
-
-static void io_task_work_add_head(struct callback_head **work_head,
-				  struct callback_head *task_work)
-{
-	struct callback_head *head;
-
-	do {
-		head = READ_ONCE(*work_head);
-		task_work->next = head;
-	} while (cmpxchg(work_head, head, task_work) != head);
-}
-
 static void io_req_task_work_add_fallback(struct io_kiocb *req,
 					  task_work_func_t cb)
 {
 	init_task_work(&req->task_work, cb);
-	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
+	if (llist_add(&req->fallback_node, &req->ctx->fallback_llist))
+		schedule_delayed_work(&req->ctx->fallback_work, 1);
 }
 
 static void io_req_task_cancel(struct callback_head *cb)
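
Two details worth noting in the hunk above: llist_add() returns true only when it inserts into an empty list, so just the first fallback request arms the delayed work and later requests ride along on the already-pending drain; and the delay of 1 jiffy appears to give ordinary task_work a brief window to run first, though the commit message does not spell that rationale out.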
@@ -2485,6 +2459,17 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
 }
 #endif
 
+static void io_fallback_req_func(struct work_struct *work)
+{
+	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+						fallback_work.work);
+	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
+	struct io_kiocb *req, *tmp;
+
+	llist_for_each_entry_safe(req, tmp, node, fallback_node)
+		req->task_work.func(&req->task_work);
+}
+
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 			     unsigned int issue_flags)
 {
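
One detail in the hunk above worth calling out: the workqueue callback receives a pointer to the embedded work_struct, while the ctx stores a delayed_work, hence container_of(..., fallback_work.work). A minimal illustration, with struct owner and owner_work_fn as made-up names (to_delayed_work() is the stock workqueue helper):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct owner {
	struct delayed_work	dwork;	/* embeds a work_struct named .work */
};

static void owner_work_fn(struct work_struct *work)
{
	/*
	 * work points at dwork.work, so container_of() must name the
	 * nested member; container_of(to_delayed_work(work),
	 * struct owner, dwork) is the equivalent two-step form.
	 */
	struct owner *o = container_of(work, struct owner, dwork.work);

	(void)o;	/* use the owner here */
}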
@@ -8767,11 +8752,6 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 	return -EINVAL;
 }
 
-static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
-{
-	return io_run_task_work_head(&ctx->exit_task_work);
-}
-
 struct io_tctx_exit {
 	struct callback_head	task_work;
 	struct completion	completion;
@@ -9036,7 +9016,6 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 		ret |= io_kill_timeouts(ctx, task, cancel_all);
 		if (task)
 			ret |= io_run_task_work();
-		ret |= io_run_ctx_fallback(ctx);
 		if (!ret)
 			break;
 		cond_resched();
