io_uring: lockdep annotate CQ locking
Locking around CQE posting is complex and depends on the options the ring
was created with; add more thorough lockdep annotations checking all the
invariants.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/aa3770b4eacae3915d782cc2ab2f395a99b4b232.1672795976.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov authored and Jens Axboe committed Jan 4, 2023
commit f26cc95, parent 9ffa13f
Showing 2 changed files with 17 additions and 3 deletions.
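
Which lock protects CQE posting depends on how the ring was set up; the new
io_lockdep_assert_cq_locked() macro (added to io_uring/io_uring.h in the diff
below) captures all four cases. For orientation, here is a restatement of that
macro with editorial comments on each branch; the comments are glosses on the
invariants, not part of the patch:

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			/* IOPOLL reaps completions under uring_lock */	\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			/* posting is serialized by completion_lock */	\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			/* submitter exiting: only fallback work posts */ \
			lockdep_assert(current_work());			\
		} else {						\
			/* DEFER_TASKRUN: only the submitter may post */ \
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)

In short: IOPOLL rings complete requests under uring_lock, rings without
task_complete serialize posting with completion_lock, and single-issuer
DEFER_TASKRUN rings post CQEs only from the submitter task, or from fallback
work once that task is exiting.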
io_uring/io_uring.c (2 additions, 3 deletions):

@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+	lockdep_assert_held(&ctx->completion_lock);
+
 	if (is_cqe32)
 		ocq_size += sizeof(struct io_uring_cqe);
 
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
 	struct io_uring_cqe *cqe;
 
-	if (!ctx->task_complete)
-		lockdep_assert_held(&ctx->completion_lock);
-
 	ctx->cq_extra++;
 
 	/*
io_uring/io_uring.h (15 additions, 0 deletions):
@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
 			bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)				\
+	do {								\
+		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
+			lockdep_assert_held(&ctx->uring_lock);		\
+		} else if (!ctx->task_complete) {			\
+			lockdep_assert_held(&ctx->completion_lock);	\
+		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
+			lockdep_assert(current_work());			\
+		} else {						\
+			lockdep_assert(current == ctx->submitter_task);	\
+		}							\
+	} while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
 	__io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
+	io_lockdep_assert_cq_locked(ctx);
+
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
 		struct io_uring_cqe *cqe = ctx->cqe_cached;
 
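
Because io_get_cqe_overflow() is the common point where CQEs are acquired, this
single assert covers every posting path. Any future helper that reads CQ state
directly could assert the same invariant up front; a minimal hypothetical
sketch (io_cq_pending() is invented for illustration; the ctx fields mirror
what __io_cqring_events() already reads):

/* Hypothetical helper, not part of this patch: number of CQEs posted
 * but not yet consumed by userspace. The cached tail is only stable
 * while CQ posting is serialized, so assert the CQ locking invariant. */
static inline unsigned io_cq_pending(struct io_ring_ctx *ctx)
{
	io_lockdep_assert_cq_locked(ctx);
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}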
