Skip to content

Commit

Permalink
io_uring: modify io_get_cqe for CQE32
Browse files Browse the repository at this point in the history
Modify accesses to the CQE array to take large CQEs into account. The
index needs to be shifted by one for large CQEs.

Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-7-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Stefan Roesch authored and Jens Axboe committed May 9, 2022
1 parent effcf8b commit 2fee6bc
Showing 1 changed file with 17 additions and 2 deletions.
19 changes: 17 additions & 2 deletions fs/io_uring.c
Original file line number Diff line number Diff line change
Expand Up @@ -2069,8 +2069,12 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
unsigned int shift = 0;
unsigned int free, queued, len;

if (ctx->flags & IORING_SETUP_CQE32)
shift = 1;

/* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries);
free = ctx->cq_entries - queued;
Expand All @@ -2082,15 +2086,26 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
ctx->cached_cq_tail++;
ctx->cqe_cached = &rings->cqes[off];
ctx->cqe_sentinel = ctx->cqe_cached + len;
return ctx->cqe_cached++;
ctx->cqe_cached++;
return &rings->cqes[off << shift];
}

/*
 * Return the next free CQE slot for posting a completion, or NULL if the
 * CQ ring is full. Fast path: consume from the cached run of slots
 * [cqe_cached, cqe_sentinel) set up by __io_get_cqe(), avoiding the ring
 * bookkeeping on every call.
 *
 * Fix: the diff's deleted line ("return ctx->cqe_cached++;") had been left
 * in place ahead of the added lines; it returned the unshifted cached
 * pointer, discarding the CQE32 offset adjustment and making the added
 * statements unreachable. The stale line is removed so the function matches
 * the commit's post-image.
 */
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		if (ctx->flags & IORING_SETUP_CQE32) {
			unsigned int off = ctx->cqe_cached - ctx->rings->cqes;

			/*
			 * 32-byte CQEs occupy two io_uring_cqe slots in the
			 * array, so the logical index must be doubled:
			 * cqe (== &cqes[off]) + off yields &cqes[off << 1].
			 */
			cqe += off;
		}

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		return cqe;
	}

	/* Cache exhausted: recompute the cached window the slow way. */
	return __io_get_cqe(ctx);
}

Expand Down

0 comments on commit 2fee6bc

Please sign in to comment.