io-uring: move io_wait_queue definition to header file
This moves the definition of the io_wait_queue structure to the header
file so that it can also be used from other files.

Signed-off-by: Stefan Roesch <shr@devkernel.io>
Link: https://lore.kernel.org/r/20230608163839.2891748-4-shr@devkernel.io
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stefan Roesch authored and Jens Axboe committed Feb 9, 2024
1 parent adaad27 commit 405b4dc
Showing 2 changed files with 22 additions and 21 deletions.
io_uring/io_uring.c: 0 additions, 21 deletions
@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {
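The io_should_wake() test being moved here relies on an overflow-safe idiom: CQ tail values are free-running 32-bit counters that are allowed to wrap, so the difference is evaluated as a signed int, which stays meaningful across the unsigned wrap point. A minimal standalone sketch of that idiom (the variable names and values below are invented for illustration, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Waiting for the CQ tail to reach 3 after a 32-bit wraparound... */
	unsigned int wait_tail = 3;
	/* ...while the ring is still six completions short of the wrap. */
	unsigned int cq_tail = 0xfffffffd;

	/* As an unsigned quantity the difference is huge and meaningless. */
	unsigned int udist = cq_tail - wait_tail;

	/*
	 * Reinterpreted as a signed int it is the true distance, -6:
	 * the tail has not yet caught up with the waited-for value.
	 */
	int dist = (int)(cq_tail - wait_tail);

	printf("unsigned: %u, signed: %d\n", udist, dist);
	printf("wake: %s\n", dist >= 0 ? "yes" : "no");
	return 0;
}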
io_uring/io_uring.h: 22 additions, 0 deletions
@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
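With the structure and io_should_wake() now exported from io_uring.h, other compilation units under io_uring/ can test the wake condition directly. A rough sketch of what such a consumer could look like (hypothetical code for illustration only; the actual users are the follow-up patches in this series, e.g. the NAPI busy-poll path):

/* some-other-file.c: hypothetical consumer of the now-shared definitions */
#include "io_uring.h"

/*
 * Busy-poll until the waiter has enough completions or a timeout has
 * fired, as reported by io_should_wake().
 */
static void example_poll_for_events(struct io_wait_queue *iowq)
{
	while (!io_should_wake(iowq))
		cpu_relax();
}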
