ublk: remove io_cmds list in ublk_queue
The current I/O dispatch mechanism - queueing I/O by adding it to the
io_cmds list (and poking task_work as needed), then dispatching it in
ublk server task context by reversing io_cmds and completing the
io_uring command associated with each one - was introduced by commit
7d4a931 ("ublk_drv: don't forward io commands in reserve order")
to ensure that the ublk server received I/O in the same order that the
block layer submitted it to ublk_drv. This extra bookkeeping was only
needed for the "raw" task_work submission path, since the io_uring task
work wrapper maintains FIFO ordering (using quite a similar mechanism,
in fact). The "raw" task_work path is no longer supported in ublk_drv
as of commit 29dc5d0 ("ublk: kill queuing request by task_work_add"),
so the explicit llist/reversal is no longer needed - it just duplicates
logic already present in the underlying io_uring APIs. Remove it.
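
For context on the ordering problem this machinery solved: llist_add()
pushes onto the head of a lock-free singly-linked list, so draining the
list yields entries in LIFO order, and llist_reverse_order() is what
recovered submission (FIFO) order. Below is a minimal userspace sketch
of that push-then-reverse pattern - plain pointers standing in for the
kernel's llist API, not kernel code:

```c
#include <stdio.h>

struct node {
	int tag;		/* stands in for the request tag */
	struct node *next;
};

/* push to the list head, as llist_add() does */
static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

/* reverse in place, as llist_reverse_order() does */
static struct node *reverse(struct node *head)
{
	struct node *prev = NULL;

	while (head) {
		struct node *next = head->next;

		head->next = prev;
		prev = head;
		head = next;
	}
	return prev;
}

int main(void)
{
	struct node n[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
	struct node *head = NULL;

	/* producer side: tags submitted in order 0, 1, 2 */
	for (int i = 0; i < 3; i++)
		push(&head, &n[i]);

	/* consumer side: without the reversal we would see 2, 1, 0 */
	for (struct node *p = reverse(head); p; p = p->next)
		printf("dispatch tag %d\n", p->tag);	/* 0, 1, 2 */
	return 0;
}
```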

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250318-ublk_io_cmds-v1-1-c1bb74798fef@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Uday Shankar authored and Jens Axboe committed Mar 19, 2025
1 parent e1a0202 commit 989bcd6
Showing 1 changed file with 11 additions and 35 deletions.

drivers/block/ublk_drv.c
@@ -74,8 +74,6 @@
 	 UBLK_PARAM_TYPE_DMA_ALIGN)

 struct ublk_rq_data {
-	struct llist_node node;
-
 	struct kref ref;
 };

@@ -142,8 +140,6 @@ struct ublk_queue {
 	struct task_struct	*ubq_daemon;
 	char *io_cmd_buf;

-	struct llist_head	io_cmds;
-
 	unsigned long io_addr;	/* mapped vm address */
 	unsigned int max_io_sz;
 	bool force_abort;
@@ -1108,7 +1104,7 @@ static void ublk_complete_rq(struct kref *ref)
 }

 /*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
  * exiting. So lock is unnecessary.
  *
@@ -1154,11 +1150,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
 	blk_mq_end_request(rq, BLK_STS_IOERR);
 }

-static inline void __ublk_rq_task_work(struct request *req,
-				       unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+				 unsigned int issue_flags)
 {
-	struct ublk_queue *ubq = req->mq_hctx->driver_data;
-	int tag = req->tag;
+	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+	struct ublk_queue *ubq = pdu->ubq;
+	int tag = pdu->tag;
+	struct request *req = blk_mq_tag_to_rq(
+			ubq->dev->tag_set.tags[ubq->q_id], tag);
 	struct ublk_io *io = &ubq->ios[tag];
 	unsigned int mapped_bytes;
@@ -1233,34 +1232,11 @@
 	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }

-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
-					unsigned issue_flags)
-{
-	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-	struct ublk_rq_data *data, *tmp;
-
-	io_cmds = llist_reverse_order(io_cmds);
-	llist_for_each_entry_safe(data, tmp, io_cmds, node)
-		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
-{
-	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-	struct ublk_queue *ubq = pdu->ubq;
-
-	ublk_forward_io_cmds(ubq, issue_flags);
-}
-
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+	struct ublk_io *io = &ubq->ios[rq->tag];

-	if (llist_add(&data->node, &ubq->io_cmds)) {
-		struct ublk_io *io = &ubq->ios[rq->tag];
-
-		io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
-	}
+	io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
 }

 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1453,7 +1429,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
 		struct request *rq;

 		/*
-		 * Either we fail the request or ublk_rq_task_work_fn
+		 * Either we fail the request or ublk_rq_task_work_cb
 		 * will do it
 		 */
 		rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
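One subtlety in the removed ublk_queue_cmd() worth calling out:
llist_add() returns true only when the list was empty beforehand, so
the old code scheduled the task-work callback once per batch and relied
on ublk_forward_io_cmds() to drain everything queued in the meantime.
After this patch each request calls io_uring_cmd_complete_in_task()
directly, and the batching happens inside io_uring's own per-task work
list instead. A userspace sketch of the removed guard (again with plain
pointers standing in for the kernel llist API, not kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

struct node {
	int tag;
	struct node *next;
};

static struct node *io_cmds;	/* stands in for ubq->io_cmds */
static int callbacks_scheduled;

/* mimics llist_add(): returns true if the list was empty before */
static bool push(struct node *n)
{
	bool was_empty = (io_cmds == NULL);

	n->next = io_cmds;
	io_cmds = n;
	return was_empty;
}

static void queue_cmd(struct node *n)
{
	if (push(n))			/* old ublk_queue_cmd() guard */
		callbacks_scheduled++;	/* one drain callback per batch */
}

int main(void)
{
	struct node n[4] = { { .tag = 0 }, { .tag = 1 },
			     { .tag = 2 }, { .tag = 3 } };

	for (int i = 0; i < 4; i++)
		queue_cmd(&n[i]);

	/* a single callback drains the whole batch of four requests */
	printf("callbacks scheduled for 4 requests: %d\n",
	       callbacks_scheduled);	/* prints 1 */
	return 0;
}
```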
