Skip to content

Commit

Permalink
ublk: move ublk_cancel_dev() out of ub->mutex
Browse files Browse the repository at this point in the history
ublk_cancel_dev() just calls ublk_cancel_queue() to cancel all pending
io commands after the ublk request queue is idle. The only protection
needed is for the read & write of ubq->nr_io_ready and for avoiding
duplicated command cancellation, so add one per-queue lock with a cancel
flag to provide this protection, and meanwhile move ublk_cancel_dev()
out of ub->mutex.

Then we no longer need to call io_uring_cmd_complete_in_task() to cancel
pending commands. And the same cancel logic will be re-used for
cancelable uring commands.

This patch basically reverts commit ac5902f ("ublk: fix AB-BA lockdep warning").

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20231009093324.957829-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Ming Lei authored and Jens Axboe committed Oct 17, 2023
1 parent 3421c7f commit 85248d6
Showing 1 changed file with 23 additions and 17 deletions.
40 changes: 23 additions & 17 deletions drivers/block/ublk_drv.c
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,9 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08

/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000

struct ublk_io {
/* userspace buffer address from io cmd */
__u64 addr;
Expand All @@ -139,6 +142,7 @@ struct ublk_queue {
bool force_abort;
bool timeout;
unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
struct ublk_io ios[];
};
Expand Down Expand Up @@ -1474,28 +1478,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
return ubq->nr_io_ready == ubq->q_depth;
}

/*
 * Task-work callback used to cancel one pending io command: completes the
 * uring_cmd with UBLK_IO_RES_ABORT so userspace sees the command aborted.
 * NOTE(review): scheduled via io_uring_cmd_complete_in_task(); the commit
 * this diff belongs to replaces that path with direct io_uring_cmd_done()
 * under ubq->cancel_lock, after which this callback is no longer needed.
 */
static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
}

/*
 * Cancel all pending io commands of @ubq by completing each active
 * uring_cmd with UBLK_IO_RES_ABORT.
 *
 * Each io is canceled at most once: UBLK_IO_FLAG_CANCELED is tested and
 * set under ubq->cancel_lock, so concurrent callers cannot complete the
 * same uring_cmd twice.  This per-queue lock is the only protection
 * needed, which is what allows callers to invoke this path outside of
 * ub->mutex (the diff above shows ublk_cancel_dev() being moved out of
 * that mutex).  ubq->nr_io_ready is deliberately no longer reset here;
 * ublk_queue_reinit() now clears it when a new daemon is attached.
 */
static void ublk_cancel_queue(struct ublk_queue *ubq)
{
	int i;

	for (i = 0; i < ubq->q_depth; i++) {
		struct ublk_io *io = &ubq->ios[i];

		if (io->flags & UBLK_IO_FLAG_ACTIVE) {
			bool done;

			/* atomically claim the right to cancel this io */
			spin_lock(&ubq->cancel_lock);
			done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
			if (!done)
				io->flags |= UBLK_IO_FLAG_CANCELED;
			spin_unlock(&ubq->cancel_lock);

			/* complete outside the lock; IO_URING_F_UNLOCKED
			 * because we do not hold the uring ctx lock here
			 */
			if (!done)
				io_uring_cmd_done(io->cmd,
						UBLK_IO_RES_ABORT, 0,
						IO_URING_F_UNLOCKED);
		}
	}
}

/* Cancel all pending commands, must be called after del_gendisk() returns */
Expand Down Expand Up @@ -1542,7 +1546,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
blk_mq_quiesce_queue(ub->ub_disk->queue);
ublk_wait_tagset_rqs_idle(ub);
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
ublk_cancel_dev(ub);
/* we are going to release task_struct of ubq_daemon and resets
* ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
* Besides, monitor_work is not necessary in QUIESCED state since we have
Expand All @@ -1565,6 +1568,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
__ublk_quiesce_dev(ub);
unlock:
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
}

static void ublk_unquiesce_dev(struct ublk_device *ub)
Expand Down Expand Up @@ -1604,8 +1608,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
put_disk(ub->ub_disk);
ub->ub_disk = NULL;
unlock:
ublk_cancel_dev(ub);
mutex_unlock(&ub->mutex);
ublk_cancel_dev(ub);
cancel_delayed_work_sync(&ub->monitor_work);
}

Expand Down Expand Up @@ -1979,6 +1983,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
void *ptr;
int size;

spin_lock_init(&ubq->cancel_lock);
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
Expand Down Expand Up @@ -2593,8 +2598,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
int i;

WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));

/* All old ioucmds have to be completed */
WARN_ON_ONCE(ubq->nr_io_ready);
ubq->nr_io_ready = 0;
/* old daemon is PF_EXITING, put it now */
put_task_struct(ubq->ubq_daemon);
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
Expand Down

0 comments on commit 85248d6

Please sign in to comment.