From 3f674e7b670b7b7d9261935820e4eba3c059f835 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 6 Mar 2025 14:25:57 -0800
Subject: [PATCH 1/5] nvme-pci: fix stuck reset on concurrent DPC and HP

The PCIe error handling has the nvme driver quiesce the device, attempt
to restart it, then wait for that restart to complete.

A PCIe DPC event also toggles the PCIe link. If the slot doesn't have
out-of-band presence detection, this will trigger a pciehp
re-enumeration.

The error handling that calls nvme_error_resume is holding the device
lock while this happens. This lock blocks pciehp's request to
disconnect the driver from proceeding.

Meanwhile the nvme reset can't make forward progress because its device
isn't there anymore with outstanding IO, and the timeout handler won't
do anything to fix it because the device is undergoing error handling.

End result: deadlocked.

Fix this by having the timeout handler short-cut the disabling for a
disconnected PCIe device. The downside is that we're relying on an IO
timeout to clean up this mess, which could take a minute by default.

Tested-by: Nilay Shroff
Reviewed-by: Nilay Shroff
Signed-off-by: Keith Busch
---
 drivers/nvme/host/pci.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 640590b21728..e59aad269abf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1411,9 +1411,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
 	struct nvme_command cmd = { };
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 	u8 opcode;
 
+	/*
+	 * Shutdown the device immediately if we see it is disconnected. This
+	 * unblocks PCIe error handling if the nvme driver is waiting in
+	 * error_resume for a device that has been removed. We can't unbind the
+	 * driver while the driver's error callback is waiting to complete, so
+	 * we're relying on a timeout to break that deadlock if a removal
+	 * occurs while reset work is running.
+	 */
+	if (pci_dev_is_disconnected(pdev))
+		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 	if (nvme_state_terminal(&dev->ctrl))
 		goto disable;
 
@@ -1421,7 +1432,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	 * the recovery mechanism will surely fail.
 	 */
 	mb();
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
+	if (pci_channel_offline(pdev))
 		return BLK_EH_RESET_TIMER;
 
 	/*

From bf9b8020a80d32f6aa80591297a087d0519dc931 Mon Sep 17 00:00:00 2001
From: Damien Le Moal
Date: Thu, 6 Mar 2025 17:55:48 +0900
Subject: [PATCH 2/5] nvmet: pci-epf: Set NVMET_PCI_EPF_Q_LIVE when a queue is fully created

The function nvmet_pci_epf_create_sq() uses test_and_set_bit() to check
that a submission queue is not already live and, if not, sets the
NVMET_PCI_EPF_Q_LIVE queue flag to declare the sq live (ready to use).
However, this is done on entry to the function, before the submission
queue is actually fully initialized and ready to use. This creates a
race with the function nvmet_pci_epf_poll_sqs_work(), which looks at
the NVMET_PCI_EPF_Q_LIVE queue flag to poll the submission queue when
it is live. This race can lead to invalid DMA transfers if
nvmet_pci_epf_poll_sqs_work() runs after the NVMET_PCI_EPF_Q_LIVE flag
is set but before the sq PCI address and doorbell offset are set.
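Schematically, the window looks like this (a simplified sketch, not the
exact driver code; the pci_addr assignment stands in for all of the
queue setup that runs after the flag is set):

	if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
	/* sq is already visible to nvmet_pci_epf_poll_sqs_work() here... */
	sq->pci_addr = pci_addr;	/* ...but its fields are not set up yet. */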
Avoid this race by only testing the NVMET_PCI_EPF_Q_LIVE flag on entry
to nvmet_pci_epf_create_sq() and setting it after the submission queue
is fully set up, right before nvmet_pci_epf_create_sq() returns
success. Since the function nvmet_pci_epf_create_cq() has the same racy
flag-setting pattern, make a similar change in that function as well.

Fixes: 0faa0fe6f90e ("nvmet: New NVMe PCI endpoint function target driver")
Signed-off-by: Damien Le Moal
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---
 drivers/nvme/target/pci-epf.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 565d2bd36dcd..d55ad334670c 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1265,7 +1265,7 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 	struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
 	u16 status;
 
-	if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+	if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
 	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1300,6 +1300,8 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 	if (status != NVME_SC_SUCCESS)
 		goto err;
 
+	set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
 	dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
 		cqid, qsize, cq->qes, cq->vector);
 
@@ -1307,7 +1309,6 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 
 err:
 	clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-	clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
 	return status;
 }
 
@@ -1333,7 +1334,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
 	struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
 	u16 status;
 
-	if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+	if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
 		return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
 	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1355,7 +1356,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
 
 	status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
 	if (status != NVME_SC_SUCCESS)
-		goto out_clear_bit;
+		return status;
 
 	sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
 			min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@@ -1365,6 +1366,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
 		goto out_destroy_sq;
 	}
 
+	set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
 	dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
 		sqid, qsize, sq->qes);
 
@@ -1372,8 +1375,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
 
 out_destroy_sq:
 	nvmet_sq_destroy(&sq->nvme_sq);
-out_clear_bit:
-	clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
 	return status;
 }

From 39393f5c5c795992507aa5005a9d58396a5b07f1 Mon Sep 17 00:00:00 2001
From: Damien Le Moal
Date: Thu, 6 Mar 2025 17:55:49 +0900
Subject: [PATCH 3/5] nvmet: pci-epf: Do not add an IRQ vector if not needed

The function nvmet_pci_epf_create_cq() unconditionally calls
nvmet_pci_epf_add_irq_vector() to add an IRQ vector for a completion
queue. But this is not correct if the host requested the creation of a
completion queue for polling, without an IRQ vector specified (i.e.
the flag NVME_CQ_IRQ_ENABLED is not set).

Fix this by calling nvmet_pci_epf_add_irq_vector() and setting the
queue flag NVMET_PCI_EPF_Q_IRQ_ENABLED for the cq only if
NVME_CQ_IRQ_ENABLED is set. While at it, also fix the error path to
remove the added IRQ vector if nvmet_cq_create() fails.
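For context, interrupt delivery on the completion path can then key off
the same queue flag; schematically (a simplified sketch, not the exact
driver code, assuming a raise-IRQ helper along the lines of the
driver's nvmet_pci_epf_raise_irq()):

	/* When posting CQ entries: only IRQ-enabled queues signal the host. */
	if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
		nvmet_pci_epf_raise_irq(ctrl, cq, force);
	/* Otherwise the host polls the CQ, so no IRQ vector is needed. */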
Fixes: 0faa0fe6f90e ("nvmet: New NVMe PCI endpoint function target driver")
Signed-off-by: Damien Le Moal
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---
 drivers/nvme/target/pci-epf.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index d55ad334670c..b1e31483f157 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1271,9 +1271,6 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 	if (!(flags & NVME_QUEUE_PHYS_CONTIG))
 		return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
 
-	if (flags & NVME_CQ_IRQ_ENABLED)
-		set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-
 	cq->pci_addr = pci_addr;
 	cq->qid = cqid;
 	cq->depth = qsize + 1;
@@ -1290,10 +1287,11 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 	cq->qes = ctrl->io_cqes;
 	cq->pci_size = cq->qes * cq->depth;
 
-	cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
-	if (!cq->iv) {
-		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
-		goto err;
+	if (flags & NVME_CQ_IRQ_ENABLED) {
+		cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+		if (!cq->iv)
+			return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+		set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
 	}
 
 	status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
@@ -1308,7 +1306,8 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
 	return NVME_SC_SUCCESS;
 
 err:
-	clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
+	if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+		nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
 	return status;
 }

From e5c2bcc0cd47321d78bb4e865d7857304139f95d Mon Sep 17 00:00:00 2001
From: Shin'ichiro Kawasaki
Date: Tue, 11 Mar 2025 19:43:58 +0900
Subject: [PATCH 4/5] nvme: move error logging from nvme_end_req() to __nvme_end_req()

Before commit 1f47ed294a2b ("block: cleanup and fix batch completion
adding conditions"), blk_mq_add_to_batch() did not add failed
passthrough requests to a batch, and returned false. After the commit,
blk_mq_add_to_batch() always adds passthrough requests to a batch
regardless of whether the request failed or not, and returns true. This
broke the error logging feature in the NVMe driver.

Before the commit, the call chain for a failed passthrough request was
as follows:

  nvme_handle_cqe()
   blk_mq_add_to_batch()  .. false is returned, then call nvme_pci_complete_rq()
   nvme_pci_complete_rq()
    nvme_complete_rq()
     nvme_end_req()
      nvme_log_err_passthru()  .. error logging
      __nvme_end_req()         .. end of the request

After the commit, the call chain is as follows:

  nvme_handle_cqe()
   blk_mq_add_to_batch()  .. true is returned, then set nvme_pci_complete_batch()
   ..
  nvme_pci_complete_batch()
   nvme_complete_batch()
    nvme_complete_batch_req()
     __nvme_end_req()  .. end of the request, without error logging

To make the error logging feature work again for passthrough requests,
move the nvme_log_err_passthru() call from nvme_end_req() to
__nvme_end_req(). While at it, move the nvme_log_error() call for
non-passthrough requests together with nvme_log_err_passthru(). Even
though the trigger commit does not affect non-passthrough requests,
moving the two calls together keeps the code simple.
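For reference, the batch path drains the accumulated requests in
nvme_complete_batch(), roughly like this (a simplified sketch based on
the call chain above, not a verbatim copy of the helper):

	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);			/* per-driver cleanup callback */
		nvme_complete_batch_req(req);	/* __nvme_end_req(): now logs errors */
	}
	blk_mq_end_request_batch(iob);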
Fixes: 1f47ed294a2b ("block: cleanup and fix batch completion adding conditions")
Signed-off-by: Shin'ichiro Kawasaki
Reviewed-by: Hannes Reinecke
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20250311104359.1767728-2-shinichiro.kawasaki@wdc.com
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f028913e2e62..8359d0aa0e44 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
 
 static inline void __nvme_end_req(struct request *req)
 {
+	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+		if (blk_rq_is_passthrough(req))
+			nvme_log_err_passthru(req);
+		else
+			nvme_log_error(req);
+	}
 	nvme_end_req_zoned(req);
 	nvme_trace_bio_complete(req);
 	if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
 {
 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
-		if (blk_rq_is_passthrough(req))
-			nvme_log_err_passthru(req);
-		else
-			nvme_log_error(req);
-	}
 	__nvme_end_req(req);
 	blk_mq_end_request(req, status);
 }

From 9bce6b5f8987678b9c6c1fe433af6b5fe41feadc Mon Sep 17 00:00:00 2001
From: Shin'ichiro Kawasaki
Date: Tue, 11 Mar 2025 19:43:59 +0900
Subject: [PATCH 5/5] block: change blk_mq_add_to_batch() third argument type to bool

Commit 1f47ed294a2b ("block: cleanup and fix batch completion adding
conditions") modified the evaluation criteria for the third argument,
'ioerror', in the blk_mq_add_to_batch() function. Initially, the
function checked whether 'ioerror' was zero. Following the commit, it
started checking for negative error values, with the presumption that
such values, for instance -EIO, would be passed in.

However, blk_mq_add_to_batch() callers do not pass negative error
values. Instead, they pass status codes defined in various ways:

- the NVMe PCI and Apple drivers pass the NVMe status code
- the virtio_blk driver passes the virtblk request header status byte
- the null_blk driver passes blk_status_t

These codes are either zero or positive, so the revised check fails to
work as intended. Specifically, with the NVMe PCI driver, this
modification led to the failure of the blktests test case nvme/039. In
this test scenario, errors are artificially injected into the NVMe
driver, resulting in positive NVMe status codes passed to
blk_mq_add_to_batch(), which unexpectedly processes the failed I/O in a
batch. Hence the failure.

To correct the ioerror check within blk_mq_add_to_batch(), make all
callers uniformly pass the argument as a boolean. Modify the callers to
check their specific status codes and pass the boolean value
'is_error'. Also describe the arguments of blk_mq_add_to_batch() as
kerneldoc.
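With a boolean third argument, every caller follows the same pattern; a
hypothetical driver (the my_drv_* names are illustrative only, not a
real API) would complete requests like this:

	/* status is the driver's native completion code. */
	if (!blk_mq_add_to_batch(req, iob, status != MY_DRV_STATUS_OK,
				 my_drv_complete_batch))
		my_drv_complete_one(req);	/* non-batched fallback */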
Fixes: 1f47ed294a2b ("block: cleanup and fix batch completion adding conditions")
Signed-off-by: Shin'ichiro Kawasaki
Link: https://lore.kernel.org/r/20250311104359.1767728-3-shinichiro.kawasaki@wdc.com
[axboe: fold in documentation update]
Signed-off-by: Jens Axboe
---
 drivers/block/null_blk/main.c |  4 ++--
 drivers/block/virtio_blk.c    |  5 +++--
 drivers/nvme/host/apple.c     |  3 ++-
 drivers/nvme/host/pci.c       |  5 +++--
 include/linux/blk-mq.h        | 16 ++++++++++++----
 5 files changed, 22 insertions(+), 11 deletions(-)

diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index d94ef37480bd..fdc7a0b2af10 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1549,8 +1549,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 		cmd = blk_mq_rq_to_pdu(req);
 		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
 					blk_rq_sectors(req));
-		if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
-					blk_mq_end_request_batch))
+		if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+					blk_mq_end_request_batch))
 			blk_mq_end_request(req, cmd->error);
 		nr++;
 	}

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a4af39fc7ea2..286cab5e5368 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 
 	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
 		struct request *req = blk_mq_rq_from_pdu(vbr);
+		u8 status = virtblk_vbr_status(vbr);
 
 		found++;
 		if (!blk_mq_complete_request_remote(req) &&
-		    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
-						virtblk_complete_batch))
+		    !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+					 virtblk_complete_batch))
 			virtblk_request_done(req);
 	}

diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a060f69558e7..8971aca41e63 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
 	}
 
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+	    !blk_mq_add_to_batch(req, iob,
+				 nvme_req(req)->status != NVME_SC_SUCCESS,
 				 apple_nvme_complete_batch))
 		apple_nvme_complete_rq(req);
 }

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 640590b21728..75de86e235ad 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1130,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
 	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
-					nvme_pci_complete_batch))
+	    !blk_mq_add_to_batch(req, iob,
+				 nvme_req(req)->status != NVME_SC_SUCCESS,
+				 nvme_pci_complete_batch))
 		nvme_pci_complete_rq(req);
 }

diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 71f4f0cc3dac..aba9c24486aa 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -852,12 +852,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
 	return rq->rq_flags & RQF_RESV;
 }
 
-/*
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to the batch
+ * @iob: The batch to add the request to
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
  * Batched completions only work when there is no I/O error and no special
  * ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
  */
 static inline bool blk_mq_add_to_batch(struct request *req,
-		struct io_comp_batch *iob, int ioerror,
+		struct io_comp_batch *iob, bool is_error,
 		void (*complete)(struct io_comp_batch *))
 {
 	/*
@@ -865,7 +873,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
 	 * 1) No batch container
 	 * 2) Has scheduler data attached
 	 * 3) Not a passthrough request and end_io set
-	 * 4) Not a passthrough request and an ioerror
+	 * 4) Not a passthrough request and failed with an error
 	 */
 	if (!iob)
 		return false;
@@ -874,7 +882,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
 	if (!blk_rq_is_passthrough(req)) {
 		if (req->end_io)
 			return false;
-		if (ioerror < 0)
+		if (is_error)
 			return false;
 	}