Merge tag 'block-6.14-20250313' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Concurrent pci error and hotplug handling fix (Keith)
     - Endpoint function fixes (Damien)

 - Fix for a regression introduced in this cycle in the error checking for
   batched request completions (Shin'ichiro); a short illustrative sketch of
   the issue precedes the file diffs below

* tag 'block-6.14-20250313' of git://git.kernel.dk/linux:
  block: change blk_mq_add_to_batch() third argument type to bool
  nvme: move error logging from nvme_end_req() to __nvme_end_req()
  nvmet: pci-epf: Do not add an IRQ vector if not needed
  nvmet: pci-epf: Set NVMET_PCI_EPF_Q_LIVE when a queue is fully created
  nvme-pci: fix stuck reset on concurrent DPC and HP
Linus Torvalds committed Mar 14, 2025
commit 580b203 (parents 83158b2 + a938135)
Showing 7 changed files with 54 additions and 32 deletions.
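To make the blk_mq_add_to_batch() change easier to follow: as the include/linux/blk-mq.h hunk below shows, the helper previously took an int third argument and refused batching only when that value was negative, while callers such as nvme and null_blk pass positive error codes (NVMe status codes, blk_status_t values). A failed request could therefore still be added to a completion batch as if it had succeeded. The new bool argument makes each caller state explicitly whether the request failed, comparing against its own success value (BLK_STS_OK, NVME_SC_SUCCESS, VIRTIO_BLK_S_OK), as the driver hunks below show. The following is a stand-alone user-space model of that difference, not kernel code; the MOCK_* names and helper functions are invented for illustration.

/* Stand-alone model of the old vs. new third argument, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_NVME_SC_SUCCESS    0x0
#define MOCK_NVME_SC_INTERNAL   0x6     /* error codes are positive, like real NVMe status values */

/* Old contract: batching was refused only for negative values. */
static bool old_may_batch(int ioerror)
{
        return !(ioerror < 0);
}

/* New contract: the caller evaluates the error condition itself. */
static bool new_may_batch(bool is_error)
{
        return !is_error;
}

int main(void)
{
        int status = MOCK_NVME_SC_INTERNAL;     /* a failed request */

        /* Old: a positive status passes the "< 0" test, so the failure is batched away. */
        printf("old contract batches failed request: %s\n",
               old_may_batch(status) ? "yes (bug)" : "no");
        /* New: the caller spells out the comparison, so the failure is handled. */
        printf("new contract batches failed request: %s\n",
               new_may_batch(status != MOCK_NVME_SC_SUCCESS) ? "yes" : "no");
        return 0;
}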
4 changes: 2 additions & 2 deletions drivers/block/null_blk/main.c
@@ -1549,8 +1549,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
                 cmd = blk_mq_rq_to_pdu(req);
                 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
                                               blk_rq_sectors(req));
-                if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
-                                         blk_mq_end_request_batch))
+                if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+                                         blk_mq_end_request_batch))
                         blk_mq_end_request(req, cmd->error);
                 nr++;
         }
5 changes: 3 additions & 2 deletions drivers/block/virtio_blk.c
@@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 
         while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
                 struct request *req = blk_mq_rq_from_pdu(vbr);
+                u8 status = virtblk_vbr_status(vbr);
 
                 found++;
                 if (!blk_mq_complete_request_remote(req) &&
-                    !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
-                                         virtblk_complete_batch))
+                    !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+                                         virtblk_complete_batch))
                         virtblk_request_done(req);
         }
 
3 changes: 2 additions & 1 deletion drivers/nvme/host/apple.c
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
         }
 
         if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-            !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+            !blk_mq_add_to_batch(req, iob,
+                                 nvme_req(req)->status != NVME_SC_SUCCESS,
                                  apple_nvme_complete_batch))
                 apple_nvme_complete_rq(req);
 }
12 changes: 6 additions & 6 deletions drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
 
 static inline void __nvme_end_req(struct request *req)
 {
+        if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+                if (blk_rq_is_passthrough(req))
+                        nvme_log_err_passthru(req);
+                else
+                        nvme_log_error(req);
+        }
         nvme_end_req_zoned(req);
         nvme_trace_bio_complete(req);
         if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
 {
         blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-        if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
-                if (blk_rq_is_passthrough(req))
-                        nvme_log_err_passthru(req);
-                else
-                        nvme_log_error(req);
-        }
         __nvme_end_req(req);
         blk_mq_end_request(req, status);
 }
18 changes: 15 additions & 3 deletions drivers/nvme/host/pci.c
@@ -1130,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 
         trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
         if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
-            !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
-                        nvme_pci_complete_batch))
+            !blk_mq_add_to_batch(req, iob,
+                                 nvme_req(req)->status != NVME_SC_SUCCESS,
+                                 nvme_pci_complete_batch))
                 nvme_pci_complete_rq(req);
 }
 
@@ -1411,17 +1412,28 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
         struct nvme_dev *dev = nvmeq->dev;
         struct request *abort_req;
         struct nvme_command cmd = { };
+        struct pci_dev *pdev = to_pci_dev(dev->dev);
         u32 csts = readl(dev->bar + NVME_REG_CSTS);
         u8 opcode;
 
+        /*
+         * Shutdown the device immediately if we see it is disconnected. This
+         * unblocks PCIe error handling if the nvme driver is waiting in
+         * error_resume for a device that has been removed. We can't unbind the
+         * driver while the driver's error callback is waiting to complete, so
+         * we're relying on a timeout to break that deadlock if a removal
+         * occurs while reset work is running.
+         */
+        if (pci_dev_is_disconnected(pdev))
+                nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
         if (nvme_state_terminal(&dev->ctrl))
                 goto disable;
 
         /* If PCI error recovery process is happening, we cannot reset or
          * the recovery mechanism will surely fail.
          */
         mb();
-        if (pci_channel_offline(to_pci_dev(dev->dev)))
+        if (pci_channel_offline(pdev))
                 return BLK_EH_RESET_TIMER;
 
         /*
28 changes: 14 additions & 14 deletions drivers/nvme/target/pci-epf.c
@@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
         struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
         u16 status;
 
-        if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+        if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
                 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
         if (!(flags & NVME_QUEUE_PHYS_CONTIG))
                 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
 
-        if (flags & NVME_CQ_IRQ_ENABLED)
-                set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-
         cq->pci_addr = pci_addr;
         cq->qid = cqid;
         cq->depth = qsize + 1;
@@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
         cq->qes = ctrl->io_cqes;
         cq->pci_size = cq->qes * cq->depth;
 
-        cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
-        if (!cq->iv) {
-                status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
-                goto err;
+        if (flags & NVME_CQ_IRQ_ENABLED) {
+                cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+                if (!cq->iv)
+                        return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+                set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
         }
 
         status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
         if (status != NVME_SC_SUCCESS)
                 goto err;
 
+        set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
         dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
                 cqid, qsize, cq->qes, cq->vector);
 
         return NVME_SC_SUCCESS;
 
 err:
-        clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-        clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+        if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+                nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
         return status;
 }
 
@@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
         struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
         u16 status;
 
-        if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+        if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
                 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
 
         if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
 
         status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
         if (status != NVME_SC_SUCCESS)
-                goto out_clear_bit;
+                return status;
 
         sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
                                      min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@@ -1365,15 +1365,15 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
                 goto out_destroy_sq;
         }
 
+        set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
         dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
                 sqid, qsize, sq->qes);
 
         return NVME_SC_SUCCESS;
 
 out_destroy_sq:
         nvmet_sq_destroy(&sq->nvme_sq);
-out_clear_bit:
-        clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
         return status;
 }
 
16 changes: 12 additions & 4 deletions include/linux/blk-mq.h
@@ -852,20 +852,28 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
         return rq->rq_flags & RQF_RESV;
 }
 
-/*
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to batch
+ * @iob: The batch to add the request
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completaion handler for the request
+ *
  * Batched completions only work when there is no I/O error and no special
  * ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
  */
 static inline bool blk_mq_add_to_batch(struct request *req,
-                struct io_comp_batch *iob, int ioerror,
+                struct io_comp_batch *iob, bool is_error,
                 void (*complete)(struct io_comp_batch *))
 {
         /*
          * Check various conditions that exclude batch processing:
          * 1) No batch container
          * 2) Has scheduler data attached
          * 3) Not a passthrough request and end_io set
-         * 4) Not a passthrough request and an ioerror
+         * 4) Not a passthrough request and failed with an error
          */
         if (!iob)
                 return false;
@@ -874,7 +882,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
         if (!blk_rq_is_passthrough(req)) {
                 if (req->end_io)
                         return false;
-                if (ioerror < 0)
+                if (is_error)
                         return false;
         }
 
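For readers less familiar with this helper, here is a stand-alone model of the batching conditions that the new kernel-doc and inline comment describe: a request joins the batch only if a batch container exists, no scheduler data is attached, and, for non-passthrough requests, no private ->end_io handler is set and the request did not fail. This is illustrative user-space C, not kernel code; the mock_* types and names are invented, and the real helper operates on struct request and struct io_comp_batch.

/*
 * Stand-alone model (not kernel code) of the batching conditions documented
 * above. All types and names here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_request {
        bool passthrough;       /* models blk_rq_is_passthrough(req) */
        bool has_sched_data;    /* models "has scheduler data attached" */
        bool has_end_io;        /* models req->end_io being set */
};

struct mock_batch {
        int nr;                 /* number of requests collected so far */
};

static bool mock_add_to_batch(struct mock_request *req, struct mock_batch *iob,
                              bool is_error)
{
        if (!iob)
                return false;           /* 1) no batch container */
        if (req->has_sched_data)
                return false;           /* 2) scheduler data attached */
        if (!req->passthrough) {
                if (req->has_end_io)
                        return false;   /* 3) private completion handler */
                if (is_error)
                        return false;   /* 4) failed request */
        }
        iob->nr++;
        return true;
}

int main(void)
{
        struct mock_batch iob = { 0 };
        struct mock_request ok = { 0 }, failed = { 0 };

        printf("ok request batched:     %d\n", mock_add_to_batch(&ok, &iob, false));
        printf("failed request batched: %d\n", mock_add_to_batch(&failed, &iob, true));
        return 0;
}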
