Commit

Merge tag 'nvme-5.13-2021-04-06' of git://git.infradead.org/nvme into for-5.13/drivers

Pull NVMe updates from Christoph:

"nvme updates for Linux 5.13

 - fix handling of very large MDTS values (Bart Van Assche)
 - retrigger ANA log update if group descriptor isn't found
   (Hannes Reinecke)
 - fix locking contexts in nvme-tcp and nvmet-tcp (Sagi Grimberg)
 - return proper error code from discovery ctrl (Hou Pu)
 - verify the SGLS field in nvmet-tcp and nvmet-fc (Max Gurtovoy)
 - disallow passthru cmd from targeting a nsid != nsid of the block dev
   (Niklas Cassel)
 - do not allow model_number to exceed 40 bytes in nvmet (Noam Gottlieb)
 - enable optional queue idle period tracking in nvmet-tcp
   (Mark Wunderlich)
 - various cleanups and optimizations (Chaitanya Kulkarni, Kanchan Joshi)
 - expose fast_io_fail_tmo in sysfs (Daniel Wagner)
 - implement non-MDTS command limits (Keith Busch)
 - reduce warnings for unhandled command effects (Keith Busch)
 - allocate storage for the SQE as part of the nvme_request (Keith Busch)"

* tag 'nvme-5.13-2021-04-06' of git://git.infradead.org/nvme: (33 commits)
  nvme: fix handling of large MDTS values
  nvme: implement non-mdts command limits
  nvme: disallow passthru cmd from targeting a nsid != nsid of the block dev
  nvme: retrigger ANA log update if group descriptor isn't found
  nvme: export fast_io_fail_tmo to sysfs
  nvme: remove superfluous else in nvme_ctrl_loss_tmo_store
  nvme: use sysfs_emit instead of sprintf
  nvme-fc: check sgl supported by target
  nvme-tcp: check sgl supported by target
  nvmet-tcp: enable optional queue idle period tracking
  nvmet-tcp: fix incorrect locking in state_change sk callback
  nvme-tcp: block BH in sk state_change sk callback
  nvmet: return proper error code from discovery ctrl
  nvme: warn of unhandled effects only once
  nvme: use driver pdu command for passthrough
  nvme-pci: allocate nvme_command within driver pdu
  nvmet: do not allow model_number exceed 40 bytes
  nvmet: remove unnecessary ctrl parameter
  nvmet-fc: update function documentation
  nvme-fc: fix the function documentation comment
  ...
Jens Axboe committed Apr 6, 2021
2 parents 8075585 + 8609c63 commit 762d6bd
Showing 17 changed files with 336 additions and 178 deletions.
298 changes: 195 additions & 103 deletions drivers/nvme/host/core.c

Large diffs are not rendered by default.

14 changes: 9 additions & 5 deletions drivers/nvme/host/fc.c
@@ -1708,7 +1708,7 @@ nvme_fc_handle_ls_rqst_work(struct work_struct *work)
*
* If this routine returns error, the LLDD should abort the exchange.
*
* @remoteport: pointer to the (registered) remote port that the LS
* @portptr: pointer to the (registered) remote port that the LS
* was received from. The remoteport is associated with
* a specific localport.
* @lsrsp: pointer to a nvmefc_ls_rsp response structure to be
@@ -2128,6 +2128,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
op->op.fcp_req.first_sgl = op->sgl;
op->op.fcp_req.private = &op->priv[0];
nvme_req(rq)->ctrl = &ctrl->ctrl;
nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
return res;
}

@@ -2759,8 +2760,6 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_fc_ctrl *ctrl = queue->ctrl;
struct request *rq = bd->rq;
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
struct nvme_command *sqe = &cmdiu->sqe;
enum nvmefc_fcp_datadir io_dir;
bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
u32 data_len;
@@ -2770,7 +2769,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

ret = nvme_setup_cmd(ns, rq, sqe);
ret = nvme_setup_cmd(ns, rq);
if (ret)
return ret;

@@ -3086,7 +3085,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)

blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

ret = nvme_init_identify(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl);
if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
goto out_disconnect_admin_queue;

@@ -3100,6 +3099,11 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
}

/* FC-NVME supports normal SGL Data Block Descriptors */
if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
dev_err(ctrl->ctrl.device,
"Mandatory sgls are not supported!\n");
goto out_disconnect_admin_queue;
}

if (opts->queue_size > ctrl->ctrl.maxcmd) {
/* warn if maxcmd is lower than queue_size */
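The SGL check added to nvme_fc_create_association() (and mirrored in nvme-tcp below) tests bits 1:0 of the controller's Identify SGLS field; if both are clear, the controller offers no SGL support and the association is torn down. A minimal user-space sketch of the same test, assuming the SGLS dword has already been read from the Identify Controller data (the helper name is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 1:0 of the Identify Controller SGLS field are both zero when the
 * controller reports no SGL support at all. */
static bool ctrl_supports_sgls(uint32_t sgls)
{
	return (sgls & ((1 << 0) | (1 << 1))) != 0;
}

int main(void)
{
	uint32_t sgls = 0;	/* example: controller reports no SGL support */

	if (!ctrl_supports_sgls(sgls))
		fprintf(stderr, "Mandatory sgls are not supported!\n");
	return 0;
}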
12 changes: 8 additions & 4 deletions drivers/nvme/host/multipath.c
@@ -602,8 +602,8 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);

return sprintf(buf, "%s\n",
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
return sysfs_emit(buf, "%s\n",
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -628,7 +628,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

@@ -637,7 +637,7 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

@@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
if (desc.state) {
/* found the group desc: update */
nvme_update_ns_ana_state(&desc, ns);
} else {
/* group desc not found: trigger a re-read */
set_bit(NVME_NS_ANA_PENDING, &ns->flags);
queue_work(nvme_wq, &ns->ctrl->ana_work);
}
} else {
ns->ana_state = NVME_ANA_OPTIMIZED;
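Besides the sysfs_emit conversions, this hunk carries the ANA fix from the pull message: when the cached ANA log has no descriptor for the namespace's group, the namespace is flagged as pending and ana_work is requeued so the log is fetched again. A toy user-space model of that fallback, with all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct toy_ns {
	unsigned int ana_grpid;
	bool ana_pending;
};

/* Pretend the cached ANA log only contains a descriptor for group 1. */
static bool lookup_group_desc(unsigned int grpid)
{
	return grpid == 1;
}

static void toy_mpath_add_disk(struct toy_ns *ns)
{
	if (lookup_group_desc(ns->ana_grpid)) {
		printf("group %u found: ANA state updated\n", ns->ana_grpid);
	} else {
		/* group desc not found: trigger a re-read */
		ns->ana_pending = true;
		printf("group %u missing: ANA log re-read queued\n",
		       ns->ana_grpid);
	}
}

int main(void)
{
	struct toy_ns ns = { .ana_grpid = 2, .ana_pending = false };

	toy_mpath_add_disk(&ns);
	return 0;
}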
10 changes: 6 additions & 4 deletions drivers/nvme/host/nvme.h
@@ -276,6 +276,9 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 max_segments;
u32 max_integrity_segments;
u32 max_discard_sectors;
u32 max_discard_segments;
u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
u32 max_zone_append;
#endif
@@ -599,7 +602,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

@@ -623,8 +626,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -745,7 +747,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
if (ctrl->subsys->cmic & (1 << 3))
if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
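The three new nvme_ctrl fields cache the non-MDTS limits (Discard and Write Zeroes) advertised by the controller; the code that derives and applies them lives in the core.c diff, which is not rendered above. A hedged, kernel-style sketch of how such cached values typically feed the block layer — the wrapper function is hypothetical, only the blk_queue_* helpers are standard:

/* Illustrative only: push the cached non-MDTS limits into a namespace
 * request queue.  Not the actual core.c change. */
static void toy_apply_nonmdts_limits(struct nvme_ctrl *ctrl,
				     struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, ctrl->max_discard_sectors);
	blk_queue_max_discard_segments(q, ctrl->max_discard_segments);
	blk_queue_max_write_zeroes_sectors(q, ctrl->max_zeroes_sectors);
}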
26 changes: 10 additions & 16 deletions drivers/nvme/host/pci.c
@@ -224,6 +224,7 @@ struct nvme_queue {
*/
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
struct nvme_queue *nvmeq;
bool use_sgl;
int aborted;
@@ -429,6 +430,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
iod->nvmeq = nvmeq;

nvme_req(req)->ctrl = &dev->ctrl;
nvme_req(req)->cmd = &iod->cmd;
return 0;
}

@@ -917,7 +919,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_command cmnd;
struct nvme_command *cmnd = &iod->cmd;
blk_status_t ret;

iod->aborted = 0;
@@ -931,24 +933,24 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
return BLK_STS_IOERR;

ret = nvme_setup_cmd(ns, req, &cmnd);
ret = nvme_setup_cmd(ns, req);
if (ret)
return ret;

if (blk_rq_nr_phys_segments(req)) {
ret = nvme_map_data(dev, req, &cmnd);
ret = nvme_map_data(dev, req, cmnd);
if (ret)
goto out_free_cmd;
}

if (blk_integrity_rq(req)) {
ret = nvme_map_metadata(dev, req, &cmnd);
ret = nvme_map_metadata(dev, req, cmnd);
if (ret)
goto out_unmap_data;
}

blk_mq_start_request(req);
nvme_submit_cmd(nvmeq, &cmnd, bd->last);
nvme_submit_cmd(nvmeq, cmnd, bd->last);
return BLK_STS_OK;
out_unmap_data:
nvme_unmap_data(dev, req);
@@ -1060,18 +1062,10 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
irqreturn_t ret = IRQ_NONE;

/*
* The rmb/wmb pair ensures we see all updates from a previous run of
* the irq handler, even if that was on another CPU.
*/
rmb();
if (nvme_process_cq(nvmeq))
ret = IRQ_HANDLED;
wmb();

return ret;
return IRQ_HANDLED;
return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
@@ -2653,7 +2647,7 @@ static void nvme_reset_work(struct work_struct *work)
*/
dev->ctrl.max_integrity_segments = 1;

result = nvme_init_identify(&dev->ctrl);
result = nvme_init_ctrl_finish(&dev->ctrl);
if (result)
goto out;

Expand Down
7 changes: 4 additions & 3 deletions drivers/nvme/host/rdma.c
@@ -314,6 +314,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
NVME_RDMA_DATA_SGL_SIZE;

req->queue = queue;
nvme_req(rq)->cmd = req->sqe.data;

return 0;
}
@@ -917,7 +918,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,

blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

error = nvme_init_identify(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
goto out_quiesce_queue;

@@ -2038,7 +2039,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
struct nvme_command *c = nvme_req(rq)->cmd;
struct ib_device *dev;
bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
blk_status_t ret;
@@ -2061,7 +2062,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);

ret = nvme_setup_cmd(ns, rq, c);
ret = nvme_setup_cmd(ns, rq);
if (ret)
goto unmap_qe;

16 changes: 12 additions & 4 deletions drivers/nvme/host/tcp.c
@@ -417,6 +417,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
{
struct nvme_tcp_ctrl *ctrl = set->driver_data;
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_tcp_cmd_pdu *pdu;
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
u8 hdgst = nvme_tcp_hdgst_len(queue);
@@ -427,8 +428,10 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
if (!req->pdu)
return -ENOMEM;

pdu = req->pdu;
req->queue = queue;
nvme_req(rq)->ctrl = &ctrl->ctrl;
nvme_req(rq)->cmd = &pdu->cmd;

return 0;
}
@@ -867,7 +870,7 @@ static void nvme_tcp_state_change(struct sock *sk)
{
struct nvme_tcp_queue *queue;

read_lock(&sk->sk_callback_lock);
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (!queue)
goto done;
@@ -888,7 +891,7 @@ static void nvme_tcp_state_change(struct sock *sk)

queue->state_change(sk);
done:
read_unlock(&sk->sk_callback_lock);
read_unlock_bh(&sk->sk_callback_lock);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
@@ -1875,7 +1878,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)

blk_mq_unquiesce_queue(ctrl->admin_q);

error = nvme_init_identify(ctrl);
error = nvme_init_ctrl_finish(ctrl);
if (error)
goto out_quiesce_queue;

@@ -1963,6 +1966,11 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
goto destroy_admin;
}

if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) {
dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
goto destroy_admin;
}

if (opts->queue_size > ctrl->sqsize + 1)
dev_warn(ctrl->device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
@@ -2259,7 +2267,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
blk_status_t ret;

ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
ret = nvme_setup_cmd(ns, rq);
if (ret)
return ret;

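The two state_change fixes apply the usual rule for sk_callback_lock: the socket callbacks can also run from softirq context, so a reader that may execute in process context takes the _bh variant to avoid deadlocking with a softirq user of the lock on the same CPU. A kernel-style sketch of the pattern (illustrative only, not a standalone build; the function name is invented):

/* Sketch: any reader of sk->sk_callback_lock that can run in process
 * context should disable bottom halves while holding it. */
static void toy_state_change(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);	/* not plain read_lock() */
	/* ... inspect sk->sk_user_data and sk->sk_state ... */
	read_unlock_bh(&sk->sk_callback_lock);
}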
4 changes: 2 additions & 2 deletions drivers/nvme/target/admin-cmd.c
@@ -513,7 +513,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
default:
id->nuse = id->nsze;
break;
}
}

if (req->ns->bdev)
nvmet_bdev_set_limits(req->ns->bdev, id);
@@ -940,7 +940,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
return nvmet_parse_discovery_cmd(req);

ret = nvmet_check_ctrl_status(req, cmd);
ret = nvmet_check_ctrl_status(req);
if (unlikely(ret))
return ret;

6 changes: 6 additions & 0 deletions drivers/nvme/target/configfs.c
@@ -1149,6 +1149,12 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
if (!len)
return -EINVAL;

if (len > NVMET_MN_MAX_SIZE) {
pr_err("Model nubmer size can not exceed %d Bytes\n",
NVMET_MN_MAX_SIZE);
return -EINVAL;
}

for (pos = 0; pos < len; pos++) {
if (!nvmet_is_ascii(page[pos]))
return -EINVAL;
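The new length check reflects the NVMe Identify Controller layout, where the model number (MN) is a fixed 40-byte ASCII field, so nvmet now refuses longer strings up front. A small runnable sketch of the same validation — the constant and helper names are invented, and the character test mirrors nvmet_is_ascii():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TOY_MN_MAX_SIZE 40	/* Identify Controller MN field is 40 bytes */

/* Reject empty, over-long, or non-printable-ASCII model numbers. */
static bool toy_model_number_ok(const char *mn)
{
	size_t len = strlen(mn);
	size_t i;

	if (len == 0 || len > TOY_MN_MAX_SIZE)
		return false;
	for (i = 0; i < len; i++)
		if (mn[i] < 0x20 || mn[i] > 0x7e)
			return false;
	return true;
}

int main(void)
{
	printf("%d\n", toy_model_number_ok("Linux nvmet test device"));
	printf("%d\n", toy_model_number_ok("a model number string that is clearly longer than forty bytes"));
	return 0;
}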
