Merge tag 'nvme-5.12-2021-02-11' of git://git.infradead.org/nvme into for-5.12/drivers

Pull NVMe updates from Christoph:

"nvme updates for 5.12:

 - fix multipath handling of ->queue_rq errors (Chao Leng)
 - nvmet cleanups (Chaitanya Kulkarni)
 - add a quirk for buggy Amazon controllers (Filippo Sironi)
 - avoid devm allocations in nvme-hwmon that don't interact well with
   fabrics (Hannes Reinecke)
 - sysfs cleanups (Jiapeng Chong)
 - fix nr_zones for multipath (Keith Busch)
 - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
 - nvmet-tcp fixes (Sagi Grimberg)
 - add a missing __rcu annotation (me)"

* tag 'nvme-5.12-2021-02-11' of git://git.infradead.org/nvme: (22 commits)
  nvme-tcp: fix crash triggered with a dataless request submission
  nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
  nvme-hwmon: rework to avoid devm allocation
  nvmet: remove else at the end of the function
  nvmet: add nvmet_req_subsys() helper
  nvmet: use min of device_path and disk len
  nvmet: use invalid cmd opcode helper
  nvmet: use invalid cmd opcode helper
  nvmet: add helper to report invalid opcode
  nvmet: remove extra variable in id-ns handler
  nvmet: make nvmet_find_namespace() req based
  nvmet: return uniform error for invalid ns
  nvmet: set status to 0 in case for invalid nsid
  nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
  nvme-multipath: set nr_zones for zoned namespaces
  nvmet-tcp: fix potential race of tcp socket closing accept_work
  nvmet-tcp: fix receive data digest calculation for multiple h2cdata PDUs
  nvme-rdma: handle nvme_rdma_post_send failures better
  nvme-fabrics: avoid double completions in nvmf_fail_nonready_command
  nvme: introduce a nvme_host_path_error helper
  ...
Jens Axboe committed Feb 11, 2021
2 parents 5978868 + e11e511 commit 65fb1b0
Showing 18 changed files with 208 additions and 105 deletions.
26 changes: 21 additions & 5 deletions drivers/nvme/host/core.c
@@ -355,6 +355,21 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

/*
* Called to unwind from ->queue_rq on a failed command submission so that the
* multipathing code gets called to potentially fail over to another path.
* The caller needs to unwind all transport-specific resource allocations and
* must propagate the return value.
*/
blk_status_t nvme_host_path_error(struct request *req)
{
nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
blk_mq_set_request_complete(req);
nvme_complete_rq(req);
return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvme_host_path_error);

bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
@@ -2848,7 +2863,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);

return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);

@@ -3541,7 +3556,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);

@@ -3575,7 +3590,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

@@ -3585,7 +3600,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

@@ -3595,7 +3610,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

@@ -4456,6 +4471,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);

void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
6 changes: 1 addition & 5 deletions drivers/nvme/host/fabrics.c
@@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;

nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
blk_mq_start_request(rq);
nvme_complete_rq(rq);
return BLK_STS_OK;
return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
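
A minimal sketch of the submission-error pattern this series converges on, assuming a hypothetical transport driver; the fabrics hunk above and the rdma.c hunk below are the real conversions, and example_unmap_data() here is illustrative, not part of this commit:

/*
 * Sketch only: how a transport's ->queue_rq error path is expected to use
 * nvme_host_path_error() after this series. example_unmap_data() stands in
 * for whatever transport-specific unwinding is required.
 */
static blk_status_t example_queue_rq_error(struct request *rq, int err)
{
	/* Undo transport-specific mappings and allocations first. */
	example_unmap_data(rq);

	/*
	 * A path failure (-EIO here) is marked NVME_SC_HOST_PATH_ERROR and
	 * completed, so nvme-multipath can retry the command on another
	 * path instead of returning the error to the submitter.
	 */
	if (err == -EIO)
		return nvme_host_path_error(rq);
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}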

31 changes: 21 additions & 10 deletions drivers/nvme/host/hwmon.c
@@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {

int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
struct device *dev = ctrl->dev;
struct device *dev = ctrl->device;
struct nvme_hwmon_data *data;
struct device *hwmon;
int err;

data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return 0;

@@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)

err = nvme_hwmon_get_smart_log(data);
if (err) {
dev_warn(ctrl->device,
"Failed to read smart log (error %d)\n", err);
devm_kfree(dev, data);
dev_warn(dev, "Failed to read smart log (error %d)\n", err);
kfree(data);
return err;
}

hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
&nvme_hwmon_chip_info,
NULL);
hwmon = hwmon_device_register_with_info(dev, "nvme",
data, &nvme_hwmon_chip_info,
NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
devm_kfree(dev, data);
kfree(data);
}

ctrl->hwmon_device = hwmon;
return 0;
}

void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
if (ctrl->hwmon_device) {
struct nvme_hwmon_data *data =
dev_get_drvdata(ctrl->hwmon_device);

hwmon_device_unregister(ctrl->hwmon_device);
ctrl->hwmon_device = NULL;
kfree(data);
}
}
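
The hwmon hunks above replace devm-managed allocation and registration with explicit kzalloc()/hwmon_device_register_with_info() plus a new nvme_hwmon_exit(), since, per the commit list, the devm variants "don't interact well with fabrics". A minimal sketch of the resulting init/exit pairing in a controller's lifecycle; the two wrapper functions are hypothetical, and the real teardown call site is the nvme_uninit_ctrl() hunk in core.c above:

/* Sketch only: the wrapper functions below are hypothetical. */
static int example_ctrl_bringup(struct nvme_ctrl *ctrl)
{
	/*
	 * Registers the hwmon device against ctrl->device; allocation
	 * failure is treated as non-fatal (nvme_hwmon_init() returns 0).
	 */
	return nvme_hwmon_init(ctrl);
}

static void example_ctrl_teardown(struct nvme_ctrl *ctrl)
{
	/* Unregisters the hwmon device and frees its private data. */
	nvme_hwmon_exit(ctrl);
}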
4 changes: 4 additions & 0 deletions drivers/nvme/host/multipath.c
@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
#endif
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
15 changes: 15 additions & 0 deletions drivers/nvme/host/nvme.h
@@ -144,6 +144,12 @@ enum nvme_quirks {
* NVMe 1.3 compliance.
*/
NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),

/*
* The controller does not properly handle DMA addresses over
* 48 bits.
*/
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
};

/*
@@ -246,6 +252,9 @@ struct nvme_ctrl {
struct rw_semaphore namespaces_rwsem;
struct device ctrl_device;
struct device *device; /* char device */
#ifdef CONFIG_NVME_HWMON
struct device *hwmon_device;
#endif
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
@@ -575,6 +584,7 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}

void nvme_complete_rq(struct request *req);
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
@@ -811,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
21 changes: 20 additions & 1 deletion drivers/nvme/host/pci.c
@@ -2362,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int dma_address_bits = 64;

if (pci_enable_device_mem(pdev))
return result;

pci_set_master(pdev);

if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
dma_address_bits = 48;
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
goto disable;

if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -3257,6 +3260,22 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
4 changes: 3 additions & 1 deletion drivers/nvme/host/rdma.c
@@ -2098,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err_unmap:
nvme_rdma_unmap_data(queue, rq);
err:
if (err == -ENOMEM || err == -EAGAIN)
if (err == -EIO)
ret = nvme_host_path_error(rq);
else if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
2 changes: 1 addition & 1 deletion drivers/nvme/host/tcp.c
@@ -2271,7 +2271,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
if (req->curr_bio)
if (req->curr_bio && req->data_len)
nvme_tcp_init_iter(req, rq_data_dir(rq));

if (rq_data_dir(rq) == WRITE &&
59 changes: 24 additions & 35 deletions drivers/nvme/target/admin-cmd.c
@@ -75,15 +75,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
u64 host_reads, host_writes, data_units_read, data_units_written;
u16 status;

req->ns = nvmet_find_namespace(req->sq->ctrl,
req->cmd->get_log_page.nsid);
if (!req->ns) {
pr_err("Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
req->error_loc = offsetof(struct nvme_rw_command, nsid);
return NVME_SC_INVALID_NS;
}
status = nvmet_req_find_ns(req);
if (status)
return status;

/* we don't have the right data for file backed ns */
if (!req->ns->bdev)
@@ -466,9 +462,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ns *id;
u16 status = 0;
u16 status;

if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -483,9 +478,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}

/* return an all zeroed buffer if we can't find an active namespace */
req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
if (!req->ns) {
status = NVME_SC_INVALID_NS;
status = nvmet_req_find_ns(req);
if (status) {
status = 0;
goto done;
}

@@ -527,7 +522,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)

id->lbaf[0].ds = req->ns->blksize_shift;

if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
NVME_NS_DPC_PI_TYPE3;
@@ -604,15 +599,12 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,

static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
u16 status = 0;
off_t off = 0;
u16 status;

req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
if (!req->ns) {
req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
status = nvmet_req_find_ns(req);
if (status)
goto out;
}

if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
@@ -691,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u16 status;

req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
if (unlikely(!req->ns)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
status = nvmet_req_find_ns(req);
if (status)
return status;
}

mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -752,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)

void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
@@ -796,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 result;

req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
if (!req->ns) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
return NVME_SC_INVALID_NS | NVME_SC_DNR;
}
result = nvmet_req_find_ns(req);
if (result)
return result;

mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -827,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)

void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;

@@ -934,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)

if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
return nvmet_parse_discovery_cmd(req);

ret = nvmet_check_ctrl_status(req, cmd);
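
The admin-cmd.c hunks above lean on two helpers introduced elsewhere in this series and not shown here: nvmet_req_subsys() (added to drivers/nvme/target/nvmet.h) and nvmet_req_find_ns() (added to drivers/nvme/target/core.c). A rough sketch of what they provide, reconstructed from the call sites above; treat the bodies as an approximation rather than the exact patch text:

/* Approximation reconstructed from the call sites, not the verbatim patch. */
static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

/*
 * Looks up the namespace named by the command's nsid, stores it in req->ns
 * and takes a reference; on failure it sets req->error_loc and returns the
 * uniform NVME_SC_INVALID_NS | NVME_SC_DNR status now shared by all callers.
 */
u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return 0;
}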