From a208fc56721775987c1b86e20d86d7e0d017c0b2 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Tue, 16 Nov 2021 16:49:18 +0100
Subject: [PATCH 01/10] nvmet-tcp: fix a race condition between release_queue and io_work

If the initiator executes a reset controller operation while performing
I/O, the target kernel will crash because of a race condition between
release_queue and io_work; nvmet_tcp_uninit_data_in_cmds() may be
executed while io_work is running. Calling flush_work() was not
sufficient to prevent this because io_work could requeue itself.

Fix this bug by using cancel_work_sync() to prevent io_work from
requeuing itself, and set rcv_state to NVMET_TCP_RECV_ERR to make sure
we don't receive any more data from the socket.

Signed-off-by: Maurizio Lombardi
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: John Meneghini
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 84c387e4bf431..18f36256095f6 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1437,7 +1437,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	mutex_unlock(&nvmet_tcp_queue_mutex);
 
 	nvmet_tcp_restore_socket_callbacks(queue);
-	flush_work(&queue->io_work);
+	cancel_work_sync(&queue->io_work);
+	/* stop accepting incoming data */
+	queue->rcv_state = NVMET_TCP_RECV_ERR;
 
 	nvmet_tcp_uninit_data_in_cmds(queue);
 	nvmet_sq_destroy(&queue->nvme_sq);

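For context on the fix above: flush_work() only waits for the currently
executing instance of a work item, while cancel_work_sync() additionally
rejects re-queueing attempts while the cancellation is in progress. A
minimal, self-contained sketch of that distinction (generic names, not
taken from the nvmet code, and the requeue is unconditional only for
brevity):

/*
 * Illustrative only -- a self-requeueing work item, like io_work,
 * cannot be reliably stopped with flush_work() alone.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *w)
{
	/* ... process pending data ... */

	/* Requeue to keep running (unconditional here for brevity). */
	schedule_work(w);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	schedule_work(&demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * flush_work(&demo_work) only waits for the instance that is
	 * currently running; by then the handler has already requeued
	 * itself, so the work can run again during teardown.
	 * cancel_work_sync() waits for the running instance and makes
	 * re-queueing attempts fail while the cancel is in progress.
	 */
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
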
From 69b85e1f1d1d1e49601ec3e85d2031188657cca2 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Tue, 16 Nov 2021 16:49:19 +0100
Subject: [PATCH 02/10] nvmet-tcp: add a helper to free the cmd buffers

Makes the code easier to read and to debug. Sets the freed pointers to
NULL; this will be useful when destroying the queues to understand
whether the commands' buffers have already been released or not.

Signed-off-by: Maurizio Lombardi
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: John Meneghini
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 18f36256095f6..786b1440a9af4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq;
 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
 static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
 
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
 		struct nvmet_tcp_cmd *cmd)
@@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
 	return 0;
 }
 
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+	WARN_ON(unlikely(cmd->nr_mapped > 0));
+
+	kfree(cmd->iov);
+	sgl_free(cmd->req.sg);
+	cmd->iov = NULL;
+	cmd->req.sg = NULL;
+}
+
 static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 {
 	struct scatterlist *sg;
@@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
 
 	for (i = 0; i < cmd->nr_mapped; i++)
 		kunmap(sg_page(&sg[i]));
+
+	cmd->nr_mapped = 0;
 }
 
 static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 
 	return 0;
 err:
-	sgl_free(cmd->req.sg);
+	nvmet_tcp_free_cmd_buffers(cmd);
 	return NVME_SC_INTERNAL;
 }
 
@@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 		}
 	}
 
-	if (queue->nvme_sq.sqhd_disabled) {
-		kfree(cmd->iov);
-		sgl_free(cmd->req.sg);
-	}
+	if (queue->nvme_sq.sqhd_disabled)
+		nvmet_tcp_free_cmd_buffers(cmd);
 
 	return 1;
 
@@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
 	if (left)
 		return -EAGAIN;
 
-	kfree(cmd->iov);
-	sgl_free(cmd->req.sg);
+	nvmet_tcp_free_cmd_buffers(cmd);
 	cmd->queue->snd_cmd = NULL;
 	nvmet_tcp_put_cmd(cmd);
 	return 1;
@@ -1406,8 +1417,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
 {
 	nvmet_req_uninit(&cmd->req);
 	nvmet_tcp_unmap_pdu_iovec(cmd);
-	kfree(cmd->iov);
-	sgl_free(cmd->req.sg);
+	nvmet_tcp_free_cmd_buffers(cmd);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)

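The value of clearing the pointers in the new helper is that the cleanup
becomes harmless to repeat during teardown and an already-released
buffer is immediately recognizable while debugging. A generic userspace
sketch of the same pattern (illustrative names, not the driver's code):

/*
 * Illustrative only -- "free and clear".  free(NULL) is a no-op (as is
 * kfree(NULL)), so once the pointers are cleared the helper can run
 * again without double-freeing.
 */
#include <stdlib.h>

struct cmd_buffers {
	void *iov;	/* stands in for cmd->iov */
	void *sg;	/* stands in for cmd->req.sg */
};

static void cmd_free_buffers(struct cmd_buffers *c)
{
	free(c->iov);
	free(c->sg);
	c->iov = NULL;
	c->sg = NULL;
}
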
From af21250bb503a02e705b461886321e394b300524 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Tue, 16 Nov 2021 16:49:20 +0100
Subject: [PATCH 03/10] nvmet-tcp: fix memory leak when performing a controller reset

If a controller reset is executed while the initiator is performing
some I/O, the driver may leak the memory allocated for the commands'
iovec. Make sure that nvmet_tcp_uninit_data_in_cmds() releases all the
memory.

Signed-off-by: Maurizio Lombardi
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: John Meneghini
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 786b1440a9af4..605aa2a8ca536 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1427,7 +1427,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
 
 	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
 		if (nvmet_tcp_need_data_in(cmd))
-			nvmet_tcp_finish_cmd(cmd);
+			nvmet_req_uninit(&cmd->req);
+
+		nvmet_tcp_unmap_pdu_iovec(cmd);
+		nvmet_tcp_free_cmd_buffers(cmd);
 	}
 
 	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {

From 102110efdff6beedece6ab9b51664c32ac01e2db Mon Sep 17 00:00:00 2001
From: Varun Prakash
Date: Mon, 22 Nov 2021 15:38:41 +0530
Subject: [PATCH 04/10] nvmet-tcp: fix incomplete data digest send

The current nvmet_try_send_ddgst() code does not check whether all data
digest bytes have been transmitted; fix this by returning -EAGAIN when
some data digest bytes are still left to send.

Fixes: 872d26a391da ("nvmet-tcp: add NVMe over TCP target driver")
Signed-off-by: Varun Prakash
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/tcp.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 605aa2a8ca536..cb6a473c3eafa 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -711,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
 	struct nvmet_tcp_queue *queue = cmd->queue;
+	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
 	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 	struct kvec iov = {
 		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
-		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+		.iov_len = left
 	};
 	int ret;
 
@@ -728,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 		return ret;
 
 	cmd->offset += ret;
+	left -= ret;
+
+	if (left)
+		return -EAGAIN;
 
 	if (queue->nvme_sq.sqhd_disabled) {
 		cmd->queue->snd_cmd = NULL;

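The fix above is an instance of the usual short-send rule for
non-blocking sockets: a send may transfer fewer bytes than requested, so
the sender has to track how much of a fixed-length trailer is still
outstanding instead of assuming the whole thing went out. A generic
sketch of that pattern (plain POSIX sockets; names and the trailer
length are illustrative, not the driver's code):

/* Illustrative only -- returns 1 when fully sent, -EAGAIN to retry
 * later, or a negative errno on a real error. */
#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

#define TRAILER_LEN 4	/* e.g. a CRC-style digest trailer */

static int try_send_trailer(int fd, const unsigned char *trailer,
			    size_t *offset)
{
	while (*offset < TRAILER_LEN) {
		ssize_t ret = send(fd, trailer + *offset,
				   TRAILER_LEN - *offset, MSG_DONTWAIT);

		if (ret < 0)
			return (errno == EAGAIN || errno == EWOULDBLOCK) ?
				-EAGAIN : -errno;
		*offset += ret;
	}
	return 1;
}
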
From 1d3ef9c3a39e04be31155c27ebf80342350c3abf Mon Sep 17 00:00:00 2001
From: Varun Prakash
Date: Tue, 23 Nov 2021 16:28:56 +0530
Subject: [PATCH 05/10] nvme-tcp: validate R2T PDU in nvme_tcp_handle_r2t()

If maxh2cdata < r2t_length then the driver will form multiple H2CData
PDUs; validate the R2T PDU in nvme_tcp_handle_r2t() so that
nvme_tcp_setup_h2c_data_pdu() can be reused.

Also set req->state to NVME_TCP_SEND_H2C_PDU in
nvme_tcp_setup_h2c_data_pdu().

Signed-off-by: Varun Prakash
Reviewed-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 55 ++++++++++++++++++-----------------------
 1 file changed, 24 insertions(+), 31 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 33bc83d8d9928..5f8ad4d4ac8cf 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 	return ret;
 }
 
-static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 		struct nvme_tcp_r2t_pdu *pdu)
 {
 	struct nvme_tcp_data_pdu *data = req->pdu;
@@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 	u8 hdgst = nvme_tcp_hdgst_len(queue);
 	u8 ddgst = nvme_tcp_ddgst_len(queue);
 
+	req->state = NVME_TCP_SEND_H2C_PDU;
+	req->offset = 0;
 	req->pdu_len = le32_to_cpu(pdu->r2t_length);
 	req->pdu_sent = 0;
 
-	if (unlikely(!req->pdu_len)) {
-		dev_err(queue->ctrl->ctrl.device,
-			"req %d r2t len is %u, probably a bug...\n",
-			rq->tag, req->pdu_len);
-		return -EPROTO;
-	}
-
-	if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
-		dev_err(queue->ctrl->ctrl.device,
-			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
-			rq->tag, req->pdu_len, req->data_len,
-			req->data_sent);
-		return -EPROTO;
-	}
-
-	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
-		dev_err(queue->ctrl->ctrl.device,
-			"req %d unexpected r2t offset %u (expected %zu)\n",
-			rq->tag, le32_to_cpu(pdu->r2t_offset),
-			req->data_sent);
-		return -EPROTO;
-	}
-
 	memset(data, 0, sizeof(*data));
 	data->hdr.type = nvme_tcp_h2c_data;
 	data->hdr.flags = NVME_TCP_F_DATA_LAST;
@@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 	data->command_id = nvme_cid(rq);
 	data->data_offset = pdu->r2t_offset;
 	data->data_length = cpu_to_le32(req->pdu_len);
-	return 0;
 }
 
 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
@@ -630,7 +608,7 @@
 {
 	struct nvme_tcp_request *req;
 	struct request *rq;
-	int ret;
+	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
 
 	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
 	if (!rq) {
@@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
 	}
 	req = blk_mq_rq_to_pdu(rq);
 
-	ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
-	if (unlikely(ret))
-		return ret;
+	if (unlikely(!r2t_length)) {
+		dev_err(queue->ctrl->ctrl.device,
+			"req %d r2t len is %u, probably a bug...\n",
+			rq->tag, r2t_length);
+		return -EPROTO;
+	}
 
-	req->state = NVME_TCP_SEND_H2C_PDU;
-	req->offset = 0;
+	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+		dev_err(queue->ctrl->ctrl.device,
+			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
+			rq->tag, r2t_length, req->data_len, req->data_sent);
+		return -EPROTO;
+	}
+
+	if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+		dev_err(queue->ctrl->ctrl.device,
+			"req %d unexpected r2t offset %u (expected %zu)\n",
+			rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+		return -EPROTO;
+	}
 
+	nvme_tcp_setup_h2c_data_pdu(req, pdu);
 	nvme_tcp_queue_request(req, false, true);
 
 	return 0;

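The checks that now live in nvme_tcp_handle_r2t() all reduce to
validating a peer-supplied offset/length pair against the request's own
accounting before it is used to build an H2CData PDU. A generic sketch
of that shape (names and types are illustrative, not the driver's code):

/* Illustrative only -- validate a solicitation before acting on it. */
#include <stdbool.h>
#include <stdint.h>

struct xfer_state {
	uint32_t data_len;	/* total bytes this request may transfer */
	uint32_t data_sent;	/* bytes already handed to the peer */
};

static bool r2t_is_valid(const struct xfer_state *x,
			 uint32_t r2t_offset, uint32_t r2t_length)
{
	if (r2t_length == 0)
		return false;	/* zero-length solicitation: protocol error */
	if ((uint64_t)x->data_sent + r2t_length > x->data_len)
		return false;	/* would read past the end of the request */
	if (r2t_offset < x->data_sent)
		return false;	/* asks again for data already sent */
	return true;
}
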
From a5053c92b3db71c3f7f9f13934ca620632828d06 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Wed, 3 Nov 2021 09:18:17 +0100
Subject: [PATCH 06/10] nvme-tcp: fix memory leak when freeing a queue

Release the page frag cache when tearing down the io queues.

Signed-off-by: Maurizio Lombardi
Reviewed-by: Sagi Grimberg
Reviewed-by: John Meneghini
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 5f8ad4d4ac8cf..4ceb28675fdf6 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1225,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
+	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 
@@ -1234,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	if (queue->hdr_digest || queue->data_digest)
 		nvme_tcp_free_crypto(queue);
 
+	if (queue->pf_cache.va) {
+		page = virt_to_head_page(queue->pf_cache.va);
+		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+		queue->pf_cache.va = NULL;
+	}
 	sock_release(queue->sock);
 	kfree(queue->pdu);
 	mutex_destroy(&queue->send_mutex);

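For context on the leak: a struct page_frag_cache keeps its own
references (pagecnt_bias) on the current backing page, so a queue that
allocates PDUs from such a cache has to drain it on teardown or the
page is never returned. A generic sketch of the pairing (illustrative
names, not the driver's code):

/* Illustrative only -- page_frag_alloc() users must drain the cache. */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page_frag_cache demo_pf_cache;

static void *demo_pdu_alloc(unsigned int size)
{
	return page_frag_alloc(&demo_pf_cache, size, GFP_KERNEL);
}

static void demo_pf_cache_drain(void)
{
	if (demo_pf_cache.va) {
		struct page *page = virt_to_head_page(demo_pf_cache.va);

		__page_frag_cache_drain(page, demo_pf_cache.pagecnt_bias);
		demo_pf_cache.va = NULL;
	}
}
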
From 5a6254d55e2a9f7919ead8580d7aa0c7a382b26a Mon Sep 17 00:00:00 2001
From: Enzo Matsumiya
Date: Fri, 5 Nov 2021 23:08:57 -0300
Subject: [PATCH 07/10] nvme-pci: add NO APST quirk for Kioxia device

This particular Kioxia device times out and aborts I/O during any load,
but it's more easily observable with discards (fstrim). The device gets
into a state in which it is also not possible to use "nvme set-feature"
to disable APST. Booting with nvme_core.default_ps_max_latency=0 solves
the issue.

We had a dozen or so of these devices behaving this same way in
customer environments.

Signed-off-by: Enzo Matsumiya
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4b5de8f5435a5..50a98f56da88c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2469,6 +2469,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
 		.vid = 0x14a4,
 		.fr = "22301111",
 		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+	},
+	{
+		/*
+		 * This Kioxia CD6-V Series / HPE PE8030 device times out and
+		 * aborts I/O during any load, but more easily reproducible
+		 * with discards (fstrim).
+		 *
+		 * The device is left in a state where it is also not possible
+		 * to use "nvme set-feature" to disable APST, but booting with
+		 * nvme_core.default_ps_max_latency=0 works.
+		 */
+		.vid = 0x1e0f,
+		.mn = "KCD6XVUL6T40",
+		.quirks = NVME_QUIRK_NO_APST,
 	}
 };

From 8e8aaf512a91ae44d40647a88b51326c7b0a70a8 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Fri, 12 Nov 2021 15:16:12 +0100
Subject: [PATCH 08/10] nvme-fabrics: ignore invalid fast_io_fail_tmo values

Valid fast_io_fail_tmo values are integers >= 0 or -1 (disabled).
Prevent userspace from setting arbitrary negative values.

Signed-off-by: Maurizio Lombardi
Reviewed-by: Sagi Grimberg
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/fabrics.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index c5a2b71c52686..282d54117e0ac 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			if (token >= 0)
 				pr_warn("I/O fail on reconnect controller after %d sec\n",
 					token);
+			else
+				token = -1;
+
 			opts->fast_io_fail_tmo = token;
 			break;
 		case NVMF_OPT_HOSTNQN:

From 00b33cf3da726757aef636365bb52e9536434e9a Mon Sep 17 00:00:00 2001
From: Klaus Jensen
Date: Wed, 10 Nov 2021 10:19:06 +0100
Subject: [PATCH 09/10] nvme: fix write zeroes pi

Write Zeroes sets PRACT when block integrity is enabled (as it should),
but neglects to also set the reftag, which is expected by reads. This
causes protection errors on reads.

Fix this by setting the reftag for type 1 and 2 (for type 3, reads will
not check the reftag).

Signed-off-by: Klaus Jensen
Reviewed-by: Martin K. Petersen
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/core.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 50a98f56da88c..4c63564adeaa6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -895,10 +895,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
 	cmnd->write_zeroes.length =
 		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-	if (nvme_ns_has_pi(ns))
+
+	if (nvme_ns_has_pi(ns)) {
 		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
-	else
-		cmnd->write_zeroes.control = 0;
+
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			cmnd->write_zeroes.reftag =
+				cpu_to_le32(t10_pi_ref_tag(req));
+			break;
+		}
+	}
+
 	return BLK_STS_OK;
 }

From c024b226a417c4eb9353ff500b1c823165d4d508 Mon Sep 17 00:00:00 2001
From: Maurizio Lombardi
Date: Mon, 22 Nov 2021 11:08:27 +0100
Subject: [PATCH 10/10] nvmet: use IOCB_NOWAIT only if the filesystem supports it

Submit I/O requests with the IOCB_NOWAIT flag set only if the
underlying filesystem supports it.

Fixes: 50a909db36f2 ("nvmet: use IOCB_NOWAIT for file-ns buffered I/O")
Signed-off-by: Maurizio Lombardi
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/target/io-cmd-file.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6aa30f30b572e..6be6e59d273bb 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -8,6 +8,7 @@
 #include <linux/uio.h>
 #include <linux/falloc.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include "nvmet.h"
 
 #define NVMET_MAX_MPOOL_BVEC	16
@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
 
 	if (req->ns->buffered_io) {
 		if (likely(!req->f.mpool_alloc) &&
-				nvmet_file_execute_io(req, IOCB_NOWAIT))
+		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
+		    nvmet_file_execute_io(req, IOCB_NOWAIT))
 			return;
 		nvmet_file_submit_buffered_io(req);
 	} else

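The added f_mode check in the last patch follows the general rule that
IOCB_NOWAIT is only meaningful on files that advertise FMODE_NOWAIT;
otherwise the submission has to fall back to a path that may block (in
nvmet, the buffered-I/O work item). A generic sketch of the guard in
isolation (illustrative names, not the driver's code):

/* Illustrative only -- derive kiocb flags from what the file supports. */
#include <linux/fs.h>
#include <linux/types.h>

static int demo_rw_ki_flags(struct file *file, bool want_nowait)
{
	int ki_flags = 0;

	if (want_nowait && (file->f_mode & FMODE_NOWAIT))
		ki_flags |= IOCB_NOWAIT;

	return ki_flags;
}
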