Merge tag 'nvme-6.14-2025-03-05' of git://git.infradead.org/nvme into block-6.14

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.14

 - TCP use after free fix on polling (Sagi)
 - Controller memory buffer cleanup fixes (Icenowy)
 - Free leaking requests on bad user passthrough commands (Keith)
 - TCP error message fix (Maurizio)
 - TCP corruption fix on partial PDU (Maurizio)
 - TCP memory ordering fix for weakly ordered archs (Meir)
 - Type coercion fix on message error for TCP (Dan)"

* tag 'nvme-6.14-2025-03-05' of git://git.infradead.org/nvme:
  nvme-tcp: fix signedness bug in nvme_tcp_init_connection()
  nvmet-tcp: Fix a possible sporadic response drops in weakly ordered arch
  nvme-tcp: fix potential memory corruption in nvme_tcp_recv_pdu()
  nvme-tcp: Fix a C2HTermReq error message
  nvmet: remove old function prototype
  nvme-ioctl: fix leaked requests on mapping error
  nvme-pci: skip CMB blocks incompatible with PCI P2P DMA
  nvme-pci: clean up CMBMSC when registering CMB fails
  nvme-tcp: fix possible UAF in nvme_tcp_poll
Jens Axboe committed Mar 6, 2025
2 parents e06472b + 528361c commit ca57b5b
Showing 5 changed files with 68 additions and 26 deletions.
12 changes: 8 additions & 4 deletions drivers/nvme/host/ioctl.c
@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	if (!nvme_ctrl_sgl_supported(ctrl))
 		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
 	if (has_metadata) {
-		if (!supports_metadata)
-			return -EINVAL;
+		if (!supports_metadata) {
+			ret = -EINVAL;
+			goto out;
+		}
 		if (!nvme_ctrl_meta_sgl_supported(ctrl))
 			dev_warn_once(ctrl->device,
 				      "using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
-			return -EINVAL;
+		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+			ret = -EINVAL;
+			goto out;
+		}
 		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
 				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
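The two ioctl.c hunks above exist because returning directly from inside nvme_map_user_request() skipped the cleanup path and leaked the request; routing every failure through the "out" label frees it exactly once. A minimal userspace sketch of that centralized-cleanup idiom, with illustrative names (do_work and buf are not from the driver):

#include <stdlib.h>

int do_work(int fail)
{
	int ret = 0;
	char *buf = malloc(64);		/* stands in for the mapped request */

	if (!buf)
		return -1;		/* nothing allocated yet, direct return is safe */
	if (fail) {
		ret = -22;		/* -EINVAL; a bare "return -22" here would leak buf */
		goto out;
	}
	/* ... use buf ... */
out:
	free(buf);			/* single cleanup point, reached on every path */
	return ret;
}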
21 changes: 13 additions & 8 deletions drivers/nvme/host/pci.c
@@ -1982,6 +1982,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 	if (offset > bar_size)
 		return;
 
+	/*
+	 * Controllers may support a CMB size larger than their BAR, for
+	 * example, due to being behind a bridge. Reduce the CMB to the
+	 * reported size of the BAR
+	 */
+	size = min(size, bar_size - offset);
+
+	if (!IS_ALIGNED(size, memremap_compat_align()) ||
+	    !IS_ALIGNED(pci_resource_start(pdev, bar),
+			memremap_compat_align()))
+		return;
+
 	/*
 	 * Tell the controller about the host side address mapping the CMB,
 	 * and enable CMB decoding for the NVMe 1.4+ scheme:
@@ -1992,17 +2004,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
 			dev->bar + NVME_REG_CMBMSC);
 	}
 
-	/*
-	 * Controllers may support a CMB size larger than their BAR,
-	 * for example, due to being behind a bridge. Reduce the CMB to
-	 * the reported size of the BAR
-	 */
-	if (size > bar_size - offset)
-		size = bar_size - offset;
-
 	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
 		dev_warn(dev->ctrl.device,
 			 "failed to register the CMB\n");
+		hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
 		return;
 	}
 
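Moving the size clamp ahead of CMB decoding and adding the alignment test means the driver now rejects a CMB whose size or BAR base cannot meet the mapping requirements of PCI P2P DMA, and the new hi_lo_writeq(0, ...) undoes CMBMSC when registration still fails. A hedged userspace sketch of the screening logic; IS_ALIGNED mirrors the kernel macro for power-of-two alignments, and the 2 MiB constant merely stands in for memremap_compat_align():

#include <stdio.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define COMPAT_ALIGN		(2ULL * 1024 * 1024)	/* stand-in value */

int main(void)
{
	uint64_t bar_start = 0xfe000000ULL;		/* hypothetical BAR base */
	uint64_t bar_size = 16ULL * 1024 * 1024;
	uint64_t offset = 0;
	uint64_t size = 32ULL * 1024 * 1024;	/* controller claims more than the BAR holds */

	if (size > bar_size - offset)		/* clamp the CMB to the BAR */
		size = bar_size - offset;

	if (!IS_ALIGNED(size, COMPAT_ALIGN) ||
	    !IS_ALIGNED(bar_start, COMPAT_ALIGN))
		printf("CMB incompatible with PCI P2P DMA, skipping\n");
	else
		printf("CMB of %llu bytes usable for P2P DMA\n",
		       (unsigned long long)size);
	return 0;
}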
45 changes: 36 additions & 9 deletions drivers/nvme/host/tcp.c
@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
 	return queue - queue->ctrl->queues;
 }
 
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+	switch (type) {
+	case nvme_tcp_c2h_term:
+	case nvme_tcp_c2h_data:
+	case nvme_tcp_r2t:
+	case nvme_tcp_rsp:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /*
  * Check if the queue is TLS encrypted
  */
@@ -775,7 +788,7 @@ static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
 		[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
 		[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
 		[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
-		[NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+		[NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
 		[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
 	};
 
@@ -818,6 +831,16 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		return 0;
 
 	hdr = queue->pdu;
+	if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+		if (!nvme_tcp_recv_pdu_supported(hdr->type))
+			goto unsupported_pdu;
+
+		dev_err(queue->ctrl->ctrl.device,
+			"pdu type %d has unexpected header length (%d)\n",
+			hdr->type, hdr->hlen);
+		return -EPROTO;
+	}
+
 	if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
 		/*
 		 * C2HTermReq never includes Header or Data digests.
@@ -850,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 		nvme_tcp_init_recv_ctx(queue);
 		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
 	default:
-		dev_err(queue->ctrl->ctrl.device,
-			"unsupported pdu type (%d)\n", hdr->type);
-		return -EINVAL;
+		goto unsupported_pdu;
 	}
+
+unsupported_pdu:
+	dev_err(queue->ctrl->ctrl.device,
+		"unsupported pdu type (%d)\n", hdr->type);
+	return -EINVAL;
 }
 
 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
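The hlen check added above is the corruption fix: the receive path sizes its PDU buffer for the common response layout, so a peer-supplied header length that disagrees with sizeof(struct nvme_tcp_rsp_pdu) must be rejected before any further parsing. A hedged sketch of that validate-before-trust pattern with made-up types (wire_hdr and EXPECTED_HLEN are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

struct wire_hdr {
	uint8_t type;
	uint8_t hlen;			/* length claimed by the remote peer */
};

#define EXPECTED_HLEN 24		/* size the receive buffer was allocated for */

static int validate_hdr(const struct wire_hdr *hdr)
{
	if (hdr->hlen != EXPECTED_HLEN) {
		fprintf(stderr, "pdu type %d has unexpected header length (%d)\n",
			hdr->type, hdr->hlen);
		return -1;		/* stands in for -EPROTO */
	}
	return 0;			/* safe to parse the rest of the PDU */
}

int main(void)
{
	struct wire_hdr bad = { .type = 5, .hlen = 200 };	/* oversized claim */

	return validate_hdr(&bad) ? 1 : 0;
}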
@@ -1495,11 +1521,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
 	msg.msg_flags = MSG_WAITALL;
 	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
 			iov.iov_len, msg.msg_flags);
-	if (ret < sizeof(*icresp)) {
+	if (ret >= 0 && ret < sizeof(*icresp))
+		ret = -ECONNRESET;
+	if (ret < 0) {
 		pr_warn("queue %d: failed to receive icresp, error %d\n",
 			nvme_tcp_queue_id(queue), ret);
-		if (ret >= 0)
-			ret = -ECONNRESET;
 		goto free_icresp;
 	}
 	ret = -ENOTCONN;
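The icresp hunk is the signedness fix: kernel_recvmsg() returns a negative errno in a signed int, but sizeof(*icresp) is an unsigned size_t, so the old combined test converted a negative return into a huge unsigned value and let real errors sail past the branch. A small standalone demonstration of the pitfall (the struct is a stand-in for the real icresp PDU):

#include <stdio.h>

struct icresp { char bytes[128]; };	/* stand-in for the real PDU */

int main(void)
{
	int ret = -104;	/* e.g. a failed receive returning -ECONNRESET */

	/* Old check: ret is converted to size_t, so -104 becomes huge
	 * and positive and the error branch is never taken. */
	if (ret < sizeof(struct icresp))
		printf("old check: error caught\n");
	else
		printf("old check: error missed!\n");	/* this prints */

	/* Fixed shape: handle the signed range before the size compare. */
	if (ret >= 0 && ret < (int)sizeof(struct icresp))
		ret = -104;	/* a short read also becomes a connection error */
	if (ret < 0)
		printf("new check: error %d caught\n", ret);
	return 0;
}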
@@ -2699,16 +2725,17 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 {
 	struct nvme_tcp_queue *queue = hctx->driver_data;
 	struct sock *sk = queue->sock->sk;
+	int ret;
 
 	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
 		return 0;
 
 	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
 		sk_busy_loop(sk, true);
-	nvme_tcp_try_recv(queue);
+	ret = nvme_tcp_try_recv(queue);
 	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
-	return queue->nr_cqe;
+	return ret < 0 ? ret : queue->nr_cqe;
 }
 
 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
1 change: 0 additions & 1 deletion drivers/nvme/target/nvmet.h
@@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
 		struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE 16
 #define NVMET_MAX_QUEUE_SIZE 1024
15 changes: 11 additions & 4 deletions drivers/nvme/target/tcp.c
@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 	struct nvmet_tcp_cmd *cmd =
 		container_of(req, struct nvmet_tcp_cmd, req);
 	struct nvmet_tcp_queue *queue = cmd->queue;
+	enum nvmet_tcp_recv_state queue_state;
+	struct nvmet_tcp_cmd *queue_cmd;
 	struct nvme_sgl_desc *sgl;
 	u32 len;
 
-	if (unlikely(cmd == queue->cmd)) {
+	/* Pairs with store_release in nvmet_prepare_receive_pdu() */
+	queue_state = smp_load_acquire(&queue->rcv_state);
+	queue_cmd = READ_ONCE(queue->cmd);
+
+	if (unlikely(cmd == queue_cmd)) {
 		sgl = &cmd->req.cmd->common.dptr.sgl;
 		len = le32_to_cpu(sgl->length);
 
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
 		 * Avoid using helpers, this might happen before
 		 * nvmet_req_init is completed.
 		 */
-		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+		if (queue_state == NVMET_TCP_RECV_PDU &&
 		    len && len <= cmd->req.port->inline_data_size &&
 		    nvme_is_write(cmd->req.cmd))
 			return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
 {
 	queue->offset = 0;
 	queue->left = sizeof(struct nvme_tcp_hdr);
-	queue->cmd = NULL;
-	queue->rcv_state = NVMET_TCP_RECV_PDU;
+	WRITE_ONCE(queue->cmd, NULL);
+	/* Ensure rcv_state is visible only after queue->cmd is set */
+	smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
 }
 
 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
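The nvmet-tcp hunks pair smp_store_release() in nvmet_prepare_receive_pdu() with smp_load_acquire() in nvmet_tcp_queue_response(), so a reader that observes rcv_state == NVMET_TCP_RECV_PDU is also guaranteed to see the earlier write to queue->cmd on weakly ordered architectures. A minimal C11 sketch of the same release/acquire publication pattern (names and values are illustrative, not the target code):

#include <stdatomic.h>
#include <stdio.h>

static int cmd_data;			/* stands in for queue->cmd */
static _Atomic int rcv_state;		/* stands in for queue->rcv_state */

/* Writer: publish the data first, then release-store the state flag. */
static void prepare(void)
{
	cmd_data = 42;
	atomic_store_explicit(&rcv_state, 1, memory_order_release);
}

/* Reader: acquire-load the flag; if it is observed set, the write to
 * cmd_data above is guaranteed visible, even on weakly ordered CPUs. */
static void respond(void)
{
	if (atomic_load_explicit(&rcv_state, memory_order_acquire) == 1)
		printf("state set, cmd_data = %d\n", cmd_data);
}

int main(void)
{
	prepare();
	respond();
	return 0;
}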
