Skip to content

Commit

Permalink
Merge branch 'vsock-virtio-vhost-msg_zerocopy-preparations'
Browse files Browse the repository at this point in the history
Arseniy Krasnov says:

====================
vsock/virtio/vhost: MSG_ZEROCOPY preparations

this patchset is the first of three parts of another big patchset for
MSG_ZEROCOPY flag support:
https://lore.kernel.org/netdev/20230701063947.3422088-1-AVKrasnov@sberdevices.ru/

During review of this series, Stefano Garzarella <sgarzare@redhat.com>
suggested to split it for three parts to simplify review and merging:

1) virtio and vhost updates (for fragged skbs) <--- this patchset
2) AF_VSOCK updates (allows to enable MSG_ZEROCOPY mode and read
   tx completions) and update for Documentation/.
3) Updates for tests and utils.

This series enables handling of fragged skbs in the virtio and vhost parts.
The new logic won't be triggered yet, because the SO_ZEROCOPY option is
still impossible to enable at this moment (the next batch of patches from
the big set above will enable it).
====================

Link: https://lore.kernel.org/r/20230916130918.4105122-1-avkrasnov@salutedevices.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
  • Loading branch information
Paolo Abeni committed Sep 21, 2023
2 parents b3af9c0 + 581512a commit 71b263e
Show file tree
Hide file tree
Showing 5 changed files with 348 additions and 87 deletions.
14 changes: 9 additions & 5 deletions drivers/vhost/vsock.c
Original file line number Diff line number Diff line change
Expand Up @@ -114,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
u32 offset;
int head;

skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
Expand Down Expand Up @@ -156,7 +157,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
}

iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
payload_len = skb->len;
offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
payload_len = skb->len - offset;
hdr = virtio_vsock_hdr(skb);

/* If the packet is greater than the space available in the
Expand Down Expand Up @@ -197,8 +199,10 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}

nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
if (skb_copy_datagram_iter(skb,
offset,
&iov_iter,
payload_len)) {
kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
Expand All @@ -212,13 +216,13 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;

skb_pull(skb, payload_len);
VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
total_len += payload_len;

/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
if (skb->len > 0) {
if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
hdr->flags |= cpu_to_le32(flags_to_restore);

/* We are queueing the same skb to handle
Expand Down
10 changes: 10 additions & 0 deletions include/linux/virtio_vsock.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
/* Per-skb control data for virtio vsock, stored in skb->cb
 * (accessed through the VIRTIO_VSOCK_SKB_CB() macro).
 */
struct virtio_vsock_skb_cb {
	/* Packet is a reply — presumably marks skbs that answer a peer
	 * request; TODO confirm against the transport code setting it.
	 */
	bool reply;
	/* Packet has already been delivered to the vsock tap device,
	 * so it is not delivered twice — NOTE(review): inferred from the
	 * name; verify against virtio_transport_deliver_tap_pkt().
	 */
	bool tap_delivered;
	/* Number of payload bytes already transmitted from this skb.
	 * The vhost tx path advances this instead of skb_pull() so a
	 * partially-sent (possibly fragged) skb can be requeued and
	 * resumed with the next available buffer.
	 */
	u32 offset;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
Expand Down Expand Up @@ -159,6 +160,15 @@ struct virtio_transport {

/* Takes ownership of the packet */
int (*send_pkt)(struct sk_buff *skb);

/* Used in MSG_ZEROCOPY mode. Checks, that provided data
* (number of buffers) could be transmitted with zerocopy
* mode. If this callback is not implemented for the current
* transport - this means that this transport doesn't need
* extra checks and can perform zerocopy transmission by
* default.
*/
bool (*can_msgzerocopy)(int bufs_num);
};

ssize_t
Expand Down
12 changes: 8 additions & 4 deletions include/trace/events/vsock_virtio_transport_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,15 +43,17 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__u32 len,
__u16 type,
__u16 op,
__u32 flags
__u32 flags,
bool zcopy
),
TP_ARGS(
src_cid, src_port,
dst_cid, dst_port,
len,
type,
op,
flags
flags,
zcopy
),
TP_STRUCT__entry(
__field(__u32, src_cid)
Expand All @@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__field(__u16, type)
__field(__u16, op)
__field(__u32, flags)
__field(bool, zcopy)
),
TP_fast_assign(
__entry->src_cid = src_cid;
Expand All @@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__entry->type = type;
__entry->op = op;
__entry->flags = flags;
__entry->zcopy = zcopy;
),
TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
__entry->src_cid, __entry->src_port,
__entry->dst_cid, __entry->dst_port,
__entry->len,
show_type(__entry->type),
show_op(__entry->op),
__entry->flags)
__entry->flags, __entry->zcopy ? "true" : "false")
);

TRACE_EVENT(virtio_transport_recv_pkt,
Expand Down
92 changes: 85 additions & 7 deletions net/vmw_vsock/virtio_transport.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,17 @@ struct virtio_vsock {

u32 guest_cid;
bool seqpacket_allow;

/* These fields are used only in tx path in function
* 'virtio_transport_send_pkt_work()', so to save
* stack space in it, place both of them here. Each
* pointer from 'out_sgs' points to the corresponding
* element in 'out_bufs' - this is initialized in
* 'virtio_vsock_probe()'. Both fields are protected
* by 'tx_lock'. +1 is needed for packet header.
*/
struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};

static u32 virtio_transport_get_local_cid(void)
Expand Down Expand Up @@ -100,8 +111,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];

for (;;) {
struct scatterlist hdr, buf, *sgs[2];
int ret, in_sg = 0, out_sg = 0;
struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;

Expand All @@ -111,12 +122,43 @@ virtio_transport_send_pkt_work(struct work_struct *work)

virtio_transport_deliver_tap_pkt(skb);
reply = virtio_vsock_skb_reply(skb);

sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
sgs[out_sg++] = &hdr;
if (skb->len > 0) {
sg_init_one(&buf, skb->data, skb->len);
sgs[out_sg++] = &buf;
sgs = vsock->out_sgs;
sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
sizeof(*virtio_vsock_hdr(skb)));
out_sg++;

if (!skb_is_nonlinear(skb)) {
if (skb->len > 0) {
sg_init_one(sgs[out_sg], skb->data, skb->len);
out_sg++;
}
} else {
struct skb_shared_info *si;
int i;

/* If skb is nonlinear, then its buffer must contain
* only header and nothing more. Data is stored in
* the fragged part.
*/
WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));

si = skb_shinfo(skb);

for (i = 0; i < si->nr_frags; i++) {
skb_frag_t *skb_frag = &si->frags[i];
void *va;

/* We will use 'page_to_virt()' for the userspace page
* here, because virtio or dma-mapping layers will call
* 'virt_to_phys()' later to fill the buffer descriptor.
* We don't touch memory at "virtual" address of this page.
*/
va = page_to_virt(skb_frag->bv_page);
sg_init_one(sgs[out_sg],
va + skb_frag->bv_offset,
skb_frag->bv_len);
out_sg++;
}
}

ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
Expand Down Expand Up @@ -413,6 +455,37 @@ static void virtio_vsock_rx_done(struct virtqueue *vq)
queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

/* Decide whether a MSG_ZEROCOPY transmission consisting of 'bufs_num'
 * buffers can be handled by the currently active virtio transport.
 * Returns false when no device is attached or when the data would not
 * fit into the tx virtqueue in one piece.
 */
static bool virtio_transport_can_msgzerocopy(int bufs_num)
{
	struct virtio_vsock *vsock;
	bool allowed = false;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock) {
		struct virtqueue *tx_vq = vsock->vqs[VSOCK_VQ_TX];

		/* The whole payload must fit into the tx queue at once:
		 * when there is not enough free space, the current skb is
		 * reinserted at the head of the socket's tx list to retry
		 * later, so an skb larger than the entire queue would be
		 * reinserted forever, starving all other skbs. Each page
		 * of the user buffer becomes one buffer in the virtqueue,
		 * hence the page count is compared against the queue's
		 * maximum capacity.
		 */
		allowed = (bufs_num <= tx_vq->num_max);
	}
	rcu_read_unlock();

	return allowed;
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
Expand Down Expand Up @@ -462,6 +535,7 @@ static struct virtio_transport virtio_transport = {
},

.send_pkt = virtio_transport_send_pkt,
.can_msgzerocopy = virtio_transport_can_msgzerocopy,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
Expand Down Expand Up @@ -621,6 +695,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = NULL;
int ret;
int i;

ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
if (ret)
Expand Down Expand Up @@ -663,6 +738,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;

for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
vsock->out_sgs[i] = &vsock->out_bufs[i];

rcu_assign_pointer(the_virtio_vsock, vsock);

mutex_unlock(&the_virtio_vsock_mutex);
Expand Down
Loading

0 comments on commit 71b263e

Please sign in to comment.