vhost_net: fix possible infinite loop
commit e2412c0 upstream.

When the rx buffer is too small for a packet, we will discard the vq
descriptor and retry it for the next packet:

while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
					      &busyloop_intr))) {
...
	/* On overrun, truncate and discard */
	if (unlikely(headcount > UIO_MAXIOV)) {
		iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
		err = sock->ops->recvmsg(sock, &msg,
					 1, MSG_DONTWAIT | MSG_TRUNC);
		pr_debug("Discarded rx packet: len %zd\n", sock_len);
		continue;
	}
...
}

This makes it possible to trigger an infinite while..continue loop
through the co-operation of two VMs like:

1) Malicious VM1 allocates a 1 byte rx buffer and tries to slow down the
   vhost process as much as possible, e.g. by using indirect descriptors
   or other means.
2) Malicious VM2 generates packets to VM1 as fast as possible.

Fix this by checking against the weight at the end of the RX and TX
loops. This also eliminates other similar cases when:

- userspace is consuming the packets in the meanwhile
- a theoretical TOCTOU attack where the guest moves the avail index back
  and forth to hit the continue path right after vhost finds the guest
  has just added new buffers

This addresses CVE-2019-3900.
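
For reference, the weight check applied at the end of both loops is
roughly the helper below. This is a sketch modeled on the upstream
vhost_exceeds_weight() introduced by the same series; the exact field
names and form in this stable branch may differ.

	/* Sketch (assumed, based on the upstream helper): once a queue has
	 * processed too many packets or bytes in one run, requeue the work
	 * and return true so the caller leaves its do/while loop.
	 */
	static bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
					 int pkts, int total_len)
	{
		struct vhost_dev *dev = vq->dev;

		if (unlikely(total_len >= dev->byte_weight) ||
		    unlikely(pkts >= dev->weight)) {
			vhost_poll_queue(&vq->poll);	/* finish the rest later */
			return true;
		}

		return false;
	}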

Fixes: d8316f3 ("vhost: fix total length when packets are too short")
Fixes: 3a4d5c9 ("vhost_net: a kernel-level virtio server")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Balbir Singh <sblbir@amzn.com>
Jason Wang authored and Greg Kroah-Hartman committed Jul 10, 2019
1 parent c051fb9 commit ae44674
Showing 1 changed file with 10 additions and 10 deletions.
drivers/vhost/net.c
@@ -482,7 +482,7 @@ static void handle_tx(struct vhost_net *net)
 	hdr_size = nvq->vhost_hlen;
 	zcopy = nvq->ubufs;
 
-	for (;;) {
+	do {
 		/* Release DMAs done buffers first */
 		if (zcopy)
 			vhost_zerocopy_signal_used(net, vq);
@@ -578,10 +578,7 @@ static void handle_tx(struct vhost_net *net)
 		else
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
-		if (unlikely(vhost_exceeds_weight(vq, ++sent_pkts,
-						  total_len)))
-			break;
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
 out:
 	mutex_unlock(&vq->mutex);
 }
@@ -779,7 +776,11 @@ static void handle_rx(struct vhost_net *net)
 		vq->log : NULL;
 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
 
-	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+	do {
+		sock_len = vhost_net_rx_peek_head_len(net, sock->sk);
+
+		if (!sock_len)
+			break;
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
 		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -860,9 +861,8 @@ static void handle_rx(struct vhost_net *net)
 			vhost_log_write(vq, vq_log, log, vhost_len,
 					vq->iov, in);
 		total_len += vhost_len;
-		if (unlikely(vhost_exceeds_weight(vq, ++recv_pkts, total_len)))
-			goto out;
-	}
+	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
 	vhost_net_enable_vq(net, vq);
 out:
 	mutex_unlock(&vq->mutex);
@@ -941,7 +941,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
-		       VHOST_NET_WEIGHT, VHOST_NET_PKT_WEIGHT);
+		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
