From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Wei Xu <wexu@redhat.com>
Date: Fri, 1 Dec 2017 05:10:36 -0500
Subject: [PATCH] vhost: fix skb leak in handle_rx()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Matthew found a roughly 40% tcp throughput regression with commit
c67df11f(vhost_net: try batch dequing from skb array) as discussed
in the following thread:
https://www.mail-archive.com/netdev@vger.kernel.org/msg187936.html

Eventually we figured out that it was a skb leak in handle_rx()
when sending packets to the VM. This usually happens when a guest
can not drain out vq as fast as vhost fills in, afterwards it sets
off the traffic jam and leaks skb(s) which occurs as no headcount
to send on the vq from vhost side.

This can be avoided by making sure we have got enough headcount
before actually consuming a skb from the batched rx array while
transmitting, which is simply done by moving checking the zero
headcount a bit ahead.

Signed-off-by: Wei Xu <wexu@redhat.com>
Reported-by: Matthew Rosato <mjrosato@linux.vnet.ibm.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 drivers/vhost/net.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1c75572f5a3f..010253847022 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -781,16 +781,6 @@ static void handle_rx(struct vhost_net *net)
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
 			goto out;
-		if (nvq->rx_array)
-			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
-		/* On overrun, truncate and discard */
-		if (unlikely(headcount > UIO_MAXIOV)) {
-			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
-			err = sock->ops->recvmsg(sock, &msg,
-						 1, MSG_DONTWAIT | MSG_TRUNC);
-			pr_debug("Discarded rx packet: len %zd\n", sock_len);
-			continue;
-		}
 		/* OK, now we need to know about added descriptors. */
 		if (!headcount) {
 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
@@ -803,6 +793,16 @@ static void handle_rx(struct vhost_net *net)
 			 * they refilled. */
 			goto out;
 		}
+		if (nvq->rx_array)
+			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
+		/* On overrun, truncate and discard */
+		if (unlikely(headcount > UIO_MAXIOV)) {
+			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
+			err = sock->ops->recvmsg(sock, &msg,
+						 1, MSG_DONTWAIT | MSG_TRUNC);
+			pr_debug("Discarded rx packet: len %zd\n", sock_len);
+			continue;
+		}
 		/* We don't need to be notified again. */
 		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
 		fixup = msg.msg_iter;
-- 
2.14.2