a0a93ff7fe

We never released them yet (only introduced after 6.1.0), but there are
upstream reports about regressions for them at:

https://lore.kernel.org/netdev/CAK8fFZ5pzMaw3U1KXgC_OK4shKGsN=HDcR62cfPOuL0umXE1Ww@mail.gmail.com/
https://lore.kernel.org/netdev/CAK8fFZ6A_Gphw_3-QMGKEFQk=sfCw1Qmq0TVZK3rtAi7vb621A@mail.gmail.com/

So do a preventive revert.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

119 lines
4.1 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date: Sat, 7 Jan 2023 13:50:22 +0100
Subject: [PATCH] Revert "gro: add support of (hw)gro packets to gro stack"

Seems to be the cause of a regression in network performance:
https://lore.kernel.org/netdev/CAK8fFZ5pzMaw3U1KXgC_OK4shKGsN=HDcR62cfPOuL0umXE1Ww@mail.gmail.com/

This reverts commit 5eddb24901ee49eee23c0bfce6af2e83fd5679bd.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 net/core/gro.c         | 18 ++++--------------
 net/ipv4/tcp_offload.c | 17 ++---------------
 2 files changed, 6 insertions(+), 29 deletions(-)

diff --git a/net/core/gro.c b/net/core/gro.c
index bc9451743307..b4190eb08467 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -160,7 +160,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int gro_max_size;
 	unsigned int new_truesize;
 	struct sk_buff *lp;
-	int segs;
 
 	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
 	gro_max_size = READ_ONCE(p->dev->gro_max_size);
@@ -176,7 +175,6 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		return -E2BIG;
 	}
 
-	segs = NAPI_GRO_CB(skb)->count;
 	lp = NAPI_GRO_CB(p)->last;
 	pinfo = skb_shinfo(lp);
 
@@ -267,7 +265,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	lp = p;
 
 done:
-	NAPI_GRO_CB(p)->count += segs;
+	NAPI_GRO_CB(p)->count++;
 	p->data_len += len;
 	p->truesize += delta_truesize;
 	p->len += len;
@@ -498,15 +496,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
 				 sizeof(u32))); /* Avoid slow unaligned acc */
 	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
-	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
+	NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
 	NAPI_GRO_CB(skb)->is_atomic = 1;
-	NAPI_GRO_CB(skb)->count = 1;
-	if (unlikely(skb_is_gso(skb))) {
-		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
-		/* Only support TCP at the moment. */
-		if (!skb_is_gso_tcp(skb))
-			NAPI_GRO_CB(skb)->flush = 1;
-	}
 
 	/* Setup for GRO checksum validation */
 	switch (skb->ip_summed) {
@@ -554,10 +545,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 	else
 		gro_list->count++;
 
+	NAPI_GRO_CB(skb)->count = 1;
 	NAPI_GRO_CB(skb)->age = jiffies;
 	NAPI_GRO_CB(skb)->last = skb;
-	if (!skb_is_gso(skb))
-		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
+	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
 	list_add(&skb->list, &gro_list->list);
 	ret = GRO_HELD;
 
@@ -669,7 +660,6 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 
 	skb->encapsulation = 0;
 	skb_shinfo(skb)->gso_type = 0;
-	skb_shinfo(skb)->gso_size = 0;
 	if (unlikely(skb->slow_gro)) {
 		skb_orphan(skb);
 		skb_ext_reset(skb);
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 45dda7889387..a844a0d38482 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -255,15 +255,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	mss = skb_shinfo(p)->gso_size;
 
-	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
-	 * If it is a single frame, do not aggregate it if its length
-	 * is bigger than our mss.
-	 */
-	if (unlikely(skb_is_gso(skb)))
-		flush |= (mss != skb_shinfo(skb)->gso_size);
-	else
-		flush |= (len - 1) >= mss;
-
+	flush |= (len - 1) >= mss;
 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
 #ifdef CONFIG_TLS_DEVICE
 	flush |= p->decrypted ^ skb->decrypted;
@@ -277,12 +269,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 		tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
 
 out_check_final:
-	/* Force a flush if last segment is smaller than mss. */
-	if (unlikely(skb_is_gso(skb)))
-		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
-	else
-		flush = len < mss;
-
+	flush = len < mss;
 	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
 					TCP_FLAG_RST | TCP_FLAG_SYN |
 					TCP_FLAG_FIN));