From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Mon, 6 May 2019 10:48:44 +0800
Subject: net: hns3: add linearizing checking for TSO case
Patch-mainline: v5.2-rc1
Git-commit: db4970aa92a148389826057290cd45bb30f5650e
References: bsc#1104353 FATE#326415 bsc#1134947

HW requires the data in every 8 continuous buffers to be larger
than MSS. We simplify this by ensuring that skb_headlen plus the
first continuous 7 frags is larger than the GSO header len plus
MSS, and that every remaining window of 7 continuous frags is
larger than MSS, except for the last 7 frags.

This patch adds hns3_skb_need_linearized to handle this for the
TSO case.
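
As a rough userspace model (not part of the patch: the helper name,
the int types and the frag_sizes[] array below are illustrative
assumptions), the sliding-window check can be sketched as:

  #include <stdbool.h>

  #define MAX_BD_PER_FRAG 8  /* stands in for HNS3_MAX_BD_PER_FRAG */

  /* frag_sizes[] models skb_frag_size() of skb_shinfo(skb)->frags[i],
   * headlen models skb_headlen(skb), hdr_len models hns3_gso_hdr_len()
   * and mss models skb_shinfo(skb)->gso_size.
   */
  static bool need_linearize(int headlen, int hdr_len, int mss,
                             const int *frag_sizes, int nr_frags)
  {
          int bd_limit = MAX_BD_PER_FRAG - 1;  /* 7-frag window */
          int tot_len = 0;
          int i;

          if (nr_frags <= bd_limit)  /* nothing to slide over */
                  return false;

          for (i = 0; i < bd_limit; i++)
                  tot_len += frag_sizes[i];

          /* headlen + the first 7 frags must cover header + mss,
           * and the first 7 frags alone must cover mss
           */
          if (tot_len + headlen < mss + hdr_len || tot_len < mss)
                  return true;

          /* slide the 7-frag window one frag at a time; the window
           * ending on the last frag is deliberately left unchecked
           */
          for (i = 0; i < nr_frags - bd_limit - 1; i++) {
                  tot_len -= frag_sizes[i];
                  tot_len += frag_sizes[i + bd_limit];
                  if (tot_len < mss)
                          return true;
          }

          return false;
  }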

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c |   45 ++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1181,6 +1181,47 @@ static int hns3_nic_bd_num(struct sk_buf
 	return bd_num;
 }
 
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+	if (!skb->encapsulation)
+		return skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
+/* HW requires the data in every 8 continuous buffers to be larger than
+ * MSS; we simplify it by ensuring skb_headlen + the first continuous
+ * 7 frags to be larger than gso header len + mss, and the remaining
+ * continuous 7 frags to be larger than MSS except the last 7 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb)
+{
+	int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+	unsigned int tot_len = 0;
+	int i;
+
+	for (i = 0; i < bd_limit; i++)
+		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+	/* ensure headlen + the first 7 frags is greater than mss + header
+	 * and the first 7 frags is greater than mss.
+	 */
+	if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
+	    hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+		return true;
+
+	/* ensure the remaining continuous 7 buffers are greater than mss */
+	for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
+		tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+
+		if (tot_len < skb_shinfo(skb)->gso_size)
+			return true;
+	}
+
+	return false;
+}
+
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 				  struct sk_buff **out_skb)
 {
@@ -1194,6 +1235,9 @@ static int hns3_nic_maybe_stop_tx(struct
 	if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
 		struct sk_buff *new_skb;
 
+		if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+			goto out;
+
 		bd_num = hns3_tx_bd_count(skb->len);
 		if (unlikely(ring_space(ring) < bd_num))
 			return -EBUSY;
@@ -1209,6 +1253,7 @@ static int hns3_nic_maybe_stop_tx(struct
 		u64_stats_update_end(&ring->syncp);
 	}
 
+out:
 	if (unlikely(ring_space(ring) < bd_num))
 		return -EBUSY;
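
To see the check in action, a throwaway harness like the one below can
be compiled together with the need_linearize() sketch in the commit
message; the MSS, header and frag sizes are invented for illustration.

  #include <stdio.h>

  int main(void)
  {
          /* seven 100-byte frags sum to 700 < 1460 (mss), so the
           * very first window check fails -> linearize
           */
          int small[8] = { 100, 100, 100, 100, 100, 100, 100, 100 };
          /* every 7-frag window of 400-byte frags sums to 2800,
           * which clears the mss -> send as-is
           */
          int large[8] = { 400, 400, 400, 400, 400, 400, 400, 400 };

          printf("small: %d\n", need_linearize(54, 54, 1460, small, 8));
          printf("large: %d\n", need_linearize(54, 54, 1460, large, 8));
          return 0;   /* prints: small: 1, large: 0 */
  }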