From: Julian Wiedmann <jwi@linux.ibm.com>
Subject: s390/qeth: remove unused L3 xmit code
Patch-mainline: v4.20-rc1
Git-commit: 0a6da4b10d9c263586975dabdbf3aedd12e76a11
References: FATE#326377, LTC#169210, bsc#1115382

Summary:        qeth: Full-blown TCP Segmentation Offload
Description:    As of now, qeth only supports TCP Segmentation Offload (TSO)
                for IPv4 in Layer3 devices. This feature extends the existing
                support to IPv6, and adds support for TSO in both IP variants
                for Layer2.

                To cleanly pull in all the necessary changes to the transmit
                code, update the qeth driver to the current 4.20 level.


Upstream-Description:

             s390/qeth: remove unused L3 xmit code

             qeth_l3_xmit() is now only used for TSOv4 traffic, shrink it down.
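
             For reference, the dispatch that makes this specialization safe
             should look roughly like the sketch below. This is a hypothetical
             reconstruction, not a quote of the upstream caller: the predicate
             is taken from the use_tso check that this patch removes, and the
             function name is made up for illustration only.

                 /* Hypothetical sketch: illustrates why qeth_l3_xmit() can
                  * assume TSOv4-only traffic after this patch. The real
                  * check lives in the L3 start_xmit path and may differ.
                  */
                 static int qeth_l3_dispatch_xmit(struct qeth_card *card,
                                                  struct sk_buff *skb,
                                                  struct qeth_qdio_out_q *queue,
                                                  int ipv, int cast_type)
                 {
                         /* Only IPv4 TSO frames still need the TSO header. */
                         if (skb_is_gso(skb) &&
                             (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                                 return qeth_l3_xmit(card, skb, queue, ipv,
                                                     cast_type);

                         /* Everything else takes the leaner offload path. */
                         return qeth_l3_xmit_offload(card, skb, queue, ipv,
                                                     cast_type);
                 }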

             Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
             Signed-off-by: David S. Miller <davem@davemloft.net>

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/s390/net/qeth_l3_main.c |   71 +++++++++-------------------------------
 1 file changed, 17 insertions(+), 54 deletions(-)

--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2236,44 +2236,24 @@ static int qeth_l3_xmit_offload(struct q
 static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_qdio_out_q *queue, int ipv, int cast_type)
 {
-	int elements, len, rc;
-	__be16 *tag;
 	struct qeth_hdr *hdr = NULL;
-	int hdr_elements = 0;
 	struct sk_buff *new_skb = NULL;
 	int tx_bytes = skb->len;
 	unsigned int hd_len;
-	bool use_tso, is_sg;
-
-	/* Ignore segment size from skb_is_gso(), 1 page is always used. */
-	use_tso = skb_is_gso(skb) &&
-		  (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4);
+	int elements, rc;
+	bool is_sg;
 
 	/* create a clone with writeable headroom */
-	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) +
-					    VLAN_HLEN);
+	new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso));
 	if (!new_skb)
 		return -ENOMEM;
 
-	if (ipv == 4) {
-		skb_pull(new_skb, ETH_HLEN);
-	} else if (skb_vlan_tag_present(new_skb)) {
-		skb_push(new_skb, VLAN_HLEN);
-		skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
-		skb_copy_to_linear_data_offset(new_skb, 4,
-					       new_skb->data + 8, 4);
-		skb_copy_to_linear_data_offset(new_skb, 8,
-					       new_skb->data + 12, 4);
-		tag = (__be16 *)(new_skb->data + 12);
-		*tag = cpu_to_be16(ETH_P_8021Q);
-		*(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
-	}
+	skb_pull(new_skb, ETH_HLEN);
 
 	/* fix hardware limitation: as long as we do not have sbal
 	 * chaining we can not send long frag lists
 	 */
-	if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	    (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) {
+	if (!qeth_l3_get_elements_no_tso(card, new_skb, 1)) {
 		rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2286,38 +2266,23 @@ static int qeth_l3_xmit(struct qeth_card
 			goto out;
 	}
 
-	if (use_tso) {
-		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
-		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-				    new_skb->len - sizeof(struct qeth_hdr_tso));
-		qeth_tso_fill_header(card, hdr, new_skb);
-		hdr_elements++;
-	} else {
-		hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
-				    new_skb->len - sizeof(struct qeth_hdr));
-	}
+	hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
+	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+	qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+			    new_skb->len - sizeof(struct qeth_hdr_tso));
+	qeth_tso_fill_header(card, hdr, new_skb);
 
-	elements = use_tso ?
-		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		   qeth_get_elements_no(card, new_skb, hdr_elements, 0);
+	elements = qeth_l3_get_elements_no_tso(card, new_skb, 1);
 	if (!elements) {
 		rc = -E2BIG;
 		goto out;
 	}
-	elements += hdr_elements;
+	elements++;
 
-	if (use_tso) {
-		hd_len = sizeof(struct qeth_hdr_tso) +
-			 ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
-		len = hd_len;
-	} else {
-		hd_len = 0;
-		len = sizeof(struct qeth_hdr_layer3);
-	}
+	hd_len = sizeof(struct qeth_hdr_tso) + ip_hdrlen(new_skb) +
+		 tcp_hdrlen(new_skb);
 
-	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) {
+	if (qeth_hdr_chk_and_bounce(new_skb, &hdr, hd_len)) {
 		rc = -EINVAL;
 		goto out;
 	}
@@ -2333,10 +2298,8 @@ out:
 			card->perf_stats.buf_elements_sent += elements;
 			if (is_sg)
 				card->perf_stats.sg_skbs_sent++;
-			if (use_tso) {
-				card->perf_stats.large_send_bytes += tx_bytes;
-				card->perf_stats.large_send_cnt++;
-			}
+			card->perf_stats.large_send_bytes += tx_bytes;
+			card->perf_stats.large_send_cnt++;
 		}
 	} else {
 		if (new_skb != skb)