From: Mike Marciniszyn <mike.marciniszyn@intel.com>
Date: Tue, 23 Jun 2020 16:43:22 -0400
Subject: IB/hfi1: Correct -EBUSY handling in tx code
Patch-mainline: v5.8-rc3
Git-commit: 82172b765530f84b4b9da929f2dcf46f2b7b232b
References: jsc#SLE-13208

The current code mishandles -EBUSY in two ways:
- The flow-change path doesn't test the return from the flush and runs on
  to process the current packet, racing with the wakeup processing
- The -EBUSY handling for a single packet inserts the tx into the txlist
  after the submit call, racing with the same wakeup processing

Fix the first by dropping the skb and returning NETDEV_TX_OK.

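A minimal sketch of that path, following the hfi1_ipoib_send_dma_list()
hunk below (identifiers taken from the diff):

	/* a changed flow must flush the txlist before the new packet */
	ret = hfi1_ipoib_flush_tx_list(dev, txq);
	if (unlikely(ret)) {
		if (ret == -EBUSY)
			++dev->stats.tx_dropped;
		/* don't race the wakeup processing; drop and report OK */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
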
Fix the second by ensuring that the list entry within the txreq is
initialized when allocated.  This enables the sleep routine to detect that
the txreq came from the non-list API and to queue the packet to the txlist.

Both flaws can result in the flushing thread executing concurrently,
causing two threads to manipulate the txlist.
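
A condensed view of that pairing, taken from the hunks below: the txreq's
list head is initialized at setup time, so hfi1_ipoib_sdma_sleep() can use
list_empty() to recognize a single-packet (non-list) submit and queue it
itself:

	/* at txreq setup: not yet on any list */
	INIT_LIST_HEAD(&tx->txreq.list);

	/* in the sleep routine: only a non-list submit still has an empty entry */
	if (list_empty(&txreq->list))
		list_add_tail(&txreq->list, &txq->tx_list);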

Fixes: d99dc602e2a5 ("IB/hfi1: Add functions to transmit datagram ipoib packets")
Link: https://lore.kernel.org/r/20200623204321.108092.83898.stgit@awfm-01.aw.intel.com
Reviewed-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/hfi1/ipoib_tx.c |   33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)

--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -369,6 +369,7 @@ static struct ipoib_txreq *hfi1_ipoib_se
 	tx->priv = priv;
 	tx->txq = txp->txq;
 	tx->skb = skb;
+	INIT_LIST_HEAD(&tx->txreq.list);
 
 	hfi1_ipoib_build_ib_tx_headers(tx, txp);
 
@@ -469,6 +470,7 @@ static int hfi1_ipoib_send_dma_single(st
 
 	ret = hfi1_ipoib_submit_tx(txq, tx);
 	if (likely(!ret)) {
+tx_ok:
 		trace_sdma_output_ibhdr(tx->priv->dd,
 					&tx->sdma_hdr.hdr,
 					ib_is_sc5(txp->flow.sc5));
@@ -478,20 +480,8 @@ static int hfi1_ipoib_send_dma_single(st
 
 	txq->pkts_sent = false;
 
-	if (ret == -EBUSY) {
-		list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-		trace_sdma_output_ibhdr(tx->priv->dd,
-					&tx->sdma_hdr.hdr,
-					ib_is_sc5(txp->flow.sc5));
-		hfi1_ipoib_check_queue_depth(txq);
-		return NETDEV_TX_OK;
-	}
-
-	if (ret == -ECOMM) {
-		hfi1_ipoib_check_queue_depth(txq);
-		return NETDEV_TX_OK;
-	}
+	if (ret == -EBUSY || ret == -ECOMM)
+		goto tx_ok;
 
 	sdma_txclean(priv->dd, &tx->txreq);
 	dev_kfree_skb_any(skb);
@@ -509,9 +499,17 @@ static int hfi1_ipoib_send_dma_list(stru
 	struct ipoib_txreq *tx;
 
 	/* Has the flow change ? */
-	if (txq->flow.as_int != txp->flow.as_int)
-		(void)hfi1_ipoib_flush_tx_list(dev, txq);
+	if (txq->flow.as_int != txp->flow.as_int) {
+		int ret;
 
+		ret = hfi1_ipoib_flush_tx_list(dev, txq);
+		if (unlikely(ret)) {
+			if (ret == -EBUSY)
+				++dev->stats.tx_dropped;
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
 	tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
 	if (IS_ERR(tx)) {
 		int ret = PTR_ERR(tx);
@@ -612,6 +610,9 @@ static int hfi1_ipoib_sdma_sleep(struct
 
 		netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 
+		if (list_empty(&txreq->list))
+			/* came from non-list submit */
+			list_add_tail(&txreq->list, &txq->tx_list);
 		if (list_empty(&txq->wait.list))
 			iowait_queue(pkts_sent, wait->iow, &sde->dmawait);