From: Intiyaz Basha <intiyaz.basha@cavium.com>
Date: Mon, 26 Mar 2018 13:40:27 -0700
Subject: liquidio: Removed duplicate Tx queue status check
Patch-mainline: v4.17-rc1
Git-commit: 4171ec060073e1789cb868f43585983c69ff767b
References: bsc#1119406 FATE#326562

NAPI already checks the Tx queue status and wakes the Tx queue when
required.  The same check was being repeated while freeing every Tx
buffer, so remove the duplicate Tx queue status check from the Tx
buffer free functions.
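
For context, below is a minimal sketch of a Tx-completion NAPI poll
handler; it is not taken from the liquidio driver, and every name other
than the standard netdev/NAPI helpers (__netif_subqueue_stopped,
netif_wake_subqueue, napi_complete_done) is hypothetical.  It only
illustrates the pattern this change relies on: the wake-up decision is
made once per NAPI poll, so repeating it for every freed skb is
redundant.

    #include <linux/netdevice.h>

    /* Hypothetical per-queue context used by this sketch only. */
    struct example_tx_queue {
            struct napi_struct napi;
            struct net_device *netdev;
            int q_index;
    };

    /* Assumed helpers, not real driver functions. */
    static int example_clean_tx_ring(struct example_tx_queue *eq, int budget);
    static bool example_tx_ring_has_room(const struct example_tx_queue *eq);

    static int example_tx_napi_poll(struct napi_struct *napi, int budget)
    {
            struct example_tx_queue *eq =
                    container_of(napi, struct example_tx_queue, napi);
            int tx_done;

            /* Reclaim completed Tx buffers; each one goes through a
             * free_netbuf()-style callback that only unmaps and frees
             * the skb, with no queue-state check of its own.
             */
            tx_done = example_clean_tx_ring(eq, budget);

            /* Single wake-up check per poll: if the stack stopped this
             * subqueue and the ring has drained enough, restart it here
             * instead of once per freed buffer.
             */
            if (__netif_subqueue_stopped(eq->netdev, eq->q_index) &&
                example_tx_ring_has_room(eq))
                    netif_wake_subqueue(eq->netdev, eq->q_index);

            if (tx_done < budget)
                    napi_complete_done(napi, tx_done);

            return tx_done;
    }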

Signed-off-by: Intiyaz Basha <intiyaz.basha@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/cavium/liquidio/lio_main.c    |   28 --------------------
 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c |   29 ---------------------
 2 files changed, 57 deletions(-)

--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1635,28 +1635,6 @@ static int octeon_pci_os_setup(struct oc
 }
 
 /**
- * \brief Check Tx queue state for a given network buffer
- * @param lio per-network private data
- * @param skb network buffer
- */
-static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
-{
-	int q, iq;
-
-	q = skb->queue_mapping;
-	iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
-
-	if (octnet_iq_is_full(lio->oct_dev, iq))
-		return 0;
-
-	if (__netif_subqueue_stopped(lio->netdev, q)) {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
-		netif_wake_subqueue(lio->netdev, q);
-	}
-	return 1;
-}
-
-/**
  * \brief Unmap and free network buffer
  * @param buf buffer
  */
@@ -1673,8 +1651,6 @@ static void free_netbuf(void *buf)
 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
 			 DMA_TO_DEVICE);
 
-	check_txq_state(lio, skb);
-
 	tx_buffer_free(skb);
 }
 
@@ -1715,8 +1691,6 @@ static void free_netsgbuf(void *buf)
 	list_add_tail(&g->list, &lio->glist[iq]);
 	spin_unlock(&lio->glist_lock[iq]);
 
-	check_txq_state(lio, skb);     /* mq support: sub-queue state check */
-
 	tx_buffer_free(skb);
 }
 
@@ -1762,8 +1736,6 @@ static void free_netsgbuf_with_resp(void
 	spin_unlock(&lio->glist_lock[iq]);
 
 	/* Don't free the skb yet */
-
-	check_txq_state(lio, skb);
 }
 
 /**
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -954,29 +954,6 @@ static int octeon_pci_os_setup(struct oc
 }
 
 /**
- * \brief Check Tx queue state for a given network buffer
- * @param lio per-network private data
- * @param skb network buffer
- */
-static int check_txq_state(struct lio *lio, struct sk_buff *skb)
-{
-	int q, iq;
-
-	q = skb->queue_mapping;
-	iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
-
-	if (octnet_iq_is_full(lio->oct_dev, iq))
-		return 0;
-
-	if (__netif_subqueue_stopped(lio->netdev, q)) {
-		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
-		netif_wake_subqueue(lio->netdev, q);
-	}
-
-	return 1;
-}
-
-/**
  * \brief Unmap and free network buffer
  * @param buf buffer
  */
@@ -993,8 +970,6 @@ static void free_netbuf(void *buf)
 	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
 			 DMA_TO_DEVICE);
 
-	check_txq_state(lio, skb);
-
 	tx_buffer_free(skb);
 }
 
@@ -1036,8 +1011,6 @@ static void free_netsgbuf(void *buf)
 	list_add_tail(&g->list, &lio->glist[iq]);
 	spin_unlock(&lio->glist_lock[iq]);
 
-	check_txq_state(lio, skb); /* mq support: sub-queue state check */
-
 	tx_buffer_free(skb);
 }
 
@@ -1083,8 +1056,6 @@ static void free_netsgbuf_with_resp(void
 	spin_unlock(&lio->glist_lock[iq]);
 
 	/* Don't free the skb yet */
-
-	check_txq_state(lio, skb);
 }
 
 /**