From 82e3be320d1e38a5e91a79d0eb451954c87ab7fe Mon Sep 17 00:00:00 2001
From: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Date: Wed, 21 Feb 2018 21:33:56 -0600
Subject: [PATCH] ibmvnic: Split counters for scrq/pools/napi

References: bsc#1082223
Patch-mainline: v4.17-rc1
Git-commit: 82e3be320d1e38a5e91a79d0eb451954c87ab7fe

The approach of one counter to rule them all when tracking the number
of active sub-crqs, pools, and napi instances has problems handling
some failover scenarios. This is because the sub-crqs, pools, and
napi instances are initialized in different places, and the points at
which the active counts are updated do not line up with those
initializations.

This patch simplifies the tracking by keeping a separate active
counter for each of the tx and rx sub-crqs, pools, and napi instances.

Signed-off-by: Nathan Fontenot <nfont@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Michal Suchanek <msuchanek@suse.de>
---
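[Note: the counting discipline the patch applies to each resource type
can be summarized as a minimal standalone C sketch. The struct and
helper names loosely mirror the driver's rx pool path, but this is
illustrative code with plain libc allocation, not the driver's
implementation:

  #include <stdlib.h>

  /* Hypothetical stand-ins for the driver's per-queue bookkeeping;
   * only the counter handling mirrors the patch. */
  struct pool {
  	void *buff;
  };

  struct adapter {
  	struct pool *rx_pool;
  	unsigned int num_active_rx_pools; /* set at init, cleared at release */
  };

  /* Record the active count as soon as the array exists, so a later
   * release walks exactly the entries that were allocated, even if a
   * failover interrupts setup partway through. */
  static int init_rx_pools(struct adapter *a, unsigned int requested)
  {
  	a->rx_pool = calloc(requested, sizeof(*a->rx_pool));
  	if (!a->rx_pool)
  		return -1;
  	a->num_active_rx_pools = requested;
  	return 0;
  }

  /* Free based on the active count, never the requested count, and
   * zero the counter so a repeated release is a harmless no-op. */
  static void release_rx_pools(struct adapter *a)
  {
  	unsigned int i;

  	if (!a->rx_pool)
  		return;
  	for (i = 0; i < a->num_active_rx_pools; i++)
  		free(a->rx_pool[i].buff);
  	free(a->rx_pool);
  	a->rx_pool = NULL;
  	a->num_active_rx_pools = 0;
  }

  int main(void)
  {
  	struct adapter a = { 0 };

  	if (init_rx_pools(&a, 4))
  		return 1;
  	release_rx_pools(&a);
  	release_rx_pools(&a); /* second call is safe: counter is zero */
  	return 0;
  }

The same set-on-alloc / walk-on-release / zero-after-free pattern is
applied independently to each of the five counters the patch adds to
struct ibmvnic_adapter below.]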
 drivers/net/ethernet/ibm/ibmvnic.c | 38 +++++++++++++++++---------------------
 drivers/net/ethernet/ibm/ibmvnic.h |  7 +++++--
 2 files changed, 22 insertions(+), 23 deletions(-)

--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -461,7 +461,7 @@
 	if (!adapter->rx_pool)
 		return;
 
-	for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -484,6 +484,7 @@
 
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
+	adapter->num_active_rx_pools = 0;
 }
 
 static int init_rx_pools(struct net_device *netdev)
@@ -508,6 +509,8 @@
 		return -1;
 	}
 
+	adapter->num_active_rx_pools = rxadd_subcrqs;
+
 	for (i = 0; i < rxadd_subcrqs; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
@@ -608,7 +611,7 @@
 	if (!adapter->tx_pool)
 		return;
 
-	for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_tx_pools; i++) {
 		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
 		tx_pool = &adapter->tx_pool[i];
 		kfree(tx_pool->tx_buff);
@@ -619,6 +622,7 @@
 
 	kfree(adapter->tx_pool);
 	adapter->tx_pool = NULL;
+	adapter->num_active_tx_pools = 0;
 }
 
 static int init_tx_pools(struct net_device *netdev)
@@ -635,6 +639,8 @@
 	if (!adapter->tx_pool)
 		return -1;
 
+	adapter->num_active_tx_pools = tx_subcrqs;
+
 	for (i = 0; i < tx_subcrqs; i++) {
 		tx_pool = &adapter->tx_pool[i];
 
@@ -745,6 +751,7 @@
 			       ibmvnic_poll, NAPI_POLL_WEIGHT);
 	}
 
+	adapter->num_active_rx_napi = adapter->req_rx_queues;
 	return 0;
 }
 
@@ -755,7 +762,7 @@
 	if (!adapter->napi)
 		return;
 
-	for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
+	for (i = 0; i < adapter->num_active_rx_napi; i++) {
 		if (&adapter->napi[i]) {
 			netdev_dbg(adapter->netdev,
 				   "Releasing napi[%d]\n", i);
@@ -765,6 +772,7 @@
 
 	kfree(adapter->napi);
 	adapter->napi = NULL;
+	adapter->num_active_rx_napi = 0;
 }
 
 static int ibmvnic_login(struct net_device *netdev)
@@ -998,10 +1006,6 @@
 		return rc;
 
 	rc = init_tx_pools(netdev);
-
-	adapter->num_active_tx_scrqs = adapter->req_tx_queues;
-	adapter->num_active_rx_scrqs = adapter->req_rx_queues;
-
 	return rc;
 }
 
@@ -1706,9 +1710,6 @@
 
 			release_napi(adapter);
 			init_napi(adapter);
-
-			adapter->num_active_tx_scrqs = adapter->req_tx_queues;
-			adapter->num_active_rx_scrqs = adapter->req_rx_queues;
 		} else {
 			rc = reset_tx_pools(adapter);
 			if (rc)
@@ -2403,19 +2404,10 @@
 
 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 {
-	u64 num_tx_scrqs, num_rx_scrqs;
 	int i;
 
-	if (adapter->state == VNIC_PROBED) {
-		num_tx_scrqs = adapter->req_tx_queues;
-		num_rx_scrqs = adapter->req_rx_queues;
-	} else {
-		num_tx_scrqs = adapter->num_active_tx_scrqs;
-		num_rx_scrqs = adapter->num_active_rx_scrqs;
-	}
-
 	if (adapter->tx_scrq) {
-		for (i = 0; i < num_tx_scrqs; i++) {
+		for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
 			if (!adapter->tx_scrq[i])
 				continue;
 
@@ -2434,10 +2426,11 @@
 
 		kfree(adapter->tx_scrq);
 		adapter->tx_scrq = NULL;
+		adapter->num_active_tx_scrqs = 0;
 	}
 
 	if (adapter->rx_scrq) {
-		for (i = 0; i < num_rx_scrqs; i++) {
+		for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
 			if (!adapter->rx_scrq[i])
 				continue;
 
@@ -2456,6 +2449,7 @@
 
 		kfree(adapter->rx_scrq);
 		adapter->rx_scrq = NULL;
+		adapter->num_active_rx_scrqs = 0;
 	}
 }
 
@@ -2723,6 +2717,7 @@
 	for (i = 0; i < adapter->req_tx_queues; i++) {
 		adapter->tx_scrq[i] = allqueues[i];
 		adapter->tx_scrq[i]->pool_index = i;
+		adapter->num_active_tx_scrqs++;
 	}
 
 	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -2733,6 +2728,7 @@
 	for (i = 0; i < adapter->req_rx_queues; i++) {
 		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
 		adapter->rx_scrq[i]->scrq_num = i;
+		adapter->num_active_rx_scrqs++;
 	}
 
 	kfree(allqueues);
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1092,8 +1092,11 @@
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
 	u8 map_id;
-	u64 num_active_rx_scrqs;
-	u64 num_active_tx_scrqs;
+	u32 num_active_rx_scrqs;
+	u32 num_active_rx_pools;
+	u32 num_active_rx_napi;
+	u32 num_active_tx_scrqs;
+	u32 num_active_tx_pools;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;