From: Michael Chan <michael.chan@broadcom.com>
Date: Wed, 17 Jan 2018 03:21:12 -0500
Subject: bnxt_en: Expand bnxt_check_rings() to check all resources.
Patch-mainline: v4.16-rc1
Git-commit: 8f23d638b36b4ff0fe5785cf01f9bdc41afb9c06
References: bsc#1086282 FATE#324873

bnxt_check_rings() is called by ethtool, XDP setup, and ndo_setup_tc()
to see if there are enough resources to support the new configuration.
Expand the check to cover all resources when the firmware supports the
new resource manager API.  With the more flexible resource allocation
scheme, this call must confirm that all required resources are
available before the driver commits to the new allocation.
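
A minimal sketch of the intended caller pattern (illustrative only: the
wrapper bnxt_try_new_ring_config() is hypothetical, while the
bnxt_check_rings() arguments follow its current signature):

	static int bnxt_try_new_ring_config(struct bnxt *bp, int tx, int rx,
					    bool sh, int tcs, int tx_xdp)
	{
		int rc;

		/* Ask firmware whether all required resources (tx/rx/cmpl
		 * rings, ring groups, stat contexts, vnics) for the new
		 * layout are available.
		 */
		rc = bnxt_check_rings(bp, tx, rx, sh, tcs, tx_xdp);
		if (rc)
			return rc;	/* not enough resources, keep old config */

		/* Only after the check succeeds would the caller commit the
		 * new configuration (close, update ring counts, reopen).
		 */
		return 0;
	}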

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c |   93 +++++++++++++++++++++++++++---
 1 file changed, 84 insertions(+), 9 deletions(-)

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4712,28 +4712,99 @@ static bool bnxt_need_reserve_rings(stru
 	return false;
 }
 
-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
 {
-	struct hwrm_func_cfg_input req = {0};
+	struct hwrm_func_vf_cfg_input req = {0};
+	u32 flags, enables;
 	int rc;
 
-	if (bp->hwrm_spec_code < 0x10801)
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
 		return 0;
 
-	if (BNXT_VF(bp))
-		return 0;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+	enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+		  FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
+	req.num_tx_rings = cpu_to_le16(tx_rings);
+	req.num_rx_rings = cpu_to_le16(rx_rings);
+	req.num_cmpl_rings = cpu_to_le16(cp_rings);
+	req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+	req.num_stat_ctxs = cpu_to_le16(cp_rings);
+	req.num_vnics = cpu_to_le16(1);
+	if (bp->flags & BNXT_FLAG_RFS)
+		req.num_vnics = cpu_to_le16(rx_rings + 1);
+	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		return -ENOMEM;
+	return 0;
+}
+
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				    int ring_grps, int cp_rings)
+{
+	struct hwrm_func_cfg_input req = {0};
+	u32 flags, enables;
+	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
-	req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+	enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
 	req.num_tx_rings = cpu_to_le16(tx_rings);
+	if (bp->flags & BNXT_FLAG_NEW_RM) {
+		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+		enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+			   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+			   FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+		req.num_rx_rings = cpu_to_le16(rx_rings);
+		req.num_cmpl_rings = cpu_to_le16(cp_rings);
+		req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+		req.num_stat_ctxs = cpu_to_le16(cp_rings);
+		req.num_vnics = cpu_to_le16(1);
+		if (bp->flags & BNXT_FLAG_RFS)
+			req.num_vnics = cpu_to_le16(rx_rings + 1);
+	}
+	req.flags = cpu_to_le32(flags);
+	req.enables = cpu_to_le32(enables);
 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		return -ENOMEM;
 	return 0;
 }
 
+static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+				 int ring_grps, int cp_rings)
+{
+	if (bp->hwrm_spec_code < 0x10801)
+		return 0;
+
+	if (BNXT_PF(bp))
+		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+						ring_grps, cp_rings);
+
+	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+					cp_rings);
+}
+
 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
 {
@@ -7365,7 +7436,8 @@ int bnxt_check_rings(struct bnxt *bp, in
 {
 	int max_rx, max_tx, tx_sets = 1;
 	int tx_rings_needed;
-	int rc;
+	int rx_rings = rx;
+	int cp, rc;
 
 	if (tcs)
 		tx_sets = tcs;
@@ -7381,7 +7453,10 @@ int bnxt_check_rings(struct bnxt *bp, in
 	if (max_tx < tx_rings_needed)
 		return -ENOMEM;
 
-	return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx_rings <<= 1;
+	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
 }
 
 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)