From: Michael Chan <michael.chan@broadcom.com>
Date: Sun, 14 Oct 2018 07:02:42 -0400
Subject: bnxt_en: Add new flags to set up new page table PTE bits on newer
 devices.
Patch-mainline: v4.20-rc1
Git-commit: 66cca20abcf742e2e39ec437144e7787ecefa037
References: bsc#1104745 FATE#325918

Newer chips require the PTU_PTE_VALID bit to be set for every page
table entry for context memory and rings.  Additional bits are also
required for page table entries for all rings.  Add a flags field to
bnxt_ring_mem_info struct to specify these additional bits to be used
when setting up the page tables as needed.
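
A caller allocating ring memory would request the ring PTE bits
through the new flags field, roughly as in this sketch (illustrative
only, not part of this patch; the page count, page size, and on-stack
arrays are made-up placeholders):

	void *pg_arr[4];		/* CPU addresses of each page */
	dma_addr_t dma_arr[4];		/* DMA addresses of each page */
	struct bnxt_ring_mem_info rmem = {
		.nr_pages	= 4,	/* hypothetical ring size */
		.page_size	= 4096,
		.pg_arr		= pg_arr,
		.dma_arr	= dma_arr,
		/* ring PTEs get PTU_PTE_VALID plus the LAST and
		 * NEXT_TO_LAST chain bits; plain context memory would
		 * set BNXT_RMEM_VALID_PTE_FLAG for just the valid bit
		 */
		.flags		= BNXT_RMEM_RING_PTE_FLAG,
	};
	int rc = bnxt_alloc_ring(bp, &rmem);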

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c |   17 +++++++++++++++--
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |    8 ++++++++
 2 files changed, 23 insertions(+), 2 deletions(-)

--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2230,8 +2230,11 @@ static void bnxt_free_ring(struct bnxt *
 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
 	struct pci_dev *pdev = bp->pdev;
+	u64 valid_bit = 0;
 	int i;
 
+	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
+		valid_bit = PTU_PTE_VALID;
 	if (rmem->nr_pages > 1) {
 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
 						  rmem->nr_pages * 8,
@@ -2242,6 +2245,8 @@ static int bnxt_alloc_ring(struct bnxt *
 	}
 
 	for (i = 0; i < rmem->nr_pages; i++) {
+		u64 extra_bits = valid_bit;
+
 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
 						     rmem->page_size,
 						     &rmem->dma_arr[i],
@@ -2249,8 +2254,16 @@ static int bnxt_alloc_ring(struct bnxt *
 		if (!rmem->pg_arr[i])
 			return -ENOMEM;
 
-		if (rmem->nr_pages > 1)
-			rmem->pg_tbl[i] = cpu_to_le64(rmem->dma_arr[i]);
+		if (rmem->nr_pages > 1) {
+			if (i == rmem->nr_pages - 2 &&
+			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+				extra_bits |= PTU_PTE_NEXT_TO_LAST;
+			else if (i == rmem->nr_pages - 1 &&
+				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
+				extra_bits |= PTU_PTE_LAST;
+			rmem->pg_tbl[i] =
+				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
+		}
 	}
 
 	if (rmem->vmem_size) {
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -580,6 +580,10 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
+	u32			flags;
+#define BNXT_RMEM_VALID_PTE_FLAG	1
+#define BNXT_RMEM_RING_PTE_FLAG		2
+
 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
 
@@ -1109,6 +1113,10 @@ struct bnxt_vf_rep {
 	struct bnxt_vf_rep_stats	tx_stats;
 };
 
+#define PTU_PTE_VALID             0x1UL
+#define PTU_PTE_LAST              0x2UL
+#define PTU_PTE_NEXT_TO_LAST      0x4UL
+
 struct bnxt {
 	void __iomem		*bar0;
 	void __iomem		*bar1;