From: Alexander Lobakin <alobakin@marvell.com>
Date: Thu, 23 Jul 2020 01:10:35 +0300
Subject: qed: sanitize PBL chains allocation
Patch-mainline: v5.9-rc1
Git-commit: 9b6ee3cf95d322ab02e9927f5b08ebc870ca9f1f
References: jsc#SLE-15143

PBL chain elements are actually DMA addresses stored as __le64, but
currently their size is hardcoded to 8 bytes, and DMA addresses are
assigned via a cast to the variable-sized dma_addr_t without any
byte-order conversion.
Change the type of the pbl_virt array to match the actual element type,
add a new field to store the size of the allocated DMA memory, and
sanitize element assignment.

Misc: give more logical names to the members of the qed_chain::pbl_sp
embedded struct.
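
For context, the gist of the change can be read as the condensed sketch
below. It is simplified from the qed_chain_alloc_pbl() hunk in this
patch; the fill_pbl_pages() helper name and the trimmed error unwinding
are illustrative only and do not appear in the driver:

	/* The element type (__le64) now drives both the allocation size
	 * and the byte order of the stored DMA addresses, replacing the
	 * hardcoded 8-byte stride and the raw dma_addr_t store.
	 */
	static int fill_pbl_pages(struct device *dev, __le64 *pbl_virt,
				  u32 page_cnt)
	{
		dma_addr_t phys;
		void *virt;
		u32 i;

		for (i = 0; i < page_cnt; i++) {
			virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE,
						  &phys, GFP_KERNEL);
			if (!virt)
				/* the real code also frees the pages
				 * allocated so far
				 */
				return -ENOMEM;

			/* Explicit CPU -> little-endian conversion; indexing
			 * by i replaces the manual pointer bump by
			 * QED_CHAIN_PBL_ENTRY_SIZE.
			 */
			pbl_virt[i] = cpu_to_le64(phys);
		}

		return 0;
	}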

Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/qlogic/qed/qed_chain.c       |   21 ++++++++++-----------
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c |    4 ++--
 include/linux/qed/qed_chain.h                     |   16 ++++++++--------
 3 files changed, 20 insertions(+), 21 deletions(-)

--- a/drivers/net/ethernet/qlogic/qed/qed_chain.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -49,7 +49,7 @@ static void qed_chain_free_pbl(struct qe
 {
 	struct device *dev = &cdev->pdev->dev;
 	struct addr_tbl_entry *entry;
-	u32 pbl_size, i;
+	u32 i;
 
 	if (!chain->pbl.pp_addr_tbl)
 		return;
@@ -63,11 +63,10 @@ static void qed_chain_free_pbl(struct qe
 				  entry->dma_map);
 	}
 
-	pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
 	if (!chain->b_external_pbl)
-		dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table,
-				  chain->pbl_sp.p_phys_table);
+		dma_free_coherent(dev, chain->pbl_sp.table_size,
+				  chain->pbl_sp.table_virt,
+				  chain->pbl_sp.table_phys);
 
 	vfree(chain->pbl.pp_addr_tbl);
 	chain->pbl.pp_addr_tbl = NULL;
@@ -190,7 +189,7 @@ static int qed_chain_alloc_pbl(struct qe
 	struct device *dev = &cdev->pdev->dev;
 	struct addr_tbl_entry *addr_tbl;
 	dma_addr_t phys, pbl_phys;
-	void *pbl_virt;
+	__le64 *pbl_virt;
 	u32 page_cnt, i;
 	size_t size;
 	void *virt;
@@ -214,7 +213,7 @@ static int qed_chain_alloc_pbl(struct qe
 
 		chain->b_external_pbl = true;
 	} else {
-		size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE);
+		size = array_size(page_cnt, sizeof(*pbl_virt));
 		if (unlikely(size == SIZE_MAX))
 			return -EOVERFLOW;
 
@@ -225,8 +224,9 @@ static int qed_chain_alloc_pbl(struct qe
 	if (!pbl_virt)
 		return -ENOMEM;
 
-	chain->pbl_sp.p_virt_table = pbl_virt;
-	chain->pbl_sp.p_phys_table = pbl_phys;
+	chain->pbl_sp.table_virt = pbl_virt;
+	chain->pbl_sp.table_phys = pbl_phys;
+	chain->pbl_sp.table_size = size;
 
 	for (i = 0; i < page_cnt; i++) {
 		virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys,
@@ -240,8 +240,7 @@ static int qed_chain_alloc_pbl(struct qe
 		}
 
 		/* Fill the PBL table with the physical address of the page */
-		*(dma_addr_t *)pbl_virt = phys;
-		pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+		pbl_virt[i] = cpu_to_le64(phys);
 
 		/* Keep the virtual address of the page */
 		addr_tbl[i].virt_addr = virt;
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -366,11 +366,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_h
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+		       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
 	page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
 	p_ramrod->event_ring_num_pages = page_cnt;
 	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+		       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
 
 	qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -127,8 +127,9 @@ struct qed_chain {
 
 	/* Base address of a pre-allocated buffer for pbl */
 	struct {
-		dma_addr_t				p_phys_table;
-		void					*p_virt_table;
+		__le64					*table_virt;
+		dma_addr_t				table_phys;
+		size_t					table_size;
 	}						pbl_sp;
 
 	/* Address of first page of the chain - the address is required
@@ -146,7 +147,6 @@ struct qed_chain {
 	bool						b_external_pbl;
 };
 
-#define QED_CHAIN_PBL_ENTRY_SIZE			8
 #define QED_CHAIN_PAGE_SIZE				0x1000
 
 #define ELEMS_PER_PAGE(elem_size)					     \
@@ -236,7 +236,7 @@ static inline u32 qed_chain_get_page_cnt
 
 static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
 {
-	return p_chain->pbl_sp.p_phys_table;
+	return p_chain->pbl_sp.table_phys;
 }
 
 /**
@@ -527,8 +527,8 @@ static inline void qed_chain_init_params
 	p_chain->capacity = p_chain->usable_per_page * page_cnt;
 	p_chain->size = p_chain->elem_per_page * page_cnt;
 
-	p_chain->pbl_sp.p_phys_table = 0;
-	p_chain->pbl_sp.p_virt_table = NULL;
+	p_chain->pbl_sp.table_phys = 0;
+	p_chain->pbl_sp.table_virt = NULL;
 	p_chain->pbl.pp_addr_tbl = NULL;
 }
 
@@ -569,8 +569,8 @@ static inline void qed_chain_init_pbl_me
 					  dma_addr_t p_phys_pbl,
 					  struct addr_tbl_entry *pp_addr_tbl)
 {
-	p_chain->pbl_sp.p_phys_table = p_phys_pbl;
-	p_chain->pbl_sp.p_virt_table = p_virt_pbl;
+	p_chain->pbl_sp.table_phys = p_phys_pbl;
+	p_chain->pbl_sp.table_virt = p_virt_pbl;
 	p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
 }