From: Lucas Stach <l.stach@pengutronix.de>
Date: Thu, 7 Sep 2017 15:36:57 +0200
Subject: drm/etnaviv: iommuv1: fold pagetable alloc and free into caller
Git-commit: 1a540490e919bd64a9be35fca5147bbb7b2ac166
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Those functions are simple enough to fold them into the calling
function. This also fixes a correctness issue, as the alloc/free
functions didn't specify the device the memory was allocated for.
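
As a quick illustration of the fix (a sketch only, restating the change
with the driver's existing names, not part of the patch itself):

    /* old helper: no device given, so the DMA API cannot apply the
     * GPU's DMA constraints to the page table allocation */
    pgtable->pgtable = dma_alloc_coherent(NULL, PT_SIZE,
                                          &pgtable->paddr, GFP_KERNEL);

    /* now open-coded in __etnaviv_iommu_init(), allocating against
     * the etnaviv device that actually uses the memory */
    etnaviv_domain->pgtable.pgtable =
            dma_alloc_coherent(etnaviv_domain->dev, PT_SIZE,
                               &etnaviv_domain->pgtable.paddr,
                               GFP_KERNEL);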

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-By: Wladimir J. van der Laan <laanwj@gmail.com>

Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c |   31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)

--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -50,22 +50,6 @@ static struct etnaviv_iommu_domain *to_e
 	return container_of(domain, struct etnaviv_iommu_domain, domain);
 }
 
-static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
-			 size_t size)
-{
-	pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
-	if (!pgtable->pgtable)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
-			 size_t size)
-{
-	dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
-}
-
 static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
 			  unsigned long iova, phys_addr_t paddr)
 {
@@ -78,7 +62,7 @@ static void pgtable_write(struct etnaviv
 static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
 {
 	u32 *p;
-	int ret, i;
+	int i;
 
 	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
 						  SZ_4K,
@@ -91,12 +75,15 @@ static int __etnaviv_iommu_init(struct e
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
-	if (ret < 0) {
+	etnaviv_domain->pgtable.pgtable =
+			dma_alloc_coherent(etnaviv_domain->dev, PT_SIZE,
+					   &etnaviv_domain->pgtable.paddr,
+					   GFP_KERNEL);
+	if (!etnaviv_domain->pgtable.pgtable) {
 		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
 				  etnaviv_domain->bad_page_cpu,
 				  etnaviv_domain->bad_page_dma);
-		return ret;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < PT_ENTRIES; i++)
@@ -112,7 +99,9 @@ static void etnaviv_domain_free(struct i
 {
 	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
 
-	pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+	dma_free_coherent(etnaviv_domain->dev, PT_SIZE,
+			  etnaviv_domain->pgtable.pgtable,
+			  etnaviv_domain->pgtable.paddr);
 
 	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
 			  etnaviv_domain->bad_page_cpu,