From: Lucas Stach <l.stach@pengutronix.de>
Date: Tue, 17 Apr 2018 12:00:46 +0200
Subject: drm/etnaviv: switch MMU page tables to writecombine memory
Git-commit: 1af998b27c6c63d43c491783144ad0310d13a747
Patch-mainline: v4.18-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

We are likely to write multiple page entries at once and already ensure
proper write buffer flushing before GPU submit, so this improves CPU
time usage in the submit path without any downsides.
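
For reference, a minimal sketch of the resulting allocation pattern (illustrative
only, not part of the diff below): page table memory now comes from
dma_alloc_wc()/dma_free_wc(), and the CPU's write buffers are flushed before the
GPU walks the tables. The helper names and the explicit wmb() placement are
assumptions for this sketch; the driver performs the equivalent flush in its
submit path. PT_SIZE is the driver's page table size constant.

	#include <linux/dma-mapping.h>

	/* Illustrative helpers only; the function names are made up for this sketch. */
	static int pgtable_alloc_wc(struct device *dev, u32 **cpu, dma_addr_t *dma)
	{
		unsigned int i;

		/* Writecombine memory lets the CPU batch the entry writes. */
		*cpu = dma_alloc_wc(dev, PT_SIZE, dma, GFP_KERNEL);
		if (!*cpu)
			return -ENOMEM;

		/* Many entries are filled back to back, as in the init path. */
		for (i = 0; i < PT_SIZE / sizeof(u32); i++)
			(*cpu)[i] = 0xdead55aa;

		/*
		 * Write buffers must be flushed before the GPU reads the
		 * tables; the driver already guarantees this before submit.
		 */
		wmb();

		return 0;
	}

	static void pgtable_free_wc(struct device *dev, u32 *cpu, dma_addr_t dma)
	{
		dma_free_wc(dev, PT_SIZE, cpu, dma);
	}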

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c    |   36 ++++++-------
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c |   78 +++++++++++++----------------
 2 files changed, 52 insertions(+), 62 deletions(-)

--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -47,11 +47,10 @@ static int __etnaviv_iommu_init(struct e
 	u32 *p;
 	int i;
 
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu)
 		return -ENOMEM;
 
@@ -59,14 +58,14 @@ static int __etnaviv_iommu_init(struct e
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pgtable_cpu =
-			dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
-					   &etnaviv_domain->pgtable_dma,
-					   GFP_KERNEL);
+	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						   PT_SIZE,
+						   &etnaviv_domain->pgtable_dma,
+						   GFP_KERNEL);
 	if (!etnaviv_domain->pgtable_cpu) {
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 		return -ENOMEM;
 	}
 
@@ -81,13 +80,12 @@ static void etnaviv_iommuv1_domain_free(
 	struct etnaviv_iommuv1_domain *etnaviv_domain =
 			to_etnaviv_domain(domain);
 
-	dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
-			  etnaviv_domain->pgtable_cpu,
-			  etnaviv_domain->pgtable_dma);
-
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
+	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
+		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
+
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
 
 	kfree(etnaviv_domain);
 }
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -104,11 +104,10 @@ static int etnaviv_iommuv2_init(struct e
 	int ret, i, j;
 
 	/* allocate scratch page */
-	etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
-						etnaviv_domain->base.dev,
-						SZ_4K,
-						&etnaviv_domain->base.bad_page_dma,
-						GFP_KERNEL);
+	etnaviv_domain->base.bad_page_cpu =
+			dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+				     &etnaviv_domain->base.bad_page_dma,
+				     GFP_KERNEL);
 	if (!etnaviv_domain->base.bad_page_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -117,19 +116,17 @@ static int etnaviv_iommuv2_init(struct e
 	for (i = 0; i < SZ_4K / 4; i++)
 		*p++ = 0xdead55aa;
 
-	etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						     SZ_4K,
-						     &etnaviv_domain->pta_dma,
-						     GFP_KERNEL);
+	etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+					       SZ_4K, &etnaviv_domain->pta_dma,
+					       GFP_KERNEL);
 	if (!etnaviv_domain->pta_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
 	}
 
-	etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
-						  SZ_4K,
-						  &etnaviv_domain->mtlb_dma,
-						  GFP_KERNEL);
+	etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
+						SZ_4K, &etnaviv_domain->mtlb_dma,
+						GFP_KERNEL);
 	if (!etnaviv_domain->mtlb_cpu) {
 		ret = -ENOMEM;
 		goto fail_mem;
@@ -138,10 +135,9 @@ static int etnaviv_iommuv2_init(struct e
 	/* pre-populate STLB pages (may want to switch to on-demand later) */
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		etnaviv_domain->stlb_cpu[i] =
-				dma_alloc_coherent(etnaviv_domain->base.dev,
-						   SZ_4K,
-						   &etnaviv_domain->stlb_dma[i],
-						   GFP_KERNEL);
+				dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
+					     &etnaviv_domain->stlb_dma[i],
+					     GFP_KERNEL);
 		if (!etnaviv_domain->stlb_cpu[i]) {
 			ret = -ENOMEM;
 			goto fail_mem;
@@ -158,25 +154,23 @@ static int etnaviv_iommuv2_init(struct e
 
 fail_mem:
 	if (etnaviv_domain->base.bad_page_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->base.bad_page_cpu,
-				  etnaviv_domain->base.bad_page_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->base.bad_page_cpu,
+			    etnaviv_domain->base.bad_page_dma);
 
 	if (etnaviv_domain->pta_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->pta_cpu,
-				  etnaviv_domain->pta_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
 
 	if (etnaviv_domain->mtlb_cpu)
-		dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-				  etnaviv_domain->mtlb_cpu,
-				  etnaviv_domain->mtlb_dma);
+		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+			    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	return ret;
@@ -188,23 +182,21 @@ static void etnaviv_iommuv2_domain_free(
 			to_etnaviv_domain(domain);
 	int i;
 
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->base.bad_page_cpu,
-			  etnaviv_domain->base.bad_page_dma);
-
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->pta_cpu,
-			  etnaviv_domain->pta_dma);
-
-	dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-			  etnaviv_domain->mtlb_cpu,
-			  etnaviv_domain->mtlb_dma);
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->base.bad_page_cpu,
+		    etnaviv_domain->base.bad_page_dma);
+
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma);
+
+	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+		    etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma);
 
 	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
 		if (etnaviv_domain->stlb_cpu[i])
-			dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
-					  etnaviv_domain->stlb_cpu[i],
-					  etnaviv_domain->stlb_dma[i]);
+			dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
+				    etnaviv_domain->stlb_cpu[i],
+				    etnaviv_domain->stlb_dma[i]);
 	}
 
 	vfree(etnaviv_domain);