From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: drm/nouveau: consolidate handling of dma mask
Git-commit: 325a72827c2c000f6f3810a3e680fa1101d94456
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166
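
Instead of the TTM layer refining a preliminary mask set by the device
constructors, configure the DMA mask once when the nvkm device is created.
nvkm_device_pci_new() now applies the MMU subdevice's dma_bits (dropping to
a 32-bit mask when an AGP bridge is present or when the wider mask is
rejected), the Tegra constructor already derives its mask from iommu_bit,
and nouveau_ttm_init() simply reads the result back through
drm->client.mmu.dmabits.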

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/nouveau_ttm.c              |   31 ---------------------
 drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c   |   24 ++++++++--------
 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c |    2 -
 3 files changed, 13 insertions(+), 44 deletions(-)
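
The retry logic added to nvkm_device_pci_new() below is the usual "try the
widest mask the device supports, fall back to 32 bits" negotiation. A
minimal standalone sketch of that pattern (the helper name foo_set_dma_mask
is illustrative, not part of this patch):

    /*
     * Illustrative helper only -- not part of this patch.  It mirrors the
     * retry logic added to nvkm_device_pci_new() below, but propagates the
     * result of the 32-bit fallback instead of ignoring it.
     */
    #include <linux/dma-mapping.h>

    static int foo_set_dma_mask(struct device *dev, u8 *dma_bits)
    {
            int ret;

            /* First try the widest mask the MMU subdevice claims to support. */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(*dma_bits));
            if (ret && *dma_bits != 32) {
                    /* Wide mask rejected; retry with the 32-bit default. */
                    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                    if (ret == 0)
                            *dma_bits = 32; /* Record the narrower limit. */
            }
            return ret;
    }

Note that the hunk itself deliberately ignores the return value of the
32-bit retry and returns 0 either way, presumably on the assumption that a
32-bit mask cannot fail on the supported platforms; the sketch above
propagates the error instead.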

--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -253,7 +253,6 @@ nouveau_ttm_init(struct nouveau_drm *drm
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nvkm_pci *pci = device->pci;
 	struct drm_device *dev = drm->dev;
-	u8 bits;
 	int ret;
 
 	if (pci && pci->agp.bridge) {
@@ -263,34 +262,6 @@ nouveau_ttm_init(struct nouveau_drm *drm
 		drm->agp.cma = pci->agp.cma;
 	}
 
-	bits = nvxx_mmu(&drm->client.device)->dma_bits;
-	if (nvxx_device(&drm->client.device)->func->pci) {
-		if (drm->agp.bridge)
-			bits = 32;
-	} else if (device->func->tegra) {
-		struct nvkm_device_tegra *tegra = device->func->tegra(device);
-
-		/*
-		 * If the platform can use a IOMMU, then the addressable DMA
-		 * space is constrained by the IOMMU bit
-		 */
-		if (tegra->func->iommu_bit)
-			bits = min(bits, tegra->func->iommu_bit);
-
-	}
-
-	ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret && bits != 32) {
-		bits = 32;
-		ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
-	}
-	if (ret)
-		return ret;
-
-	ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
-	if (ret)
-		dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
-
 	ret = nouveau_ttm_global_init(drm);
 	if (ret)
 		return ret;
@@ -300,7 +271,7 @@ nouveau_ttm_init(struct nouveau_drm *drm
 				  &nouveau_bo_driver,
 				  dev->anon_inode->i_mapping,
 				  DRM_FILE_PAGE_OFFSET,
-				  bits <= 32 ? true : false);
+				  drm->client.mmu.dmabits <= 32 ? true : false);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_
 	const struct nvkm_device_pci_vendor *pciv;
 	const char *name = NULL;
 	struct nvkm_device_pci *pdev;
-	int ret;
+	int ret, bits;
 
 	ret = pci_enable_device(pci_dev);
 	if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_
 	if (ret)
 		return ret;
 
-	/*
-	 * Set a preliminary DMA mask based on the .dma_bits member of the
-	 * MMU subdevice. This allows other subdevices to create DMA mappings
-	 * in their init() or oneinit() methods, which may be called before the
-	 * TTM layer sets the DMA mask definitively.
-	 * This is necessary for platforms where the default DMA mask of 32
-	 * does not cover any system memory, i.e., when all RAM is > 4 GB.
-	 */
-	if (pdev->device.mmu)
-		dma_set_mask_and_coherent(&pci_dev->dev,
-				DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+	/* Set DMA mask based on capabilities reported by the MMU subdev. */
+	if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+		bits = pdev->device.mmu->dma_bits;
+	else
+		bits = 32;
+
+	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+	if (ret && bits != 32) {
+		dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+		pdev->device.mmu->dma_bits = 32;
+	}
 
 	return 0;
 }
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_
 
 	/**
 	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
-	 * This will be refined in nouveau_ttm_init but we need to do it early
-	 * for instmem to behave properly
 	 */
 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
 	if (ret)