From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: drm/nouveau/mmu/nv04-nv4x: move global vmm to nvkm_mmu
Git-commit: 0b11b30de9d2960d87373e50223800c8f9f6a89f
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

In a future commit, the global VMM will be constructed by common code.
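
To illustrate the effect (a sketch drawn from the nouveau_ttm.c hunk below, not
additional code in this patch): callers that previously had to include the
nv04-specific header and cast to its container can now reach the global VMM
through the common struct nvkm_mmu directly.

	/* before: requires subdev/mmu/nv04.h and a cast to the
	 * nv04-specific container to reach the VMM pointer */
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);

	/* after: the global VMM pointer lives in struct nvkm_mmu itself */
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(mmu->vmm, &vm, NULL);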

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h  |    2 ++
 drivers/gpu/drm/nouveau/nouveau_ttm.c              |    5 +----
 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c |    3 +--
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c     |   12 ++++++------
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h     |    1 -
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c     |    8 ++++----
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c     |    8 ++++----
 7 files changed, 18 insertions(+), 21 deletions(-)

--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -58,6 +58,8 @@ struct nvkm_mmu {
 	u64 limit;
 	u8  dma_bits;
 	u8  lpg_shift;
+
+	struct nvkm_vmm *vmm;
 };
 
 int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -192,16 +192,13 @@ const struct ttm_mem_type_manager_func n
 	.debug = nouveau_gart_manager_debug
 };
 
-/*XXX*/
-#include <subdev/mmu/nv04.h>
 static int
 nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
 	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
-	struct nv04_mmu *priv = (void *)mmu;
 	struct nvkm_vm *vm = NULL;
-	nvkm_vm_ref(priv->vm, &vm, NULL);
+	nvkm_vm_ref(mmu->vmm, &vm, NULL);
 	man->priv = vm;
 	return 0;
 }
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -49,8 +49,7 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *bas
 	int ret;
 
 	if (dmaobj->clone) {
-		struct nv04_mmu *mmu = nv04_mmu(device->mmu);
-		struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+		struct nvkm_memory *pgt = device->mmu->vmm->pgt[0].mem[0];
 		if (!dmaobj->base.start)
 			return nvkm_gpuobj_wrap(pgt, pgpuobj);
 		nvkm_kmap(pgt);
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -81,15 +81,15 @@ nv04_mmu_oneinit(struct nvkm_mmu *base)
 	int ret;
 
 	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
+			     &mmu->base.vmm);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
 			      16, true, &dma);
-	mmu->vm->pgt[0].mem[0] = dma;
-	mmu->vm->pgt[0].refcount[0] = 1;
+	mmu->base.vmm->pgt[0].mem[0] = dma;
+	mmu->base.vmm->pgt[0].refcount[0] = 1;
 	if (ret)
 		return ret;
 
@@ -105,9 +105,9 @@ nv04_mmu_dtor(struct nvkm_mmu *base)
 {
 	struct nv04_mmu *mmu = nv04_mmu(base);
 	struct nvkm_device *device = mmu->base.subdev.device;
-	if (mmu->vm) {
-		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
-		nvkm_vm_ref(NULL, &mmu->vm, NULL);
+	if (mmu->base.vmm) {
+		nvkm_memory_del(&mmu->base.vmm->pgt[0].mem[0]);
+		nvkm_vm_ref(NULL, &mmu->base.vmm, NULL);
 	}
 	if (mmu->nullp) {
 		dma_free_coherent(device->dev, 16 * 1024,
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
@@ -5,7 +5,6 @@
 
 struct nv04_mmu {
 	struct nvkm_mmu base;
-	struct nvkm_vm *vm;
 	dma_addr_t null;
 	void *nullp;
 };
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -93,14 +93,14 @@ nv41_mmu_oneinit(struct nvkm_mmu *base)
 	int ret;
 
 	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
+			     &mmu->base.vmm);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
-			      &mmu->vm->pgt[0].mem[0]);
-	mmu->vm->pgt[0].refcount[0] = 1;
+			      &mmu->base.vmm->pgt[0].mem[0]);
+	mmu->base.vmm->pgt[0].refcount[0] = 1;
 	return ret;
 }
 
@@ -109,7 +109,7 @@ nv41_mmu_init(struct nvkm_mmu *base)
 {
 	struct nv04_mmu *mmu = nv04_mmu(base);
 	struct nvkm_device *device = mmu->base.subdev.device;
-	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
+	struct nvkm_memory *dma = mmu->base.vmm->pgt[0].mem[0];
 	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
 	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
 	nvkm_wr32(device, 0x100820, 0x00000000);
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -173,15 +173,15 @@ nv44_mmu_oneinit(struct nvkm_mmu *base)
 	}
 
 	ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
-			     &mmu->vm);
+			     &mmu->base.vmm);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 			      (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
 			      512 * 1024, true,
-			      &mmu->vm->pgt[0].mem[0]);
-	mmu->vm->pgt[0].refcount[0] = 1;
+			      &mmu->base.vmm->pgt[0].mem[0]);
+	mmu->base.vmm->pgt[0].refcount[0] = 1;
 	return ret;
 }
 
@@ -190,7 +190,7 @@ nv44_mmu_init(struct nvkm_mmu *base)
 {
 	struct nv04_mmu *mmu = nv04_mmu(base);
 	struct nvkm_device *device = mmu->base.subdev.device;
-	struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
+	struct nvkm_memory *gart = mmu->base.vmm->pgt[0].mem[0];
 	u32 addr;
 
 	/* calculate vram address of this PRAMIN block, object must be