From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: drm/nouveau/mmu: build up information on available memory types
Git-commit: 51645eb71485dd3d72d9fe2acd0298057afdf437
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h |   20 +++
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c    |  120 ++++++++++++++++++++++
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c     |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c   |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c   |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c   |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c   |    2 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c   |    2 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c   |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c   |    1 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h    |    1 
 11 files changed, 151 insertions(+)

--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -108,6 +108,26 @@ struct nvkm_mmu {
 	u8  dma_bits;
 	u8  lpg_shift;
 
+	int heap_nr;
+	struct {
+#define NVKM_MEM_VRAM                                                      0x01
+#define NVKM_MEM_HOST                                                      0x02
+#define NVKM_MEM_COMP                                                      0x04
+#define NVKM_MEM_DISP                                                      0x08
+		u8  type;
+		u64 size;
+	} heap[4];
+
+	int type_nr;
+	struct {
+#define NVKM_MEM_KIND                                                      0x10
+#define NVKM_MEM_MAPPABLE                                                  0x20
+#define NVKM_MEM_COHERENT                                                  0x40
+#define NVKM_MEM_UNCACHED                                                  0x80
+		u8 type;
+		u8 heap;
+	} type[16];
+
 	struct nvkm_vmm *vmm;
 
 	struct {
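
The type[] table composes flags: each entry is the owning heap's base
flags (VRAM/HOST/COMP/DISP) ORed with per-type attributes (KIND,
MAPPABLE, COHERENT, UNCACHED), and entries are registered in order of
preference.  A minimal sketch of how a consumer could use this table
(example_type_find() is hypothetical and not part of this patch): the
first entry whose flags cover the requested attributes wins.

static int
example_type_find(struct nvkm_mmu *mmu, u8 required)
{
	int i;
	/* Types are registered most-preferred first (see nvkm_mmu_vram()
	 * and nvkm_mmu_host() in base.c below), so a linear scan returns
	 * the best match.
	 */
	for (i = 0; i < mmu->type_nr; i++) {
		if ((mmu->type[i].type & required) == required)
			return i;
	}
	return -ENOSYS;
}
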
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -24,6 +24,7 @@
 #include "priv.h"
 #include "vmm.h"
 
+#include <subdev/bar.h>
 #include <subdev/fb.h>
 
 #include <nvif/if500d.h>
@@ -443,11 +444,130 @@ nvkm_vm_ref(struct nvkm_vm *ref, struct
 	return 0;
 }
 
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
+{
+	if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+		mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+		mmu->type[mmu->type_nr].heap = heap;
+		mmu->type_nr++;
+	}
+}
+
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
+{
+	if (size) {
+		if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+			mmu->heap[mmu->heap_nr].type = type;
+			mmu->heap[mmu->heap_nr].size = size;
+			return mmu->heap_nr++;
+		}
+	}
+	return -EINVAL;
+}
+
+static void
+nvkm_mmu_host(struct nvkm_mmu *mmu)
+{
+	struct nvkm_device *device = mmu->subdev.device;
+	u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+	int heap;
+
+	/* Non-mappable system memory. */
+	heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+	nvkm_mmu_type(mmu, heap, type);
+
+	/* Non-coherent, cached, system memory.
+	 *
+	 * Block-linear mappings of system memory must be done through
+	 * BAR1, and cannot be supported on systems where we're unable
+	 * to map BAR1 with write-combining.
+	 */
+	type |= NVKM_MEM_MAPPABLE;
+	if (!device->bar || device->bar->iomap_uncached)
+		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+	else
+		nvkm_mmu_type(mmu, heap, type);
+
+	/* Coherent, cached, system memory.
+	 *
+	 * Unsupported on systems that can't provide snooped mappings,
+	 * and also for block-linear mappings, which must be done
+	 * through BAR1.
+	 */
+	type |= NVKM_MEM_COHERENT;
+	if (device->func->cpu_coherent)
+		nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+	/* Uncached system memory. */
+	nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
+}
+
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
+{
+	struct nvkm_device *device = mmu->subdev.device;
+	struct nvkm_mm *mm = &device->fb->ram->vram;
+	const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+	const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+	const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+	u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+	u8 heap = NVKM_MEM_VRAM;
+	int heapM, heapN, heapU;
+
+	/* Mixed-memory doesn't support compression or display. */
+	heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+	heap |= NVKM_MEM_COMP;
+	heap |= NVKM_MEM_DISP;
+	heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+	heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+	/* Add non-mappable VRAM types first so that they're preferred
+	 * over anything else.  Mixed-memory will be slower than other
+	 * heaps, so it's prioritised last.
+	 */
+	nvkm_mmu_type(mmu, heapU, type);
+	nvkm_mmu_type(mmu, heapN, type);
+	nvkm_mmu_type(mmu, heapM, type);
+
+	/* Add host memory types next, under the assumption that users
+	 * wanting mappable memory want to use them as staging buffers
+	 * or the like.
+	 */
+	nvkm_mmu_host(mmu);
+
+	/* Mappable VRAM types go last, as they're basically the worst
+	 * possible type to ask for unless there's no other choice.
+	 */
+	if (device->bar) {
+		/* Write-combined BAR1 access. */
+		type |= NVKM_MEM_MAPPABLE;
+		if (!device->bar->iomap_uncached) {
+			nvkm_mmu_type(mmu, heapN, type);
+			nvkm_mmu_type(mmu, heapM, type);
+		}
+
+		/* Uncached BAR1 access. */
+		type |= NVKM_MEM_COHERENT;
+		type |= NVKM_MEM_UNCACHED;
+		nvkm_mmu_type(mmu, heapN, type);
+		nvkm_mmu_type(mmu, heapM, type);
+	}
+}
+
 static int
 nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
 
+	/* Determine available memory types. */
+	if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+		nvkm_mmu_vram(mmu);
+	else
+		nvkm_mmu_host(mmu);
+
 	if (mmu->func->vmm.global) {
 		int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
 				       "gart", &mmu->vmm);
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -30,6 +30,7 @@ g84_mmu = {
 	.lpg_shift = 16,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
 	.kind = nv50_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -78,6 +78,7 @@ gf100_mmu = {
 	.lpg_shift = 17,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
 	.kind = gf100_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -30,6 +30,7 @@ gk104_mmu = {
 	.lpg_shift = 17,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
 	.kind = gf100_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -30,6 +30,7 @@ gk20a_mmu = {
 	.lpg_shift = 17,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
 	.kind = gf100_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -74,6 +74,7 @@ gm200_mmu = {
 	.lpg_shift = 17,
 	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 static const struct nvkm_mmu_func
@@ -83,6 +84,7 @@ gm200_mmu_fixed = {
 	.lpg_shift = 17,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -32,6 +32,7 @@ gm20b_mmu = {
 	.lpg_shift = 17,
 	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 static const struct nvkm_mmu_func
@@ -41,6 +42,7 @@ gm20b_mmu_fixed = {
 	.lpg_shift = 17,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -32,6 +32,7 @@ gp100_mmu = {
 	.lpg_shift = 16,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -32,6 +32,7 @@ gp10b_mmu = {
 	.lpg_shift = 16,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
 	.kind = gm200_mmu_kind,
+	.kind_sys = true,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -25,6 +25,7 @@ struct nvkm_mmu_func {
 	} vmm;
 
 	const u8 *(*kind)(struct nvkm_mmu *, int *count);
+	bool kind_sys;
 };
 
 extern const struct nvkm_mmu_func nv04_mmu;
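
The new kind_sys flag records whether the MMU can apply kind (e.g.
block-linear) attributes to system memory; nvkm_mmu_host() only sets
NVKM_MEM_KIND on host types when it is true.  As a hypothetical
illustration (example_mmu is not in the tree), a chipset with a kind
table that can't kind system memory would simply leave it unset:

static const struct nvkm_mmu_func
example_mmu = {
	.dma_bits = 40,
	.lpg_shift = 17,
	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
	.kind = gf100_mmu_kind,
	/* .kind_sys left false: NVKM_MEM_KIND is never set on HOST types. */
};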