From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: drm/nouveau/mmu/gm200,gm20b: implement new vmm backend
Git-commit: e12cf6ad43888c152c3b2edb59525587cd98a227
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Adds support for:
- 64KiB big page size.
- System-memory PTs.
- LPTE "invalid" state.
- (Tegra) Use of video memory aperture.
- Sparse PDEs/PTEs (sketched below).
- Additional blocklinear kinds.
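
For reference only (not part of the change itself): the sparse PDE/PTE
handling added in vmmgm200.c amounts to filling a run of 64-bit entries
with VALID_FALSE plus a volatile bit -- VOL (bit 32) for PTEs, VOL_BIG
(bit 35) for PDEs -- which tells the MMU to treat the range as sparse.
A minimal userspace sketch of that fill follows; the names here are
illustrative and are not nvkm API:

  /* Illustrative only: mirrors what the new ->sparse backends do via
   * VMM_FO064() -- fill page-table entries with VALID_FALSE plus the
   * volatile bit so the MMU treats the range as sparse.
   */
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  #define SPARSE_PTE (UINT64_C(1) << 32)  /* VOL */
  #define SPARSE_PDE (UINT64_C(1) << 35)  /* VOL_BIG */

  static void fill_sparse(uint64_t *table, unsigned int first,
                          unsigned int count, uint64_t value)
  {
          unsigned int i;

          for (i = 0; i < count; i++)
                  table[first + i] = value;
  }

  int main(void)
  {
          uint64_t pt[8] = { 0 };
          unsigned int i;

          /* Mark entries 2..5 sparse, leave the rest unmapped (0). */
          fill_sparse(pt, 2, 4, SPARSE_PTE);
          for (i = 0; i < 8; i++)
                  printf("pte[%u] = 0x%016" PRIx64 "\n", i, pt[i]);
          return 0;
  }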

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/include/nvif/ifb00d.h      |   13 +++++
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c    |   54 +++++++++++++++++----
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c    |   12 ----
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h     |    2 
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c |   34 +++++++++++++
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c |    6 ++
 6 files changed, 101 insertions(+), 20 deletions(-)

--- a/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
@@ -11,4 +11,17 @@ struct gm200_vmm_v0 {
 	__u8  version;
 	__u8  bigpage;
 };
+
+struct gm200_vmm_map_vn {
+	/* nvif_vmm_map_vX ... */
+};
+
+struct gm200_vmm_map_v0 {
+	/* nvif_vmm_map_vX ... */
+	__u8  version;
+	__u8  vol;
+	__u8  ro;
+	__u8  priv;
+	__u8  kind;
+};
 #endif
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -25,6 +25,48 @@
 
 #include <nvif/class.h>
 
+const u8 *
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+{
+	static const u8
+	kind[256] = {
+		0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+		0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+		0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+		0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+		0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+		0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+		0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+		0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+		0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+		0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+		0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+		0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+		0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+		0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+		0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+		0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+		0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+	};
+	*count = ARRAY_SIZE(kind);
+	return kind;
+}
+
 static const struct nvkm_mmu_func
 gm200_mmu = {
 	.limit = (1ULL << 40),
@@ -32,12 +74,8 @@ gm200_mmu = {
 	.pgt_bits  = 27 - 12,
 	.spg_shift = 12,
 	.lpg_shift = 17,
-	.map_pgt = gf100_vm_map_pgt,
-	.map = gf100_vm_map,
-	.map_sg = gf100_vm_map_sg,
-	.unmap = gf100_vm_unmap,
-	.flush = gf100_vm_flush,
 	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
+	.kind = gm200_mmu_kind,
 };
 
 static const struct nvkm_mmu_func
@@ -47,12 +85,8 @@ gm200_mmu_fixed = {
 	.pgt_bits  = 27 - 12,
 	.spg_shift = 12,
 	.lpg_shift = 17,
-	.map_pgt = gf100_vm_map_pgt,
-	.map = gf100_vm_map,
-	.map_sg = gf100_vm_map_sg,
-	.unmap = gf100_vm_unmap,
-	.flush = gf100_vm_flush,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
+	.kind = gm200_mmu_kind,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -32,12 +32,8 @@ gm20b_mmu = {
 	.pgt_bits  = 27 - 12,
 	.spg_shift = 12,
 	.lpg_shift = 17,
-	.map_pgt = gf100_vm_map_pgt,
-	.map = gf100_vm_map,
-	.map_sg = gf100_vm_map_sg,
-	.unmap = gf100_vm_unmap,
-	.flush = gf100_vm_flush,
 	.vmm = {{ -1,  0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
+	.kind = gm200_mmu_kind,
 };
 
 static const struct nvkm_mmu_func
@@ -47,12 +43,8 @@ gm20b_mmu_fixed = {
 	.pgt_bits  = 27 - 12,
 	.spg_shift = 12,
 	.lpg_shift = 17,
-	.map_pgt = gf100_vm_map_pgt,
-	.map = gf100_vm_map,
-	.map_sg = gf100_vm_map_sg,
-	.unmap = gf100_vm_unmap,
-	.flush = gf100_vm_flush,
 	.vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
+	.kind = gm200_mmu_kind,
 };
 
 int
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -54,6 +54,8 @@ void gf100_vm_unmap(struct nvkm_vma *, s
 void gf100_vm_flush(struct nvkm_vm *);
 const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
 
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+
 struct nvkm_mmu_pt {
 	union {
 		struct nvkm_mmu_ptc *ptc;
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -24,16 +24,44 @@
 #include <nvif/ifb00d.h>
 #include <nvif/unpack.h>
 
+static void
+gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	/* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+	VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
+}
+
 static const struct nvkm_vmm_desc_func
 gm200_vmm_spt = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgt_sparse,
+	.mem = gf100_vmm_pgt_mem,
+	.dma = gf100_vmm_pgt_dma,
+	.sgl = gf100_vmm_pgt_sgl,
 };
 
 static const struct nvkm_vmm_desc_func
 gm200_vmm_lpt = {
+	.invalid = gk104_vmm_lpt_invalid,
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgt_sparse,
+	.mem = gf100_vmm_pgt_mem,
 };
 
+static void
+gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
+		     struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+	/* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+	VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
+}
+
 static const struct nvkm_vmm_desc_func
 gm200_vmm_pgd = {
+	.unmap = gf100_vmm_pgt_unmap,
+	.sparse = gm200_vmm_pgd_sparse,
+	.pde = gf100_vmm_pgd_pde,
 };
 
 const struct nvkm_vmm_desc
@@ -82,6 +110,9 @@ static const struct nvkm_vmm_func
 gm200_vmm_17 = {
 	.join = gm200_vmm_join,
 	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
 	.page = {
 		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
 		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
@@ -94,6 +125,9 @@ static const struct nvkm_vmm_func
 gm200_vmm_16 = {
 	.join = gm200_vmm_join,
 	.part = gf100_vmm_part,
+	.aper = gf100_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
 	.page = {
 		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
 		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -25,6 +25,9 @@ static const struct nvkm_vmm_func
 gm20b_vmm_17 = {
 	.join = gm200_vmm_join,
 	.part = gf100_vmm_part,
+	.aper = gk20a_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
 	.page = {
 		{ 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
 		{ 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
@@ -37,6 +40,9 @@ static const struct nvkm_vmm_func
 gm20b_vmm_16 = {
 	.join = gm200_vmm_join,
 	.part = gf100_vmm_part,
+	.aper = gk20a_vmm_aper,
+	.valid = gf100_vmm_valid,
+	.flush = gf100_vmm_flush,
 	.page = {
 		{ 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
 		{ 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },