From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 1 Nov 2017 03:56:19 +1000
Subject: drm/nouveau: split various bo flags out into their own members
Git-commit: 7760a2e38a8324688e83b91f91ff7be710e70db1
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

The tiling parameters are far more convenient to deal with as individual
nouveau_bo members (contig, page, kind, comp, zeta, mode) than packed into
the old tile_mode/tile_flags words and the page_shift field.
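
As a rough illustration (not part of this patch), the decode of the old
tile_flags word that nouveau_bo_new() now performs inline can be sketched
as a standalone helper; the helper name and flattened signature below are
invented for illustration, and the snippet assumes the usual nouveau
driver headers:

	/* Sketch only: decode_tile_flags() does not exist in the driver;
	 * the real decode sits inline in nouveau_bo_new().
	 */
	static void
	decode_tile_flags(struct nouveau_bo *nvbo, int family, u32 tile_flags)
	{
		if (family >= NV_DEVICE_INFO_V0_FERMI) {
			/* Fermi+: full 8-bit kind, compression implied by the map. */
			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
			nvbo->comp = gf100_pte_storage_type_map[nvbo->kind] != nvbo->kind;
		} else if (family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Tesla: 7-bit kind plus explicit compression bits. */
			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
			nvbo->comp = (tile_flags & 0x00030000) >> 16;
		} else {
			/* Pre-Tesla: only the zeta (depth buffer) bits matter. */
			nvbo->zeta = (tile_flags & 0x00000007);
		}
		nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
	}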

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c   |   71 ++++++++++++++++++---------------
 drivers/gpu/drm/nouveau/nouveau_bo.h   |   10 +++-
 drivers/gpu/drm/nouveau/nouveau_gem.c  |   11 ++++-
 drivers/gpu/drm/nouveau/nouveau_gem.h  |    3 -
 drivers/gpu/drm/nouveau/nouveau_ttm.c  |   14 ++----
 drivers/gpu/drm/nouveau/nv50_display.c |    8 +--
 6 files changed, 66 insertions(+), 51 deletions(-)

--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -97,7 +97,7 @@ nv10_bo_put_tile_region(struct drm_devic
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +120,7 @@ nv10_bo_set_tiling(struct drm_device *de
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					    pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -155,27 +154,27 @@ nouveau_bo_fixup_align(struct nouveau_bo
 	struct nvif_device *device = &drm->client.device;
 
 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
-		if (nvbo->tile_mode) {
+		if (nvbo->mode) {
 			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup_64(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
 			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup_64(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup_64(*size, (1 << nvbo->page_shift));
-		*align = max((1 <<  nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 <<  nvbo->page), *align);
 	}
 
 	*size = roundup_64(*size, PAGE_SIZE);
@@ -207,18 +206,34 @@ nouveau_bo_new(struct nouveau_cli *cli,
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 	nvbo->cli = cli;
 
 	if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
-	nvbo->page_shift = 12;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		nvbo->comp = gf100_pte_storage_type_map[nvbo->kind] != nvbo->kind;
+	} else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+	nvbo->page = 12;
 	if (drm->client.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+			nvbo->page = drm->client.vm->mmu->lpg_shift;
+		else {
+			if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				nvbo->kind = gf100_pte_storage_type_map[nvbo->kind];
+			nvbo->comp = 0;
+		}
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -262,7 +277,7 @@ set_placement_range(struct nouveau_bo *n
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
@@ -270,7 +285,7 @@ set_placement_range(struct nouveau_bo *n
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+		if (nvbo->zeta) {
 			fpfn = vram_pages / 2;
 			lpfn = ~0;
 		} else {
@@ -321,14 +336,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo,
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    memtype == TTM_PL_FLAG_VRAM && contig) {
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-			if (bo->mem.mem_type == TTM_PL_VRAM) {
-				struct nvkm_mem *mem = bo->mem.mm_node;
-				if (!nvkm_mm_contiguous(mem->mem))
-					evict = true;
-			}
-			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+		if (!nvbo->contig) {
+			nvbo->contig = true;
 			force = true;
+			evict = true;
 		}
 	}
 
@@ -376,7 +387,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo,
 
 out:
 	if (force && ret)
-		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+		nvbo->contig = false;
 	ttm_bo_unreserve(bo);
 	return ret;
 }
@@ -1210,7 +1221,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_o
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
 		if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
 			      (new_reg->mem_type == TTM_PL_VRAM ||
-			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
+			       nvbo->page != vma->vm->mmu->lpg_shift)) {
 			nvkm_vm_map(vma, new_reg->mm_node);
 		} else {
 			WARN_ON(ttm_bo_wait(bo, false, false));
@@ -1234,8 +1245,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_obj
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
-						nvbo->tile_mode,
-						nvbo->tile_flags);
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1408,7 +1418,7 @@ nouveau_ttm_fault_reserve_notify(struct
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
-		    !nouveau_bo_tile_layout(nvbo))
+		    !nvbo->kind)
 			return 0;
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1596,14 +1606,13 @@ nouveau_bo_vma_add(struct nouveau_bo *nv
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
-			     NV_MEM_ACCESS_RW, vma);
+	ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
 	if (ret)
 		return ret;
 
 	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
 	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
-	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
+	     nvbo->page != vma->vm->mmu->lpg_shift))
 		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -24,12 +24,16 @@ struct nouveau_bo {
 	bool validate_mapped;
 
 	struct list_head vma_list;
-	unsigned page_shift;
 
 	struct nouveau_cli *cli;
 
-	u32 tile_mode;
-	u32 tile_flags;
+	unsigned contig:1;
+	unsigned page:5;
+	unsigned kind:8;
+	unsigned comp:3;
+	unsigned zeta:3;
+	unsigned mode;
+
 	struct nouveau_drm_tile *tile;
 
 	/* Only valid if allocated via nouveau_gem_new() and iff you hold a
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -246,8 +246,15 @@ nouveau_gem_info(struct drm_file *file_p
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
-	rep->tile_mode = nvbo->tile_mode;
-	rep->tile_flags = nvbo->tile_flags;
+	rep->tile_mode = nvbo->mode;
+	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+		rep->tile_flags |= nvbo->kind << 8;
+	else
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+	else
+		rep->tile_flags |= nvbo->zeta;
 	return 0;
 }
 
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -6,9 +6,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_bo.h"
 
-#define nouveau_bo_tile_layout(nvbo)				\
-	((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -88,18 +88,18 @@ nouveau_vram_manager_new(struct ttm_mem_
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->page_shift;
+	if (!nvbo->contig)
+		size_nc = 1 << nvbo->page;
 
 	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
 			     reg->page_alignment << PAGE_SHIFT, size_nc,
-			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
+			     nvbo->comp << 8 | nvbo->kind, &node);
 	if (ret) {
 		reg->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
 	}
 
-	node->page_shift = nvbo->page_shift;
+	node->page_shift = nvbo->page;
 
 	reg->mm_node = node;
 	reg->start   = node->offset >> PAGE_SHIFT;
@@ -158,14 +158,12 @@ nouveau_gart_manager_new(struct ttm_mem_
 	case NV_DEVICE_INFO_V0_CURIE:
 		break;
 	case NV_DEVICE_INFO_V0_TESLA:
-		if (drm->client.device.info.chipset != 0x50)
-			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
-		break;
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
 	case NV_DEVICE_INFO_V0_PASCAL:
-		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
+		if (drm->client.device.info.chipset != 0x50)
+			node->memtype = nvbo->kind;
 		break;
 	default:
 		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *d
 {
 	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
 	struct nv50_dmac_ctxdma *ctxdma;
-	const u8    kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	const u8    kind = fb->nvbo->kind;
 	const u32 handle = 0xfb000000 | kind;
 	struct {
 		struct nv_dma_v0 base;
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv
 
 	asyw->image.w = fb->base.width;
 	asyw->image.h = fb->base.height;
-	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+	asyw->image.kind = fb->nvbo->kind;
 
 	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
 		asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
 		if (drm->client.device.info.chipset >= 0xc0)
-			asyw->image.block = fb->nvbo->tile_mode >> 4;
+			asyw->image.block = fb->nvbo->mode >> 4;
 		else
-			asyw->image.block = fb->nvbo->tile_mode;
+			asyw->image.block = fb->nvbo->mode;
 		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
 	} else {
 		asyw->image.layout = 1;