From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Tue, 1 Aug 2017 10:51:43 +0200
Subject: drm/amdgpu: separate bo_va structure
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: ec681545afe5a448b43a2fe5c206ee48e19dabb3
Patch-mainline: v4.14-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Split the bo_va structure into a common vm_bo_base part and the
bo_va specific parts to allow other uses as well.
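
For illustration, a minimal sketch of the embedding pattern (not part
of this patch; the helper name is hypothetical): code that only holds
a pointer to the embedded base can recover the containing bo_va with
container_of():

	static inline struct amdgpu_bo_va *
	base_to_bo_va(struct amdgpu_vm_bo_base *base)
	{
		return container_of(base, struct amdgpu_bo_va, base);
	}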

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c     |    4 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    |    4 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |   14 +----
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  |    4 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   |    2 
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |   79 +++++++++++++++--------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |   12 ++++
 7 files changed, 65 insertions(+), 54 deletions(-)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1487,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_
 			    addr > mapping->last)
 				continue;
 
-			*bo = lobj->bo_va->bo;
+			*bo = lobj->bo_va->base.bo;
 			return mapping;
 		}
 
@@ -1496,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_
 			    addr > mapping->last)
 				continue;
 
-			*bo = lobj->bo_va->bo;
+			*bo = lobj->bo_va->base.bo;
 			return mapping;
 		}
 	}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -621,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_devic
 
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
 					args->map_size);
 		if (r)
 			goto error_backoff;
@@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_devic
 						args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
 					args->map_size);
 		if (r)
 			goto error_backoff;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,7 @@
 
 #define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
 
+/* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
 	struct list_head		list;
 	struct rb_node			rb;
@@ -43,26 +44,19 @@ struct amdgpu_bo_va_mapping {
 	uint64_t			flags;
 };
 
-/* bo virtual addresses in a specific vm */
+/* User space allocated BO in a VM */
 struct amdgpu_bo_va {
+	struct amdgpu_vm_bo_base	base;
+
 	/* protected by bo being reserved */
-	struct list_head		bo_list;
 	struct dma_fence	        *last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex and spinlock */
-	struct list_head		vm_status;
-
 	/* mappings for this bo_va */
 	struct list_head		invalids;
 	struct list_head		valids;
-
-	/* constant after initialization */
-	struct amdgpu_vm		*vm;
-	struct amdgpu_bo		*bo;
 };
 
-
 struct amdgpu_bo {
 	/* Protected by tbo.reserved */
 	u32				preferred_domains;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -284,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va ? bo_va->bo : NULL;
+			   __entry->bo = bo_va ? bo_va->base.bo : NULL;
 			   __entry->start = mapping->start;
 			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
@@ -308,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->bo = bo_va->bo;
+			   __entry->bo = bo_va->base.bo;
 			   __entry->start = mapping->start;
 			   __entry->last = mapping->last;
 			   __entry->offset = mapping->offset;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -76,7 +76,7 @@ int amdgpu_map_static_csa(struct amdgpu_
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(s
 {
 	struct amdgpu_bo_va *bo_va;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		if (bo_va->vm == vm) {
+	list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+		if (bo_va->base.vm == vm) {
 			return bo_va;
 		}
 	}
@@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 			struct amdgpu_bo_va *bo_va,
 			bool clear)
 {
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
 	uint64_t gtt_flags, flags;
@@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 	struct dma_fence *exclusive;
 	int r;
 
-	if (clear || !bo_va->bo) {
+	if (clear || !bo_va->base.bo) {
 		mem = NULL;
 		nodes = NULL;
 		exclusive = NULL;
 	} else {
 		struct ttm_dma_tt *ttm;
 
-		mem = &bo_va->bo->tbo.mem;
+		mem = &bo_va->base.bo->tbo.mem;
 		nodes = mem->mm_node;
 		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo_va->bo->tbo.ttm, struct
-					   ttm_dma_tt, ttm);
+			ttm = container_of(bo_va->base.bo->tbo.ttm,
+					   struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+		exclusive = reservation_object_get_excl(bo->tbo.resv);
 	}
 
-	if (bo_va->bo) {
-		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+	if (bo) {
+		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+		gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+			adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
 			flags : 0;
 	} else {
 		flags = 0x0;
@@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 	}
 
 	spin_lock(&vm->status_lock);
-	if (!list_empty(&bo_va->vm_status))
+	if (!list_empty(&bo_va->base.vm_status))
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
 	spin_unlock(&vm->status_lock);
 
@@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
-	list_del_init(&bo_va->vm_status);
+	list_del_init(&bo_va->base.vm_status);
 	if (clear)
-		list_add(&bo_va->vm_status, &vm->cleared);
+		list_add(&bo_va->base.vm_status, &vm->cleared);
 	spin_unlock(&vm->status_lock);
 
 	if (vm->use_cpu_for_update) {
@@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdg
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated,
-			struct amdgpu_bo_va, vm_status);
+			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
 
 		r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(st
 	if (bo_va == NULL) {
 		return NULL;
 	}
-	bo_va->vm = vm;
-	bo_va->bo = bo;
+	bo_va->base.vm = vm;
+	bo_va->base.bo = bo;
+	INIT_LIST_HEAD(&bo_va->base.bo_list);
+	INIT_LIST_HEAD(&bo_va->base.vm_status);
+
 	bo_va->ref_count = 1;
-	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
-	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	if (bo)
-		list_add_tail(&bo_va->bo_list, &bo->va);
+		list_add_tail(&bo_va->base.bo_list, &bo->va);
 
 	return bo_va;
 }
@@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_devic
 		     uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 
 	/* validate the parameters */
@@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_devic
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_devic
 	if (tmp) {
 		/* bo and tmp overlap, invalid addr */
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
 			tmp->start, tmp->last + 1);
 		return -EINVAL;
 	}
@@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdg
 			     uint64_t size, uint64_t flags)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_bo *bo = bo_va->base.bo;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	uint64_t eaddr;
 	int r;
 
@@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdg
 	/* make sure object fit at this offset */
 	eaddr = saddr + size - 1;
 	if (saddr >= eaddr ||
-	    (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+	    (bo && offset + size > amdgpu_bo_size(bo)))
 		return -EINVAL;
 
 	/* Allocate all the needed memory */
@@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdg
 	if (!mapping)
 		return -ENOMEM;
 
-	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
 	if (r) {
 		kfree(mapping);
 		return r;
@@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_dev
 		       uint64_t saddr)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 	bool valid = true;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_devi
 		      struct amdgpu_bo_va *bo_va)
 {
 	struct amdgpu_bo_va_mapping *mapping, *next;
-	struct amdgpu_vm *vm = bo_va->vm;
+	struct amdgpu_vm *vm = bo_va->base.vm;
 
-	list_del(&bo_va->bo_list);
+	list_del(&bo_va->base.bo_list);
 
 	spin_lock(&vm->status_lock);
-	list_del(&bo_va->vm_status);
+	list_del(&bo_va->base.vm_status);
 	spin_unlock(&vm->status_lock);
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_devi
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo)
 {
-	struct amdgpu_bo_va *bo_va;
+	struct amdgpu_vm_bo_base *bo_base;
 
-	list_for_each_entry(bo_va, &bo->va, bo_list) {
-		spin_lock(&bo_va->vm->status_lock);
-		if (list_empty(&bo_va->vm_status))
-			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-		spin_unlock(&bo_va->vm->status_lock);
+	list_for_each_entry(bo_base, &bo->va, bo_list) {
+		spin_lock(&bo_base->vm->status_lock);
+		if (list_empty(&bo_base->vm_status))
+			list_add(&bo_base->vm_status,
+				 &bo_base->vm->invalidated);
+		spin_unlock(&bo_base->vm->status_lock);
 	}
 }
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,6 +99,18 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
 
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+	/* constant after initialization */
+	struct amdgpu_vm		*vm;
+	struct amdgpu_bo		*bo;
+
+	/* protected by bo being reserved */
+	struct list_head		bo_list;
+
+	/* protected by spinlock */
+	struct list_head		vm_status;
+};
 
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
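
The new amdgpu_vm_bo_base above is what enables the "other uses"
mentioned in the commit message: amdgpu_vm_bo_invalidate() now walks
bo->va as a list of bases, so any structure that embeds the base gets
the same invalidation tracking. A hedged sketch of such a user
(hypothetical type and helper, assuming the bo is reserved while
linking, mirroring what amdgpu_vm_bo_add() does for bo_va):

	struct example_vm_user {			/* hypothetical */
		struct amdgpu_vm_bo_base base;		/* embedded base */
		/* user-specific state would go here */
	};

	static void example_vm_user_link(struct example_vm_user *user,
					 struct amdgpu_vm *vm,
					 struct amdgpu_bo *bo)
	{
		user->base.vm = vm;
		user->base.bo = bo;
		INIT_LIST_HEAD(&user->base.vm_status);
		/* bo must be reserved here, as for amdgpu_vm_bo_add() */
		list_add_tail(&user->base.bo_list, &bo->va);
	}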