From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Mon, 11 Sep 2017 16:54:59 +0200
Subject: drm/amdgpu: fix amdgpu_vm_handle_moved as well v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 4e55eb3879fea6d8c7d414cebaa5bff1da58b4a1
Patch-mainline: v4.15-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

There is no guarantee that the last BO_VA actually needed an update.

In addition to that, all command submissions must wait for moved BOs to
be cleared, not just the first one.

v2: Don't overwrite any newer fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |    2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c |   24 ++++++++++--------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |    3 +--
 3 files changed, 12 insertions(+), 17 deletions(-)
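
The interesting part of the change is the bookkeeping of the last page-table
update fence. Below is a minimal sketch of that idea in plain user-space C,
using a hypothetical fake_fence type and helpers as stand-ins for struct
dma_fence, dma_fence_get() and dma_fence_put() (they are not the kernel API):
depending on whether the BO shares the VM's reservation object, the caller
picks which slot to track (vm->last_update vs. bo_va->last_pt_update), and the
slot is only replaced when an update was actually submitted, so a newer fence
is never overwritten by stale state.

/*
 * Hedged illustration only: a user-space analogue of the fence bookkeeping
 * this patch moves into amdgpu_vm_bo_update().  fake_fence, fence_get() and
 * fence_put() are hypothetical stand-ins, not the kernel dma_fence API.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_fence {
	unsigned int refcount;
	unsigned long seqno;	/* stands in for the fence's signal order */
};

static struct fake_fence *fence_get(struct fake_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_put(struct fake_fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

/*
 * Mirrors the v2 note "Don't overwrite any newer fence": the update path is
 * handed the destination pointer (either the per-VM vm->last_update or the
 * per-mapping bo_va->last_pt_update), and the slot is only replaced when a
 * real update produced a new fence.
 */
static void update_last_fence(struct fake_fence **last, struct fake_fence *submitted)
{
	if (!submitted)
		return;		/* nothing was submitted, keep the old fence */
	fence_put(*last);
	*last = fence_get(submitted);
}

int main(void)
{
	struct fake_fence *vm_last_update = NULL;
	struct fake_fence *job = calloc(1, sizeof(*job));

	job->refcount = 1;
	job->seqno = 42;

	update_last_fence(&vm_last_update, job);	/* real update: fence is taken */
	update_last_fence(&vm_last_update, NULL);	/* no update: old fence is kept */

	printf("last update seqno: %lu\n", vm_last_update->seqno);

	fence_put(vm_last_update);
	fence_put(job);
	return 0;
}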

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -812,7 +812,7 @@ static int amdgpu_bo_vm_update_pte(struc
 
 	}
 
-	r = amdgpu_vm_handle_moved(adev, vm, &p->job->sync);
+	r = amdgpu_vm_handle_moved(adev, vm);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 	dma_addr_t *pages_addr = NULL;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
-	struct dma_fence *exclusive;
+	struct dma_fence *exclusive, **last_update;
 	uint64_t flags;
 	int r;
 
@@ -1769,6 +1769,11 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 	else
 		flags = 0x0;
 
+	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+		last_update = &vm->last_update;
+	else
+		last_update = &bo_va->last_pt_update;
+
 	if (!clear && bo_va->base.moved) {
 		bo_va->base.moved = false;
 		list_splice_init(&bo_va->valids, &bo_va->invalids);
@@ -1780,7 +1785,7 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
 					       mapping, flags, nodes,
-					       &bo_va->last_pt_update);
+					       last_update);
 		if (r)
 			return r;
 	}
@@ -1803,12 +1808,6 @@ int amdgpu_vm_bo_update(struct amdgpu_de
 			trace_amdgpu_vm_bo_mapping(mapping);
 	}
 
-	if (bo_va->base.bo &&
-	    bo_va->base.bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-		dma_fence_put(vm->last_update);
-		vm->last_update = dma_fence_get(bo_va->last_pt_update);
-	}
-
 	return 0;
 }
 
@@ -2006,15 +2005,15 @@ int amdgpu_vm_clear_freed(struct amdgpu_
  * PTs have to be reserved!
  */
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm,
-			   struct amdgpu_sync *sync)
+			   struct amdgpu_vm *vm)
 {
-	struct amdgpu_bo_va *bo_va = NULL;
 	bool clear;
 	int r = 0;
 
 	spin_lock(&vm->status_lock);
 	while (!list_empty(&vm->moved)) {
+		struct amdgpu_bo_va *bo_va;
+
 		bo_va = list_first_entry(&vm->moved,
 			struct amdgpu_bo_va, base.vm_status);
 		spin_unlock(&vm->status_lock);
@@ -2030,9 +2029,6 @@ int amdgpu_vm_handle_moved(struct amdgpu
 	}
 	spin_unlock(&vm->status_lock);
 
-	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
 	return r;
 }
 
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -250,8 +250,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-			   struct amdgpu_vm *vm,
-			   struct amdgpu_sync *sync);
+			   struct amdgpu_vm *vm);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
 			bool clear);