From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Thu, 20 Jul 2017 20:55:06 +0200
Subject: drm/ttm: individualize BO reservation obj when they are freed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 841e763b40764a7699ae07f4cb1921af62d6316d
Patch-mainline: v4.14-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Use the BO's own embedded reservation object when it is put on the ddestroy
list. This way we avoid delaying the freeing of the BO because of new fences
added to the original, possibly shared, reservation object.

Such sharing of a reservation object is done by dma-buf and amdgpu's VM page
tables, where fences from other users would otherwise keep a freed BO alive.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
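Once a BO has been queued for delayed destruction it carries its fences
in the embedded bo->ttm_resv, so the cleanup path only has to wait for
those. A minimal sketch of the selection logic, assuming the
reservation_object API of this kernel version; the helper name
resv_to_wait_on() is made up for illustration, the patch open-codes the
same check in ttm_bo_cleanup_refs_and_unlock():

	/* A BO on the ddestroy list has had its fences copied into the
	 * embedded ttm_resv, so wait there instead of on the possibly
	 * shared bo->resv.
	 */
	static struct reservation_object *
	resv_to_wait_on(struct ttm_buffer_object *bo)
	{
		return list_empty(&bo->ddestroy) ? bo->resv : &bo->ttm_resv;
	}
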
 drivers/gpu/drm/ttm/ttm_bo.c |   54 +++++++++++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 7 deletions(-)
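
For reference, the new queueing path condensed into one sketch; the
locking, LRU handling and the NO_EVICT shortcut of the real
ttm_bo_cleanup_refs_or_queue() are omitted and queue_delayed_delete()
is a made-up name:

	static void queue_delayed_delete(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_device *bdev = bo->bdev;

		if (ttm_bo_individualize_resv(bo)) {
			/* Last resort: no memory to copy the fences, so
			 * wait for the BO to become idle and free it
			 * immediately, as in the patch's fallback.
			 */
			ttm_bo_wait(bo, true, true);
			ttm_bo_cleanup_memtype_use(bo);
			return;
		}
		/* Make sure the copied fences will signal eventually. */
		ttm_bo_flush_all_fences(bo);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}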

--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -394,14 +394,33 @@ static void ttm_bo_cleanup_memtype_use(s
 	ww_mutex_unlock (&bo->resv->lock);
 }
 
+static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
+{
+	int r;
+
+	if (bo->resv == &bo->ttm_resv)
+		return 0;
+
+	reservation_object_init(&bo->ttm_resv);
+	BUG_ON(reservation_object_lock(&bo->ttm_resv, NULL));
+
+	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+	if (r) {
+		reservation_object_unlock(&bo->ttm_resv);
+		reservation_object_fini(&bo->ttm_resv);
+	}
+
+	return r;
+}
+
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
 	struct reservation_object_list *fobj;
 	struct dma_fence *fence;
 	int i;
 
-	fobj = reservation_object_get_list(bo->resv);
-	fence = reservation_object_get_excl(bo->resv);
+	fobj = reservation_object_get_list(&bo->ttm_resv);
+	fence = reservation_object_get_excl(&bo->ttm_resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
 
@@ -430,8 +449,19 @@ static void ttm_bo_cleanup_refs_or_queue
 			ttm_bo_cleanup_memtype_use(bo);
 
 			return;
-		} else
-			ttm_bo_flush_all_fences(bo);
+		}
+
+		ret = ttm_bo_individualize_resv(bo);
+		if (ret) {
+			/* Last resort, if we fail to allocate memory for the
+			 * fences block for the BO to become idle and free it.
+			 */
+			spin_unlock(&glob->lru_lock);
+			ttm_bo_wait(bo, true, true);
+			ttm_bo_cleanup_memtype_use(bo);
+			return;
+		}
+		ttm_bo_flush_all_fences(bo);
 
 		/*
 		 * Make NO_EVICT bos immediately available to
@@ -443,6 +473,8 @@ static void ttm_bo_cleanup_refs_or_queue
 			ttm_bo_add_to_lru(bo);
 		}
 
+		if (bo->resv != &bo->ttm_resv)
+			reservation_object_unlock(&bo->ttm_resv);
 		__ttm_bo_unreserve(bo);
 	}
 
@@ -471,17 +503,25 @@ static int ttm_bo_cleanup_refs_and_unloc
 					  bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
+	struct reservation_object *resv;
 	int ret;
 
-	ret = ttm_bo_wait(bo, false, true);
+	if (unlikely(list_empty(&bo->ddestroy)))
+		resv = bo->resv;
+	else
+		resv = &bo->ttm_resv;
+
+	if (reservation_object_test_signaled_rcu(resv, true))
+		ret = 0;
+	else
+		ret = -EBUSY;
 
 	if (ret && !no_wait_gpu) {
 		long lret;
 		ww_mutex_unlock(&bo->resv->lock);
 		spin_unlock(&glob->lru_lock);
 
-		lret = reservation_object_wait_timeout_rcu(bo->resv,
-							   true,
+		lret = reservation_object_wait_timeout_rcu(resv, true,
 							   interruptible,
 							   30 * HZ);