From 52b8da4792426649c832070ac4ef15c44867d1dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 16 Jun 2021 09:20:56 +0200
Subject: drm/nouveau: use the new iterator in nouveau_fence_sync
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 40298cb45071e98c2c6465e2ef25dd8f8af77918
Patch-mainline: v5.17-rc1
References: jsc#PED-1166 jsc#PED-1168 jsc#PED-1170 jsc#PED-1218 jsc#PED-1220 jsc#PED-1222 jsc#PED-1223 jsc#PED-1225

Simplifying the code a bit.

The new implementation unifies the handling between drivers and so
results in waiting for all shared fences in all cases.
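
For reference, a minimal sketch of the iterator pattern that replaces
the manual shared-list walk (resv stands for a locked struct dma_resv
and all_fences is a placeholder flag; neither is an identifier taken
from this patch):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* Requires the reservation lock to be held; walks the
	 * exclusive fence and, when all_fences is true, the shared
	 * fences as well.
	 */
	dma_resv_for_each_fence(&cursor, resv, all_fences, fence) {
		if (dma_resv_iter_is_exclusive(&cursor))
			continue;	/* shared fences only on this pass */
		/* inspect or wait on each shared fence here */
	}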

SUSE backport: Added changes from merge commit:
8d0749b4f83bf4768ceae45ee6a79e6e7eddfc2a

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211005113742.1101-26-christian.koenig@amd.com
Acked-by: Patrik Jakobsson <pjakobsson@suse.de>
---
 drivers/gpu/drm/nouveau/nouveau_fence.c | 76 ++++++++++---------------
 1 file changed, 30 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0ae416aa76dc..a3a04e0d76ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,70 +339,54 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+		   bool exclusive, bool intr)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct dma_fence *fence;
 	struct dma_resv *resv = nvbo->bo.base.resv;
-	struct dma_resv_list *fobj;
-	struct nouveau_fence *f;
-	int ret = 0, i;
+	int i, ret;
 
 	if (!exclusive) {
 		ret = dma_resv_reserve_shared(resv, 1);
-
 		if (ret)
 			return ret;
-
-		fobj = NULL;
-	} else {
-		fobj = dma_resv_shared_list(resv);
 	}
 
 	/* Waiting for the exclusive fence first causes performance regressions
 	 * under some circumstances. So manually wait for the shared ones first.
 	 */
-	for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
-		struct nouveau_channel *prev = NULL;
-		bool must_wait = true;
-
-		fence = rcu_dereference_protected(fobj->shared[i],
-						dma_resv_held(resv));
-
-		f = nouveau_local_fence(fence, chan->drm);
-		if (f) {
-			rcu_read_lock();
-			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-				must_wait = false;
-			rcu_read_unlock();
-		}
+	for (i = 0; i < 2; ++i) {
+		struct dma_resv_iter cursor;
+		struct dma_fence *fence;
+
+		dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
+			struct nouveau_fence *f;
+
+			if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+				continue;
+
+			f = nouveau_local_fence(fence, chan->drm);
+			if (f) {
+				struct nouveau_channel *prev;
+				bool must_wait = true;
+
+				rcu_read_lock();
+				prev = rcu_dereference(f->channel);
+				if (prev && (prev == chan ||
+					     fctx->sync(f, prev, chan) == 0))
+					must_wait = false;
+				rcu_read_unlock();
+				if (!must_wait)
+					continue;
+			}
 
-		if (must_wait)
 			ret = dma_fence_wait(fence, intr);
-	}
-
-	fence = dma_resv_excl_fence(resv);
-	if (fence) {
-		struct nouveau_channel *prev = NULL;
-		bool must_wait = true;
-
-		f = nouveau_local_fence(fence, chan->drm);
-		if (f) {
-			rcu_read_lock();
-			prev = rcu_dereference(f->channel);
-			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
-				must_wait = false;
-			rcu_read_unlock();
+			if (ret)
+				return ret;
 		}
-
-		if (must_wait)
-			ret = dma_fence_wait(fence, intr);
-
-		return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 void
-- 
2.38.1