From 07bfe6bf1052f074093cdea95d6041f48b994c4b Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 25 Jun 2019 14:01:09 +0100
Subject: drm/i915/execlists: Convert recursive defer_request() into iterative
Git-commit: 07bfe6bf1052f074093cdea95d6041f48b994c4b
Patch-mainline: v5.4-rc1
References: bsc#1152489

As this engine owns the lock around rq->sched.link (for those waiters
submitted to this engine), we can use that link as an element in a local
list. We can thus replace the recursive algorithm with an iterative walk
over the ordered list of waiters.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 52 +++++++++++++++--------------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 28685ba91a2c..22afd2616d7f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
 	return *last;
 }
 
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
-	struct i915_dependency *p;
+	LIST_HEAD(list);
 
 	/*
 	 * We want to move the interrupted request to the back of
@@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
 	 * flight and were waiting for the interrupted request to
 	 * be run after it again.
 	 */
-	list_move_tail(&rq->sched.link, pl);
+	do {
+		struct i915_dependency *p;
 
-	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
-		struct i915_request *w =
-			container_of(p->waiter, typeof(*w), sched);
+		GEM_BUG_ON(i915_request_is_active(rq));
+		list_move_tail(&rq->sched.link, pl);
 
-		/* Leave semaphores spinning on the other engines */
-		if (w->engine != rq->engine)
-			continue;
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
 
-		/* No waiter should start before the active request completed */
-		GEM_BUG_ON(i915_request_started(w));
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
 
-		GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-		if (rq_prio(w) < rq_prio(rq))
-			continue;
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_started(w) &&
+				   !i915_request_completed(rq));
 
-		if (list_empty(&w->sched.link))
-			continue; /* Not yet submitted; unready */
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (list_empty(&w->sched.link))
+				continue; /* Not yet submitted; unready */
 
-		/*
-		 * This should be very shallow as it is limited by the
-		 * number of requests that can fit in a ring (<64) and
-		 * the number of contexts that can be in flight on this
-		 * engine.
-		 */
-		defer_request(w, pl);
-	}
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
 }
 
 static void defer_active(struct intel_engine_cs *engine)
-- 
2.28.0