From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 15 May 2018 15:31:49 +0100
Subject: drm/i915: Shrink search list for active timelines
Git-commit: f75f91574617a3c6fbc821c6b156f5777a59d0ed
Patch-mainline: v4.19-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

When switching to the kernel context, we force the switch to occur after
all currently active requests (so that we know the GPU won't switch
immediately away and the kernel context remains current as we work). To
do so we have to inspect all the timelines and add a fence from the
active work to queue our switch afterwards. We can use the tracked set
of active rings to shrink our search for active timelines.
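
For illustration, a minimal userspace sketch of the pattern (the list
helpers are cut-down copies of <linux/list.h>, and struct ring here is a
stand-in, not the real i915 structure): elements that become busy are
placed on a side list, so a hot-path search walks only the busy subset
rather than the global set of timelines.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

/* Insert 'new' immediately after 'head' (kernel-style list_add). */
static void list_add(struct list_head *new, struct list_head *head)
{
	head->next->prev = new;
	new->next = head->next;
	new->prev = head;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct ring {
	int id;
	struct list_head active_link; /* linked on active_rings only while busy */
};

int main(void)
{
	LIST_HEAD(active_rings);
	struct ring rings[4] = {
		{ .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
	};
	struct ring *ring;

	/* Pretend only rings 1 and 3 currently have requests in flight. */
	list_add(&rings[1].active_link, &active_rings);
	list_add(&rings[3].active_link, &active_rings);

	/* The search now visits two entries instead of all four. */
	list_for_each_entry(ring, &active_rings, active_link)
		printf("ring %d has active work\n", ring->id);

	return 0;
}

The trade-off is the usual one: keeping the side list up to date costs a
list_add/list_del when work is submitted and retired, in exchange for a
shorter walk on every kernel-context switch.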

v2: Use a local to shrink the list_for_each_entry()

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180515143149.4795-1-chris@chris-wilson.co.uk

Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c |   25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,41 +596,44 @@ last_request_on_engine(struct i915_timel
 
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
-	struct i915_timeline *timeline;
+	struct list_head * const active_rings = &engine->i915->gt.active_rings;
+	struct intel_ring *ring;
 
-	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-		if (last_request_on_engine(timeline, engine))
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+	list_for_each_entry(ring, active_rings, active_link) {
+		if (last_request_on_engine(ring->timeline, engine))
 			return false;
 	}
 
 	return intel_engine_has_kernel_context(engine);
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
-	struct i915_timeline *timeline;
 	enum intel_engine_id id;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	i915_retire_requests(dev_priv);
+	i915_retire_requests(i915);
 
-	for_each_engine(engine, dev_priv, id) {
+	for_each_engine(engine, i915, id) {
+		struct intel_ring *ring;
 		struct i915_request *rq;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
 
-		rq = i915_request_alloc(engine, dev_priv->kernel_context);
+		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
 
 		/* Queue this switch after all other activity */
-		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+		list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
 			struct i915_request *prev;
 
-			prev = last_request_on_engine(timeline, engine);
+			prev = last_request_on_engine(ring->timeline, engine);
 			if (prev)
 				i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 								 &prev->submit,