From 868771f9da96c59536f9946fde2c9f0694ca3f37 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Date: Wed, 22 Dec 2021 16:56:22 +0100
Subject: drm/i915: Use trylock instead of blocking lock for
 __i915_gem_free_objects.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 6cb12fbda1c2e2fcb6d3adfe01f18eef6812e278
Patch-mainline: v5.17-rc1
References: jsc#PED-1166 jsc#PED-1168 jsc#PED-1170 jsc#PED-1218 jsc#PED-1220 jsc#PED-1222 jsc#PED-1223 jsc#PED-1225

Convert free_work from a work_struct into a delayed_work, similar to ttm,
to allow converting the blocking lock in __i915_gem_free_objects into a
trylock.

Unlike ttm, the object should already be idle, as it's kept alive
by a reference through struct i915_vma->active, which is dropped
after all vmas are idle.

Because of this, the worker is queued with no delay by default; only
when the lock is contested do we fall back to ttm's 10 ms delay.

The trylock should only fail when the object is sharing its resv with
other objects, and objects are typically not kept locked for a long
time, so we can safely retry on failure.

Fixes: be7612fd6665 ("drm/i915: Require object lock when freeing pages during destruction")
Testcase: igt/gem_exec_alignment/pi*
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211222155622.2960379-1-maarten.lankhorst@linux.intel.com
Acked-by: Patrik Jakobsson <pjakobsson@suse.de>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c | 14 ++++++++++----
 drivers/gpu/drm/i915/i915_drv.h            |  4 ++--
 2 files changed, 12 insertions(+), 6 deletions(-)
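
As an illustration of the approach described above (notes only, outside the
diff proper): a minimal sketch of the trylock-plus-delayed-requeue pattern,
assuming hypothetical demo_* names and stubbed lock/release helpers. Only the
llist, workqueue and jiffies helpers are real kernel APIs; the actual driver
changes are in the hunks below.

/*
 * Sketch only: demo_* types and helpers are stand-ins, not i915 code.
 */
#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_obj {
	struct llist_node freed;
	/* ... object state ... */
};

static struct llist_head demo_free_list;
static struct delayed_work demo_free_work;
static struct workqueue_struct *demo_wq;

static bool demo_trylock(struct demo_obj *obj); /* stands in for i915_gem_object_trylock() */
static void demo_release(struct demo_obj *obj); /* stands in for __i915_gem_free_object() */

static void demo_free_worker(struct work_struct *work)
{
	struct llist_node *freed = llist_del_all(&demo_free_list);
	struct demo_obj *obj, *next;

	llist_for_each_entry_safe(obj, next, freed, freed) {
		if (!demo_trylock(obj)) {
			/* contended: back on the pile, retry in 10 ms like ttm */
			if (llist_add(&obj->freed, &demo_free_list))
				queue_delayed_work(demo_wq, &demo_free_work,
						   msecs_to_jiffies(10));
			continue;
		}
		/* the real code finalizes pages, unlocks, then frees */
		demo_release(obj);
	}
}

/* Producer side: the first add to an empty list arms the worker, no delay. */
static void demo_queue_free(struct demo_obj *obj)
{
	if (llist_add(&obj->freed, &demo_free_list))
		queue_delayed_work(demo_wq, &demo_free_work, 0);
}

static void demo_init(void)
{
	init_llist_head(&demo_free_list);
	INIT_DELAYED_WORK(&demo_free_work, demo_free_worker);
}

llist_add() returns true only when the list was empty beforehand, so the
worker is armed exactly once per batch of freed objects; the 10 ms delay on
contention mirrors ttm, while the common uncontended path still frees with no
delay at all.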

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 39cd563544a5..d87b508b59b1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -331,7 +331,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			continue;
 		}
 
-		i915_gem_object_lock(obj, NULL);
+		if (!i915_gem_object_trylock(obj, NULL)) {
+			/* busy, toss it back to the pile */
+			if (llist_add(&obj->freed, &i915->mm.free_list))
+				queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
+			continue;
+		}
+
 		__i915_gem_object_pages_fini(obj);
 		i915_gem_object_unlock(obj);
 		__i915_gem_free_object(obj);
@@ -353,7 +359,7 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
-		container_of(work, struct drm_i915_private, mm.free_work);
+		container_of(work, struct drm_i915_private, mm.free_work.work);
 
 	i915_gem_flush_free_objects(i915);
 }
@@ -385,7 +391,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 */
 
 	if (llist_add(&obj->freed, &i915->mm.free_list))
-		queue_work(i915->wq, &i915->mm.free_work);
+		queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
 }
 
 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
@@ -710,7 +716,7 @@ bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
 
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
-	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+	INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
 void i915_objects_module_exit(void)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74d03bd58b76..0c70ab08fc0c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -473,7 +473,7 @@ struct i915_gem_mm {
 	 * List of objects which are pending destruction.
 	 */
 	struct llist_head free_list;
-	struct work_struct free_work;
+	struct delayed_work free_work;
 	/**
 	 * Count of objects pending destructions. Used to skip needlessly
 	 * waiting on an RCU barrier if no objects are waiting to be freed.
@@ -1636,7 +1636,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 	 * armed the work again.
 	 */
 	while (atomic_read(&i915->mm.free_count)) {
-		flush_work(&i915->mm.free_work);
+		flush_delayed_work(&i915->mm.free_work);
 		flush_delayed_work(&i915->bdev.wq);
 		rcu_barrier();
 	}
-- 
2.38.1