From cd2a4eaf8c79daa41bdeb7251dbf66413291fd70 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Tue, 30 Jul 2019 21:58:05 +0100
Subject: drm/i915: Report resv_obj allocation failure
Git-commit: cd2a4eaf8c79daa41bdeb7251dbf66413291fd70
Patch-mainline: v5.4-rc1
References: bsc#1152489

Since commit 64d6c500a384 ("drm/i915: Generalise GPU activity
tracking"), we have been prepared for i915_vma_move_to_active() to fail.
We can take advantage of this to report the failure to allocate the
shared-fence slot in the reservation_object, rather than silently
ignoring it as export_fence() did.
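
As an illustration of the pattern (not part of the patch), below is a
minimal user-space sketch. reserve_slot(), add_fence() and
move_to_active() are hypothetical stand-ins for
dma_resv_reserve_shared(), dma_resv_add_shared_fence() and
i915_vma_move_to_active(): the fallible allocation step runs first so
its error can be returned to the caller, and the add itself cannot
fail once a slot has been reserved.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fence_list {
	void **slots;
	unsigned int count, capacity;
};

/* Grow the slot array by one; may fail, like dma_resv_reserve_shared(). */
static int reserve_slot(struct fence_list *fl)
{
	void **tmp;

	if (fl->count < fl->capacity)
		return 0;
	tmp = realloc(fl->slots, (fl->capacity + 1) * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;
	fl->slots = tmp;
	fl->capacity++;
	return 0;
}

/* Infallible once a slot is reserved, like dma_resv_add_shared_fence(). */
static void add_fence(struct fence_list *fl, void *fence)
{
	fl->slots[fl->count++] = fence;
}

/* After the patch: reserve first, so allocation failure reaches the caller. */
static int move_to_active(struct fence_list *fl, void *fence)
{
	int err;

	err = reserve_slot(fl);		/* may return -ENOMEM */
	if (err)
		return err;
	add_fence(fl, fence);		/* guaranteed to succeed */
	return 0;
}

int main(void)
{
	struct fence_list fl = { NULL, 0, 0 };
	int dummy_fence;

	if (move_to_active(&fl, &dummy_fence))
		return 1;
	printf("fence tracked, %u slot(s) in use\n", fl.count);
	free(fl.slots);
	return 0;
}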

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730205805.3733-1-chris@chris-wilson.co.uk
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/gpu/drm/i915/i915_vma.c |   31 +++++++++----------------------
 1 file changed, 9 insertions(+), 22 deletions(-)

--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -886,23 +886,6 @@ void i915_vma_revoke_mmap(struct i915_vm
 		list_del(&vma->obj->userfault_link);
 }
 
-static void export_fence(struct i915_vma *vma,
-			 struct i915_request *rq,
-			 unsigned int flags)
-{
-	struct dma_resv *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	if (flags & EXEC_OBJECT_WRITE)
-		dma_resv_add_excl_fence(resv, &rq->fence);
-	else if (dma_resv_reserve_shared(resv, 1) == 0)
-		dma_resv_add_shared_fence(resv, &rq->fence);
-}
-
 int i915_vma_move_to_active(struct i915_vma *vma,
 			    struct i915_request *rq,
 			    unsigned int flags)
@@ -926,14 +909,20 @@ int i915_vma_move_to_active(struct i915_
 	if (unlikely(err))
 		return err;
 
-	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
 		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
 			__i915_active_request_set(&obj->frontbuffer_write, rq);
 
+		dma_resv_add_excl_fence(vma->resv, &rq->fence);
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 		obj->read_domains = 0;
+	} else {
+		err = dma_resv_reserve_shared(vma->resv, 1);
+		if (unlikely(err))
+			return err;
+
+		dma_resv_add_shared_fence(vma->resv, &rq->fence);
+		obj->write_domain = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 	obj->mm.dirty = true;
@@ -941,8 +930,6 @@ int i915_vma_move_to_active(struct i915_
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		__i915_active_request_set(&vma->last_fence, rq);
 
-	export_fence(vma, rq, flags);
-
 	GEM_BUG_ON(!i915_vma_is_active(vma));
 	return 0;
 }