From: Lucas Stach <l.stach@pengutronix.de>
Date: Wed, 29 Nov 2017 14:49:04 +0100
Subject: drm/etnaviv: track fences by IDR instead of seqno
Git-commit: 8bc4d885bd42e9a1d47a53aa4efbb818597ef9a0
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

This moves away from using the internal seqno as the userspace fence
reference. By moving to a generic ID, we can later replace the internal
fence with something other than the etnaviv seqno fence.
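
For illustration, the core IDR pattern applied here looks roughly like the
minimal sketch below. This is a simplified, hypothetical example: the
my_ctx/my_obj names are invented and do not appear in the patch, and the
real driver additionally takes a reference on the fence (dma_fence_get_rcu)
under the lock before using it.

  #include <linux/idr.h>
  #include <linux/mutex.h>
  #include <linux/gfp.h>
  #include <linux/kernel.h>

  struct my_ctx {
          struct mutex lock;
          struct idr obj_idr;     /* maps small integer IDs to objects */
  };

  struct my_obj {
          int id;                 /* ID handed to userspace instead of a seqno */
  };

  /* idr_init(&ctx->obj_idr) and mutex_init(&ctx->lock) are assumed to
   * have run at setup time. */

  static int my_obj_publish(struct my_ctx *ctx, struct my_obj *obj)
  {
          mutex_lock(&ctx->lock);
          /* cyclic allocation delays reuse of recently freed IDs */
          obj->id = idr_alloc_cyclic(&ctx->obj_idr, obj, 0, INT_MAX, GFP_KERNEL);
          mutex_unlock(&ctx->lock);

          return obj->id < 0 ? obj->id : 0;
  }

  static struct my_obj *my_obj_lookup(struct my_ctx *ctx, int id)
  {
          struct my_obj *obj;

          /* the lock only serializes the lookup against removal */
          mutex_lock(&ctx->lock);
          obj = idr_find(&ctx->obj_idr, id);
          mutex_unlock(&ctx->lock);

          return obj;     /* caller still needs its own reference before use */
  }

  static void my_obj_unpublish(struct my_ctx *ctx, struct my_obj *obj)
  {
          mutex_lock(&ctx->lock);
          idr_remove(&ctx->obj_idr, obj->id);
          mutex_unlock(&ctx->lock);
  }

In the patch below this corresponds to idr_init() in etnaviv_gpu_bind(),
idr_alloc_cyclic() in etnaviv_gpu_fence_alloc(), idr_find() in
etnaviv_gpu_wait_fence_interruptible() and idr_remove() in
etnaviv_fence_release().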

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        |    1 
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |    2 
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        |   56 ++++++++++++++++++---------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |    1 
 4 files changed, 42 insertions(+), 18 deletions(-)

--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -104,6 +104,7 @@ struct etnaviv_gem_submit {
 	struct kref refcount;
 	struct etnaviv_gpu *gpu;
 	struct dma_fence *out_fence, *in_fence;
+	int out_fence_id;
 	struct list_head node; /* GPU active submit list */
 	struct etnaviv_cmdbuf cmdbuf;
 	bool runtime_resumed;
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -563,7 +563,7 @@ int etnaviv_ioctl_gem_submit(struct drm_
 	}
 
 	args->fence_fd = out_fence_fd;
-	args->fence = submit->out_fence->seqno;
+	args->fence = submit->out_fence_id;
 
 err_submit_objects:
 	etnaviv_submit_put(submit);
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1016,6 +1016,7 @@ static void hangcheck_disable(struct etn
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
+	int id;
 	struct dma_fence base;
 };
 
@@ -1052,6 +1053,11 @@ static void etnaviv_fence_release(struct
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
+	/* first remove from IDR, so fence can not be looked up anymore */
+	mutex_lock(&f->gpu->lock);
+	idr_remove(&f->gpu->fence_idr, f->id);
+	mutex_unlock(&f->gpu->lock);
+
 	kfree_rcu(f, base.rcu);
 }
 
@@ -1078,6 +1084,11 @@ static struct dma_fence *etnaviv_gpu_fen
 	if (!f)
 		return NULL;
 
+	f->id = idr_alloc_cyclic(&gpu->fence_idr, &f->base, 0, INT_MAX, GFP_KERNEL);
+	if (f->id < 0) {
+		kfree(f);
+		return NULL;
+	}
 	f->gpu = gpu;
 
 	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
@@ -1226,35 +1237,43 @@ static void retire_worker(struct work_st
 }
 
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
-	u32 fence, struct timespec *timeout)
+	u32 id, struct timespec *timeout)
 {
+	struct dma_fence *fence;
 	int ret;
 
-	if (fence_after(fence, gpu->next_fence)) {
-		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
-				fence, gpu->next_fence);
-		return -EINVAL;
-	}
+	/*
+	 * Look up the fence and take a reference. The mutex only synchronizes
+	 * the IDR lookup with the fence release. We might still find a fence
+	 * whose refcount has already dropped to zero. dma_fence_get_rcu
+	 * pretends we didn't find a fence in that case.
+	 */
+	ret = mutex_lock_interruptible(&gpu->lock);
+	if (ret)
+		return ret;
+	fence = idr_find(&gpu->fence_idr, id);
+	if (fence)
+		fence = dma_fence_get_rcu(fence);
+	mutex_unlock(&gpu->lock);
+
+	if (!fence)
+		return 0;
 
 	if (!timeout) {
 		/* No timeout was requested: just test for completion */
-		ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
 	} else {
 		unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = wait_event_interruptible_timeout(gpu->fence_event,
-						fence_completed(gpu, fence),
-						remaining);
-		if (ret == 0) {
-			DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
-				fence, gpu->retired_fence,
-				gpu->completed_fence);
+		ret = dma_fence_wait_timeout(fence, true, remaining);
+		if (ret == 0)
 			ret = -ETIMEDOUT;
-		} else if (ret != -ERESTARTSYS) {
+		else if (ret != -ERESTARTSYS)
 			ret = 0;
-		}
+
 	}
 
+	dma_fence_put(fence);
 	return ret;
 }
 
@@ -1386,6 +1405,7 @@ int etnaviv_gpu_submit(struct etnaviv_gp
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
+	submit->out_fence_id = to_etnaviv_fence(submit->out_fence)->id;
 
 	gpu->active_fence = submit->out_fence->seqno;
 
@@ -1490,7 +1510,6 @@ static irqreturn_t irq_handler(int irq,
 				continue;
 
 			gpu->event[event].fence = NULL;
-			dma_fence_signal(fence);
 
 			/*
 			 * Events can be processed out of order.  Eg,
@@ -1503,6 +1522,7 @@ static irqreturn_t irq_handler(int irq,
 			 */
 			if (fence_after(fence->seqno, gpu->completed_fence))
 				gpu->completed_fence = fence->seqno;
+			dma_fence_signal(fence);
 
 			event_free(gpu, event);
 		}
@@ -1700,6 +1720,7 @@ static int etnaviv_gpu_bind(struct devic
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
+	idr_init(&gpu->fence_idr);
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_LIST_HEAD(&gpu->active_submit_list);
@@ -1751,6 +1772,7 @@ static void etnaviv_gpu_unbind(struct de
 	}
 
 	gpu->drm = NULL;
+	idr_destroy(&gpu->fence_idr);
 
 	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
 		thermal_cooling_device_unregister(gpu->cooling);
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -128,6 +128,7 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
+	struct idr fence_idr;
 	u32 next_fence;
 	u32 active_fence;
 	u32 completed_fence;