From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Fri, 5 Jan 2018 11:16:22 +0100
Subject: drm/amdgpu: add amdgpu_pasid_free_delayed v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: 4b5f75504975b42377fa198b0672c9a52eb728e0
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

Free up a pasid only after all fences have signaled.

v2: also handle the case when we can't allocate a fence array.
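
As an aside (not part of this patch), a teardown path could use the new
helper roughly as in the hedged sketch below; the function name
example_release_vm_pasid and the fields vm->root.base.bo and vm->pasid
are assumptions for illustration only:

  /* Hand the root PD's reservation object to the helper so the PASID
   * is recycled only after the GPU is done with it, without blocking
   * the caller.
   */
  static void example_release_vm_pasid(struct amdgpu_vm *vm)
  {
          struct amdgpu_bo *pd = vm->root.base.bo;

          if (vm->pasid)
                  amdgpu_pasid_free_delayed(pd->tbo.resv, vm->pasid);
  }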

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c |   82 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h |    2 
 2 files changed, 84 insertions(+)

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -40,6 +40,12 @@
  */
 static DEFINE_IDA(amdgpu_pasid_ida);
 
+/* Helper to free pasid from a fence callback */
+struct amdgpu_pasid_cb {
+	struct dma_fence_cb cb;
+	unsigned int pasid;
+};
+
 /**
  * amdgpu_pasid_alloc - Allocate a PASID
  * @bits: Maximum width of the PASID in bits, must be at least 1
@@ -75,6 +81,82 @@ void amdgpu_pasid_free(unsigned int pasi
 	ida_simple_remove(&amdgpu_pasid_ida, pasid);
 }
 
+static void amdgpu_pasid_free_cb(struct dma_fence *fence,
+				 struct dma_fence_cb *_cb)
+{
+	struct amdgpu_pasid_cb *cb =
+		container_of(_cb, struct amdgpu_pasid_cb, cb);
+
+	amdgpu_pasid_free(cb->pasid);
+	dma_fence_put(fence);
+	kfree(cb);
+}
+
+/**
+ * amdgpu_pasid_free_delayed - free pasid when fences signal
+ *
+ * @resv: reservation object with the fences to wait for
+ * @pasid: pasid to free
+ *
+ * Free the pasid only after all the fences in resv are signaled.
+ */
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+			       unsigned int pasid)
+{
+	struct dma_fence *fence, **fences;
+	struct amdgpu_pasid_cb *cb;
+	unsigned count;
+	int r;
+
+	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+	if (r)
+		goto fallback;
+
+	if (count == 0) {
+		amdgpu_pasid_free(pasid);
+		return;
+	}
+
+	if (count == 1) {
+		fence = fences[0];
+		kfree(fences);
+	} else {
+		uint64_t context = dma_fence_context_alloc(1);
+		struct dma_fence_array *array;
+
+		array = dma_fence_array_create(count, fences, context,
+					       1, false);
+		if (!array) {
+			kfree(fences);
+			goto fallback;
+		}
+		fence = &array->base;
+	}
+
+	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		/* Last resort when we are OOM */
+		dma_fence_wait(fence, false);
+		dma_fence_put(fence);
+		amdgpu_pasid_free(pasid);
+	} else {
+		cb->pasid = pasid;
+		if (dma_fence_add_callback(fence, &cb->cb,
+					   amdgpu_pasid_free_cb))
+			amdgpu_pasid_free_cb(fence, &cb->cb);
+	}
+
+	return;
+
+fallback:
+	/* Not enough memory for the delayed delete, as last resort
+	 * block for all the fences to complete.
+	 */
+	reservation_object_wait_timeout_rcu(resv, true, false,
+					    MAX_SCHEDULE_TIMEOUT);
+	amdgpu_pasid_free(pasid);
+}
+
 /*
  * VMID manager
  *
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -69,6 +69,8 @@ struct amdgpu_vmid_mgr {
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+			       unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 			       struct amdgpu_vmid *id);