From 6948ce333d043072ef4a3f63422989dd25fee4aa Mon Sep 17 00:00:00 2001
From: Simon Ser <contact@emersion.fr>
Date: Fri, 14 Jul 2023 11:13:03 +0000
Subject: drm/syncobj: add IOCTL to register an eventfd
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Git-commit: c7a472297169156252a50d76965eb36b081186e2
Patch-mainline: v6.6-rc1
References: jsc#PED-3527 jsc#PED-5475 jsc#PED-6068 jsc#PED-6070 jsc#PED-6116 jsc#PED-6120 jsc#PED-5065 jsc#PED-5477 jsc#PED-5511 jsc#PED-6041 jsc#PED-6069 jsc#PED-6071

Introduce a new DRM_IOCTL_SYNCOBJ_EVENTFD IOCTL which signals an
eventfd from a syncobj.

This is useful for Wayland compositors to handle wait-before-submit:
Wayland clients can send a timeline point to the compositor before the
point has materialized, and the compositor can then wait for the point
to materialize via this new IOCTL.

The existing DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT IOCTL is not suitable
because it blocks. Compositors want to integrate the wait with
their poll(2)-based event loop.
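
For illustration, a minimal user-space sketch of the intended usage
(the function name, error handling and the <drm.h> include path are
illustrative assumptions, not part of this patch):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <drm.h> /* struct drm_syncobj_eventfd, DRM_IOCTL_SYNCOBJ_EVENTFD */

    /* Sketch: register an eventfd for a timeline point and return it so
     * the compositor can add it to its poll(2)/epoll set. */
    static int watch_syncobj_point(int drm_fd, uint32_t handle, uint64_t point)
    {
        int ev_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (ev_fd < 0)
            return -1;

        struct drm_syncobj_eventfd args = {
            .handle = handle,
            .flags = 0,     /* 0: signal the eventfd once the point signals */
            .point = point, /* 0 for binary syncobjs */
            .fd = ev_fd,
        };
        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) != 0) {
            close(ev_fd);
            return -1;
        }

        /* ev_fd becomes readable once the point has signalled. */
        return ev_fd;
    }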

Requirements for new uAPI:

- User-space patch: https://gitlab.freedesktop.org/wlroots/wlroots/-/merge_requests/4262
- IGT: https://lists.freedesktop.org/archives/igt-dev/2023-July/057893.html

v2:
- Wait for fence when flags is zero
- Improve documentation (Pekka)
- Rename IOCTL (Christian)
- Fix typo in drm_syncobj_add_eventfd() (Christian)

v3:
- Link user-space + IGT patches
- Add reference from overview docs

v4: fix IOCTL number conflict with GETFB2 (Nicholas Choi, Vitaly Prosyak)

Signed-off-by: Simon Ser <contact@emersion.fr>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Pekka Paalanen <pekka.paalanen@collabora.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Cc: Daniel Stone <daniel@fooishbar.org>
Cc: James Jones <jajones@nvidia.com>
Cc: Austin Shafer <ashafer@nvidia.com>
Cc: Vitaly Prosyak <vprosyak@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230714111257.11940-1-contact@emersion.fr
Acked-by: Patrik Jakobsson <pjakobsson@suse.de>
---
 drivers/gpu/drm/drm_internal.h |   2 +
 drivers/gpu/drm/drm_ioctl.c    |   2 +
 drivers/gpu/drm/drm_syncobj.c  | 148 +++++++++++++++++++++++++++++++--
 include/drm/drm_syncobj.h      |   6 +-
 include/uapi/drm/drm.h         |  23 +++++
 5 files changed, 174 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index d7e023bbb0d5..ba12acd55139 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -245,6 +245,8 @@ int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file_private);
 int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_private);
+int drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_private);
 int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_private);
 int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8e9afe7af19c..f03ffbacfe9b 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -701,6 +701,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 		      DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
 		      DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_EVENTFD, drm_syncobj_eventfd_ioctl,
+		      DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
 		      DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e592c5da70ce..f7003d1ec5ef 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -136,6 +136,10 @@
  * requirement is inherited from the wait-before-signal behavior required by
  * the Vulkan timeline semaphore API.
  *
+ * Alternatively, &DRM_IOCTL_SYNCOBJ_EVENTFD can be used to wait without
+ * blocking: an eventfd will be signaled when the syncobj is. This is useful to
+ * integrate the wait in an event loop.
+ *
  *
  * Import/export of syncobjs
  * -------------------------
@@ -185,6 +189,7 @@
 
 #include <linux/anon_inodes.h>
 #include <linux/dma-fence-unwrap.h>
+#include <linux/eventfd.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sched/signal.h>
@@ -212,6 +217,20 @@ struct syncobj_wait_entry {
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
 				      struct syncobj_wait_entry *wait);
 
+struct syncobj_eventfd_entry {
+	struct list_head node;
+	struct dma_fence *fence;
+	struct dma_fence_cb fence_cb;
+	struct drm_syncobj *syncobj;
+	struct eventfd_ctx *ev_fd_ctx;
+	u64 point;
+	u32 flags;
+};
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+			   struct syncobj_eventfd_entry *entry);
+
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -274,6 +293,28 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
 	spin_unlock(&syncobj->lock);
 }
 
+static void
+syncobj_eventfd_entry_free(struct syncobj_eventfd_entry *entry)
+{
+	eventfd_ctx_put(entry->ev_fd_ctx);
+	dma_fence_put(entry->fence);
+	/* This happens either inside the syncobj lock, or after the node has
+	 * already been removed from the list.
+	 */
+	list_del(&entry->node);
+	kfree(entry);
+}
+
+static void
+drm_syncobj_add_eventfd(struct drm_syncobj *syncobj,
+			struct syncobj_eventfd_entry *entry)
+{
+	spin_lock(&syncobj->lock);
+	list_add_tail(&entry->node, &syncobj->ev_fd_list);
+	syncobj_eventfd_entry_func(syncobj, entry);
+	spin_unlock(&syncobj->lock);
+}
+
 /**
  * drm_syncobj_add_point - add new timeline point to the syncobj
  * @syncobj: sync object to add timeline point do
@@ -288,7 +329,8 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 			   struct dma_fence *fence,
 			   uint64_t point)
 {
-	struct syncobj_wait_entry *cur, *tmp;
+	struct syncobj_wait_entry *wait_cur, *wait_tmp;
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
 	struct dma_fence *prev;
 
 	dma_fence_get(fence);
@@ -302,8 +344,10 @@ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
 	dma_fence_chain_init(chain, prev, fence, point);
 	rcu_assign_pointer(syncobj->fence, &chain->base);
 
-	list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-		syncobj_wait_syncobj_func(syncobj, cur);
+	list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+		syncobj_wait_syncobj_func(syncobj, wait_cur);
+	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+		syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
 	spin_unlock(&syncobj->lock);
 
 	/* Walk the chain once to trigger garbage collection */
@@ -323,7 +367,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
-	struct syncobj_wait_entry *cur, *tmp;
+	struct syncobj_wait_entry *wait_cur, *wait_tmp;
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
 
 	if (fence)
 		dma_fence_get(fence);
@@ -335,8 +380,10 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	rcu_assign_pointer(syncobj->fence, fence);
 
 	if (fence != old_fence) {
-		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-			syncobj_wait_syncobj_func(syncobj, cur);
+		list_for_each_entry_safe(wait_cur, wait_tmp, &syncobj->cb_list, node)
+			syncobj_wait_syncobj_func(syncobj, wait_cur);
+		list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+			syncobj_eventfd_entry_func(syncobj, ev_fd_cur);
 	}
 
 	spin_unlock(&syncobj->lock);
@@ -472,7 +519,13 @@ void drm_syncobj_free(struct kref *kref)
 	struct drm_syncobj *syncobj = container_of(kref,
 						   struct drm_syncobj,
 						   refcount);
+	struct syncobj_eventfd_entry *ev_fd_cur, *ev_fd_tmp;
+
 	drm_syncobj_replace_fence(syncobj, NULL);
+
+	list_for_each_entry_safe(ev_fd_cur, ev_fd_tmp, &syncobj->ev_fd_list, node)
+		syncobj_eventfd_entry_free(ev_fd_cur);
+
 	kfree(syncobj);
 }
 EXPORT_SYMBOL(drm_syncobj_free);
@@ -501,6 +554,7 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
 
 	kref_init(&syncobj->refcount);
 	INIT_LIST_HEAD(&syncobj->cb_list);
+	INIT_LIST_HEAD(&syncobj->ev_fd_list);
 	spin_lock_init(&syncobj->lock);
 
 	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
@@ -1304,6 +1358,88 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
+static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
+					     struct dma_fence_cb *cb)
+{
+	struct syncobj_eventfd_entry *entry =
+		container_of(cb, struct syncobj_eventfd_entry, fence_cb);
+
+	eventfd_signal(entry->ev_fd_ctx, 1);
+	syncobj_eventfd_entry_free(entry);
+}
+
+static void
+syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
+			   struct syncobj_eventfd_entry *entry)
+{
+	int ret;
+	struct dma_fence *fence;
+
+	/* This happens inside the syncobj lock */
+	fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+	ret = dma_fence_chain_find_seqno(&fence, entry->point);
+	if (ret != 0 || !fence) {
+		dma_fence_put(fence);
+		return;
+	}
+
+	list_del_init(&entry->node);
+	entry->fence = fence;
+
+	if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
+		eventfd_signal(entry->ev_fd_ctx, 1);
+		syncobj_eventfd_entry_free(entry);
+	} else {
+		ret = dma_fence_add_callback(fence, &entry->fence_cb,
+					     syncobj_eventfd_entry_fence_func);
+		if (ret == -ENOENT) {
+			eventfd_signal(entry->ev_fd_ctx, 1);
+			syncobj_eventfd_entry_free(entry);
+		}
+	}
+}
+
+int
+drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_private)
+{
+	struct drm_syncobj_eventfd *args = data;
+	struct drm_syncobj *syncobj;
+	struct eventfd_ctx *ev_fd_ctx;
+	struct syncobj_eventfd_entry *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+		return -EOPNOTSUPP;
+
+	if (args->flags & ~DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)
+		return -EINVAL;
+
+	if (args->pad)
+		return -EINVAL;
+
+	syncobj = drm_syncobj_find(file_private, args->handle);
+	if (!syncobj)
+		return -ENOENT;
+
+	ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+	if (IS_ERR(ev_fd_ctx))
+		return PTR_ERR(ev_fd_ctx);
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		eventfd_ctx_put(ev_fd_ctx);
+		return -ENOMEM;
+	}
+	entry->syncobj = syncobj;
+	entry->ev_fd_ctx = ev_fd_ctx;
+	entry->point = args->point;
+	entry->flags = args->flags;
+
+	drm_syncobj_add_eventfd(syncobj, entry);
+	drm_syncobj_put(syncobj);
+
+	return 0;
+}
 
 int
 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
 	 */
 	struct list_head cb_list;
 	/**
-	 * @lock: Protects &cb_list and write-locks &fence.
+	 * @ev_fd_list: List of registered eventfds.
+	 */
+	struct list_head ev_fd_list;
+	/**
+	 * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
 	 */
 	spinlock_t lock;
 	/**
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a87bbbbca2d4..863e47200911 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -909,6 +909,27 @@ struct drm_syncobj_timeline_wait {
 	__u32 pad;
 };
 
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ *         available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+	__u32 handle;
+	__u32 flags;
+	__u64 point;
+	__s32 fd;
+	__u32 pad;
+};
+
 
 struct drm_syncobj_array {
 	__u64 handles;
@@ -1169,6 +1190,8 @@ extern "C" {
  */
 #define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
 
+#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
 /*
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
-- 
2.43.0