From 66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Thu, 17 Mar 2022 02:03:40 +0000
Subject: [PATCH] io_uring: shuffle io_eventfd_signal() bits around
Git-commit: 66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb
Patch-mainline: v5.18-rc1
References: bsc#1205205

A preparation patch, which moves a fast ->io_ev_fd check out of
io_eventfd_signal() into ev_posted*(). Compilers are smart enough for
this not to change anything, but we will need it later.
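
As an illustration only (not part of the patch): a minimal userspace C
sketch of the same hoisted fast-path check, with a C11 atomic pointer
standing in for rcu_dereference_raw(); the names event_ctx, notify_slow()
and event_posted() are hypothetical, not kernel APIs.

	#include <stdatomic.h>
	#include <stdio.h>

	struct event_ctx {
		_Atomic(int *) ev_fd;	/* NULL while no eventfd is registered */
	};

	/* Slow path: only entered after the caller saw a non-NULL pointer. */
	static void notify_slow(struct event_ctx *ctx)
	{
		int *fd = atomic_load(&ctx->ev_fd);

		/* Re-check under proper protection (rcu_read_lock() upstream). */
		if (!fd)
			return;
		printf("signal eventfd %d\n", *fd);
	}

	/* Hot path: the cheap NULL check is hoisted here, as in ev_posted*(). */
	static inline void event_posted(struct event_ctx *ctx)
	{
		if (atomic_load_explicit(&ctx->ev_fd, memory_order_relaxed))
			notify_slow(ctx);
	}

	int main(void)
	{
		struct event_ctx ctx = { .ev_fd = NULL };
		int fd = 42;

		event_posted(&ctx);		/* fast path: slow path never called */
		atomic_store(&ctx.ev_fd, &fd);
		event_posted(&ctx);		/* now takes the slow path and signals */
		return 0;
	}

The point of the pattern is that the common no-eventfd case skips the
function call (and the rcu_read_lock() it takes) entirely.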

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ec4091ac76d43912b73917e8db651c2dac4b7b01.1647481208.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
---
 fs/io_uring.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 692dbe7b98e9..31c625f61fd8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1828,10 +1828,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
 	struct io_ev_fd *ev_fd;
 
-	/* Return quickly if ctx->io_ev_fd doesn't exist */
-	if (likely(!rcu_dereference_raw(ctx->io_ev_fd)))
-		return;
-
 	rcu_read_lock();
 	/*
 	 * rcu_dereference ctx->io_ev_fd once and use it for both for checking
@@ -1851,7 +1847,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
 		eventfd_signal(ev_fd->cq_ev_fd, 1);
-
 out:
 	rcu_read_unlock();
 }
@@ -1863,7 +1858,7 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
  * 1:1 relationship between how many times this function is called (and
  * hence the eventfd count) and number of CQEs posted to the CQ ring.
  */
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
@@ -1872,7 +1867,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1881,7 +1877,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (wq_has_sleeper(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
-- 
2.35.3