From e677edbcabee849bfdd43f1602bccbecf736a646 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Fri, 8 Apr 2022 11:08:58 -0600
Subject: [PATCH] io_uring: fix race between timeout flush and removal
Git-commit: e677edbcabee849bfdd43f1602bccbecf736a646
Patch-mainline: v5.18-rc2
References: bsc#1198811 CVE-2022-29582

io_flush_timeouts() assumes the timeout isn't in the process of
triggering or being removed/canceled, so it unconditionally removes it
from the timeout list and attempts to cancel it.

Leave it on the list and let the normal timeout cancelation take care
of it.
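
For illustration only, not part of this patch: a minimal sketch of the
shape io_kill_timeout() is assumed to have in this tree (the completion
helpers are elided, since their exact names differ across branches). It
shows the invariant the fix relies on: an entry is only unlinked from
ctx->timeout_list by the path that actually wins the cancelation, so a
timer that is already firing keeps its list linkage and is cleaned up by
the normal timeout completion path.

	/* Sketch/assumption, not code introduced by this change. */
	static bool io_kill_timeout(struct io_kiocb *req, int status)
		__must_hold(&req->ctx->completion_lock)
	{
		struct io_timeout_data *io = req->async_data;

		if (hrtimer_try_to_cancel(&io->timer) != -1) {
			/* Won the race against the timer firing: unlink and complete. */
			list_del_init(&req->timeout.list);
			/* ... post a CQE with 'status' and drop the request ... */
			return true;
		}
		/* Timer callback is already running; leave the entry on the list. */
		return false;
	}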

Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[ddiss: merge without 50c1df2b56e0 (io_uring: support
 CLOCK_BOOTTIME/REALTIME for timeouts)]
Acked-by: David Disseldorp <ddiss@suse.de>

---
 fs/io_uring.c |    7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1366,12 +1366,11 @@ static void io_flush_timeouts(struct io_
 	__must_hold(&ctx->completion_lock)
 {
 	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	while (!list_empty(&ctx->timeout_list)) {
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1388,7 +1387,6 @@ static void io_flush_timeouts(struct io_
 		if (events_got < events_needed)
 			break;
 
-		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
 	}
 	ctx->cq_last_tm_flush = seq;
@@ -5691,6 +5689,7 @@ static int io_timeout_prep(struct io_kio
 	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
 		return -EINVAL;
 
+	INIT_LIST_HEAD(&req->timeout.list);
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
 	if (is_timeout_link)