Blob Blame History Raw
From 9e9e8c54554066d4ba4bf1eeaba9a3b98c480cc8 Mon Sep 17 00:00:00 2001
From: Gabriel Krisman Bertazi <krisman@suse.de>
Date: Mon, 1 May 2023 23:19:53 -0400
Subject: [PATCH] timens: Forbid changing time namespace for an io_uring
 process
Patch-mainline: Never, specific to 15SP3
References: bsc#1208474 CVE-2023-23586

Even if single-threaded from a userspace point of view, io-uring applications
spawn kernelspace workers that partially share mm_struct with the original task.

Allowing the time namespace to be changed for such tasks opens the door to
race conditions that can leak kernel memory.  Newer kernels already fix this by
completely reworking the way io-workers operate, which avoids the shared mm
issue, but older kernels are still vulnerable.  This patch avoids the issue by
preventing time namespaces from being set when a task has created io-workers.

Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
---
 fs/io_uring.c              | 13 +++++++++++++
 include/linux/sched/task.h |  9 +++++++++
 lib/is_single_threaded.c   |  3 +++
 3 files changed, 25 insertions(+)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 66d3a2420aaf..6e0cd421aaee 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2440,6 +2440,19 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	return submitted;
 }
 
+static int __io_uring_fd(const void *arg, struct file *file,
+			 unsigned int fd)
+{
+	return file->f_op == &io_uring_fops; /* nonzero => fd is an io_uring instance */
+}
+
+bool current_has_io_workers(void)
+{
+	if (iterate_fd(current->files, 0, __io_uring_fd, NULL)) /* scan current's fd table for io_uring fds */
+		return true;
+	return false;
+}
+
+
 static int io_sq_thread(void *data)
 {
 	struct io_ring_ctx *ctx = data;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index f1879884238e..4569dd5c2054 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -176,4 +176,13 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
+#if defined(CONFIG_IO_URING)
+extern bool current_has_io_workers(void); /* defined in fs/io_uring.c */
+#else
+static inline bool current_has_io_workers(void)
+{
+	return false; /* no io_uring support, so no io-workers can exist */
+}
+#endif
+
 #endif /* _LINUX_SCHED_TASK_H */
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index 8c98b20bfc41..9717ff883497 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
 	if (atomic_read(&task->signal->live) != 1)
 		return false;
 
+	if (current_has_io_workers())
+		return false;
+
 	if (atomic_read(&mm->mm_users) == 1)
 		return true;
 
-- 
2.40.1