From cac68d12c531aa3010509a5a55a5dfd18dedaa80 Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Fri, 28 Feb 2020 15:20:18 -0700
Subject: io_uring: grab ->fs as part of async offload
References: bsc#1179434 CVE-2020-29373
Patch-mainline: v5.6-rc2
Git-commit: ff002b30181d30cdfbca316dadd099c3ca0d739c

[ Upstream commits 9392a27d88b9 and ff002b30181d ]

Ensure that the async work grabs ->fs from the queueing task if the
punted command needs to do lookups.
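
In short: at submission time the request takes a counted reference on
the submitter's fs_struct, and the async worker temporarily installs it
as its own ->fs around the punted work, so path lookups resolve against
the submitter's root/cwd. A minimal sketch of the submission-side grab,
mirroring the hunks below (io_grab_fs is an illustrative name only, not
a helper this patch adds):

	static int io_grab_fs(struct io_kiocb *req)
	{
		/* Share the submitter's fs_struct, don't copy it. */
		spin_lock(&current->fs->lock);
		if (!current->fs->in_exec) {
			req->fs = current->fs;
			req->fs->users++;
		}
		spin_unlock(&current->fs->lock);

		/* Lost a race with exec(): let the caller retry. */
		return req->fs ? 0 : -EAGAIN;
	}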

We don't have these two commits in 5.4-stable:

ff002b30181d30cdfbca316dadd099c3ca0d739c
9392a27d88b9707145d713654eb26f0c29789e50

because they don't apply cleanly after the rework of how io_uring
handles offload. Since there's no io-wq in 5.4, it doesn't make sense to
do two patches. I'm attaching my port of the two for 5.4-stable; it's
been tested. Please queue it up for the next 5.4-stable, thanks!
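
For completeness, the completion side drops that reference again,
freeing the fs_struct if the request held the last user. A sketch of
the drop pattern (io_drop_fs is an illustrative name; the port
open-codes this in io_send_recvmsg()):

	static void io_drop_fs(struct io_kiocb *req)
	{
		struct fs_struct *fs = req->fs;

		if (!fs)
			return;
		spin_lock(&fs->lock);
		if (--fs->users)
			fs = NULL;	/* still shared, nothing to free */
		spin_unlock(&fs->lock);
		if (fs)
			free_fs_struct(fs);
	}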

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Acked-by: Jan Kara <jack@suse.cz>
---
 fs/io_uring.c |   48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -70,6 +70,9 @@
 #include <linux/nospec.h>
 #include <linux/sizes.h>
 #include <linux/hugetlb.h>
+#ifndef __GENKSYMS__
+#include <linux/fs_struct.h>
+#endif
 
 #include <uapi/linux/io_uring.h>
 
@@ -346,6 +349,8 @@ struct io_kiocb {
 	u32			result;
 	u32			sequence;
 
+	struct fs_struct	*fs;
+
 	struct work_struct	work;
 };
 
@@ -599,6 +604,7 @@ static struct io_kiocb *io_get_req(struc
 	/* one is dropped after submission, the other at completion */
 	refcount_set(&req->refs, 2);
 	req->result = 0;
+	req->fs = NULL;
 	return req;
 out:
 	io_ring_drop_ctx_refs(ctx, 1);
@@ -1539,6 +1545,16 @@ static int io_send_recvmsg(struct io_kio
 			ret = -EINTR;
 	}
 
+	if (req->fs) {
+		struct fs_struct *fs = req->fs;
+
+		spin_lock(&req->fs->lock);
+		if (--fs->users)
+			fs = NULL;
+		spin_unlock(&req->fs->lock);
+		if (fs)
+			free_fs_struct(fs);
+	}
 	io_cqring_add_event(req->ctx, sqe->user_data, ret);
 	io_put_req(req);
 	return 0;
@@ -1912,6 +1928,7 @@ static inline bool io_sqe_needs_user(con
 static void io_sq_wq_submit_work(struct work_struct *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	struct fs_struct *old_fs_struct = current->fs;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct mm_struct *cur_mm = NULL;
 	struct async_list *async_list;
@@ -1931,6 +1948,15 @@ restart:
 		/* Ensure we clear previously set non-block flag */
 		req->rw.ki_flags &= ~IOCB_NOWAIT;
 
+		if (req->fs != current->fs) {
+			task_lock(current);
+			if (req->fs)
+				current->fs = req->fs;
+			else
+				current->fs = old_fs_struct;
+			task_unlock(current);
+		}
+
 		ret = 0;
 		if (io_sqe_needs_user(sqe) && !cur_mm) {
 			if (!mmget_not_zero(ctx->sqo_mm)) {
@@ -2029,6 +2055,11 @@ out:
 		mmput(cur_mm);
 	}
 	revert_creds(old_cred);
+	if (old_fs_struct) {
+		task_lock(current);
+		current->fs = old_fs_struct;
+		task_unlock(current);
+	}
 }
 
 /*
@@ -2203,6 +2234,23 @@ err:
 
 	req->user_data = s->sqe->user_data;
 
+#if defined(CONFIG_NET)
+	switch (READ_ONCE(s->sqe->opcode)) {
+	case IORING_OP_SENDMSG:
+	case IORING_OP_RECVMSG:
+		spin_lock(&current->fs->lock);
+		if (!current->fs->in_exec) {
+			req->fs = current->fs;
+			req->fs->users++;
+		}
+		spin_unlock(&current->fs->lock);
+		if (!req->fs) {
+			ret = -EAGAIN;
+			goto err_req;
+		}
+	}
+#endif
+
 	/*
 	 * If we already have a head request, queue this one for async
 	 * submittal once the head completes. If we don't have a head but