From d245bca6375bccfd589a6a7d5007df28575bb626 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Fri, 17 Jun 2022 09:48:00 +0100
Subject: [PATCH] io_uring: don't expose io_fill_cqe_aux()
Git-commit: d245bca6375bccfd589a6a7d5007df28575bb626
Patch-mainline: v6.0-rc1
References: bsc#1211014 CVE-2023-2430

Deduplicate some code and add a helper that fills an aux CQE and takes
care of the completion locking and event notification.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b7c6557c8f9dc5c4cfb01292116c682a0ff61081.1655455613.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
---
fs/io_uring.c | 77 +++++++++++++++++++++++-----------------------------------
1 file changed, 31 insertions(+), 46 deletions(-)
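
The new helper bundles the lock -> fill -> commit -> unlock -> notify
sequence that each converted call site previously open-coded. A minimal
sketch of the caller pattern, using the identifiers from the hunks below
(not a verbatim excerpt of the kernel source):

	/* before: every call site open-coded posting an aux CQE */
	spin_lock(&ctx->completion_lock);
	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	if (filled)
		io_cqring_ev_posted(ctx);

	/*
	 * after: one call performs the same lock/fill/commit/unlock/notify
	 * steps; the return value reports whether a CQE (or an overflow
	 * entry) was queued
	 */
	filled = io_post_aux_cqe(ctx, user_data, res, cflags);
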
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2440,8 +2440,8 @@ static inline bool __io_fill_cqe_req(str
}
}
-static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
+static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+ s32 res, u32 cflags)
{
struct io_uring_cqe *cqe;
@@ -2468,6 +2468,20 @@ static noinline bool io_fill_cqe_aux(str
return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
}
+static bool io_post_aux_cqe(struct io_ring_ctx *ctx,
+ u64 user_data, s32 res, u32 cflags)
+{
+ bool filled;
+
+ spin_lock(&ctx->completion_lock);
+ filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ if (filled)
+ io_cqring_ev_posted(ctx);
+ return filled;
+}
+
static void __io_req_complete_put(struct io_kiocb *req)
{
/*
@@ -4942,7 +4956,6 @@ static int io_msg_ring(struct io_kiocb *
{
struct io_ring_ctx *target_ctx;
struct io_msg *msg = &req->msg;
- bool filled;
int ret;
ret = -EBADFD;
@@ -4952,15 +4965,8 @@ static int io_msg_ring(struct io_kiocb *
ret = -EOVERFLOW;
target_ctx = req->file->private_data;
- spin_lock(&target_ctx->completion_lock);
- filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
- io_commit_cqring(target_ctx);
- spin_unlock(&target_ctx->completion_lock);
-
- if (filled) {
- io_cqring_ev_posted(target_ctx);
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
ret = 0;
- }
done:
if (ret < 0)
@@ -6271,22 +6277,12 @@ retry:
__io_req_complete(req, issue_flags, ret, 0);
return 0;
}
- if (ret >= 0) {
- bool filled;
-
- spin_lock(&ctx->completion_lock);
- filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
- IORING_CQE_F_MORE);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (filled) {
- io_cqring_ev_posted(ctx);
- goto retry;
- }
- ret = -ECANCELED;
- }
+ if (ret < 0)
+ return ret;
- return ret;
+ if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE))
+ goto retry;
+ return -ECANCELED;
}
static int io_connect_prep_async(struct io_kiocb *req)
@@ -6525,18 +6521,11 @@ static int io_poll_check_events(struct i
if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- bool filled;
- spin_lock(&ctx->completion_lock);
- filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
- mask, IORING_CQE_F_MORE);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- if (filled) {
- io_cqring_ev_posted(ctx);
- continue;
- }
- return -ECANCELED;
+ if (!io_post_aux_cqe(ctx, req->cqe.user_data,
+ mask, IORING_CQE_F_MORE))
+ return -ECANCELED;
+ continue;
}
io_tw_lock(req->ctx, locked);
@@ -9598,17 +9587,13 @@ static void __io_rsrc_put_work(struct io
list_del(&prsrc->list);
if (prsrc->tag) {
- if (ctx->flags & IORING_SETUP_IOPOLL)
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
mutex_lock(&ctx->uring_lock);
-
- spin_lock(&ctx->completion_lock);
- io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
-
- if (ctx->flags & IORING_SETUP_IOPOLL)
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
mutex_unlock(&ctx->uring_lock);
+ } else {
+ io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+ }
}
rsrc_data->do_put(ctx, prsrc);