From: Alexei Starovoitov <ast@kernel.org>
Date: Mon, 24 Feb 2020 11:27:15 -0800
Subject: bpf: disable preemption for bpf progs attached to uprobe
Patch-mainline: v5.7-rc1
Git-commit: 70ed0706a48e3da3eb4515214fef658ff1184b9f
References: bsc#1162702,bsc#1177028

trace_call_bpf() no longer disables preemption on its own.
All callers of this function have to do it explicitly.
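
For illustration, the pattern every caller now needs is sketched below
(the surrounding handler is made up; trace_call_bpf(), preempt_disable()
and preempt_enable() are the real kernel interfaces, and the uprobe case
is handled by the hunk in this patch):

	/*
	 * Illustrative caller of trace_call_bpf(): the caller, not the
	 * helper, provides the non-preemptible region around the BPF run.
	 */
	static void example_perf_func(struct trace_event_call *call,
				      struct pt_regs *regs)
	{
		unsigned int ret;

		if (!bpf_prog_array_valid(call))
			return;

		preempt_disable();	/* BPF progs expect to run non-preemptible */
		ret = trace_call_bpf(call, regs);
		preempt_enable();
		if (!ret)
			return;		/* prog filtered this event out */

		/* ... record the sample ... */
	}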

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Daniel Wagner <dwagner@suse.de>
Acked-by: Gary Lin <glin@suse.com>
---
 kernel/trace/trace_uprobe.c |   11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1125,8 +1125,15 @@ static void __uprobe_perf_func(struct tr
 	int size, esize;
 	int rctx;
 
-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		u32 ret;
+
+		preempt_disable();
+		ret = trace_call_bpf(call, regs);
+		preempt_enable();
+		if (!ret)
+			return;
+	}
 
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));