From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Aug 2023 13:44:34 +0200
Subject: x86/cpu: Cleanup the untrain mess
Git-commit: e7c25c441e9e0fa75b4c83e0b26306b702cfe90d
Patch-mainline: v6.5-rc7
References: git-fixes

Since there can only be one active return_thunk, there only needs to be
one (matching) untrain_ret. It fundamentally doesn't make sense to
allow multiple untrain_ret at the same time.

Fold the three different untrain methods into a single (temporary)
helper stub.
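
For reference, the folded helper is a single alternatives-based dispatch;
a minimal sketch, mirroring the retpoline.S hunk below:

	/* Default to the retbleed sequence, runtime-patched to the SRSO variants */
	ENTRY(entry_untrain_ret)
		ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
			      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
			      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
	ENDPROC(entry_untrain_ret)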

Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org

Acked-by: nborisov <nik.borisov@suse.com>
---
 arch/x86/include/asm/nospec-branch.h |   11 +++--------
 arch/x86/kernel/cpu/bugs.c           |    3 +++
 arch/x86/lib/retpoline.S             |    8 ++++++++
 3 files changed, 14 insertions(+), 8 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -174,7 +174,7 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+ * While entry_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -183,14 +183,9 @@
 .macro UNTRAIN_RET
 #ifdef CONFIG_RETPOLINE
 	ALTERNATIVE_2 "",						\
-	              "call retbleed_untrain_ret", X86_FEATURE_UNRET,	\
+	              "call entry_untrain_ret", X86_FEATURE_UNRET,	\
 		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-	ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-			  "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 #else /* __ASSEMBLY__ */
@@ -211,7 +206,7 @@
 #ifdef CONFIG_X86_64
 
 extern void __x86_return_thunk(void);
-extern void retbleed_untrain_ret(void);
+extern void entry_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 extern void entry_ibpb(void);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1803,6 +1803,9 @@ static void __init srso_select_mitigatio
 
 	case SRSO_CMD_SAFE_RET:
 		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+
+			setup_force_cpu_cap(X86_FEATURE_UNRET);
+
 			if (boot_cpu_data.x86 == 0x19)
 				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
 			else
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -188,6 +188,14 @@ ENDPROC(srso_safe_ret)
 ENDPROC(srso_untrain_ret)
 __EXPORT_THUNK(srso_untrain_ret)
 
+
+ENTRY(entry_untrain_ret)
+	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ENDPROC(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
 ENTRY(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \