From: "Borislav Petkov (AMD)" <bp@alien8.de>
Date: Thu, 29 Jun 2023 17:43:40 +0200
Subject: x86/srso: Add SRSO_NO support
Git-commit: 1b5277c0ea0b247393a9c426769fde18cff5e2f6
Patch-mainline: v6.6 or v6.5-rc4 (next release)
References: bsc#1213287, CVE-2023-20569

Add support for the CPUID flag which denotes that the CPU is not
affected by SRSO.
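
For illustration only (not part of this patch): the new bits live in CPUID
leaf 0x80000021 (EAX), so a minimal user-space sketch like the one below,
assuming GCC's <cpuid.h>, can report whether a given CPU advertises them:

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

          /* Fn8000_0021 EAX: SBPB is bit 27, IBPB_BRTYPE bit 28,
           * SRSO_NO bit 29 -- matching the defines added below.
           */
          if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx))
                  return 1;    /* leaf not advertised */

          printf("SBPB:        %u\n", (eax >> 27) & 1);
          printf("IBPB_BRTYPE: %u\n", (eax >> 28) & 1);
          printf("SRSO_NO:     %u\n", (eax >> 29) & 1);
          return 0;
  }

Note that the kernel may also force-set the synthetic SRSO_NO flag
internally (see the bugs.c hunk for Zen1/2 with SMT disabled),
independent of the raw CPUID bit this sketch reads.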

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Nikolay Borisov <nik.borisov@suse.com>
---
 arch/x86/include/asm/cpufeatures.h   |    2 ++
 arch/x86/include/asm/msr-index.h     |    1 +
 arch/x86/include/asm/nospec-branch.h |    6 +++---
 arch/x86/kernel/cpu/amd.c            |   12 ++++++------
 arch/x86/kernel/cpu/bugs.c           |   24 ++++++++++++++++++++----
 arch/x86/kernel/cpu/common.c         |    6 ++++--
 arch/x86/kvm/svm.c                   |    4 ++--
 7 files changed, 38 insertions(+), 17 deletions(-)

--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -380,7 +380,9 @@
 /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
 #define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
 
+#define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
 
 /*
  * BUG word(s)
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -55,6 +55,7 @@
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL			0x0000004e
 #define MSR_PPIN			0x0000004f
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -343,11 +343,11 @@ void alternative_msr_write(unsigned int
 		: "memory");
 }
 
+extern u64 x86_pred_cmd;
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-	u64 val = PRED_CMD_IBPB;
-
-	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1256,14 +1256,14 @@ bool cpu_has_ibpb_brtype_microcode(void)
 {
 	u8 fam = boot_cpu_data.x86;
 
-	if (fam == 0x17) {
-		/* Zen1/2 IBPB flushes branch type predictions too. */
+	/* Zen1/2 IBPB flushes branch type predictions too. */
+	if (fam == 0x17)
 		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
-	} else if (fam == 0x19) {
+	/* Poke the MSR bit on Zen3/4 to check its presence. */
+	else if (fam == 0x19)
+		return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB);
+	else
 		return false;
-	}
-
-	return false;
 }
 
 static void zenbleed_check_cpu(void *unused)
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -56,6 +56,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -1629,7 +1632,7 @@ static void __init srso_select_mitigatio
 	bool has_microcode;
 
 	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
-		return;
+		goto pred_cmd;
 
 	/*
 	 * The first check is for the kernel running as a guest in order
@@ -1642,9 +1645,18 @@ static void __init srso_select_mitigatio
 	} else {
 		/*
 		 * Enable the synthetic (even if in a real CPUID leaf)
-		 * flag for guests.
+		 * flags for guests.
 		 */
 		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+		setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+		/*
+		 * Zen1/2 with SMT off aren't vulnerable after the right
+		 * IBPB microcode has been applied.
+		 */
+		if ((boot_cpu_data.x86 < 0x19) &&
+		    (cpu_smt_control == CPU_SMT_DISABLED))
+			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
 	}
 
 	switch (srso_cmd) {
@@ -1667,16 +1679,20 @@ static void __init srso_select_mitigatio
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
 			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-			return;
+			goto pred_cmd;
 		}
 		break;
 
 	default:
 		break;
-
 	}
 
 	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+
+pred_cmd:
+	if (boot_cpu_has(X86_FEATURE_SRSO_NO) ||
+	    srso_cmd == SRSO_CMD_OFF)
+		x86_pred_cmd = PRED_CMD_SBPB;
 }
 
 #undef pr_fmt
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1165,8 +1165,10 @@ static void __init cpu_set_bug_bits(stru
 	    !(ia32_cap & ARCH_CAP_PBRSB_NO))
 		setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 
-	if (cpu_matches(cpu_vuln_blacklist, SRSO))
-		setup_force_cpu_bug(X86_BUG_SRSO);
+	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+		if (cpu_matches(cpu_vuln_blacklist, SRSO))
+			setup_force_cpu_bug(X86_BUG_SRSO);
+	}
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4310,13 +4310,13 @@ static int svm_set_msr(struct kvm_vcpu *
 		    !guest_has_pred_cmd_msr(vcpu))
 			return 1;
 
-		if (data & ~PRED_CMD_IBPB)
+		if (data & ~(PRED_CMD_IBPB | (boot_cpu_has(X86_FEATURE_SBPB) ? PRED_CMD_SBPB : 0)))
 			return 1;
 
 		if (!data)
 			break;
 
-		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+		wrmsrl(MSR_IA32_PRED_CMD, data);
 		if (is_guest_mode(vcpu))
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);