From: Joerg Roedel <jroedel@suse.de>
Date: Fri, 8 Jul 2022 16:15:16 +0200
Subject: [PATCH] CVE Mitigation for CVE-2022-29900 and CVE-2022-29901
Patch-mainline: Never, downstream CVE mitigation
References: bsc#1199657 CVE-2022-29900 CVE-2022-29901
Necessary changes to backport the following upstream patches:
KVM: VMX: Prevent RSB underflow before vmenter
x86/speculation: Fill RSB on vmexit for IBRS
KVM: VMX: Fix IBRS handling after vmexit
KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
KVM: VMX: Convert launched argument to flags
KVM: VMX: Flatten __vmx_vcpu_run()
Into the SLE12-SP5 code base.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
arch/x86/include/asm/spec-ctrl.h | 20 ++++++++++++++++++++
arch/x86/kernel/cpu/bugs.c | 9 +--------
arch/x86/kvm/vmx.c | 22 +++++++++++-----------
tools/lib/subcmd/subcmd-util.h | 11 ++---------
4 files changed, 34 insertions(+), 28 deletions(-)
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 5393babc0598..87bd2dd863e9 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -27,6 +27,17 @@ static inline
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
{
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+
+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+ u64 guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
+ if (hostval != guestval) {
+ u32 low = (u32)guest_spec_ctrl, high = (u32)(guest_spec_ctrl >> 32);
+
+ asm volatile("wrmsr\n" :
+ : "c" (MSR_IA32_SPEC_CTRL), "a"(low), "d"(high)
+ : "memory");
+ }
+ }
}
/**
@@ -40,6 +51,15 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
static inline
void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
{
+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+ u64 hostval = spec_ctrl_current();
+ u32 low = (u32)hostval, high = (u32)(hostval >> 32);
+
+ asm volatile("wrmsr\n" :
+ : "c" (MSR_IA32_SPEC_CTRL), "a"(low), "d"(high)
+ : "memory");
+ }
+
x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8997b466d803..323253e44c19 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -187,16 +187,9 @@ void __init check_bugs(void)
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
- u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
+ u64 guestval = guest_spec_ctrl, hostval;
struct thread_info *ti = current_thread_info();
- if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
- if (hostval != guestval) {
- msrval = setguest ? guestval : hostval;
- wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
- }
- }
-
/*
* If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
* MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2352a6534a35..f03f15f72797 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9910,14 +9910,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_arm_hv_timer(vcpu);
- /*
- * If this vCPU has touched SPEC_CTRL, restore the guest's value if
- * it's non-zero. Since vmentry is serialising on affected CPUs, there
- * is no need to worry about the conditional branch over the wrmsr
- * being speculatively taken.
- */
- x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
vmx->__launched = vmx->loaded_vmcs->launched;
/* L1D Flush includes CPU buffer clear to mitigate MDS */
@@ -9931,6 +9923,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_disable_fb_clear(vmx);
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -10060,7 +10060,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
- vmx_enable_fb_clear(vmx);
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
/*
* We do not use IBRS in the kernel. If this vCPU has used the
@@ -10082,8 +10083,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
+ vmx_enable_fb_clear(vmx);
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (debugctlmsr)
diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
index 8fa5f036eff0..8a34305448d0 100644
--- a/tools/lib/subcmd/subcmd-util.h
+++ b/tools/lib/subcmd/subcmd-util.h
@@ -49,15 +49,8 @@ static NORETURN inline void die(const char *err, ...)
static inline void *xrealloc(void *ptr, size_t size)
{
void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
+ if (!ret)
+ die("Out of memory, realloc failed");
return ret;
}
--
2.36.1