From 1da26c73b40b5043711956004407340002ace405 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: May 06 2019 09:58:58 +0000 Subject: Merge remote-tracking branch 'origin/users/bpetkov/SLE15/15-bsc1111331-2' into SLE12-SP4-G4 Pull MDS fixes from Borislav Petkov. --- diff --git a/patches.arch/intel_idle-add-support-for-Jacobsville.patch b/patches.arch/intel_idle-add-support-for-Jacobsville.patch index 81b2bc5..9e2a8e6 100644 --- a/patches.arch/intel_idle-add-support-for-Jacobsville.patch +++ b/patches.arch/intel_idle-add-support-for-Jacobsville.patch @@ -13,21 +13,16 @@ Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki Acked-by: Michal Suchanek --- - drivers/idle/intel_idle.c | 1 + + drivers/idle/intel_idle.c | 1 + 1 file changed, 1 insertion(+) -diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c -index 216d7ec88c0c..008eb4d58a86 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c -@@ -1099,6 +1099,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { +@@ -1099,6 +1099,7 @@ static const struct x86_cpu_id intel_idl ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), - ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt), - ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt), + ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv), + ICPU(INTEL_FAM6_ATOM_TREMONT_X, idle_cpu_dnv), {} }; --- -2.20.1 - diff --git a/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch new file mode 100644 index 0000000..cf5063b --- /dev/null +++ b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch @@ -0,0 +1,47 @@ +From: Eduardo Habkost +Date: Wed, 5 Dec 2018 17:19:56 -0200 +Subject: kvm: x86: Report STIBP on GET_SUPPORTED_CPUID +Git-commit: d7b09c827a6cf291f66637a36f46928dd1423184 +Patch-mainline: v5.0-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Months ago, we have added code to allow direct access to MSR_IA32_SPEC_CTRL +to the guest, which makes STIBP available to guests. This was implemented +by commits d28b387fb74d ("KVM/VMX: Allow direct access to +MSR_IA32_SPEC_CTRL") and b2ac58f90540 ("KVM/SVM: Allow direct access to +MSR_IA32_SPEC_CTRL"). + +However, we never updated GET_SUPPORTED_CPUID to let userspace know that +STIBP can be enabled in CPUID. Fix that by updating +kvm_cpuid_8000_0008_ebx_x86_features and kvm_cpuid_7_0_edx_x86_features. 
+ +Signed-off-by: Eduardo Habkost +Reviewed-by: Jim Mattson +Reviewed-by: Konrad Rzeszutek Wilk +Signed-off-by: Paolo Bonzini +Acked-by: Borislav Petkov +--- + arch/x86/kvm/cpuid.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -367,7 +367,8 @@ static inline int __do_cpuid_ent(struct + + /* cpuid 0x80000008.ebx */ + const u32 kvm_cpuid_8000_0008_ebx_x86_features = +- F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | F(AMD_SSB_NO); ++ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | ++ F(AMD_SSB_NO) | F(AMD_STIBP); + + /* cpuid 0xC0000001.edx */ + const u32 kvm_cpuid_C000_0001_edx_x86_features = +@@ -395,7 +396,7 @@ static inline int __do_cpuid_ent(struct + /* cpuid 7.0.edx*/ + const u32 kvm_cpuid_7_0_edx_x86_features = + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | +- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES); ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP); + + /* all calls to cpuid_count() should be made on the same cpu */ + get_cpu(); diff --git a/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch new file mode 100644 index 0000000..cec803d --- /dev/null +++ b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch @@ -0,0 +1,90 @@ +From: Will Deacon +Date: Tue, 19 Jun 2018 13:53:08 +0100 +Subject: locking/atomics, asm-generic: Move some macros from + to a new file +Git-commit: 8bd9cb51daac89337295b6f037b0486911e1b408 +Patch-mainline: v4.19-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +In preparation for implementing the asm-generic atomic bitops in terms +of atomic_long_*(), we need to prevent implementations from +pulling in . A common reason for this include is for the +BITS_PER_BYTE definition, so move this and some other BIT() and masking +macros into a new header file, . + +Signed-off-by: Will Deacon +Acked-by: Peter Zijlstra (Intel) +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: linux-arm-kernel@lists.infradead.org +Cc: yamada.masahiro@socionext.com +Link: https://lore.kernel.org/lkml/1529412794-17720-4-git-send-email-will.deacon@arm.com +Signed-off-by: Ingo Molnar +Acked-by: Borislav Petkov +--- + include/linux/bitops.h | 21 +-------------------- + include/linux/bits.h | 26 ++++++++++++++++++++++++++ + 2 files changed, 27 insertions(+), 20 deletions(-) + +--- a/include/linux/bitops.h ++++ b/include/linux/bitops.h +@@ -1,28 +1,9 @@ + #ifndef _LINUX_BITOPS_H + #define _LINUX_BITOPS_H + #include ++#include + +-#ifdef __KERNEL__ +-#define BIT(nr) (1UL << (nr)) +-#define BIT_ULL(nr) (1ULL << (nr)) +-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +-#define BITS_PER_BYTE 8 + #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +-#endif +- +-/* +- * Create a contiguous bitmask starting at bit position @l and ending at +- * position @h. For example +- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
+- */ +-#define GENMASK(h, l) \ +- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +- +-#define GENMASK_ULL(h, l) \ +- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + + extern unsigned int __sw_hweight8(unsigned int w); + extern unsigned int __sw_hweight16(unsigned int w); +--- /dev/null ++++ b/include/linux/bits.h +@@ -0,0 +1,26 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __LINUX_BITS_H ++#define __LINUX_BITS_H ++#include ++ ++#define BIT(nr) (1UL << (nr)) ++#define BIT_ULL(nr) (1ULL << (nr)) ++#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) ++#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) ++#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) ++#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) ++#define BITS_PER_BYTE 8 ++ ++/* ++ * Create a contiguous bitmask starting at bit position @l and ending at ++ * position @h. For example ++ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. ++ */ ++#define GENMASK(h, l) \ ++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++ ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) - (1ULL << (l)) + 1) & \ ++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) ++ ++#endif /* __LINUX_BITS_H */ diff --git a/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch b/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch index 5a3982f..5a1ed53 100644 --- a/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch +++ b/patches.arch/powercap-intel_rapl-add-support-for-Jacobsville.patch @@ -13,15 +13,15 @@ Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki Acked-by: Michal Suchanek --- - drivers/powercap/intel_rapl.c | 1 + + drivers/powercap/intel_rapl.c | 1 + 1 file changed, 1 insertion(+) --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -1166,6 +1166,7 @@ static const struct x86_cpu_id rapl_ids[ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core), - RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core), + RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core), + RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core), + RAPL_CPU(INTEL_FAM6_ATOM_TREMONT_X, rapl_defaults_core), RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server), diff --git a/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch index 2114216..ea0130e 100644 --- a/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch +++ b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch @@ -44,7 +44,7 @@ Acked-by: Borislav Petkov #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c -@@ -948,7 +948,8 @@ static void __init cpu_set_bug_bits(stru +@@ -946,7 +946,8 @@ static void __init cpu_set_bug_bits(stru rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); if (!x86_match_cpu(cpu_no_spec_store_bypass) && @@ -53,7 +53,7 @@ Acked-by: Borislav Petkov + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); - if (x86_match_cpu(cpu_no_speculation)) + if (x86_match_cpu(cpu_no_meltdown)) --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct diff --git a/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch b/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch index 5788f25..243d8cc 100644 --- a/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch +++ 
b/patches.arch/x86-cpu-Add-Atom-Tremont-Jacobsville.patch @@ -31,7 +31,7 @@ Cc: x86-ml Link: https://lkml.kernel.org/r/20190125195902.17109-4-tony.luck@intel.com Acked-by: Michal Suchanek --- - arch/x86/include/asm/intel-family.h | 3 ++- + arch/x86/include/asm/intel-family.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) --- a/arch/x86/include/asm/intel-family.h @@ -45,10 +45,10 @@ Acked-by: Michal Suchanek * * Things ending in "2" are usually because we have no better * name for them. There's no processor called "SILVERMONT2". -@@ -63,6 +63,7 @@ - #define INTEL_FAM6_ATOM_GOLDMONT 0x5C - #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ - #define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A +@@ -67,6 +67,7 @@ + #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ + #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ + #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ +#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobswille */ /* Xeon Phi */ diff --git a/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch new file mode 100644 index 0000000..70223a4 --- /dev/null +++ b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch @@ -0,0 +1,602 @@ +From: Peter Zijlstra +Date: Tue, 7 Aug 2018 10:17:27 -0700 +Subject: x86/cpu: Sanitize FAM6_ATOM naming +Git-commit: f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +commit f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e upstream + +Going primarily by: + + https://en.wikipedia.org/wiki/List_of_Intel_Atom_microprocessors + +with additional information gleaned from other related pages; notably: + + - Bonnell shrink was called Saltwell + - Moorefield is the Merriefield refresh which makes it Airmont + +The general naming scheme is: FAM6_ATOM_UARCH_SOCTYPE + + for i in `git grep -l FAM6_ATOM` ; do + sed -i -e 's/ATOM_PINEVIEW/ATOM_BONNELL/g' \ + -e 's/ATOM_LINCROFT/ATOM_BONNELL_MID/' \ + -e 's/ATOM_PENWELL/ATOM_SALTWELL_MID/g' \ + -e 's/ATOM_CLOVERVIEW/ATOM_SALTWELL_TABLET/g' \ + -e 's/ATOM_CEDARVIEW/ATOM_SALTWELL/g' \ + -e 's/ATOM_SILVERMONT1/ATOM_SILVERMONT/g' \ + -e 's/ATOM_SILVERMONT2/ATOM_SILVERMONT_X/g' \ + -e 's/ATOM_MERRIFIELD/ATOM_SILVERMONT_MID/g' \ + -e 's/ATOM_MOOREFIELD/ATOM_AIRMONT_MID/g' \ + -e 's/ATOM_DENVERTON/ATOM_GOLDMONT_X/g' \ + -e 's/ATOM_GEMINI_LAKE/ATOM_GOLDMONT_PLUS/g' ${i} + done + +Signed-off-by: Peter Zijlstra (Intel) +Cc: Alexander Shishkin +Cc: Arnaldo Carvalho de Melo +Cc: Jiri Olsa +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Stephane Eranian +Cc: Thomas Gleixner +Cc: Vince Weaver +Cc: dave.hansen@linux.intel.com +Cc: len.brown@intel.com +Signed-off-by: Ingo Molnar +Signed-off-by: Thomas Gleixner +[ 4.14.y speck backport, commit id there: f0fae1c931dd3a49cd42855836fc3f075960d4be ] +Acked-by: Borislav Petkov +--- + arch/x86/events/intel/core.c | 18 +++---- + arch/x86/events/intel/cstate.c | 4 - + arch/x86/events/msr.c | 4 - + arch/x86/include/asm/intel-family.h | 30 ++++++----- + arch/x86/kernel/cpu/common.c | 28 +++++----- + arch/x86/kernel/tsc.c | 2 + arch/x86/platform/atom/punit_atom_debug.c | 4 - + arch/x86/platform/intel-mid/device_libs/platform_bt.c | 2 + drivers/acpi/acpi_lpss.c | 2 + drivers/acpi/x86/utils.c | 2 + drivers/cpufreq/intel_pstate.c | 4 - + drivers/edac/pnd2_edac.c | 2 + drivers/idle/intel_idle.c | 18 +++---- + drivers/mmc/host/sdhci-acpi.c | 2 + drivers/pci/pci-mid.c | 4 - + drivers/platform/x86/intel_int0002_vgpio.c | 2 + 
drivers/platform/x86/intel_mid_powerbtn.c | 4 - + drivers/powercap/intel_rapl.c | 10 +-- + drivers/thermal/intel_soc_dts_thermal.c | 2 + tools/power/x86/turbostat/turbostat.c | 46 +++++++++--------- + 20 files changed, 98 insertions(+), 92 deletions(-) + +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3724,11 +3724,11 @@ __init int intel_pmu_init(void) + pr_cont("Nehalem events, "); + break; + +- case INTEL_FAM6_ATOM_PINEVIEW: +- case INTEL_FAM6_ATOM_LINCROFT: +- case INTEL_FAM6_ATOM_PENWELL: +- case INTEL_FAM6_ATOM_CLOVERVIEW: +- case INTEL_FAM6_ATOM_CEDARVIEW: ++ case INTEL_FAM6_ATOM_BONNELL: ++ case INTEL_FAM6_ATOM_BONNELL_MID: ++ case INTEL_FAM6_ATOM_SALTWELL: ++ case INTEL_FAM6_ATOM_SALTWELL_MID: ++ case INTEL_FAM6_ATOM_SALTWELL_TABLET: + memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + +@@ -3740,9 +3740,11 @@ __init int intel_pmu_init(void) + pr_cont("Atom events, "); + break; + +- case INTEL_FAM6_ATOM_SILVERMONT1: +- case INTEL_FAM6_ATOM_SILVERMONT2: ++ case INTEL_FAM6_ATOM_SILVERMONT: ++ case INTEL_FAM6_ATOM_SILVERMONT_X: ++ case INTEL_FAM6_ATOM_SILVERMONT_MID: + case INTEL_FAM6_ATOM_AIRMONT: ++ case INTEL_FAM6_ATOM_AIRMONT_MID: + memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, +@@ -3759,7 +3761,7 @@ __init int intel_pmu_init(void) + break; + + case INTEL_FAM6_ATOM_GOLDMONT: +- case INTEL_FAM6_ATOM_DENVERTON: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: + memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, +--- a/arch/x86/events/intel/cstate.c ++++ b/arch/x86/events/intel/cstate.c +@@ -531,8 +531,8 @@ static const struct x86_cpu_id intel_cst + + X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates), + +- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates), +- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates), ++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates), ++ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates), +--- a/arch/x86/events/msr.c ++++ b/arch/x86/events/msr.c +@@ -61,8 +61,8 @@ static bool test_intel(int idx) + case INTEL_FAM6_BROADWELL_GT3E: + case INTEL_FAM6_BROADWELL_X: + +- case INTEL_FAM6_ATOM_SILVERMONT1: +- case INTEL_FAM6_ATOM_SILVERMONT2: ++ case INTEL_FAM6_ATOM_SILVERMONT: ++ case INTEL_FAM6_ATOM_SILVERMONT_X: + case INTEL_FAM6_ATOM_AIRMONT: + if (idx == PERF_MSR_SMI) + return true; +--- a/arch/x86/include/asm/intel-family.h ++++ b/arch/x86/include/asm/intel-family.h +@@ -50,19 +50,23 @@ + + /* "Small Core" Processors (Atom) */ + +-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C +-#define INTEL_FAM6_ATOM_LINCROFT 0x26 +-#define INTEL_FAM6_ATOM_PENWELL 0x27 +-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 +-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36 +-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ +-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ +-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ +-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ +-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */ +-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C +-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ +-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A ++#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ ++#define 
INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ ++ ++#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */ ++#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */ ++#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */ ++ ++#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */ ++#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */ ++#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */ ++ ++#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */ ++#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */ ++ ++#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ ++#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ ++#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ + + /* Xeon Phi */ + +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -899,11 +899,11 @@ static void identify_cpu_without_cpuid(s + } + + static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, + { X86_VENDOR_CENTAUR, 5 }, + { X86_VENDOR_INTEL, 5 }, + { X86_VENDOR_NSC, 5 }, +@@ -918,10 +918,10 @@ static const __initconst struct x86_cpu_ + + /* Only list CPUs which speculate but are non susceptible to SSB */ + static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +@@ -934,14 +934,14 @@ static const __initconst struct x86_cpu_ + + static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { + /* in addition to cpu_no_speculation */ +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS }, + { X86_VENDOR_INTEL, 6, 
INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, + {} +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -620,7 +620,7 @@ unsigned long native_calibrate_tsc(void) + case INTEL_FAM6_KABYLAKE_DESKTOP: + crystal_khz = 24000; /* 24.0 MHz */ + break; +- case INTEL_FAM6_ATOM_DENVERTON: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: + crystal_khz = 25000; /* 25.0 MHz */ + break; + case INTEL_FAM6_ATOM_GOLDMONT: +--- a/arch/x86/platform/atom/punit_atom_debug.c ++++ b/arch/x86/platform/atom/punit_atom_debug.c +@@ -154,8 +154,8 @@ static void punit_dbgfs_unregister(void) + (kernel_ulong_t)&drv_data } + + static const struct x86_cpu_id intel_punit_cpu_ids[] = { +- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt), +- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng), + ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht), + {} + }; +--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c ++++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c +@@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_dat + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } + + static const struct x86_cpu_id bt_sfi_cpu_ids[] = { +- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data), + {} + }; + +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -291,7 +291,7 @@ static const struct lpss_device_desc bsw + #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + + static const struct x86_cpu_id lpss_cpu_ids[] = { +- ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */ ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ + ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ + {} + }; +--- a/drivers/acpi/x86/utils.c ++++ b/drivers/acpi/x86/utils.c +@@ -54,7 +54,7 @@ static const struct always_present_id al + * Bay / Cherry Trail PWM directly poked by GPU driver in win10, + * but Linux uses a separate PWM driver, harmless if not used. 
+ */ +- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}), ++ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}), + ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + /* + * The INT0002 device is necessary to clear wakeup interrupt sources +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -2019,7 +2019,7 @@ static const struct pstate_funcs knl_fun + static const struct x86_cpu_id intel_pstate_cpu_ids[] = { + ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs), + ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs), +- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs), + ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs), + ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs), + ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs), +@@ -2036,7 +2036,7 @@ static const struct x86_cpu_id intel_pst + ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), + ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), +- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs), ++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), + {} + }; +--- a/drivers/edac/pnd2_edac.c ++++ b/drivers/edac/pnd2_edac.c +@@ -1541,7 +1541,7 @@ static struct dunit_ops dnv_ops = { + + static const struct x86_cpu_id pnd2_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops }, + { } + }; + MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -1069,14 +1069,14 @@ static const struct x86_cpu_id intel_idl + ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem), + ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem), + ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem), +- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom), +- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft), ++ ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom), ++ ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft), + ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem), + ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb), + ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb), +- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom), +- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt), +- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier), ++ ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier), + ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht), + ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb), + ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt), +@@ -1084,7 +1084,7 @@ static const struct x86_cpu_id intel_idl + ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw), + ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw), +- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn), + ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw), + ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw), +@@ -1097,8 +1097,8 @@ static const struct x86_cpu_id intel_idl + ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl), + ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt), +- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt), +- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv), ++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt), ++ 
ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv), + {} + }; + +@@ -1310,7 +1310,7 @@ static void intel_idle_state_table_updat + ivt_idle_state_table_update(); + break; + case INTEL_FAM6_ATOM_GOLDMONT: +- case INTEL_FAM6_ATOM_GEMINI_LAKE: ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + bxt_idle_state_table_update(); + break; + case INTEL_FAM6_SKYLAKE_DESKTOP: +--- a/drivers/mmc/host/sdhci-acpi.c ++++ b/drivers/mmc/host/sdhci-acpi.c +@@ -128,7 +128,7 @@ static const struct sdhci_acpi_chip sdhc + static bool sdhci_acpi_byt(void) + { + static const struct x86_cpu_id byt[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, + {} + }; + +--- a/drivers/pci/pci-mid.c ++++ b/drivers/pci/pci-mid.c +@@ -71,8 +71,8 @@ static const struct pci_platform_pm_ops + * arch/x86/platform/intel-mid/pwr.c. + */ + static const struct x86_cpu_id lpss_cpu_ids[] = { +- ICPU(INTEL_FAM6_ATOM_PENWELL), +- ICPU(INTEL_FAM6_ATOM_MERRIFIELD), ++ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID), + {} + }; + +--- a/drivers/platform/x86/intel_int0002_vgpio.c ++++ b/drivers/platform/x86/intel_int0002_vgpio.c +@@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_c + /* + * Limit ourselves to Cherry Trail for now, until testing shows we + * need to handle the INT0002 device on Baytrail too. +- * ICPU(INTEL_FAM6_ATOM_SILVERMONT1), * Valleyview, Bay Trail * ++ * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail * + */ + ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ + {} +--- a/drivers/platform/x86/intel_mid_powerbtn.c ++++ b/drivers/platform/x86/intel_mid_powerbtn.c +@@ -125,8 +125,8 @@ static struct mid_pb_ddata mrfld_ddata = + { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata } + + static const struct x86_cpu_id mid_pb_cpu_ids[] = { +- ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata), +- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata), ++ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata), ++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata), + {} + }; + +--- a/drivers/powercap/intel_rapl.c ++++ b/drivers/powercap/intel_rapl.c +@@ -1159,13 +1159,13 @@ static const struct x86_cpu_id rapl_ids[ + RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core), + RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core), + +- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt), ++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT, rapl_defaults_byt), + RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht), +- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng), +- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann), ++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,rapl_defaults_tng), ++ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID, rapl_defaults_ann), + RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core), +- RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core), +- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core), ++ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core), ++ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core), + + RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server), + RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server), +--- a/drivers/thermal/intel_soc_dts_thermal.c ++++ b/drivers/thermal/intel_soc_dts_thermal.c +@@ -43,7 +43,7 @@ static irqreturn_t soc_irq_thread_fn(int + } + + static const struct x86_cpu_id soc_thermal_ids[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0, ++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0, + BYT_SOC_DTS_APIC_IRQ}, + {} + }; +--- 
a/tools/power/x86/turbostat/turbostat.c ++++ b/tools/power/x86/turbostat/turbostat.c +@@ -1839,7 +1839,7 @@ int has_turbo_ratio_group_limits(int fam + switch (model) { + case INTEL_FAM6_ATOM_GOLDMONT: + case INTEL_FAM6_SKYLAKE_X: +- case INTEL_FAM6_ATOM_DENVERTON: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: + return 1; + } + return 0; +@@ -2701,9 +2701,9 @@ int probe_nhm_msrs(unsigned int family, + pkg_cstate_limits = skx_pkg_cstate_limits; + has_misc_feature_control = 1; + break; +- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ ++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ + no_MSR_MISC_PWR_MGMT = 1; +- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ ++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + pkg_cstate_limits = slv_pkg_cstate_limits; + break; + case INTEL_FAM6_ATOM_AIRMONT: /* AMT */ +@@ -2715,8 +2715,8 @@ int probe_nhm_msrs(unsigned int family, + pkg_cstate_limits = phi_pkg_cstate_limits; + break; + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ +- case INTEL_FAM6_ATOM_GEMINI_LAKE: +- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + pkg_cstate_limits = bxt_pkg_cstate_limits; + break; + default: +@@ -2745,9 +2745,9 @@ int has_slv_msrs(unsigned int family, un + return 0; + + switch (model) { +- case INTEL_FAM6_ATOM_SILVERMONT1: +- case INTEL_FAM6_ATOM_MERRIFIELD: +- case INTEL_FAM6_ATOM_MOOREFIELD: ++ case INTEL_FAM6_ATOM_SILVERMONT: ++ case INTEL_FAM6_ATOM_SILVERMONT_MID: ++ case INTEL_FAM6_ATOM_AIRMONT_MID: + return 1; + } + return 0; +@@ -2759,7 +2759,7 @@ int is_dnv(unsigned int family, unsigned + return 0; + + switch (model) { +- case INTEL_FAM6_ATOM_DENVERTON: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: + return 1; + } + return 0; +@@ -3275,8 +3275,8 @@ double get_tdp(unsigned int model) + return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; + + switch (model) { +- case INTEL_FAM6_ATOM_SILVERMONT1: +- case INTEL_FAM6_ATOM_SILVERMONT2: ++ case INTEL_FAM6_ATOM_SILVERMONT: ++ case INTEL_FAM6_ATOM_SILVERMONT_X: + return 30.0; + default: + return 135.0; +@@ -3342,7 +3342,7 @@ void rapl_probe(unsigned int family, uns + } + break; + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ +- case INTEL_FAM6_ATOM_GEMINI_LAKE: ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; + if (rapl_joules) + BIC_PRESENT(BIC_Pkg_J); +@@ -3400,8 +3400,8 @@ void rapl_probe(unsigned int family, uns + BIC_PRESENT(BIC_RAMWatt); + } + break; +- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ +- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ ++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ ++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + do_rapl = RAPL_PKG | RAPL_CORES; + if (rapl_joules) { + BIC_PRESENT(BIC_Pkg_J); +@@ -3411,7 +3411,7 @@ void rapl_probe(unsigned int family, uns + BIC_PRESENT(BIC_CorWatt); + } + break; +- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ ++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS; + BIC_PRESENT(BIC_PKG__); + BIC_PRESENT(BIC_RAM__); +@@ -3434,7 +3434,7 @@ void rapl_probe(unsigned int family, uns + return; + + rapl_power_units = 1.0 / (1 << (msr & 0xF)); +- if (model == INTEL_FAM6_ATOM_SILVERMONT1) ++ if (model == INTEL_FAM6_ATOM_SILVERMONT) + rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000; + else + rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F)); +@@ -3684,8 +3684,8 @@ int has_snb_msrs(unsigned int family, un + case 
INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_SKYLAKE_X: /* SKX */ + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ +- case INTEL_FAM6_ATOM_GEMINI_LAKE: +- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: ++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + return 1; + } + return 0; +@@ -3716,7 +3716,7 @@ int has_hsw_msrs(unsigned int family, un + case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ + case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ +- case INTEL_FAM6_ATOM_GEMINI_LAKE: ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + return 1; + } + return 0; +@@ -3750,8 +3750,8 @@ int is_slm(unsigned int family, unsigned + if (!genuine_intel) + return 0; + switch (model) { +- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */ +- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */ ++ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */ ++ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */ + return 1; + } + return 0; +@@ -4106,11 +4106,11 @@ void process_cpuid() + crystal_hz = 24000000; /* 24.0 MHz */ + break; + case INTEL_FAM6_SKYLAKE_X: /* SKX */ +- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ ++ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */ + crystal_hz = 25000000; /* 25.0 MHz */ + break; + case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ +- case INTEL_FAM6_ATOM_GEMINI_LAKE: ++ case INTEL_FAM6_ATOM_GOLDMONT_PLUS: + crystal_hz = 19200000; /* 19.2 MHz */ + break; + default: diff --git a/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch new file mode 100644 index 0000000..8c82273 --- /dev/null +++ b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch @@ -0,0 +1,45 @@ +From: Andi Kleen +Date: Fri, 18 Jan 2019 16:50:23 -0800 +Subject: x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests +Git-commit: 6c4dbbd14730c43f4ed808a9c42ca41625925c22 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +X86_FEATURE_MD_CLEAR is a new CPUID bit which is set when microcode +provides the mechanism to invoke a flush of various exploitable CPU buffers +by invoking the VERW instruction. + +Hand it through to guests so they can adjust their mitigations. + +This also requires corresponding qemu changes, which are available +separately. 
+ +[ tglx: Massaged changelog ] + +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/kvm/cpuid.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index c07958b59f50..39501e7afdb4 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -410,7 +410,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + /* cpuid 7.0.edx*/ + const u32 kvm_cpuid_7_0_edx_x86_features = + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | +- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP); ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | ++ F(MD_CLEAR); + + /* all calls to cpuid_count() should be made on the same cpu */ + get_cpu(); + diff --git a/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch new file mode 100644 index 0000000..92d1e7a --- /dev/null +++ b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch @@ -0,0 +1,52 @@ +From: Thomas Gleixner +Date: Wed, 27 Feb 2019 12:48:14 +0100 +Subject: x86/kvm/vmx: Add MDS protection when L1D Flush is not active +Git-commit: 650b68a0622f933444a6d66936abb3103029413b +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +CPUs which are affected by L1TF and MDS mitigate MDS with the L1D Flush on +VMENTER when updated microcode is installed. + +If a CPU is not affected by L1TF or if the L1D Flush is not in use, then +MDS mitigation needs to be invoked explicitly. + +For these cases, follow the host mitigation state and invoke the MDS +mitigation before VMENTER. 
+ +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 1 + + arch/x86/kvm/vmx.c | 3 +++ + 2 files changed, 4 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -62,6 +62,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always + + /* Control MDS CPU buffer clear before returning to user space */ + DEFINE_STATIC_KEY_FALSE(mds_user_clear); ++EXPORT_SYMBOL_GPL(mds_user_clear); + + void __init check_bugs(void) + { +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -9684,8 +9684,11 @@ static void __noclone vmx_vcpu_run(struc + + vmx->__launched = vmx->loaded_vmcs->launched; + ++ /* L1D Flush includes CPU buffer clear to mitigate MDS */ + if (static_branch_unlikely(&vmx_l1d_should_flush)) + vmx_l1d_flush(vcpu); ++ else if (static_branch_unlikely(&mds_user_clear)) ++ mds_clear_cpu_buffers(); + + asm( + /* Store host registers */ diff --git a/patches.arch/x86-msr-index-cleanup-bit-defines.patch b/patches.arch/x86-msr-index-cleanup-bit-defines.patch new file mode 100644 index 0000000..6f134cb --- /dev/null +++ b/patches.arch/x86-msr-index-cleanup-bit-defines.patch @@ -0,0 +1,102 @@ +From: Thomas Gleixner +Date: Thu, 21 Feb 2019 12:36:50 +0100 +Subject: x86/msr-index: Cleanup bit defines +Git-commit: d8eabc37310a92df40d07c5a8afc53cebf996716 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Greg pointed out that speculation related bit defines are using (1 << N) +format instead of BIT(N). Aside of that (1 << N) is wrong as it should use +1UL at least. + +Clean it up. + +[ Josh Poimboeuf: Fix tools build ] + +Reported-by: Greg Kroah-Hartman +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/include/asm/msr-index.h | 34 ++++++++++++++++++---------------- + tools/power/x86/turbostat/Makefile | 2 +- + 2 files changed, 19 insertions(+), 17 deletions(-) + +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -1,6 +1,8 @@ + #ifndef _ASM_X86_MSR_INDEX_H + #define _ASM_X86_MSR_INDEX_H + ++#include ++ + /* + * CPU model specific register (MSR) numbers. + * +@@ -39,14 +41,14 @@ + /* Intel MSRs. 
Some also available on other CPUs */ + + #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ +-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ ++#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */ + #define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */ +-#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ ++#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ + #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ +-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ ++#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ + + #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ +-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ ++#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ + + #define MSR_PPIN_CTL 0x0000004e + #define MSR_PPIN 0x0000004f +@@ -68,20 +70,20 @@ + #define MSR_MTRRcap 0x000000fe + + #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a +-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ +-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ +-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ +-#define ARCH_CAP_SSB_NO (1 << 4) /* +- * Not susceptible to Speculative Store Bypass +- * attack, so no Speculative Store Bypass +- * control required. +- */ ++#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ ++#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ ++#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ ++#define ARCH_CAP_SSB_NO BIT(4) /* ++ * Not susceptible to Speculative Store Bypass ++ * attack, so no Speculative Store Bypass ++ * control required. ++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b +-#define L1D_FLUSH (1 << 0) /* +- * Writeback and invalidate the +- * L1 data cache. +- */ ++#define L1D_FLUSH BIT(0) /* ++ * Writeback and invalidate the ++ * L1 data cache. ++ */ + + #define MSR_IA32_BBL_CR_CTL 0x00000119 + #define MSR_IA32_BBL_CR_CTL3 0x0000011e +--- a/tools/power/x86/turbostat/Makefile ++++ b/tools/power/x86/turbostat/Makefile +@@ -8,7 +8,7 @@ ifeq ("$(origin O)", "command line") + endif + + turbostat : turbostat.c +-CFLAGS += -Wall ++override CFLAGS += -Wall -I../../../include + CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' + CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' + diff --git a/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch new file mode 100644 index 0000000..2d9c602 --- /dev/null +++ b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch @@ -0,0 +1,167 @@ +From: Thomas Gleixner +Date: Wed, 27 Feb 2019 10:10:23 +0100 +Subject: x86/speculation: Consolidate CPU whitelists +Git-commit: 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +The CPU vulnerability whitelists have some overlap and there are more +whitelists coming along. + +Use the driver_data field in the x86_cpu_id struct to denote the +whitelisted vulnerabilities and combine all whitelists into one. 
+ +Suggested-by: Linus Torvalds +Signed-off-by: Thomas Gleixner +Reviewed-by: Frederic Weisbecker +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/common.c | 103 ++++++++++++++++++++++--------------------- + 1 file changed, 55 insertions(+), 48 deletions(-) + +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -898,60 +898,68 @@ static void identify_cpu_without_cpuid(s + #endif + } + +-static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, +- { X86_VENDOR_CENTAUR, 5 }, +- { X86_VENDOR_INTEL, 5 }, +- { X86_VENDOR_NSC, 5 }, +- { X86_VENDOR_ANY, 4 }, +- {} +-}; ++#define NO_SPECULATION BIT(0) ++#define NO_MELTDOWN BIT(1) ++#define NO_SSB BIT(2) ++#define NO_L1TF BIT(3) ++ ++#define VULNWL(_vendor, _family, _model, _whitelist) \ ++ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } ++ ++#define VULNWL_INTEL(model, whitelist) \ ++ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) ++ ++#define VULNWL_AMD(family, whitelist) \ ++ VULNWL(AMD, family, X86_MODEL_ANY, whitelist) ++ ++static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { ++ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), ++ ++ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), ++ ++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF), ++ ++ VULNWL_INTEL(CORE_YONAH, NO_SSB), ++ ++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF), ++ ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF), + +-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { +- { X86_VENDOR_AMD }, ++ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), + {} + }; + +-/* Only list CPUs which speculate but are non susceptible to SSB */ +-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, +- { X86_VENDOR_INTEL, 6, 
INTEL_FAM6_XEON_PHI_KNM }, +- { X86_VENDOR_AMD, 0x12, }, +- { X86_VENDOR_AMD, 0x11, }, +- { X86_VENDOR_AMD, 0x10, }, +- { X86_VENDOR_AMD, 0xf, }, +- {} +-}; ++static bool __init cpu_matches(unsigned long which) ++{ ++ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); + +-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { +- /* in addition to cpu_no_speculation */ +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +- {} +-}; ++ return m && !!(m->driver_data & which); ++} + + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 ia32_cap = 0; + +- if (x86_match_cpu(cpu_no_speculation)) ++ if (cpu_matches(NO_SPECULATION)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); +@@ -960,15 +968,14 @@ static void __init cpu_set_bug_bits(stru + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + +- if (!x86_match_cpu(cpu_no_spec_store_bypass) && +- !(ia32_cap & ARCH_CAP_SSB_NO) && ++ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (x86_match_cpu(cpu_no_meltdown)) ++ if (cpu_matches(NO_MELTDOWN)) + return; + + /* Rogue Data Cache Load? No! */ +@@ -977,7 +984,7 @@ static void __init cpu_set_bug_bits(stru + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + +- if (x86_match_cpu(cpu_no_l1tf)) ++ if (cpu_matches(NO_L1TF)) + return; + + setup_force_cpu_bug(X86_BUG_L1TF); diff --git a/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch new file mode 100644 index 0000000..94f91fb --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch @@ -0,0 +1,148 @@ +From: Andi Kleen +Date: Fri, 18 Jan 2019 16:50:16 -0800 +Subject: x86/speculation/mds: Add basic bug infrastructure for MDS +Git-commit: ed5194c2732c8084af9fd159c146ea92bf137128 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Microarchitectural Data Sampling (MDS), is a class of side channel attacks +on internal buffers in Intel CPUs. The variants are: + + - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) + - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) + - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) + +MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a +dependent load (store-to-load forwarding) as an optimization. The forward +can also happen to a faulting or assisting load operation for a different +memory address, which can be exploited under certain conditions. Store +buffers are partitioned between Hyper-Threads so cross thread forwarding is +not possible. 
But if a thread enters or exits a sleep state the store +buffer is repartitioned which can expose data from one thread to the other. + +MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage +L1 miss situations and to hold data which is returned or sent in response +to a memory or I/O operation. Fill buffers can forward data to a load +operation and also write data to the cache. When the fill buffer is +deallocated it can retain the stale data of the preceding operations which +can then be forwarded to a faulting or assisting load operation, which can +be exploited under certain conditions. Fill buffers are shared between +Hyper-Threads so cross thread leakage is possible. + +MLDPS leaks Load Port Data. Load ports are used to perform load operations +from memory or I/O. The received data is then forwarded to the register +file or a subsequent operation. In some implementations the Load Port can +contain stale data from a previous operation which can be forwarded to +faulting or assisting loads under certain conditions, which again can be +exploited eventually. Load ports are shared between Hyper-Threads so cross +thread leakage is possible. + +All variants have the same mitigation for single CPU thread case (SMT off), +so the kernel can treat them as one MDS issue. + +Add the basic infrastructure to detect if the current CPU is affected by +MDS. + +[ tglx: Rewrote changelog ] + +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/include/asm/msr-index.h | 5 +++++ + arch/x86/kernel/cpu/common.c | 25 ++++++++++++++++--------- + 3 files changed, 23 insertions(+), 9 deletions(-) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -341,6 +341,7 @@ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ + #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ ++#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ +@@ -378,4 +379,5 @@ + #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ ++#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -78,6 +78,11 @@ + * attack, so no Speculative Store Bypass + * control required. + */ ++#define ARCH_CAP_MDS_NO BIT(5) /* ++ * Not susceptible to ++ * Microarchitectural Data ++ * Sampling (MDS) vulnerabilities. 
++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -902,6 +902,7 @@ static void identify_cpu_without_cpuid(s + #define NO_MELTDOWN BIT(1) + #define NO_SSB BIT(2) + #define NO_L1TF BIT(3) ++#define NO_MDS BIT(4) + + #define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } +@@ -918,6 +919,7 @@ static const __initconst struct x86_cpu_ + VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), + VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), + ++ /* Intel Family 6 */ + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION), + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION), + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION), +@@ -934,17 +936,19 @@ static const __initconst struct x86_cpu_ + VULNWL_INTEL(CORE_YONAH, NO_SSB), + + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF), +- +- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF), ++ ++ /* AMD Family 0xf - 0x12 */ ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), + + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ +- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS), + {} + }; + +@@ -975,6 +979,9 @@ static void __init cpu_set_bug_bits(stru + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + ++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) ++ setup_force_cpu_bug(X86_BUG_MDS); ++ + if (cpu_matches(NO_MELTDOWN)) + return; + diff --git a/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch new file mode 100644 index 0000000..e789b6b --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch @@ -0,0 +1,86 @@ +From: Thomas Gleixner +Date: Fri, 1 Mar 2019 20:21:08 +0100 +Subject: x86/speculation/mds: Add BUG_MSBDS_ONLY +Git-commit: e261f209c3666e842fd645a1e31f001c3a26def9 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +This bug bit is set on CPUs which are only affected by Microarchitectural +Store Buffer Data Sampling (MSBDS) and not by any other MDS variant. + +This is important because the Store Buffers are partitioned between +Hyper-Threads so cross thread forwarding is not possible. But if a thread +enters or exits a sleep state the store buffer is repartitioned which can +expose data from one thread to the other. This transition can be mitigated. + +That means that for CPUs which are only affected by MSBDS SMT can be +enabled, if the CPU is not affected by other SMT sensitive vulnerabilities, +e.g. L1TF. The XEON PHI variants fall into that category. 
Also the +Silvermont/Airmont ATOMs, but for them it's not really relevant as they do +not support SMT, but mark them for completeness sake. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/kernel/cpu/common.c | 20 ++++++++++++-------- + 2 files changed, 13 insertions(+), 8 deletions(-) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -379,4 +379,5 @@ + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ + #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ ++#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -903,6 +903,7 @@ static void identify_cpu_without_cpuid(s + #define NO_SSB BIT(2) + #define NO_L1TF BIT(3) + #define NO_MDS BIT(4) ++#define MSBDS_ONLY BIT(5) + + #define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } +@@ -926,16 +927,16 @@ static const __initconst struct x86_cpu_ + VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), + VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), + +- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF), +- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF), +- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY), + + VULNWL_INTEL(CORE_YONAH, NO_SSB), + +- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), ++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY), + + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF), + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF), +@@ -979,8 +980,11 @@ static void __init cpu_set_bug_bits(stru + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) ++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { + setup_force_cpu_bug(X86_BUG_MDS); ++ if (cpu_matches(MSBDS_ONLY)) ++ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); ++ } + + if (cpu_matches(NO_MELTDOWN)) + return; diff --git a/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch new file mode 100644 index 0000000..ace13f6 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch @@ -0,0 +1,85 @@ +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 09:59:33 -0500 +Subject: x86/speculation/mds: Add mds=full,nosmt cmdline option +Git-repo: tip/tip +Git-commit: d71eb0ce109a124b0fa714832823b9452f2762cf +Patch-mainline: Queued in a subsystem tree +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Add the mds=full,nosmt cmdline 
option. This is like mds=full, but with +SMT disabled if the CPU is vulnerable. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Acked-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 6 ++++-- + Documentation/x86/mds.rst | 4 ++++ + arch/x86/kernel/cpu/bugs.c | 10 ++++++++++ + 3 files changed, 18 insertions(+), 2 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -342,6 +342,7 @@ early_param("l1tf", l1tf_cmdline); + + /* Default mitigation for L1TF-affected CPUs */ + static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; ++static bool mds_nosmt __ro_after_init = false; + + static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", +@@ -359,8 +360,13 @@ static void __init mds_select_mitigation + if (mds_mitigation == MDS_MITIGATION_FULL) { + if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) + mds_mitigation = MDS_MITIGATION_VMWERV; ++ + static_branch_enable(&mds_user_clear); ++ ++ if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ cpu_smt_disable(false); + } ++ + pr_info("%s\n", mds_strings[mds_mitigation]); + } + +@@ -376,6 +382,10 @@ static int __init mds_cmdline(char *str) + mds_mitigation = MDS_MITIGATION_OFF; + else if (!strcmp(str, "full")) + mds_mitigation = MDS_MITIGATION_FULL; ++ else if (!strcmp(str, "full,nosmt")) { ++ mds_mitigation = MDS_MITIGATION_FULL; ++ mds_nosmt = true; ++ } + + return 0; + } +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2221,8 +2221,10 @@ + This parameter controls the MDS mitigation. The + options are: + +- full - Enable MDS mitigation on vulnerable CPUs +- off - Unconditionally disable MDS mitigation ++ full - Enable MDS mitigation on vulnerable CPUs ++ full,nosmt - Enable MDS mitigation and disable ++ SMT on vulnerable CPUs ++ off - Unconditionally disable MDS mitigation + + Not specifying this option is equivalent to + mds=full. +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -119,6 +119,10 @@ Kernel internal mitigation modes + scenarios where the host has the updated microcode but the + hypervisor does not expose MD_CLEAR in CPUID. It's a best + effort approach without guarantee. ++ ++ full,nosmt The same as mds=full, with SMT disabled on vulnerable ++ CPUs. This is the complete mitigation. ++ + ======= ============================================================ + + If the CPU is affected and mds=off is not supplied on the kernel command diff --git a/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch new file mode 100644 index 0000000..be7aa07 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch @@ -0,0 +1,216 @@ +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:13:06 +0100 +Subject: x86/speculation/mds: Add mds_clear_cpu_buffers() +Git-commit: 6a9e529272517755904b7afa639f6db59ddb793e +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +The Microarchitectural Data Sampling (MDS) vulernabilities are mitigated by +clearing the affected CPU buffers. The mechanism for clearing the buffers +uses the unused and obsolete VERW instruction in combination with a +microcode update which triggers a CPU buffer clear when VERW is executed. + +Provide a inline function with the assembly magic. 
The argument of the VERW +instruction must be a memory operand as documented: + + "MD_CLEAR enumerates that the memory-operand variant of VERW (for + example, VERW m16) has been extended to also overwrite buffers affected + by MDS. This buffer overwriting functionality is not guaranteed for the + register operand variant of VERW." + +Documentation also recommends to use a writable data segment selector: + + "The buffer overwriting occurs regardless of the result of the VERW + permission check, as well as when the selector is null or causes a + descriptor load segment violation. However, for lowest latency we + recommend using a selector that indicates a valid writable data + segment." + +Add x86 specific documentation about MDS and the internal workings of the +mitigation. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/index.rst | 1 + Documentation/x86/conf.py | 10 +++ + Documentation/x86/index.rst | 8 ++ + Documentation/x86/mds.rst | 99 +++++++++++++++++++++++++++++++++++ + arch/x86/include/asm/nospec-branch.h | 25 ++++++++ + 5 files changed, 143 insertions(+) + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -318,6 +318,31 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_ + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++#include ++ ++/** ++ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * This uses the otherwise unused and obsolete VERW instruction in ++ * combination with microcode which triggers a CPU buffer flush when the ++ * instruction is executed. ++ */ ++static inline void mds_clear_cpu_buffers(void) ++{ ++ static const u16 ds = __KERNEL_DS; ++ ++ /* ++ * Has to be the memory-operand variant because only that ++ * guarantees the CPU buffer flush functionality according to ++ * documentation. The register-operand variant does not. ++ * Works with any segment selector, but a valid writable ++ * data segment is the fastest variant. ++ * ++ * "cc" clobber is required because VERW modifies ZF. ++ */ ++ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +--- a/Documentation/index.rst ++++ b/Documentation/index.rst +@@ -34,6 +34,7 @@ the kernel interface as seen by applicat + :maxdepth: 2 + + userspace-api/index ++ x86/index + + + Introduction to kernel development +--- /dev/null ++++ b/Documentation/x86/conf.py +@@ -0,0 +1,10 @@ ++# -*- coding: utf-8; mode: python -*- ++ ++project = "X86 architecture specific documentation" ++ ++tags.add("subproject") ++ ++latex_documents = [ ++ ('index', 'x86.tex', project, ++ 'The kernel development community', 'manual'), ++] +--- /dev/null ++++ b/Documentation/x86/index.rst +@@ -0,0 +1,8 @@ ++========================== ++x86 architecture specifics ++========================== ++ ++.. toctree:: ++ :maxdepth: 1 ++ ++ mds +--- /dev/null ++++ b/Documentation/x86/mds.rst +@@ -0,0 +1,99 @@ ++Microarchitectural Data Sampling (MDS) mitigation ++================================================= ++ ++.. _mds: ++ ++Overview ++-------- ++ ++Microarchitectural Data Sampling (MDS) is a family of side channel attacks ++on internal buffers in Intel CPUs. 
The variants are: ++ ++ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) ++ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) ++ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) ++ ++MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a ++dependent load (store-to-load forwarding) as an optimization. The forward ++can also happen to a faulting or assisting load operation for a different ++memory address, which can be exploited under certain conditions. Store ++buffers are partitioned between Hyper-Threads so cross thread forwarding is ++not possible. But if a thread enters or exits a sleep state the store ++buffer is repartitioned which can expose data from one thread to the other. ++ ++MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage ++L1 miss situations and to hold data which is returned or sent in response ++to a memory or I/O operation. Fill buffers can forward data to a load ++operation and also write data to the cache. When the fill buffer is ++deallocated it can retain the stale data of the preceding operations which ++can then be forwarded to a faulting or assisting load operation, which can ++be exploited under certain conditions. Fill buffers are shared between ++Hyper-Threads so cross thread leakage is possible. ++ ++MLPDS leaks Load Port Data. Load ports are used to perform load operations ++from memory or I/O. The received data is then forwarded to the register ++file or a subsequent operation. In some implementations the Load Port can ++contain stale data from a previous operation which can be forwarded to ++faulting or assisting loads under certain conditions, which again can be ++exploited eventually. Load ports are shared between Hyper-Threads so cross ++thread leakage is possible. ++ ++ ++Exposure assumptions ++-------------------- ++ ++It is assumed that attack code resides in user space or in a guest with one ++exception. The rationale behind this assumption is that the code construct ++needed for exploiting MDS requires: ++ ++ - to control the load to trigger a fault or assist ++ ++ - to have a disclosure gadget which exposes the speculatively accessed ++ data for consumption through a side channel. ++ ++ - to control the pointer through which the disclosure gadget exposes the ++ data ++ ++The existence of such a construct in the kernel cannot be excluded with ++100% certainty, but the complexity involved makes it extremly unlikely. ++ ++There is one exception, which is untrusted BPF. The functionality of ++untrusted BPF is limited, but it needs to be thoroughly investigated ++whether it can be used to create such a construct. ++ ++ ++Mitigation strategy ++------------------- ++ ++All variants have the same mitigation strategy at least for the single CPU ++thread case (SMT off): Force the CPU to clear the affected buffers. ++ ++This is achieved by using the otherwise unused and obsolete VERW ++instruction in combination with a microcode update. The microcode clears ++the affected CPU buffers when the VERW instruction is executed. ++ ++For virtualization there are two ways to achieve CPU buffer ++clearing. Either the modified VERW instruction or via the L1D Flush ++command. The latter is issued when L1TF mitigation is enabled so the extra ++VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to ++be issued. 
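
As a minimal sketch of the guest-entry case described above (illustrative only, not the actual arch/x86/kvm/vmx change carried elsewhere in this series; mds_clear_cpu_buffers() is the helper introduced earlier in this series, and l1d_flush_on_vmenter is an assumed stand-in for the L1TF flush decision):

    /*
     * Sketch: if the L1D flush for L1TF runs on VMENTER anyway, it also
     * overwrites the MDS-affected buffers, so the extra VERW can be
     * skipped.  Otherwise the VERW-based clear has to be issued.
     */
    static inline void vmenter_buffer_clear_sketch(bool l1d_flush_on_vmenter)
    {
            if (l1d_flush_on_vmenter)
                    return;                  /* L1D flush covers the buffers */
            mds_clear_cpu_buffers();         /* VERW with a memory operand */
    }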
++ ++If the VERW instruction with the supplied segment selector argument is ++executed on a CPU without the microcode update there is no side effect ++other than a small number of pointlessly wasted CPU cycles. ++ ++This does not protect against cross Hyper-Thread attacks except for MSBDS ++which is only exploitable cross Hyper-thread when one of the Hyper-Threads ++enters a C-state. ++ ++The kernel provides a function to invoke the buffer clearing: ++ ++ mds_clear_cpu_buffers() ++ ++The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state ++(idle) transitions. ++ ++According to current knowledge additional mitigations inside the kernel ++itself are not required because the necessary gadgets to expose the leaked ++data cannot be controlled in a way which allows exploitation from malicious ++user space or VM guests. diff --git a/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch new file mode 100644 index 0000000..5ba6fa6 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch @@ -0,0 +1,186 @@ +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 22:04:08 +0100 +Subject: x86/speculation/mds: Add mitigation control for MDS +Git-commit: bc1241700acd82ec69fde98c5763ce51086269f8 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Now that the mitigations are in place, add a command line parameter to +control the mitigation, a mitigation selector function and a SMT update +mechanism. + +This is the minimal straight forward initial implementation which just +provides an always on/off mode. The command line parameter is: + + mds=[full|off] + +This is consistent with the existing mitigations for other speculative +hardware vulnerabilities. + +The idle invocation is dynamically updated according to the SMT state of +the system similar to the dynamic update of the STIBP mitigation. The idle +mitigation is limited to CPUs which are only affected by MSBDS and not any +other variant, because the other variants cannot be mitigated on SMT +enabled systems. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 22 +++++++ + arch/x86/include/asm/processor.h | 5 + + arch/x86/kernel/cpu/bugs.c | 70 ++++++++++++++++++++++++ + 3 files changed, 97 insertions(+) + +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -982,4 +982,9 @@ enum l1tf_mitigations { + + extern enum l1tf_mitigations l1tf_mitigation; + ++enum mds_mitigations { ++ MDS_MITIGATION_OFF, ++ MDS_MITIGATION_FULL, ++}; ++ + #endif /* _ASM_X86_PROCESSOR_H */ +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -34,6 +34,7 @@ + static void __init spectre_v2_select_mitigation(void); + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); ++static void __init mds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ + u64 x86_spec_ctrl_base; +@@ -105,6 +106,8 @@ void __init check_bugs(void) + + l1tf_select_mitigation(); + ++ mds_select_mitigation(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. 
+@@ -330,6 +333,50 @@ early_param("l1tf", l1tf_cmdline); + + + #undef pr_fmt ++#define pr_fmt(fmt) "MDS: " fmt ++ ++/* Default mitigation for L1TF-affected CPUs */ ++static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; ++ ++static const char * const mds_strings[] = { ++ [MDS_MITIGATION_OFF] = "Vulnerable", ++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers" ++}; ++ ++static void __init mds_select_mitigation(void) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MDS)) { ++ mds_mitigation = MDS_MITIGATION_OFF; ++ return; ++ } ++ ++ if (mds_mitigation == MDS_MITIGATION_FULL) { ++ if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) ++ static_branch_enable(&mds_user_clear); ++ else ++ mds_mitigation = MDS_MITIGATION_OFF; ++ } ++ pr_info("%s\n", mds_strings[mds_mitigation]); ++} ++ ++static int __init mds_cmdline(char *str) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MDS)) ++ return 0; ++ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) ++ mds_mitigation = MDS_MITIGATION_OFF; ++ else if (!strcmp(str, "full")) ++ mds_mitigation = MDS_MITIGATION_FULL; ++ ++ return 0; ++} ++early_param("mds", mds_cmdline); ++ ++#undef pr_fmt + #define pr_fmt(fmt) "Spectre V2 : " fmt + + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = +@@ -738,6 +785,26 @@ static void update_indir_branch_cond(voi + static_branch_disable(&switch_to_cond_stibp); + } + ++/* Update the static key controlling the MDS CPU buffer clear in idle */ ++static void update_mds_branch_idle(void) ++{ ++ /* ++ * Enable the idle clearing if SMT is active on CPUs which are ++ * affected only by MSBDS and not any other MDS variant. ++ * ++ * The other variants cannot be mitigated when SMT is enabled, so ++ * clearing the buffers on idle just to prevent the Store Buffer ++ * repartitioning leak would be a window dressing exercise. ++ */ ++ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) ++ return; ++ ++ if (sched_smt_active()) ++ static_branch_enable(&mds_idle_clear); ++ else ++ static_branch_disable(&mds_idle_clear); ++} ++ + void arch_smt_update(void) + { + /* Enhanced IBRS implies STIBP. No update required. */ +@@ -758,6 +825,9 @@ void arch_smt_update(void) + break; + } + ++ if (mds_mitigation == MDS_MITIGATION_FULL) ++ update_mds_branch_idle(); ++ + mutex_unlock(&spec_ctrl_mutex); + } + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2205,6 +2205,28 @@ + Format: , + Specifies range of consoles to be captured by the MDA. + ++ mds= [X86,INTEL] ++ Control mitigation for the Micro-architectural Data ++ Sampling (MDS) vulnerability. ++ ++ Certain CPUs are vulnerable to an exploit against CPU ++ internal buffers which can forward information to a ++ disclosure gadget under certain conditions. ++ ++ In vulnerable processors, the speculatively ++ forwarded data can be used in a cache side channel ++ attack, to access data to which the attacker does ++ not have direct access. ++ ++ This parameter controls the MDS mitigation. The ++ options are: ++ ++ full - Enable MDS mitigation on vulnerable CPUs ++ off - Unconditionally disable MDS mitigation ++ ++ Not specifying this option is equivalent to ++ mds=full. ++ + mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory + Amount of memory to be used when the kernel is not able + to see the whole system memory or for test. 
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch new file mode 100644 index 0000000..2892354 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch @@ -0,0 +1,131 @@ +From: Thomas Gleixner +Date: Wed, 20 Feb 2019 09:40:40 +0100 +Subject: x86/speculation/mds: Add mitigation mode VMWERV +Git-commit: 22dd8365088b6403630b82423cf906491859b65e +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +In virtualized environments it can happen that the host has the microcode +update which utilizes the VERW instruction to clear CPU buffers, but the +hypervisor is not yet updated to expose the X86_FEATURE_MD_CLEAR CPUID bit +to guests. + +Introduce an internal mitigation mode VMWERV which enables the invocation +of the CPU buffer clearing even if X86_FEATURE_MD_CLEAR is not set. If the +system has no updated microcode this results in a pointless execution of +the VERW instruction wasting a few CPU cycles. If the microcode is updated, +but not exposed to a guest then the CPU buffers will be cleared. + +That said: Virtual Machines Will Eventually Receive Vaccine + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/x86/mds.rst | 27 +++++++++++++++++++++++++++ + arch/x86/include/asm/processor.h | 1 + + arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++------ + 3 files changed, 40 insertions(+), 6 deletions(-) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 87ce8ac9f36e..3d6f943f1afb 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -93,11 +93,38 @@ The kernel provides a function to invoke the buffer clearing: + The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state + (idle) transitions. + ++As a special quirk to address virtualization scenarios where the host has ++the microcode updated, but the hypervisor does not (yet) expose the ++MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the ++hope that it might actually clear the buffers. The state is reflected ++accordingly. ++ + According to current knowledge additional mitigations inside the kernel + itself are not required because the necessary gadgets to expose the leaked + data cannot be controlled in a way which allows exploitation from malicious + user space or VM guests. + ++Kernel internal mitigation modes ++-------------------------------- ++ ++ ======= ============================================================ ++ off Mitigation is disabled. Either the CPU is not affected or ++ mds=off is supplied on the kernel command line ++ ++ full Mitigation is eanbled. CPU is affected and MD_CLEAR is ++ advertised in CPUID. ++ ++ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not ++ advertised in CPUID. That is mainly for virtualization ++ scenarios where the host has the updated microcode but the ++ hypervisor does not expose MD_CLEAR in CPUID. It's a best ++ effort approach without guarantee. ++ ======= ============================================================ ++ ++If the CPU is affected and mds=off is not supplied on the kernel command ++line then the kernel selects the appropriate mitigation mode depending on ++the availability of the MD_CLEAR CPUID bit. 
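
Condensed into a hedged sketch, the selection described above amounts to the following (the real logic is mds_select_mitigation() in arch/x86/kernel/cpu/bugs.c as patched in this series; cmdline_said_off stands in for the mds=off command line handling and is not a real kernel symbol):

    static enum mds_mitigations pick_mds_mode_sketch(bool cmdline_said_off)
    {
            if (!boot_cpu_has_bug(X86_BUG_MDS) || cmdline_said_off)
                    return MDS_MITIGATION_OFF;      /* not affected, or mds=off */
            if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
                    return MDS_MITIGATION_FULL;     /* updated microcode exposed */
            return MDS_MITIGATION_VMWERV;           /* best effort: issue VERW anyway */
    }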
++ + Mitigation points + ----------------- + +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 1f0295783325..aca1ef8cc79f 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -995,6 +995,7 @@ extern enum l1tf_mitigations l1tf_mitigation; + enum mds_mitigations { + MDS_MITIGATION_OFF, + MDS_MITIGATION_FULL, ++ MDS_MITIGATION_VMWERV, + }; + + #endif /* _ASM_X86_PROCESSOR_H */ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 7ab16a6ed064..95cda38c8785 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -224,7 +224,8 @@ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL + + static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", +- [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers" ++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", ++ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", + }; + + static void __init mds_select_mitigation(void) +@@ -235,10 +236,9 @@ static void __init mds_select_mitigation(void) + } + + if (mds_mitigation == MDS_MITIGATION_FULL) { +- if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) +- static_branch_enable(&mds_user_clear); +- else +- mds_mitigation = MDS_MITIGATION_OFF; ++ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) ++ mds_mitigation = MDS_MITIGATION_VMWERV; ++ static_branch_enable(&mds_user_clear); + } + pr_info("%s\n", mds_strings[mds_mitigation]); + } +@@ -705,8 +705,14 @@ void arch_smt_update(void) + break; + } + +- if (mds_mitigation == MDS_MITIGATION_FULL) ++ switch (mds_mitigation) { ++ case MDS_MITIGATION_FULL: ++ case MDS_MITIGATION_VMWERV: + update_mds_branch_idle(); ++ break; ++ case MDS_MITIGATION_OFF: ++ break; ++ } + + mutex_unlock(&spec_ctrl_mutex); + } + diff --git a/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch new file mode 100644 index 0000000..ee8d12f --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch @@ -0,0 +1,62 @@ +From: Josh Poimboeuf +Date: Wed, 17 Apr 2019 16:39:02 -0500 +Subject: x86/speculation/mds: Add 'mitigations=' support for MDS +Git-repo: tip/tip +Git-commit: 5c14068f87d04adc73ba3f41c2a303d3c3d1fa12 +Patch-mainline: Queued in a subsystem tree +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Add MDS to the new 'mitigations=' cmdline option. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Acked-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 2 ++ + arch/x86/kernel/cpu/bugs.c | 5 +++-- + 2 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 9aa3543a8723..18cad2b0392a 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2556,6 +2556,7 @@ + spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] + l1tf=off [X86] ++ mds=off [X86] + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT +@@ -2570,6 +2571,7 @@ + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. 
+ Equivalent to: l1tf=flush,nosmt [X86] ++ mds=full,nosmt [X86] + + mminit_loglevel= + [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 3c5c3c3ba734..667c273a66d7 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -233,7 +233,7 @@ static const char * const mds_strings[] = { + + static void __init mds_select_mitigation(void) + { +- if (!boot_cpu_has_bug(X86_BUG_MDS)) { ++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { + mds_mitigation = MDS_MITIGATION_OFF; + return; + } +@@ -244,7 +244,8 @@ static void __init mds_select_mitigation(void) + + static_branch_enable(&mds_user_clear); + +- if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && ++ (mds_nosmt || cpu_mitigations_auto_nosmt())) + cpu_smt_disable(false); + } + + diff --git a/patches.arch/x86-speculation-mds-add-smt-warning-message.patch b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch new file mode 100644 index 0000000..bb34433 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch @@ -0,0 +1,50 @@ +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 10:00:51 -0500 +Subject: x86/speculation/mds: Add SMT warning message +Git-commit: 39226ef02bfb43248b7db12a4fdccb39d95318e3 +Git-repo: tip/tip +Patch-mainline: Queued in a subsystem tree +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +MDS is vulnerable with SMT. Make that clear with a one-time printk +whenever SMT first gets enabled. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -800,6 +800,9 @@ static void update_indir_branch_cond(voi + static_branch_disable(&switch_to_cond_stibp); + } + ++#undef pr_fmt ++#define pr_fmt(fmt) fmt ++ + /* Update the static key controlling the MDS CPU buffer clear in idle */ + static void update_mds_branch_idle(void) + { +@@ -820,6 +823,8 @@ static void update_mds_branch_idle(void) + static_branch_disable(&mds_idle_clear); + } + ++#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" ++ + void arch_smt_update(void) + { + /* Enhanced IBRS implies STIBP. No update required. */ +@@ -843,6 +848,8 @@ void arch_smt_update(void) + switch (mds_mitigation) { + case MDS_MITIGATION_FULL: + case MDS_MITIGATION_VMWERV: ++ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ pr_warn_once(MDS_MSG_SMT); + update_mds_branch_idle(); + break; + case MDS_MITIGATION_OFF: diff --git a/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch new file mode 100644 index 0000000..90f30b7 --- /dev/null +++ b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch @@ -0,0 +1,120 @@ +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 22:51:43 +0100 +Subject: x86/speculation/mds: Add sysfs reporting for MDS +Git-commit: 8a4b06d391b0a42a373808979b5028f5c84d9c6a +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Add the sysfs reporting file for MDS. 
It exposes the vulnerability and +mitigation state similar to the existing files for the other speculative +hardware vulnerabilities. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 1 + arch/x86/kernel/cpu/bugs.c | 25 +++++++++++++++++++++ + drivers/base/cpu.c | 8 ++++++ + include/linux/cpu.h | 2 + + 4 files changed, 36 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1170,6 +1170,22 @@ static ssize_t l1tf_show_state(char *buf + } + #endif + ++static ssize_t mds_show_state(char *buf) ++{ ++ if (!hypervisor_is_type(X86_HYPER_NATIVE)) { ++ return sprintf(buf, "%s; SMT Host state unknown\n", ++ mds_strings[mds_mitigation]); ++ } ++ ++ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { ++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], ++ sched_smt_active() ? "mitigated" : "disabled"); ++ } ++ ++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], ++ sched_smt_active() ? "vulnerable" : "disabled"); ++} ++ + static char *stibp_state(void) + { + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) +@@ -1236,6 +1252,10 @@ static ssize_t cpu_show_common(struct de + return l1tf_show_state(buf); + break; + ++ ++ case X86_BUG_MDS: ++ return mds_show_state(buf); ++ + default: + break; + } +@@ -1267,4 +1287,9 @@ ssize_t cpu_show_l1tf(struct device *dev + { + return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); + } ++ ++ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_MDS); ++} + #endif +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -380,6 +380,7 @@ What: /sys/devices/system/cpu/vulnerabi + /sys/devices/system/cpu/vulnerabilities/spectre_v2 + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/l1tf ++ /sys/devices/system/cpu/vulnerabilities/mds + Date: January 2018 + Contact: Linux kernel mailing list + Description: Information about CPU vulnerabilities +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -533,11 +533,18 @@ ssize_t __weak cpu_show_l1tf(struct devi + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_mds(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); + static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); + static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); ++static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -545,6 +552,7 @@ static struct attribute *cpu_root_vulner + &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, + &dev_attr_l1tf.attr, ++ &dev_attr_mds.attr, + NULL + }; + +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -56,6 +56,8 @@ extern ssize_t cpu_show_spec_store_bypas + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_mds(struct device *dev, ++ struct device_attribute *attr, char *buf); + 
+ extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch new file mode 100644 index 0000000..bcdf46f --- /dev/null +++ b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch @@ -0,0 +1,192 @@ +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:42:51 +0100 +Subject: x86/speculation/mds: Clear CPU buffers on exit to user +Git-commit: 04dcbdb8057827b043b3c71aa397c4c63e67d086 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Add a static key which controls the invocation of the CPU buffer clear +mechanism on exit to user space and add the call into +prepare_exit_to_usermode() and do_nmi() right before actually returning. + +Add documentation which kernel to user space transition this covers and +explain why some corner cases are not mitigated. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/x86/mds.rst | 52 +++++++++++++++++++++++++++++++++++ + arch/x86/entry/common.c | 3 ++ + arch/x86/include/asm/nospec-branch.h | 13 ++++++++ + arch/x86/kernel/cpu/bugs.c | 3 ++ + arch/x86/kernel/nmi.c | 4 ++ + arch/x86/kernel/traps.c | 8 +++++ + 6 files changed, 83 insertions(+) + +--- a/arch/x86/entry/common.c ++++ b/arch/x86/entry/common.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -208,6 +209,8 @@ __visible inline void prepare_exit_to_us + #endif + + user_enter_irqoff(); ++ ++ mds_user_clear_cpu_buffers(); + } + + #define SYSCALL_EXIT_WORK_FLAGS \ +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -318,6 +318,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_ + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++DECLARE_STATIC_KEY_FALSE(mds_user_clear); ++ + #include + + /** +@@ -343,6 +345,17 @@ static inline void mds_clear_cpu_buffers + asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); + } + ++/** ++ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * Clear CPU buffers if the corresponding static key is enabled ++ */ ++static inline void mds_user_clear_cpu_buffers(void) ++{ ++ if (static_branch_likely(&mds_user_clear)) ++ mds_clear_cpu_buffers(); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -60,6 +60,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i + /* Control unconditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++/* Control MDS CPU buffer clear before returning to user space */ ++DEFINE_STATIC_KEY_FALSE(mds_user_clear); ++ + void __init check_bugs(void) + { + identify_boot_cpu(); +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -533,6 +534,9 @@ nmi_restart: + write_cr2(this_cpu_read(nmi_cr2)); + if (this_cpu_dec_return(nmi_state)) + goto nmi_restart; ++ ++ if (user_mode(regs)) ++ mds_user_clear_cpu_buffers(); + } + NOKPROBE_SYMBOL(do_nmi); + +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -59,6 +59,7 @@ + #include + #include + 
#include ++#include + #include + #include + +@@ -393,6 +394,13 @@ dotraplinkage void do_double_fault(struc + regs->ip = (unsigned long)general_protection; + regs->sp = (unsigned long)&gpregs->orig_ax; + ++ /* ++ * This situation can be triggered by userspace via ++ * modify_ldt(2) and the return does not take the regular ++ * user space exit, so a CPU buffer clear is required when ++ * MDS mitigation is enabled. ++ */ ++ mds_user_clear_cpu_buffers(); + return; + } + #endif +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -97,3 +97,55 @@ According to current knowledge additiona + itself are not required because the necessary gadgets to expose the leaked + data cannot be controlled in a way which allows exploitation from malicious + user space or VM guests. ++ ++Mitigation points ++----------------- ++ ++1. Return to user space ++^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ When transitioning from kernel to user space the CPU buffers are flushed ++ on affected CPUs when the mitigation is not disabled on the kernel ++ command line. The migitation is enabled through the static key ++ mds_user_clear. ++ ++ The mitigation is invoked in prepare_exit_to_usermode() which covers ++ most of the kernel to user space transitions. There are a few exceptions ++ which are not invoking prepare_exit_to_usermode() on return to user ++ space. These exceptions use the paranoid exit code. ++ ++ - Non Maskable Interrupt (NMI): ++ ++ Access to sensible data like keys, credentials in the NMI context is ++ mostly theoretical: The CPU can do prefetching or execute a ++ misspeculated code path and thereby fetching data which might end up ++ leaking through a buffer. ++ ++ But for mounting other attacks the kernel stack address of the task is ++ already valuable information. So in full mitigation mode, the NMI is ++ mitigated on the return from do_nmi() to provide almost complete ++ coverage. ++ ++ - Double fault (#DF): ++ ++ A double fault is usually fatal, but the ESPFIX workaround, which can ++ be triggered from user space through modify_ldt(2) is a recoverable ++ double fault. #DF uses the paranoid exit path, so explicit mitigation ++ in the double fault handler is required. ++ ++ - Machine Check Exception (#MC): ++ ++ Another corner case is a #MC which hits between the CPU buffer clear ++ invocation and the actual return to user. As this still is in kernel ++ space it takes the paranoid exit path which does not clear the CPU ++ buffers. So the #MC handler repopulates the buffers to some ++ extent. Machine checks are not reliably controllable and the window is ++ extremly small so mitigation would just tick a checkbox that this ++ theoretical corner case is covered. To keep the amount of special ++ cases small, ignore #MC. ++ ++ - Debug Exception (#DB): ++ ++ This takes the paranoid exit path only when the INT1 breakpoint is in ++ kernel space. #DB on a user space address takes the regular exit path, ++ so no extra mitigation required. 
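
The rule the patch establishes is that any return to user space which bypasses prepare_exit_to_usermode() needs an explicit buffer clear, as the do_nmi() and ESPFIX double-fault hunks above do. A hedged sketch of that pattern (my_paranoid_handler is purely illustrative and not part of this series):

    dotraplinkage void my_paranoid_handler(struct pt_regs *regs, long error_code)
    {
            /* ... handle the exception ... */

            /*
             * The paranoid exit path skips the regular exit work, so the
             * CPU buffers have to be cleared here before returning to
             * user mode.
             */
            if (user_mode(regs))
                    mds_user_clear_cpu_buffers();
    }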
diff --git a/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch new file mode 100644 index 0000000..9102f23 --- /dev/null +++ b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch @@ -0,0 +1,224 @@ +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:04:01 +0100 +Subject: x86/speculation/mds: Conditionally clear CPU buffers on idle entry +Git-commit: 07f07f55a29cb705e221eda7894dd67ab81ef343 +Patch-mainline: v5.1-rc1 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Add a static key which controls the invocation of the CPU buffer clear +mechanism on idle entry. This is independent of other MDS mitigations +because the idle entry invocation to mitigate the potential leakage due to +store buffer repartitioning is only necessary on SMT systems. + +Add the actual invocations to the different halt/mwait variants which +covers all usage sites. mwaitx is not patched as it's not available on +Intel CPUs. + +The buffer clear is only invoked before entering the C-State to prevent +that stale data from the idling CPU is spilled to the Hyper-Thread sibling +after the Store buffer got repartitioned and all entries are available to +the non idle sibling. + +When coming out of idle the store buffer is partitioned again so each +sibling has half of it available. Now CPU which returned from idle could be +speculatively exposed to contents of the sibling, but the buffers are +flushed either on exit to user space or on VMENTER. + +When later on conditional buffer clearing is implemented on top of this, +then there is no action required either because before returning to user +space the context switch will set the condition flag which causes a flush +on the return to user path. + +Note, that the buffer clearing on idle is only sensible on CPUs which are +solely affected by MSBDS and not any other variant of MDS because the other +MDS variants cannot be mitigated when SMT is enabled, so the buffer +clearing on idle would be a window dressing exercise. + +This intentionally does not handle the case in the acpi/processor_idle +driver which uses the legacy IO port interface for C-State transitions for +two reasons: + + - The acpi/processor_idle driver was replaced by the intel_idle driver + almost a decade ago. Anything Nehalem upwards supports it and defaults + to that new driver. + + - The legacy IO port interface is likely to be used on older and therefore + unaffected CPUs or on systems which do not receive microcode updates + anymore, so there is no point in adding that. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Acked-by: Borislav Petkov +--- + Documentation/x86/mds.rst | 42 ++++++++++++++++++++++++++++++++++++ + arch/x86/include/asm/irqflags.h | 4 ++++ + arch/x86/include/asm/mwait.h | 7 ++++++ + arch/x86/include/asm/nospec-branch.h | 12 +++++++++++ + arch/x86/kernel/cpu/bugs.c | 3 +++ + 5 files changed, 68 insertions(+) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 54d935bf283b..87ce8ac9f36e 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -149,3 +149,45 @@ Mitigation points + This takes the paranoid exit path only when the INT1 breakpoint is in + kernel space. 
#DB on a user space address takes the regular exit path, + so no extra mitigation required. ++ ++ ++2. C-State transition ++^^^^^^^^^^^^^^^^^^^^^ ++ ++ When a CPU goes idle and enters a C-State the CPU buffers need to be ++ cleared on affected CPUs when SMT is active. This addresses the ++ repartitioning of the store buffer when one of the Hyper-Threads enters ++ a C-State. ++ ++ When SMT is inactive, i.e. either the CPU does not support it or all ++ sibling threads are offline CPU buffer clearing is not required. ++ ++ The idle clearing is enabled on CPUs which are only affected by MSBDS ++ and not by any other MDS variant. The other MDS variants cannot be ++ protected against cross Hyper-Thread attacks because the Fill Buffer and ++ the Load Ports are shared. So on CPUs affected by other variants, the ++ idle clearing would be a window dressing exercise and is therefore not ++ activated. ++ ++ The invocation is controlled by the static key mds_idle_clear which is ++ switched depending on the chosen mitigation mode and the SMT state of ++ the system. ++ ++ The buffer clear is only invoked before entering the C-State to prevent ++ that stale data from the idling CPU from spilling to the Hyper-Thread ++ sibling after the store buffer got repartitioned and all entries are ++ available to the non idle sibling. ++ ++ When coming out of idle the store buffer is partitioned again so each ++ sibling has half of it available. The back from idle CPU could be then ++ speculatively exposed to contents of the sibling. The buffers are ++ flushed either on exit to user space or on VMENTER so malicious code ++ in user space or the guest cannot speculatively access them. ++ ++ The mitigation is hooked into all variants of halt()/mwait(), but does ++ not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver ++ has been superseded by the intel_idle driver around 2010 and is ++ preferred on all affected CPUs which are expected to gain the MD_CLEAR ++ functionality in microcode. Aside of that the IO-Port mechanism is a ++ legacy interface which is only used on older systems which are either ++ not affected or do not receive microcode updates anymore. 
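
As a reading aid for the hunks that follow, a minimal sketch of the pattern they apply to each idle primitive (assuming only mds_idle_clear_cpu_buffers(), which this patch adds to nospec-branch.h):

    static inline void idle_entry_sketch(void)
    {
            /*
             * Clear the CPU buffers before the store buffer is
             * repartitioned across the sibling threads; this is a no-op
             * unless the mds_idle_clear static key is enabled.
             */
            mds_idle_clear_cpu_buffers();

            /* ...then execute the architectural idle instruction. */
            asm volatile("hlt" : : : "memory");
    }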
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 058e40fed167..8a0e56e1dcc9 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -6,6 +6,8 @@ + + #ifndef __ASSEMBLY__ + ++#include ++ + /* Provide __cpuidle; we can't safely include */ + #define __cpuidle __attribute__((__section__(".cpuidle.text"))) + +@@ -54,11 +56,13 @@ static inline void native_irq_enable(void) + + static inline __cpuidle void native_safe_halt(void) + { ++ mds_idle_clear_cpu_buffers(); + asm volatile("sti; hlt": : :"memory"); + } + + static inline __cpuidle void native_halt(void) + { ++ mds_idle_clear_cpu_buffers(); + asm volatile("hlt": : :"memory"); + } + +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h +index 39a2fb29378a..eb0f80ce8524 100644 +--- a/arch/x86/include/asm/mwait.h ++++ b/arch/x86/include/asm/mwait.h +@@ -6,6 +6,7 @@ + #include + + #include ++#include + + #define MWAIT_SUBSTATE_MASK 0xf + #define MWAIT_CSTATE_MASK 0xf +@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx, + + static inline void __mwait(unsigned long eax, unsigned long ecx) + { ++ mds_idle_clear_cpu_buffers(); ++ + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) + static inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) + { ++ /* No MDS buffer clear as this is AMD/HYGON only */ ++ + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" + :: "a" (eax), "b" (ebx), "c" (ecx)); +@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx, + + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) + { ++ mds_idle_clear_cpu_buffers(); ++ + trace_hardirqs_on(); + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 65b747286d96..4e970390110f 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -319,6 +319,7 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + + DECLARE_STATIC_KEY_FALSE(mds_user_clear); ++DECLARE_STATIC_KEY_FALSE(mds_idle_clear); + + #include + +@@ -356,6 +357,17 @@ static inline void mds_user_clear_cpu_buffers(void) + mds_clear_cpu_buffers(); + } + ++/** ++ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * Clear CPU buffers if the corresponding static key is enabled ++ */ ++static inline void mds_idle_clear_cpu_buffers(void) ++{ ++ if (static_branch_likely(&mds_idle_clear)) ++ mds_clear_cpu_buffers(); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 29ed8e8dfee2..916995167301 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -66,6 +66,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + /* Control MDS CPU buffer clear before returning to user space */ + DEFINE_STATIC_KEY_FALSE(mds_user_clear); + EXPORT_SYMBOL_GPL(mds_user_clear); ++/* Control MDS CPU buffer clear before idling (halt, mwait) */ ++DEFINE_STATIC_KEY_FALSE(mds_idle_clear); ++EXPORT_SYMBOL_GPL(mds_idle_clear); + + void __init check_bugs(void) + { + diff --git a/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch 
b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch new file mode 100644 index 0000000..ef3abeb --- /dev/null +++ b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch @@ -0,0 +1,49 @@ +From: Konrad Rzeszutek Wilk +Date: Fri, 12 Apr 2019 17:50:58 -0400 +Subject: x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off +Git-repo: tip/tip +Git-commit: e2c3c94788b08891dcf3dbe608f9880523ecd71b +Patch-mainline: Queued in a subsystem tree +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +This code is only for CPUs which are affected by MSBDS, but are *not* +affected by the other two MDS issues. + +For such CPUs, enabling the mds_idle_clear mitigation is enough to +mitigate SMT. + +However if user boots with 'mds=off' and still has SMT enabled, we should +not report that SMT is mitigated: + +$cat /sys//devices/system/cpu/vulnerabilities/mds +Vulnerable; SMT mitigated + +But rather: +Vulnerable; SMT vulnerable + +Signed-off-by: Konrad Rzeszutek Wilk +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Reviewed-by: Josh Poimboeuf +Link: https://lkml.kernel.org/r/20190412215118.294906495@localhost.localdomain + +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 0642505dda69..6b8a55c7cebc 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1204,7 +1204,8 @@ static ssize_t mds_show_state(char *buf) + + if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], +- sched_smt_active() ? "mitigated" : "disabled"); ++ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : ++ sched_smt_active() ? "mitigated" : "disabled")); + } + + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + diff --git a/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch new file mode 100644 index 0000000..c8c1edd --- /dev/null +++ b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch @@ -0,0 +1,45 @@ +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 10:00:14 -0500 +Subject: x86/speculation: Move arch_smt_update() call to after mitigation decisions +Git-repo: tip/tip +Git-commit: 7c3658b20194a5b3209a143f63bc9c643c6a3ae2 +Patch-mainline: Queued in a subsystem tree +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +arch_smt_update() now has a dependency on both Spectre v2 and MDS +mitigations. Move its initial call to after all the mitigation decisions +have been made. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 9f252082a83b..3f934ffef8cf 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -111,6 +111,8 @@ void __init check_bugs(void) + + mds_select_mitigation(); + ++ arch_smt_update(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. 
+@@ -638,9 +640,6 @@ static void __init spectre_v2_select_mitigation(void) + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ + spectre_v2_user_select_mitigation(cmd); +- +- /* Enable STIBP if appropriate */ +- arch_smt_update(); + } + + static void update_stibp_msr(void * __unused) + diff --git a/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch new file mode 100644 index 0000000..1182e16 --- /dev/null +++ b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch @@ -0,0 +1,84 @@ +From: Dominik Brodowski +Date: Tue, 22 May 2018 11:05:39 +0200 +Subject: x86/speculation: Simplify the CPU bug detection logic +Git-commit: 8ecc4979b1bd9c94168e6fc92960033b7a951336 +Patch-mainline: v4.17-rc7 +References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091 + +Only CPUs which speculate can speculate. Therefore, it seems prudent +to test for cpu_no_speculation first and only then determine whether +a specific speculating CPU is susceptible to store bypass speculation. +This is underlined by all CPUs currently listed in cpu_no_speculation +were present in cpu_no_spec_store_bypass as well. + +Signed-off-by: Dominik Brodowski +Signed-off-by: Thomas Gleixner +Cc: bp@suse.de +Cc: konrad.wilk@oracle.com +Link: https://lkml.kernel.org/r/20180522090539.GA24668@light.dominikbrodowski.net + +Acked-by: Borislav Petkov +--- + arch/x86/kernel/cpu/common.c | 22 +++++++--------------- + 1 file changed, 7 insertions(+), 15 deletions(-) + +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 78decc3e3067..38276f58d3bf 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { + {} + }; + ++/* Only list CPUs which speculate but are non susceptible to SSB */ + static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 }, +@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, + { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +- { X86_VENDOR_CENTAUR, 5, }, +- { X86_VENDOR_INTEL, 5, }, +- { X86_VENDOR_NSC, 5, }, + { X86_VENDOR_AMD, 0x12, }, + { X86_VENDOR_AMD, 0x11, }, + { X86_VENDOR_AMD, 0x10, }, + { X86_VENDOR_AMD, 0xf, }, +- { X86_VENDOR_ANY, 4, }, + {} + }; + +@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 ia32_cap = 0; + ++ if (x86_match_cpu(cpu_no_speculation)) ++ return; ++ ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1); ++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2); ++ + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + +@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + !(ia32_cap & ARCH_CAP_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + +- if (x86_match_cpu(cpu_no_speculation)) +- return; +- +- 
setup_force_cpu_bug(X86_BUG_SPECTRE_V1); +- setup_force_cpu_bug(X86_BUG_SPECTRE_V2); +- + if (x86_match_cpu(cpu_no_meltdown)) + return; + + diff --git a/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch b/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch index e0333fe..6a78531 100644 --- a/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch +++ b/patches.arch/x86-speculation-support-enhanced-ibrs-on-future-cpus.patch @@ -95,7 +95,7 @@ Acked-by: Borislav Petkov }; #undef pr_fmt -@@ -347,6 +348,13 @@ static void __init spectre_v2_select_mit +@@ -348,6 +349,13 @@ static void __init spectre_v2_select_mit case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: @@ -109,7 +109,7 @@ Acked-by: Borislav Petkov if (IS_ENABLED(CONFIG_RETPOLINE)) goto retpoline_auto; break; -@@ -384,6 +392,7 @@ retpoline_auto: +@@ -385,6 +393,7 @@ retpoline_auto: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); } @@ -117,7 +117,7 @@ Acked-by: Borislav Petkov spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); -@@ -406,9 +415,16 @@ retpoline_auto: +@@ -407,9 +416,16 @@ retpoline_auto: /* * Retpoline means the kernel is safe because it has no indirect @@ -138,9 +138,9 @@ Acked-by: Borislav Petkov } --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c -@@ -957,6 +957,9 @@ static void __init cpu_set_bug_bits(stru - setup_force_cpu_bug(X86_BUG_SPECTRE_V1); - setup_force_cpu_bug(X86_BUG_SPECTRE_V2); +@@ -956,6 +956,9 @@ static void __init cpu_set_bug_bits(stru + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); diff --git a/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch b/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch index bc9f096..14fc7da 100644 --- a/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch +++ b/patches.suse/0003-x86-idle-Control-Indirect-Branch-Speculation-in-idle.patch @@ -20,21 +20,13 @@ Originally-by: Tim Chen Signed-off-by: Thomas Gleixner Signed-off-by: Jiri Slaby --- - arch/x86/include/asm/mwait.h | 14 ++++++++++++++ + arch/x86/include/asm/mwait.h | 13 +++++++++++++ arch/x86/kernel/process.c | 14 ++++++++++++++ - 2 files changed, 28 insertions(+) + 2 files changed, 27 insertions(+) --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h -@@ -5,6 +5,7 @@ - #include - - #include -+#include - - #define MWAIT_SUBSTATE_MASK 0xf - #define MWAIT_CSTATE_MASK 0xf -@@ -105,7 +106,20 @@ static inline void mwait_idle_with_hints +@@ -112,7 +112,20 @@ static inline void mwait_idle_with_hints mb(); } @@ -57,7 +49,7 @@ Signed-off-by: Jiri Slaby } --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c -@@ -465,6 +465,20 @@ static __cpuidle void mwait_idle(void) +@@ -674,6 +674,20 @@ static __cpuidle void mwait_idle(void) mb(); /* quirk */ } diff --git a/series.conf b/series.conf index f81ff4a..f0b0104 100644 --- a/series.conf +++ b/series.conf @@ -16387,6 +16387,7 @@ patches.suse/msft-hv-1696-KVM-x86-fix-UD-address-of-failed-Hyper-V-hypercalls.patch patches.fixes/sched-tracing-Fix-trace_sched_pi_setprio-for-deboost.patch patches.arch/47-kvm-vmx-expose-ssbd-properly-to-guests.patch + patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch patches.fixes/tracing-Fix-crash-when-freeing-instances-with-event-.patch patches.suse/0001-tracing-Make-the-snapshot-trigger-work-with-instance.patch 
patches.fixes/afs-Fix-directory-permissions-check.patch @@ -18086,6 +18087,7 @@ patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch + patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch patches.arch/perf-x86-intel-lbr-fix-incomplete-lbr-call-stack patches.fixes/kprobes-make-list-and-blacklist-root-user-read-only.patch patches.arch/kprobes-x86-fix-p-uses-in-error-messages @@ -19515,6 +19517,7 @@ patches.fixes/s390-sles12sp4-pkey-move-pckmo-subfunction-available-checks-away-from-module-init.patch patches.suse/rcu-Make-need_resched-respond-to-urgent-RCU-QS-needs.patch patches.fixes/kprobes-Return-error-if-we-fail-to-reuse-kprobe-inst.patch + patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch patches.suse/sched-numa-remove-unused-code-from-update_numa_stats.patch patches.suse/sched-numa-remove-unused-nr_running-field.patch patches.arch/x86-corruption-check-fix-panic-in-memory_corruption_check-when-boot-option-without-value-is-provided @@ -20561,6 +20564,7 @@ patches.arch/kvm-nvmx-set-vm-instruction-error-for-vmptrld-of-unbacked-page patches.arch/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails patches.arch/kvm-vmx-set-ia32_tsc_aux-for-legacy-mode-guests + patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch patches.fixes/arm-arm64-KVM-vgic-Force-VM-halt-when-changing-the-a.patch patches.arch/KVM-PPC-Book3S-HV-Fix-race-between-kvm_unmap_hva_ran.patch patches.fixes/KVM-PPC-Book3S-PR-Set-hflag-to-indicate-that-POWER9-.patch @@ -21842,6 +21846,25 @@ # end of sorted patches ######################################################## + # MDS + patches.arch/x86-msr-index-cleanup-bit-defines.patch + patches.arch/x86-speculation-consolidate-cpu-whitelists.patch + patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch + patches.arch/x86-speculation-mds-add-bug_msbds_only.patch + patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch + patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch + patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch + patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch + patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch + patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch + patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch + patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch + patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch + patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch + patches.arch/x86-speculation-mds-add-smt-warning-message.patch + patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch + patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch + ######################################################## # # packaging-specific patches (tweaks for autobuild,