From d2d8b17d6eb464c4846fdd955b4d97f52cbca2c7 Mon Sep 17 00:00:00 2001
From: Takashi Iwai
Date: Oct 25 2018 16:07:27 +0000
Subject: Move x86 patches that have been upstreamed into the sorted section

Refreshed some patches to be applied cleanly, too.
The expanded result is 100% identical with the state before this change.

---

diff --git a/patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch b/patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
index 52e5564..7fe8880 100644
--- a/patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
+++ b/patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
@@ -55,10 +55,10 @@ Acked-by: Michal Hocko
 + * NOTE: only to be used for l1tf mitigation
 + */
 +	u8 x86_cache_bits;
- #ifndef __GENKSYMS__
  	unsigned initialized : 1;
- #endif
-@@ -184,7 +189,7 @@ extern void cpu_detect(struct cpuinfo_x8
+ };
+
+@@ -182,7 +187,7 @@ extern void cpu_detect(struct cpuinfo_x8
  static inline unsigned long long l1tf_pfn_limit(void)
  {
 -
@@ -69,7 +69,7 @@ Acked-by: Michal Hocko
  extern void early_cpu_init(void);
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -227,6 +227,47 @@ enum vmx_l1d_flush_state l1tf_vmx_mitiga
+@@ -225,6 +225,47 @@ enum vmx_l1d_flush_state l1tf_vmx_mitiga
  EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
  #endif
 
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
-@@ -234,6 +275,8 @@ static void __init l1tf_select_mitigatio
+@@ -232,6 +273,8 @@ static void __init l1tf_select_mitigatio
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
-@@ -253,11 +296,6 @@ static void __init l1tf_select_mitigatio
 		return;
 #endif
diff --git a/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
index b84f021..fbc2e8e 100644
--- a/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
+++ b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
@@ -90,7 +90,7 @@ Link: https://lkml.kernel.org/r/20180713142323.202758176@linutronix.de
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
-@@ -967,4 +967,16 @@ bool xen_set_default_idle(void);
+@@ -965,4 +965,16 @@ bool xen_set_default_idle(void);
  void stop_this_cpu(void *dummy);
  void df_debug(struct pt_regs *regs, long error_code);
  void microcode_check(void);
@@ -109,7 +109,7 @@ Link: https://lkml.kernel.org/r/20180713142323.202758176@linutronix.de
 #endif /* _ASM_X86_PROCESSOR_H */
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -218,7 +218,11 @@ static void x86_amd_ssb_disable(void)
+@@ -216,7 +216,11 @@ static void x86_amd_ssb_disable(void)
  	wrmsrl(MSR_AMD64_LS_CFG, msrval);
  }
 
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 #endif
-@@ -230,6 +234,20 @@ static void __init l1tf_select_mitigatio
+@@ -228,6 +232,20 @@ static void __init l1tf_select_mitigatio
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
 #if CONFIG_PGTABLE_LEVELS == 2
 	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
 	return;
-@@ -249,6 +267,33 @@ static void __init l1tf_select_mitigatio
+@@ -247,6 +265,33 @@ static void __init l1tf_select_mitigatio
 	setup_force_cpu_cap(X86_FEATURE_L1TF_FIX);
 }
 
 	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
 	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
 		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-@@ -10002,16 +10015,33 @@ free_vcpu:
+@@ -10009,16 +10022,33 @@ free_vcpu:
  	return ERR_PTR(err);
  }
 
 	kvm-intel.ept=	[KVM,Intel] Disable extended page tables
 			(virtualized MMU) support on capable Intel chips.
 			Default is 1 (enabled)
-@@ -1909,6 +1903,68 @@
-	Disables the paravirtualized spinlock slowpath
-	optimizations for KVM.
+@@ -1905,6 +1899,68 @@
+			feature (tagged TLBs) on capable Intel chips.
+			Default is 1 (enabled)
 
 	l1tf=		[X86] Control mitigation of the L1TF vulnerability on
 			affected CPUs
diff --git a/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
index 6adb9bf..f664860 100644
--- a/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
+++ b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
@@ -34,8 +34,8 @@ Signed-off-by: Jiri Kosina
  struct vcpu_vmx {
  	struct kvm_vcpu vcpu;
  	unsigned long host_rsp;
-@@ -657,9 +662,8 @@ struct vcpu_vmx {
-	struct loaded_vmcs *loaded_cpu_state;
+@@ -653,9 +658,8 @@ struct vcpu_vmx {
+	struct loaded_vmcs *loaded_vmcs;
  	bool __launched; /* temporary, used in vmx_vcpu_run */
  	struct msr_autoload {
 -		unsigned nr;
 +		struct vmx_msrs guest;
 +		struct vmx_msrs host;
  	} msr_autoload;
-	struct {
+	int loaded;
-@@ -2041,18 +2045,18 @@ static void clear_atomic_switch_msr(stru
+@@ -2037,18 +2041,18 @@ static void clear_atomic_switch_msr(stru
  	}
  	break;
  }
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
-@@ -2104,24 +2108,25 @@ static void add_atomic_switch_msr(struct
+@@ -2100,24 +2104,25 @@ static void add_atomic_switch_msr(struct
  		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
  	}
 
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
-@@ -5770,9 +5775,9 @@ static void vmx_vcpu_setup(struct vcpu_v
+@@ -5767,9 +5772,9 @@ static void vmx_vcpu_setup(struct vcpu_v
  	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
  	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
 
-@@ -10867,10 +10872,10 @@ static int prepare_vmcs02(struct kvm_vcp
+@@ -10874,10 +10879,10 @@ static int prepare_vmcs02(struct kvm_vcp
  	 * Set the MSR load/store lists to match L0's settings.
  	 */
  	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 
 	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
-@@ -11827,8 +11832,8 @@ static void nested_vmx_vmexit(struct kvm
+@@ -11834,8 +11839,8 @@ static void nested_vmx_vmexit(struct kvm
  	vmx_segment_cache_clear(vmx);
 
  	/* Update any VMCS fields that might have changed while L2 ran */
diff --git a/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch b/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
index 9593782..9c64e76 100644
--- a/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
+++ b/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
@@ -131,7 +131,7 @@ Signed-off-by: Jiri Kosina
  	while (st->state < target) {
  		st->state++;
  		cpuhp_invoke_callback(cpu, st->state, true, NULL);
-@@ -1984,7 +2007,8 @@ void __init boot_cpu_init(void)
+@@ -1984,5 +2007,6 @@ void __init boot_cpu_init(void)
  	 */
  void __init boot_cpu_hotplug_init(void)
  {
 +	this_cpu_write(cpuhp_state.booted_once, true);
 +	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
  }
-
- /* kabi */
diff --git a/patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch b/patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch
index 26f890c..3608a35 100644
--- a/patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch
+++ b/patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch
@@ -41,14 +41,13 @@ Link: https://lkml.kernel.org/r/20180504161815.GG9257@pd.tnic
  #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
  #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
  #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
-@@ -212,12 +211,14 @@
+@@ -212,11 +211,13 @@
  #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
  #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
  #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
- #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
- #define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
  #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
  #define X86_FEATURE_AMD_SSBD		(7*32+25) /* "" AMD SSBD implementation */
 +#define X86_FEATURE_IBRS		( 7*32+26) /* Indirect Branch Restricted Speculation */
 
  /* Virtualization flags: Linux defined, word 8 */
  #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
-@@ -278,9 +279,9 @@
+@@ -277,9 +278,9 @@
  #define X86_FEATURE_CLZERO		(13*32+0) /* CLZERO instruction */
  #define X86_FEATURE_IRPERF		(13*32+1) /* Instructions Retired Count */
  #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
 	break;
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
-@@ -3931,7 +3931,7 @@ static int svm_get_msr(struct kvm_vcpu *
+@@ -4055,7 +4055,7 @@ static int svm_get_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_SPEC_CTRL:
  		if (!msr_info->host_initiated &&
 			return 1;
 
 		msr_info->data = svm->spec_ctrl;
-@@ -4029,7 +4029,7 @@ static int svm_set_msr(struct kvm_vcpu *
+@@ -4156,7 +4156,7 @@ static int svm_set_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_SPEC_CTRL:
  		if (!msr->host_initiated &&
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-@@ -4056,7 +4056,7 @@ static int svm_set_msr(struct kvm_vcpu *
+@@ -4183,7 +4183,7 @@ static int svm_set_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_PRED_CMD:
  		if (!msr->host_initiated &&
 
 		if (data & ~PRED_CMD_IBPB)
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -3296,9 +3296,7 @@ static int vmx_get_msr(struct kvm_vcpu *
+@@ -3294,9 +3294,7 @@ static int vmx_get_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_SPEC_CTRL:
  		if (!msr_info->host_initiated &&
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
-@@ -3418,9 +3416,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3416,9 +3414,7 @@ static int vmx_set_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_SPEC_CTRL:
  		if (!msr_info->host_initiated &&
 			return 1;
 
 		/* The STIBP bit doesn't fault even if it's not advertised */
-@@ -3450,7 +3446,6 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3448,7 +3444,6 @@ static int vmx_set_msr(struct kvm_vcpu *
  		break;
  	case MSR_IA32_PRED_CMD:
  		if (!msr_info->host_initiated &&
diff --git a/patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch b/patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch
index 1b2c673..af9fbfa 100644
--- a/patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch
+++ b/patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch
@@ -29,7 +29,7 @@ Acked-by: Borislav Petkov
 
 --- a/arch/x86/include/asm/cpufeatures.h
 +++ b/arch/x86/include/asm/cpufeatures.h
-@@ -208,7 +208,7 @@
+@@ -208,14 +208,14 @@
  #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
  #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
  #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
  #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
  #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
  #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
+ #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
- #define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
  #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
 -#define X86_FEATURE_AMD_SSBD		(7*32+25) /* "" AMD SSBD implementation */
 +#define X86_FEATURE_LS_CFG_SSBD	( 7*32+25) /* "" AMD SSBD implementation via LS_CFG MSR */
  #define X86_FEATURE_IBRS		( 7*32+26) /* Indirect Branch Restricted Speculation */
  #define X86_FEATURE_IBPB		( 7*32+27) /* Indirect Branch Prediction Barrier */
  #define X86_FEATURE_STIBP		( 7*32+28) /* Single Thread Indirect Branch Predictors */
-@@ -336,7 +336,7 @@
+@@ -338,7 +338,7 @@
  #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
  #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
  #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
 
 #ifdef CONFIG_X86_32
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -160,8 +160,8 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+@@ -158,8 +158,8 @@ void x86_spec_ctrl_set_guest(u64 guest_s
  	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
  		return;
 
 	host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
-@@ -177,8 +177,8 @@ void x86_spec_ctrl_restore_host(u64 gues
+@@ -175,8 +175,8 @@ void x86_spec_ctrl_restore_host(u64 gues
  	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
  		return;
 
 	host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
 	if (host != guest_spec_ctrl)
-@@ -190,7 +190,7 @@ static void x86_amd_ssb_disable(void)
+@@ -188,7 +188,7 @@ static void x86_amd_ssb_disable(void)
  {
  	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
diff --git a/patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch b/patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch
index 489100b..b421153 100644
--- a/patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch
+++ b/patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch
@@ -208,9 +208,9 @@ Acked-by: Borislav Petkov
  #include <asm/intel-family.h>
 +#include <asm/spec-ctrl.h>
 
-	/* representing HT siblings of each logical CPU */
-	DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+	/* Number of siblings per CPU package */
+	int smp_num_siblings = 1;
-@@ -242,6 +243,8 @@ static void notrace start_secondary(void
+@@ -244,6 +245,8 @@ static void notrace start_secondary(void
  	 */
  	check_tsc_sync_target();
 
 	/*
 	 * Lock vector_lock and initialize the vectors on this cpu
 	 * before setting the cpu online. We must set it online with
-@@ -1319,6 +1322,8 @@ void __init native_smp_prepare_cpus(unsi
+@@ -1321,6 +1324,8 @@ void __init native_smp_prepare_cpus(unsi
  	set_mtrr_aps_delayed_init();
 
  	smp_quirk_init_udelay();
diff --git a/patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch b/patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch
index 1a7112e..75a6502 100644
--- a/patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch
+++ b/patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch
@@ -50,7 +50,7 @@ Acked-by: Borislav Petkov
  extern u64 x86_amd_ls_cfg_base;
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -152,7 +152,15 @@ u64 x86_spec_ctrl_get_default(void)
+@@ -150,7 +150,15 @@ u64 x86_spec_ctrl_get_default(void)
  }
  EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
 {
 	u64 host = x86_spec_ctrl_base;
 
-@@ -169,7 +177,15 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+@@ -167,7 +175,15 @@ void x86_spec_ctrl_set_guest(u64 guest_s
  }
  EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
 
 	if (!init_event) {
 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-@@ -5502,7 +5509,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+@@ -5504,7 +5511,7 @@ static void svm_vcpu_run(struct kvm_vcpu
  	 * is no need to worry about the conditional branch over the wrmsr
  	 * being speculatively taken.
  	 */
 -	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 +	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-	local_irq_enable();
-
+	asm volatile (
+		"push %%" _ASM_BP "; \n\t"
-@@ -5628,7 +5635,7 @@ static void svm_vcpu_run(struct kvm_vcpu
-	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+@@ -5631,7 +5638,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+	if (svm->spec_ctrl)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 -	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 +	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -9438,7 +9438,7 @@ static void __noclone vmx_vcpu_run(struc
+@@ -9427,7 +9427,7 @@ static void __noclone vmx_vcpu_run(struc
  	 * is no need to worry about the conditional branch over the wrmsr
  	 * being speculatively taken.
  	 */
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
-@@ -9576,7 +9576,7 @@ static void __noclone vmx_vcpu_run(struc
+@@ -9565,7 +9565,7 @@ static void __noclone vmx_vcpu_run(struc
  	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
  		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
diff --git a/patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch b/patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch
index e3dd80d..8ab017a 100644
--- a/patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch
+++ b/patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch
@@ -24,7 +24,7 @@ Acked-by: Borislav Petkov
 
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -214,16 +214,7 @@ enum spectre_v2_mitigation {
+@@ -217,16 +217,7 @@ enum spectre_v2_mitigation {
  	SPECTRE_V2_IBRS,
  };
 
 
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
-@@ -309,6 +300,9 @@ static inline void unrestrict_branch_spe
-	: "memory");
+@@ -278,6 +269,9 @@ static inline void indirect_branch_predi
+	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
  }
 
 +/* The Intel SPEC CTRL MSR base value cache */
 
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
-@@ -317,7 +311,7 @@ static inline void unrestrict_branch_spe
+@@ -286,7 +280,7 @@ static inline void indirect_branch_predi
  */
 #define firmware_restrict_branch_speculation_start()			\
 do {									\
 									\
 	preempt_disable();						\
 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
-@@ -326,7 +320,7 @@ do { \
+@@ -295,7 +289,7 @@ do { \
 
 #define firmware_restrict_branch_speculation_end()			\
 do {									\
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
-@@ -142,16 +143,6 @@ void x86_spec_ctrl_set(u64 val)
+@@ -140,16 +141,6 @@ void x86_spec_ctrl_set(u64 val)
  }
  EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
diff --git a/patches.arch/kvm-vmx-track-host_state-loaded-using-a-loaded_vmcs-pointer b/patches.arch/kvm-vmx-track-host_state-loaded-using-a-loaded_vmcs-pointer
index f145170..0fa06ab 100644
--- a/patches.arch/kvm-vmx-track-host_state-loaded-using-a-loaded_vmcs-pointer
+++ b/patches.arch/kvm-vmx-track-host_state-loaded-using-a-loaded_vmcs-pointer
@@ -27,12 +27,12 @@ Signed-off-by: Sean Christopherson
  Signed-off-by: Paolo Bonzini
  Acked-by: Joerg Roedel
  ---
- arch/x86/kvm/vmx.c | 22 +++++++++++++++-------
+ arch/x86/kvm/vmx.c | 22 +++++++++++++++-------
  1 file changed, 15 insertions(+), 7 deletions(-)
 
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -596,18 +596,22 @@ struct vcpu_vmx {
+@@ -745,17 +745,21 @@ struct vcpu_vmx {
  	/*
  	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
  	 * non-nested (L1) guest, it always points to vmcs01. For a nested
 +	struct loaded_vmcs *loaded_cpu_state;
  	bool __launched; /* temporary, used in vmx_vcpu_run */
  	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
  	} msr_autoload;
 +	struct {
  	u16 fs_sel, gs_sel, ldt_sel;
 #ifdef CONFIG_X86_64
  	u16 ds_sel, es_sel;
-@@ -2157,10 +2161,11 @@ static void vmx_save_host_state(struct k
+@@ -2326,10 +2330,11 @@ static void vmx_save_host_state(struct k
  	struct vcpu_vmx *vmx = to_vmx(vcpu);
  	int i;
 
 	/*
 	 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
 	 * allow segment selectors with cpl > 0 or ti == 1.
-@@ -2211,11 +2216,14 @@ static void vmx_save_host_state(struct k
+@@ -2380,11 +2385,14 @@ static void vmx_save_host_state(struct k
  static void __vmx_load_host_state(struct vcpu_vmx *vmx)
  {
 
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu))
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-@@ -9648,8 +9656,8 @@ static void vmx_switch_vmcs(struct kvm_v
+@@ -9906,8 +9914,8 @@ static void vmx_switch_vmcs(struct kvm_v
  		return;
 
  	cpu = get_cpu();
diff --git a/patches.arch/kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch b/patches.arch/kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch
index 41e027f..79805fa 100644
--- a/patches.arch/kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch
+++ b/patches.arch/kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch
@@ -73,7 +73,7 @@ Acked-by: Borislav Petkov
 
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
-@@ -5447,8 +5447,6 @@ static void svm_vcpu_run(struct kvm_vcpu
+@@ -5520,8 +5520,6 @@ static void svm_vcpu_run(struct kvm_vcpu
 
  	clgi();
 
 	/*
 	 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
 	 * it's non-zero. Since vmentry is serialising on affected CPUs, there
-@@ -5458,6 +5456,8 @@ static void svm_vcpu_run(struct kvm_vcpu
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+@@ -5530,6 +5528,8 @@ static void svm_vcpu_run(struct kvm_vcpu
+	 */
+	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 +	local_irq_enable();
 +
  	asm volatile (
  		"push %%" _ASM_BP "; \n\t"
  		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
-@@ -5568,9 +5568,6 @@ static void svm_vcpu_run(struct kvm_vcpu
+@@ -5652,15 +5652,15 @@ static void svm_vcpu_run(struct kvm_vcpu
  	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
  		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
-
-	/* Eliminate branch target predictions from guest mode */
-	vmexit_fill_RSB();
 	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
 	reload_tss(vcpu);
 
 	local_irq_disable();
diff --git a/patches.arch/x86-cpu-amd-have-smp_num_siblings-and-cpu_llc_id-always-be-present b/patches.arch/x86-cpu-amd-have-smp_num_siblings-and-cpu_llc_id-always-be-present
index 5ee0c58..0f88031 100644
--- a/patches.arch/x86-cpu-amd-have-smp_num_siblings-and-cpu_llc_id-always-be-present
+++ b/patches.arch/x86-cpu-amd-have-smp_num_siblings-and-cpu_llc_id-always-be-present
@@ -18,10 +18,10 @@ Link: http://lkml.kernel.org/r/1524864877-111962-2-git-send-email-suravee.suthik
  Acked-by: Joerg Roedel
 
  ---
- arch/x86/include/asm/smp.h   |  1 -
- arch/x86/kernel/cpu/amd.c    | 10 +---------
- arch/x86/kernel/cpu/common.c |  7 +++++++
- arch/x86/kernel/smpboot.c    |  7 -------
+ arch/x86/include/asm/smp.h   |  1 -
+ arch/x86/kernel/cpu/amd.c    | 10 +---------
+ arch/x86/kernel/cpu/common.c |  7 +++++++
+ arch/x86/kernel/smpboot.c    |  7 -------
  4 files changed, 8 insertions(+), 17 deletions(-)
 
 --- a/arch/x86/include/asm/smp.h
 
 extern unsigned disabled_cpus;
 --- a/arch/x86/kernel/cpu/amd.c
 +++ b/arch/x86/kernel/cpu/amd.c
-@@ -318,7 +318,6 @@ static void legacy_fixup_core_id(struct
+@@ -319,7 +319,6 @@ static void legacy_fixup_core_id(struct
  * Assumption: Number of cores in each internal node is the same.
  * (2) AMD processors supporting compute units
  */
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
 	u8 node_id;
-@@ -374,7 +373,6 @@ static void amd_get_topology(struct cpui
+@@ -375,7 +374,6 @@ static void amd_get_topology(struct cpui
  		legacy_fixup_core_id(c);
  	}
  }
 
 /*
  * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
-@@ -382,7 +380,6 @@ static void amd_get_topology(struct cpui
+@@ -383,7 +381,6 @@ static void amd_get_topology(struct cpui
  */
 static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 	unsigned bits;
 	int cpu = smp_processor_id();
 
-@@ -394,16 +391,11 @@ static void amd_detect_cmp(struct cpuinf
+@@ -395,16 +392,11 @@ static void amd_detect_cmp(struct cpuinf
  	/* use socket ID also for last level cache */
  	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
  	amd_get_topology(c);
 {
 --- a/arch/x86/kernel/smpboot.c
 +++ b/arch/x86/kernel/smpboot.c
-@@ -80,13 +80,6 @@
-	#include <asm/misc.h>
+@@ -81,13 +81,6 @@
  	#include <asm/qspinlock.h>
+	#include <asm/intel-family.h>
 
 -/* Number of siblings per CPU package */
 -int smp_num_siblings = 1;
diff --git a/patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch b/patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch
index 4dab0b0..eb688c7 100644
--- a/patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch
+++ b/patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch
@@ -35,7 +35,7 @@ Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1807261308190.997@cbobk.fhfr.pm
 
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -494,22 +494,15 @@ retpoline_auto:
+@@ -388,22 +388,15 @@ retpoline_auto:
  	pr_info("%s\n", spectre_v2_strings[mode]);
 
  	/*
 +	 * - RSB underflow (and switch to BTB) on Skylake+
 +	 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
  	 */
--	if ((!boot_cpu_has(X86_FEATURE_PTI) && !boot_cpu_has(X86_FEATURE_SMEP)) ||
--	    (!boot_cpu_has(X86_FEATURE_USE_IBRS) && is_skylake_era())) {
+-	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+-	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
 -		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 -		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
 -	}
diff --git a/patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch b/patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
index 764aee1..40ffa61 100644
--- a/patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
+++ b/patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
@@ -37,7 +37,7 @@ Link: https://lkml.kernel.org/r/20180820095835.5298-1-vbabka@suse.cz
 
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
-@@ -182,9 +182,9 @@ extern const struct seq_operations cpuin
+@@ -180,9 +180,9 @@ extern const struct seq_operations cpuin
 
  extern void cpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
-@@ -893,7 +893,7 @@ unsigned long max_swapfile_size(void)
+@@ -894,7 +894,7 @@ unsigned long max_swapfile_size(void)
 
  	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
  		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
 		/*
 		 * We encode swap offsets also with 3 bits below those for pfn
 		 * which makes the usable limit higher.
-@@ -901,7 +901,7 @@ unsigned long max_swapfile_size(void)
-	#ifdef CONFIG_X86_64
+@@ -902,7 +902,7 @@ unsigned long max_swapfile_size(void)
+	#if CONFIG_PGTABLE_LEVELS > 2
  		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
  #endif
 -		pages = min_t(unsigned long, l1tf_limit, pages);
diff --git a/patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf b/patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf
index cbaba47..7282aff 100644
--- a/patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf
+++ b/patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf
@@ -28,7 +28,7 @@ Acked-by: Joerg Roedel
 
 --- a/arch/x86/include/asm/pgtable-3level.h
 +++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -204,12 +204,43 @@ static inline pud_t native_pudp_get_and_
+@@ -205,12 +205,43 @@ static inline pud_t native_pudp_get_and_
  #endif
 
  /* Encode and de-code a swap entry */
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
-@@ -899,7 +899,7 @@ unsigned long max_swapfile_size(void)
+@@ -898,7 +898,7 @@ unsigned long max_swapfile_size(void)
  	 * We encode swap offsets also with 3 bits below those for pfn
  	 * which makes the usable limit higher.
  	 */
-#if CONFIG_X86_64
+#if CONFIG_PGTABLE_LEVELS > 2
  	l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
  #endif
-	pages = min_t(unsigned long long, l1tf_limit, pages);
+	pages = min_t(unsigned long, l1tf_limit, pages);
diff --git a/patches.fixes/bpf-properly-enforce-index-mask-to-prevent-out-of-bo.patch b/patches.fixes/bpf-properly-enforce-index-mask-to-prevent-out-of-bo.patch
index b7c8115..d38a0d7 100644
--- a/patches.fixes/bpf-properly-enforce-index-mask-to-prevent-out-of-bo.patch
+++ b/patches.fixes/bpf-properly-enforce-index-mask-to-prevent-out-of-bo.patch
@@ -118,7 +118,7 @@ Acked-by: Gary Lin
 +		unsigned long map_state;	/* pointer/poison value for maps */
  	};
  	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	bool seen; /* this insn was processed by the verifier */
+	int sanitize_stack_off; /* stack slot to be cleared */
 --- a/kernel/bpf/verifier.c
 +++ b/kernel/bpf/verifier.c
 @@ -154,7 +154,29 @@ struct bpf_verifier_stack_elem {
 
 struct bpf_call_arg_meta {
 	struct bpf_map *map_ptr;
-@@ -1624,6 +1646,29 @@ static void clear_all_pkt_pointers(struc
+@@ -1649,6 +1671,29 @@ static void clear_all_pkt_pointers(struc
  	}
  }
 
 static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	const struct bpf_func_proto *fn = NULL;
-@@ -1682,13 +1727,6 @@ static int check_call(struct bpf_verifie
+@@ -1707,13 +1752,6 @@ static int check_call(struct bpf_verifie
  	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
  	if (err)
  		return err;
 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
 	if (err)
 		return err;
-@@ -1699,6 +1737,10 @@ static int check_call(struct bpf_verifie
+@@ -1724,6 +1762,10 @@ static int check_call(struct bpf_verifie
  	if (err)
  		return err;
 
 	/* Mark slots with STACK_MISC in case of raw mode, stack offset
 	 * is inferred from register state.
 	 */
-@@ -1723,8 +1765,6 @@ static int check_call(struct bpf_verifie
+@@ -1748,8 +1790,6 @@ static int check_call(struct bpf_verifie
  	} else if (fn->ret_type == RET_VOID) {
  		regs[BPF_REG_0].type = NOT_INIT;
  	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
 		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
 		/* There is no offset yet applied, variable or fixed */
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-@@ -1740,11 +1780,6 @@ static int check_call(struct bpf_verifie
+@@ -1765,11 +1805,6 @@ static int check_call(struct bpf_verifie
  	}
  	regs[BPF_REG_0].map_ptr = meta.map_ptr;
  	regs[BPF_REG_0].id = ++env->id_gen;
 	} else {
 		verbose(env, "unknown return type %d of func %s#%d\n",
 			fn->ret_type, func_id_name(func_id), func_id);
-@@ -4307,6 +4342,7 @@ static int fixup_bpf_calls(struct bpf_ve
+@@ -4360,6 +4395,7 @@ static int fixup_bpf_calls(struct bpf_ve
  	struct bpf_insn *insn = prog->insnsi;
  	const struct bpf_func_proto *fn;
  	const int insn_cnt = prog->len;
 	struct bpf_insn insn_buf[16];
 	struct bpf_prog *new_prog;
 	struct bpf_map *map_ptr;
-@@ -4355,19 +4391,22 @@ static int fixup_bpf_calls(struct bpf_ve
+@@ -4408,19 +4444,22 @@ static int fixup_bpf_calls(struct bpf_ve
  			insn->imm = 0;
  			insn->code = BPF_JMP | BPF_TAIL_CALL;
 
 			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
 						  map_ptr->max_entries, 2);
 			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
-@@ -4391,9 +4430,12 @@ static int fixup_bpf_calls(struct bpf_ve
+@@ -4444,9 +4483,12 @@ static int fixup_bpf_calls(struct bpf_ve
  		 */
  		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
  		    insn->imm == BPF_FUNC_map_lookup_elem) {
diff --git a/patches.kabi/KABI-cpu-hotplug-provide-the-old-get-put_online_cpus.patch b/patches.kabi/KABI-cpu-hotplug-provide-the-old-get-put_online_cpus.patch
index 5c1af10..fe0edce 100644
--- a/patches.kabi/KABI-cpu-hotplug-provide-the-old-get-put_online_cpus.patch
+++ b/patches.kabi/KABI-cpu-hotplug-provide-the-old-get-put_online_cpus.patch
@@ -8,15 +8,13 @@ References: bsc#1087405
  Signed-off-by: Michal Suchanek
 
  ---
- include/linux/cpu.h |  8 ++++----
- kernel/cpu.c        |  8 ++++++++
+ include/linux/cpu.h | 8 ++++----
+ kernel/cpu.c        | 8 ++++++++
  2 files changed, 12 insertions(+), 4 deletions(-)
 
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index 85f04d0fb18a..f9f91c4397d6 100644
 --- a/include/linux/cpu.h
 +++ b/include/linux/cpu.h
-@@ -128,10 +128,10 @@ static inline void cpu_hotplug_enable(void) { }
+@@ -132,10 +132,10 @@ static inline void cpu_hotplug_enable(vo
  #endif	/* !CONFIG_HOTPLUG_CPU */
 
  /* Wrappers which go away once all code is converted */
 
 #ifdef CONFIG_PM_SLEEP_SMP
 extern int freeze_secondary_cpus(int primary);
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 9518e1519de2..9d54a57145dd 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
-@@ -1761,3 +1761,11 @@ void __init boot_cpu_state_init(void)
-	{
-	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+@@ -2074,3 +2074,11 @@ void __init boot_cpu_hotplug_init(void)
+	this_cpu_write(cpuhp_state.booted_once, true);
+	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
  }
 +
 +/* kabi */
 +static void put_online_cpus(void) { cpus_read_unlock(); }
 +EXPORT_SYMBOL_GPL(get_online_cpus);
 +EXPORT_SYMBOL_GPL(put_online_cpus);
--- 
-2.13.6
-
diff --git a/patches.kabi/x86-cpuinfo_x86-ignore-initialized-member.patch b/patches.kabi/x86-cpuinfo_x86-ignore-initialized-member.patch
index 6f8c3ed..1a2e6ae 100644
--- a/patches.kabi/x86-cpuinfo_x86-ignore-initialized-member.patch
+++ b/patches.kabi/x86-cpuinfo_x86-ignore-initialized-member.patch
@@ -13,10 +13,10 @@ Signed-off-by: Borislav Petkov
 
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
-@@ -131,7 +131,9 @@ struct cpuinfo_x86 {
-	/* Index into per_cpu list: */
-	u16 cpu_index;
-	u32 microcode;
+@@ -136,7 +136,9 @@ struct cpuinfo_x86 {
+	 * NOTE: only to be used for l1tf mitigation
+	 */
+	u8 x86_cache_bits;
 +#ifndef __GENKSYMS__
  	unsigned initialized : 1;
 +#endif
diff --git a/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch b/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
index 346c365..3baff30 100644
--- a/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
+++ b/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
@@ -11,18 +11,16 @@ cpu to vcpu mappings.
  Signed-off-by: Davidlohr Bueso
 
  ---
- Documentation/admin-guide/kernel-parameters.txt |  4 +++
- arch/x86/include/asm/qspinlock.h                | 11 ++++++++-
- arch/x86/kernel/kvm.c                           | 29 ++++++++++++++++++++++++
- arch/x86/kernel/paravirt.c                      |  8 ++++++
- arch/x86/kernel/smpboot.c                       |  2 +
+ Documentation/admin-guide/kernel-parameters.txt |  4 +++
+ arch/x86/include/asm/qspinlock.h                | 11 ++++++++-
+ arch/x86/kernel/kvm.c                           | 29 ++++++++++++++++++++++++
+ arch/x86/kernel/paravirt.c                      |  8 ++++++
+ arch/x86/kernel/smpboot.c                       |  2 +
  5 files changed, 53 insertions(+), 1 deletion(-)
 
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 2fce2a81eadc..1eaa4f204457 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -1886,6 +1886,10 @@
+@@ -1899,6 +1899,10 @@
  			feature (tagged TLBs) on capable Intel chips.
  			Default is 1 (enabled)
 
 +	nopvspin	[X86,KVM]
 +			Disables the paravirtualized spinlock slowpath
 +			optimizations for KVM.
 +
-	l2cr=		[PPC]
+	l1tf=		[X86] Control mitigation of the L1TF vulnerability on
+			affected CPUs
 
-	l3cr=		[PPC]
-diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
-index 48a706f641f2..308dfd0714c7 100644
 --- a/arch/x86/include/asm/qspinlock.h
 +++ b/arch/x86/include/asm/qspinlock.h
 @@ -1,6 +1,7 @@
  #include <asm/cpufeature.h>
  #include <asm-generic/qspinlock_types.h>
  #include <asm/paravirt.h>
-@@ -46,10 +47,14 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
+@@ -46,10 +47,14 @@ static inline void queued_spin_unlock(st
  #endif
 
  #ifdef CONFIG_PARAVIRT
 		return false;
 
 	/*
-@@ -65,6 +70,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
+@@ -65,6 +70,10 @@ static inline bool virt_spin_lock(struct
  	return true;
  }
 
 #endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
-diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
-index 1fce20274e44..876aa6df42e4 100644
 --- a/arch/x86/kernel/kvm.c
 +++ b/arch/x86/kernel/kvm.c
 @@ -47,6 +47,22 @@
 
 static int kvmapf = 1;
 
 static int parse_no_kvmapf(char *arg)
-@@ -454,6 +470,13 @@ static void __init sev_map_percpu_data(void)
+@@ -454,6 +470,13 @@ static void __init sev_map_percpu_data(v
  }
 
  #ifdef CONFIG_SMP
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 63d4e63342bf..1d9f2b4f4edb 100644
 --- a/arch/x86/kernel/paravirt.c
 +++ b/arch/x86/kernel/paravirt.c
-@@ -115,6 +115,14 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+@@ -121,6 +121,14 @@ unsigned paravirt_patch_jmp(void *insnbu
  	return 5;
  }
 
 /* Neat trick to map patch type back to the call within the
  * corresponding structure. */
 static void *get_call_destination(u8 type)
-diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index c82eb787f3e0..0de7c9bcef85 100644
 --- a/arch/x86/kernel/smpboot.c
 +++ b/arch/x86/kernel/smpboot.c
-@@ -1281,6 +1281,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
+@@ -1334,6 +1334,8 @@ void __init native_smp_prepare_cpus(unsi
  	pr_info("CPU0: ");
  	print_cpu_info(&cpu_data(0));
 
 	uv_system_init();
 
 	set_mtrr_aps_delayed_init();
--- 
-2.13.6
-
diff --git a/patches.suse/0001-x86-speculation-Add-basic-IBRS-support-infrastructur.patch b/patches.suse/0001-x86-speculation-Add-basic-IBRS-support-infrastructur.patch
index 1d4ecf6..e75326a 100644
--- a/patches.suse/0001-x86-speculation-Add-basic-IBRS-support-infrastructur.patch
+++ b/patches.suse/0001-x86-speculation-Add-basic-IBRS-support-infrastructur.patch
@@ -25,14 +25,14 @@ Signed-off-by: Jiri Slaby
  Documentation/admin-guide/kernel-parameters.txt |  1
  arch/x86/include/asm/cpufeatures.h              |  1
  arch/x86/include/asm/nospec-branch.h            |  2 -
- arch/x86/kernel/cpu/bugs.c                      | 35 ++++++++++++++++++------
+ arch/x86/kernel/cpu/bugs.c                      | 31 +++++++++++++++++++-----
  arch/x86/lib/Makefile                           |  2 -
  arch/x86/lib/retpoline.S                        |  5 +++
- 6 files changed, 35 insertions(+), 11 deletions(-)
+ 6 files changed, 33 insertions(+), 9 deletions(-)
 
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -3891,6 +3891,7 @@
+@@ -3980,6 +3980,7 @@
  			retpoline	  - replace indirect branches
  			retpoline,generic - google's original retpoline
  			retpoline,amd     - AMD-specific minimal thunk
 --- a/arch/x86/include/asm/cpufeatures.h
 +++ b/arch/x86/include/asm/cpufeatures.h
 @@ -214,6 +214,7 @@
- 
+ #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
  #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 +#define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
- 
- /* Virtualization flags: Linux defined, word 8 */
- #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
+ #define X86_FEATURE_LS_CFG_SSBD	( 7*32+25) /* "" AMD SSBD implementation via LS_CFG MSR */
+ #define X86_FEATURE_IBRS		( 7*32+26) /* Indirect Branch Restricted Speculation */
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -225,7 +225,6 @@ extern char __indirect_thunk_end[];
+@@ -236,7 +236,6 @@ extern char __indirect_thunk_end[];
  */
 static inline void vmexit_fill_RSB(void)
 {
 	unsigned long loops;
 
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
-@@ -235,7 +234,6 @@ static inline void vmexit_fill_RSB(void)
+@@ -246,7 +245,6 @@ static inline void vmexit_fill_RSB(void)
  		      "910:"
  		      : "=r" (loops), ASM_CALL_CONSTRAINT
  		      : : "memory" );
 -#endif
 }
 
-#define alternative_msr_write(_msr, _val, _feature)		\
+static __always_inline
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -79,6 +79,7 @@ enum spectre_v2_mitigation_cmd {
+@@ -132,6 +132,7 @@ enum spectre_v2_mitigation_cmd {
  	SPECTRE_V2_CMD_RETPOLINE,
  	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
  	SPECTRE_V2_CMD_RETPOLINE_AMD,
 };
 
 static const char *spectre_v2_strings[] = {
-@@ -87,6 +88,7 @@ static const char *spectre_v2_strings[]
+@@ -140,6 +141,7 @@ static const char *spectre_v2_strings[]
  	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
  	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
  	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
 };
 
 #undef pr_fmt
-@@ -149,6 +151,7 @@ static const struct {
+@@ -389,6 +391,7 @@ static const struct {
  	{ "retpoline",	       SPECTRE_V2_CMD_RETPOLINE,	  false },
  	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
  	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
 	{ "auto",              SPECTRE_V2_CMD_AUTO,               false },
 };
 
-@@ -200,7 +203,7 @@ static enum spectre_v2_mitigation_cmd __
+@@ -440,7 +443,7 @@ static enum spectre_v2_mitigation_cmd __
  	return cmd;
  }
 
 static bool __init is_skylake_era(void)
 {
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-@@ -234,19 +237,34 @@ static void __init spectre_v2_select_mit
+@@ -474,19 +477,34 @@ static void __init spectre_v2_select_mit
  	case SPECTRE_V2_CMD_NONE:
  		return;
 
 	case SPECTRE_V2_CMD_RETPOLINE:
 		if (IS_ENABLED(CONFIG_RETPOLINE))
 			goto retpoline_auto;
-@@ -273,6 +291,7 @@ retpoline_auto:
+@@ -513,6 +531,7 @@ retpoline_auto:
  		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
  	}
 
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
-@@ -288,8 +307,8 @@ retpoline_auto:
-	 * or deactivated in favour of retpolines the RSB fill on context
-	 * switch is required.
-	 */
--	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
--	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
-+	if ((!boot_cpu_has(X86_FEATURE_PTI) && !boot_cpu_has(X86_FEATURE_SMEP)) ||
-+	    (!boot_cpu_has(X86_FEATURE_USE_IBRS) && is_skylake_era())) {
-		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
-		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
-	}
 --- a/arch/x86/lib/Makefile
 +++ b/arch/x86/lib/Makefile
 @@ -25,7 +25,7 @@ lib-y += memcpy_$(BITS).o
diff --git a/patches.suse/0002-x86-speculation-Add-inlines-to-control-Indirect-Bran.patch b/patches.suse/0002-x86-speculation-Add-inlines-to-control-Indirect-Bran.patch
index 447a137..96727d0 100644
--- a/patches.suse/0002-x86-speculation-Add-inlines-to-control-Indirect-Bran.patch
+++ b/patches.suse/0002-x86-speculation-Add-inlines-to-control-Indirect-Bran.patch
@@ -28,10 +28,11 @@ Signed-off-by: Jiri Slaby
 
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -253,6 +253,42 @@ static inline void indirect_branch_predi
+@@ -265,6 +265,42 @@ static inline void indirect_branch_predi
+	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
  }
 
- /*
++/*
 + * This also performs a barrier, and setting it again when it was already
 + * set is NOT a no-op.
 + */
 +		: "memory");
 +}
 +
-+/*
- * With retpoline, we must use IBRS to restrict branch prediction
- * before calling into firmware.
- *
+ /* The Intel SPEC CTRL MSR base value cache */
+ extern u64 x86_spec_ctrl_base;
+
diff --git a/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch b/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
index cdcdfbc..ccf4c74 100644
--- a/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
+++ b/patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
@@ -37,8 +37,8 @@ Acked-by: Borislav Petkov
 
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -236,15 +236,16 @@ static inline void vmexit_fill_RSB(void)
-	: : "memory" );
+@@ -241,15 +241,16 @@ static inline void vmexit_fill_RSB(void)
+	#endif
  }
 
 -#define alternative_msr_write(_msr, _val, _feature)		\
diff --git a/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch b/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
index fc11d8c..410ebab 100644
--- a/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
+++ b/patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
@@ -53,7 +53,7 @@ Acked-by: Borislav Petkov
 
 --- a/arch/x86/kernel/cpu/bugs.c
 +++ b/arch/x86/kernel/cpu/bugs.c
-@@ -124,6 +124,24 @@ u64 x86_spec_ctrl_get_default(void)
+@@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void)
  }
  EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
-@@ -5453,8 +5453,7 @@ static void svm_vcpu_run(struct kvm_vcpu
+@@ -5504,8 +5504,7 @@ static void svm_vcpu_run(struct kvm_vcpu
  	 * is no need to worry about the conditional branch over the wrmsr
  	 * being speculatively taken.
  	 */
 -	if (svm->spec_ctrl)
 -		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 +	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
-	local_irq_enable();
-
-@@ -5568,6 +5567,8 @@ static void svm_vcpu_run(struct kvm_vcpu
-	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+	asm volatile (
+		"push %%" _ASM_BP "; \n\t"
+@@ -5620,6 +5619,8 @@ static void svm_vcpu_run(struct kvm_vcpu
+	if (svm->spec_ctrl)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 +	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 +
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -9442,8 +9442,7 @@ static void __noclone vmx_vcpu_run(struc
+@@ -9430,8 +9430,7 @@ static void __noclone vmx_vcpu_run(struc
  	 * is no need to worry about the conditional branch over the wrmsr
  	 * being speculatively taken.
  	 */
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
-@@ -9581,8 +9580,7 @@ static void __noclone vmx_vcpu_run(struc
+@@ -9569,8 +9568,7 @@ static void __noclone vmx_vcpu_run(struc
  	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
  		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
diff --git a/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch b/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
index 4535dc8..1624e85 100644
--- a/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
+++ b/patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
@@ -53,16 +53,16 @@ Acked-by: Borislav Petkov
 --- a/arch/x86/include/asm/cpufeatures.h
 +++ b/arch/x86/include/asm/cpufeatures.h
 @@ -215,6 +215,7 @@
+ #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
- #define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
 +#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
 
  /* Virtualization flags: Linux defined, word 8 */
  #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -235,6 +235,12 @@ extern u64 x86_spec_ctrl_get_default(voi
+@@ -238,6 +238,12 @@ extern u64 x86_spec_ctrl_get_default(voi
  extern void x86_spec_ctrl_set_guest(u64);
  extern void x86_spec_ctrl_restore_host(u64);
 
 #ifdef CONFIG_X86_32
 /*
  * Check whether we are able to run this kernel safely on SMP.
-@@ -376,6 +383,99 @@ retpoline_auto:
+@@ -357,6 +364,99 @@ retpoline_auto:
  }
  #undef pr_fmt
 
 #ifdef CONFIG_SYSFS
 
-@@ -401,6 +501,9 @@ ssize_t cpu_show_common(struct device *d
+@@ -382,6 +482,9 @@ ssize_t cpu_show_common(struct device *d
  			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
  			       spectre_v2_module_string());
 
 }
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -2591,6 +2591,9 @@
+@@ -2592,6 +2592,9 @@
  			allow data leaks with this option, which is equivalent
  			to spectre_v2=off.
 
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
-@@ -3904,6 +3907,36 @@
+@@ -3899,6 +3902,36 @@
  			Not specifying this option is equivalent to
  			spectre_v2=auto.
 
diff --git a/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch b/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
index 7c3cd4f..9b660a4 100644
--- a/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
+++ b/patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
@@ -30,8 +30,8 @@ Acked-by: Borislav Petkov
 --- a/arch/x86/include/asm/cpufeatures.h
 +++ b/arch/x86/include/asm/cpufeatures.h
 @@ -216,6 +216,7 @@
+ #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
- #define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
  #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
 +#define X86_FEATURE_AMD_RDS		(7*32+25) /* "" AMD RDS implementation */
 
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
 --- a/arch/x86/include/asm/nospec-branch.h
 +++ b/arch/x86/include/asm/nospec-branch.h
-@@ -241,6 +241,10 @@ enum ssb_mitigation {
+@@ -244,6 +244,10 @@ enum ssb_mitigation {
  	SPEC_STORE_BYPASS_DISABLE,
  };
 
 	 */
 	if (boot_cpu_has(X86_FEATURE_IBRS))
 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-@@ -155,6 +163,14 @@ void x86_spec_ctrl_restore_host(u64 gues
+@@ -153,6 +161,14 @@ void x86_spec_ctrl_restore_host(u64 gues
  }
  EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
-@@ -461,6 +477,11 @@ static enum ssb_mitigation_cmd __init __
+@@ -442,6 +458,11 @@ static enum ssb_mitigation_cmd __init __
 
  	switch (cmd) {
  	case SPEC_STORE_BYPASS_CMD_AUTO:
 	case SPEC_STORE_BYPASS_CMD_ON:
 		mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
-@@ -487,6 +508,7 @@ static enum ssb_mitigation_cmd __init __
+@@ -468,6 +489,7 @@ static enum ssb_mitigation_cmd __init __
  		x86_spec_ctrl_set(SPEC_CTRL_RDS);
  		break;
  	case X86_VENDOR_AMD:
 		break;
 	}
 }
-@@ -508,6 +530,9 @@ void x86_spec_ctrl_setup_ap(void)
+@@ -489,6 +511,9 @@ void x86_spec_ctrl_setup_ap(void)
 {
 	if (boot_cpu_has(X86_FEATURE_IBRS))
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
diff --git a/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
index 5b8f412..18ae75f 100644
--- a/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
+++ b/patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
@@ -38,15 +38,15 @@ Acked-by: Borislav Petkov
 --- a/arch/x86/include/asm/cpufeatures.h
 +++ b/arch/x86/include/asm/cpufeatures.h
 @@ -216,7 +216,7 @@
+ #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
  #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
- #define X86_FEATURE_USE_IBRS		( 7*32+23) /* "" Use IBRS for Spectre v2 safety */
  #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+24) /* "" Disable Speculative Store Bypass. */
 -#define X86_FEATURE_AMD_RDS		(7*32+25) /* "" AMD RDS implementation */
 +#define X86_FEATURE_AMD_SSBD		(7*32+25) /* "" AMD SSBD implementation */
 
  /* Virtualization flags: Linux defined, word 8 */
  #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
-@@ -333,7 +333,7 @@
+@@ -336,7 +336,7 @@
  #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
  #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
  #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
 
 void __init check_bugs(void)
 {
-@@ -147,7 +147,7 @@ u64 x86_spec_ctrl_get_default(void)
+@@ -145,7 +145,7 @@ u64 x86_spec_ctrl_get_default(void)
  	u64 msrval = x86_spec_ctrl_base;
 
  	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 	return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
-@@ -160,7 +160,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
+@@ -158,7 +158,7 @@ void x86_spec_ctrl_set_guest(u64 guest_s
  		return;
 
  	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 
 	if (host != guest_spec_ctrl)
 		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
-@@ -175,18 +175,18 @@ void x86_spec_ctrl_restore_host(u64 gues
+@@ -173,18 +173,18 @@ void x86_spec_ctrl_restore_host(u64 gues
  		return;
 
  	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
 	wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
-@@ -491,7 +491,7 @@ static enum ssb_mitigation_cmd __init __
+@@ -472,7 +472,7 @@ static enum ssb_mitigation_cmd __init __
  	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
  	enum ssb_mitigation_cmd cmd;
 
 	return mode;
 
 	cmd = ssb_parse_cmdline();
-@@ -525,7 +525,7 @@ static enum ssb_mitigation_cmd __init __
+@@ -506,7 +506,7 @@ static enum ssb_mitigation_cmd __init __
  	/*
  	 * We have three CPU feature flags that are in play here:
  	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
 	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
 	 */
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
-@@ -536,12 +536,12 @@ static enum ssb_mitigation_cmd __init __
+@@ -517,12 +517,12 @@ static enum ssb_mitigation_cmd __init __
  		 */
  		switch (boot_cpu_data.x86_vendor) {
  		case X86_VENDOR_INTEL:
 			break;
 		}
 	}
-@@ -574,16 +574,16 @@ static int ssb_prctl_set(struct task_str
+@@ -555,16 +555,16 @@ static int ssb_prctl_set(struct task_str
  		if (task_spec_ssb_force_disable(task))
  			return -EPERM;
  		task_clear_spec_ssb_disable(task);
 		break;
 	default:
 		return -ERANGE;
-@@ -653,7 +653,7 @@ void x86_spec_ctrl_setup_ap(void)
+@@ -634,7 +634,7 @@ void x86_spec_ctrl_setup_ap(void)
  		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
  	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
 
 /* all calls to cpuid_count() should be made on the same cpu */
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
-@@ -3298,7 +3298,7 @@ static int vmx_get_msr(struct kvm_vcpu *
+@@ -3296,7 +3296,7 @@ static int vmx_get_msr(struct kvm_vcpu *
  		if (!msr_info->host_initiated &&
  		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
  		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
 			return 1;
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
-@@ -3420,11 +3420,11 @@ static int vmx_set_msr(struct kvm_vcpu *
+@@ -3418,11 +3418,11 @@ static int vmx_set_msr(struct kvm_vcpu *
  		if (!msr_info->host_initiated &&
  		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
  		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
diff --git a/patches.suse/bpf-prevent-memory-disambiguation-attack.patch b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
index bf3762b..6ec86dc 100644
--- a/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
+++ b/patches.suse/bpf-prevent-memory-disambiguation-attack.patch
@@ -36,7 +36,7 @@ Signed-off-by: Jiri Kosina
 --- a/include/linux/bpf_verifier.h
 +++ b/include/linux/bpf_verifier.h
 @@ -115,6 +115,7 @@ struct bpf_insn_aux_data {
-	unsigned long map_state;	/* pointer/poison value for maps */
+	struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
  	};
  	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 +	int sanitize_stack_off; /* stack slot to be cleared */
 --- a/kernel/bpf/verifier.c
 +++ b/kernel/bpf/verifier.c
-@@ -680,7 +680,7 @@ static bool is_spillable_regtype(enum bp
+@@ -658,7 +658,7 @@ static bool is_spillable_regtype(enum bp
  */
 static int check_stack_write(struct bpf_verifier_env *env,
  			     struct bpf_verifier_state *state, int off,
 {
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
 
-@@ -711,8 +711,33 @@ static int check_stack_write(struct bpf_
+@@ -689,8 +689,33 @@ static int check_stack_write(struct bpf_
  		state->stack[spi].spilled_ptr = state->regs[value_regno];
  		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
 	} else {
 		/* regular write of data into stack */
 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
-@@ -1203,7 +1228,7 @@ static int check_mem_access(struct bpf_v
+@@ -1181,7 +1206,7 @@ static int check_mem_access(struct bpf_v
 
  	if (t == BPF_WRITE)
  		err = check_stack_write(env, state, off, size,
 	else
 		err = check_stack_read(env, state, off, size,
 				       value_regno);
-@@ -4267,6 +4292,34
+@@ -4267,6 +4292,34
@@ static int convert_ctx_accesses(struct b +@@ -4232,6 +4257,34 @@ static int convert_ctx_accesses(struct b else continue; diff --git a/series.conf b/series.conf index 7e2f22e..0af72f6 100644 --- a/series.conf +++ b/series.conf @@ -15603,6 +15603,53 @@ patches.fixes/sock_diag-fix-use-after-free-read-in-__sk_free.patch patches.drivers/net-sched-red-avoid-hashing-NULL-child.patch patches.drivers/cxgb4-fix-offset-in-collecting-TX-rate-limit-info.patch + patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch + patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch + patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch + patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch + patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch + patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch + patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch + patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch + patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch + patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch + patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch + patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch + patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch + patches.suse/14-prctl-add-speculation-control-prctls.patch + patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch + patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch + patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch + patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch + patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch + patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch + patches.suse/21-prctl-add-force-disable-speculation.patch + patches.suse/22-seccomp-use-pr_spec_force_disable.patch + patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch + patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch + patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch + patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch + patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch + patches.arch/28-x86-bugs-fix-_ssb_select_mitigation-return-type.patch + patches.arch/29-x86-bugs-make-cpu_show_common-static.patch + patches.arch/30-x86-bugs-fix-the-parameters-alignment-and-missing-void.patch + patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch + patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch + patches.arch/32-x86-cpufeatures-disentangle-msr_spec_ctrl-enumeration-from-ibrs.patch + patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch + patches.arch/34-x86-cpufeatures-add-feature_zen.patch + patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch + patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch + patches.arch/37-x86-speculation-add-virtualized-speculative-store-bypass-disable-support.patch + patches.arch/38-x86-speculation-rework-speculative_store_bypass_update.patch + patches.arch/39-x86-bugs-unify-x86_spec_ctrl_-set_guest-restore_host.patch + patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch + 
patches.arch/41-x86-bugs-remove-x86_spec_ctrl_set.patch + patches.arch/42-x86-bugs-rework-spec_ctrl-base-and-mask-logic.patch + patches.arch/43-x86-speculation-kvm-implement-support-for-virt_spec_ctrl-ls_cfg.patch + patches.arch/44-kvm-svm-implement-virt_spec_ctrl-support-for-ssbd.patch + patches.arch/45-x86-bugs-rename-ssbd_no-to-ssb_no.patch + patches.suse/bpf-prevent-memory-disambiguation-attack.patch patches.fixes/0001-iov_iter-fix-return-type-of-_pipe_get_pages.patch patches.fixes/0002-iov_iter-fix-memory-leak-in-pipe_get_pages_alloc.patch patches.fixes/fs-don-t-scan-the-inode-cache-before-SB_BORN-is-set.patch @@ -15676,7 +15723,9 @@ patches.suse/kernel-sys.c-fix-potential-Spectre-v1-issue.patch patches.arch/x86-kvm-fix-lapic-timer-drift-when-guest-uses-periodic-mode patches.arch/kvm-x86-update-cpuid-properly-when-cr4-osxave-or-cr4-pke-is-changed + patches.arch/46-kvm-x86-ia32_arch_capabilities-is-always-supported.patch patches.suse/msft-hv-1696-KVM-x86-fix-UD-address-of-failed-Hyper-V-hypercalls.patch + patches.arch/47-kvm-vmx-expose-ssbd-properly-to-guests.patch patches.suse/0001-tracing-Make-the-snapshot-trigger-work-with-instance.patch patches.fixes/afs-Fix-directory-permissions-check.patch patches.drivers/Input-synaptics-Lenovo-Carbon-X1-Gen5-2017-devices-s @@ -16687,6 +16736,7 @@ patches.arch/x86-mce-check-for-alternate-indication-of-machine-check-recovery-on-skylake patches.arch/x86-mce-do-not-overwrite-mci_status-in-mce_no_way_out patches.arch/x86-mce-fix-incorrect-machine-check-from-unknown-source-message + patches.arch/x86-pti-xenpv-dont-report-as-vulnerable.patch patches.suse/x86-spectre_v1-Disable-compiler-optimizations-over-a.patch patches.arch/x86-call-fixup_exception-before-notify_die-in-math_error patches.fixes/xen-speculative_store_bypass_ht_init-call.patch @@ -17145,6 +17195,7 @@ patches.arch/x86-pti-check-the-return-value-of-pti_user_pagetable_walk_p4d patches.arch/x86-pti-check-the-return-value-of-pti_user_pagetable_walk_pmd patches.arch/x86-mm-remove-in_nmi-warning-from-vmalloc_fault + patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch patches.arch/x86-tsc-add-missing-header-to-tsc_msr-c patches.arch/s390-sles15-15-02-qdio-reset-old-sbal_state-flags.patch patches.arch/s390-detect-etoken-facility.patch @@ -17161,7 +17212,70 @@ patches.fixes/ext4-check-for-NUL-characters-in-extended-attribute-.patch patches.fixes/ext4-fix-spectre-gadget-in-ext4_mb_regular_allocator.patch patches.suse/xfs-fix-a-null-pointer-dereference-in-xfs_bmap_exten.patch + patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch + patches.arch/x86-l1tf-02-change-order-of-offset-type.patch + patches.arch/x86-l1tf-03-protect-swap-entries.patch + patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch + patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch + patches.arch/x86-l1tf-06-add-sysfs-report.patch + patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch + patches.arch/x86-l1tf-07-limit-swap-file-size.patch + patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch + patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch + patches.arch/04-x86-topology-provide-topology_smt_supported.patch + patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch + patches.arch/06-cpu-hotplug-split-do_cpu_down.patch + patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch + patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch + 
patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch + patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch + patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch + patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch + patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch + patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch + patches.arch/x86-speculation-l1tf-extend-64bit-swap-file-size-limit + patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch + patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch + patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf + patches.arch/x86-speculation-l1tf-fix-up-pte-pfn-conversion-for-pae + patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch + patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch + patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch + patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch + patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch + patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch + patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch + patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch + patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch + patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch + patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch + patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch + patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch + patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch + patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch + patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch + patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch + patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch + patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch + patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch + patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch + patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch + patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch + patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch + patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch + patches.arch/x86-kvm-vmx-don-t-set-l1tf_flush_l1d-to-true-from-vmx_l1d_flush + patches.arch/x86-kvm-vmx-replace-vmx_l1d_flush_always-with-vmx_l1d_flush_cond + patches.arch/x86-kvm-vmx-move-the-l1tf_flush_l1d-test-to-vmx_l1d_flush + patches.arch/x86-kvm-vmx-don-t-set-l1tf_flush_l1d-from-vmx_handle_external_intr + patches.arch/x86-speculation-simplify-sysfs-report-of-vmx-l1tf-vulnerability + patches.arch/x86-speculation-use-arch_capabilities-to-skip-l1d-flush-on-vmentry patches.arch/kvm-vmx-tell-the-nested-hypervisor-to-skip-l1d-flush-on-vmentry + patches.arch/cpu-hotplug-fix-smt-supported-evaluation + patches.arch/x86-speculation-l1tf-invert-all-not-present-mappings + patches.arch/x86-speculation-l1tf-make-pmd-pud_mknotpresent-invert + patches.arch/x86-mm-pat-make-set_memory_np-l1tf-safe + patches.arch/x86-mm-kmmio-make-the-tracer-robust-against-l1tf + patches.arch/x86-microcode-allow-late-microcode-loading-with-smt-disabled 
 patches.fixes/blk-mq-avoid-to-synchronize-rcu-inside-blk_cleanup_q.patch
 patches.drivers/nvme-move-init-of-keep_alive-work-item-to-controller.patch
 patches.fixes/nvme-use-hw-qid-in-trace-events.patch
@@ -17195,6 +17309,7 @@
 patches.suse/0001-md-cluster-clear-another-node-s-suspend_area-after-t.patch
 patches.suse/0002-md-cluster-show-array-s-status-more-accurate.patch
 patches.suse/0003-md-cluster-don-t-send-msg-if-array-is-closing.patch
+	patches.arch/0001-x86-init-fix-build-with-CONFIG_SWAP-n.patch
 patches.drivers/spi-cadence-Change-usleep_range-to-udelay-for-atomic.patch
 patches.drivers/spi-davinci-fix-a-NULL-pointer-dereference.patch
 patches.drivers/spi-pxa2xx-Add-support-for-Intel-Ice-Lake.patch
@@ -17469,6 +17584,7 @@
 patches.drivers/mailbox-xgene-slimpro-Fix-potential-NULL-pointer-der
 patches.drivers/thermal_hwmon-Sanitize-attribute-name-passed-to-hwmo
 patches.drivers/thermal_hwmon-Pass-the-originating-device-down-to-hw
+	patches.arch/x86-speculation-l1tf-exempt-zeroed-ptes-from-inversion
 patches.suse/module-exclude-shn_undef-symbols-from-kallsyms-api.patch
 patches.suse/0001-module-make-it-clear-when-we-re-handling-the-module-.patch
 patches.suse/0002-module-setup-load-info-before-module_sig_check.patch
@@ -17643,6 +17759,7 @@
 patches.fixes/block-bfq-return-nbytes-and-not-zero-from-struct-cft.patch
 patches.arch/x86-kvm-avoid-unused-variable-warning
 patches.arch/kvm-x86-svm-call-x86_spec_ctrl_set_guest-host-with-interrupts-disabled.patch
+	patches.arch/kvm-vmx-fixes-for-vmentry_l1d_flush-module-parameter
 patches.drivers/platform-x86-ideapad-laptop-Apply-no_hw_rfkill-to-Y2
 patches.drivers/platform-x86-thinkpad_acpi-Proper-model-release-matc
 patches.drivers/platform-x86-toshiba_acpi-Fix-defined-but-not-used-b.patch
@@ -17723,6 +17840,10 @@
 patches.arch/x86-vdso-fix-vdso-build-if-a-retpoline-is-emitted.patch
 patches.arch/x86-mce-add-notifier_block-forward-declaration
 patches.arch/x86-process-re-export-start_thread
+	patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
+	patches.arch/x86-kvm-vmx-remove-duplicate-l1d-flush-definitions
+	patches.arch/0001-x86-speculation-l1tf-Fix-off-by-one-error-when-warni.patch
+	patches.arch/0001-x86-speculation-l1tf-Suggest-what-to-do-on-systems-w.patch
 patches.drivers/qed-Wait-for-ready-indication-before-rereading-the-s.patch
 patches.drivers/qed-Wait-for-MCP-halt-and-resume-commands-to-take-pl.patch
 patches.drivers/qed-Prevent-a-possible-deadlock-during-driver-load-a.patch
@@ -17757,6 +17878,7 @@
 patches.arch/x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear
 patches.suse/watchdog-Mark-watchdog-touch-functions-as-notrace.patch
 patches.arch/x86-spectre-add-missing-family-6-check-to-microcode-check.patch
+	patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
 patches.arch/x86-entry-64-wipe-kasan-stack-shadow-before-rewind_stack_do_exit
 patches.arch/x86-nmi-fix-nmi-uaccess-race-against-cr3-switching
 patches.fixes/x86-mce-Fix-set_mce_nospec-to-avoid-GP-fault.patch
@@ -18895,60 +19017,9 @@
 patches.kabi/kabi-protect-enum-mem_type.patch
-	# SSB
-	patches.suse/01-x86-nospec-simplify-alternative_msr_write.patch
-	patches.suse/02-x86-bugs-concentrate-bug-detection-into-a-separate-function.patch
-	patches.suse/03-x86-bugs-concentrate-bug-reporting-into-a-separate-function.patch
-	patches.suse/04-x86-bugs-read-spec_ctrl-msr-during-boot-and-re-use-reserved-bits.patch
-	patches.suse/05-x86-bugs-kvm-support-the-combination-of-guest-and-host-ibrs.patch
-	patches.suse/06-x86-bugs-expose-sys-spec_store_bypass.patch
-	patches.suse/07-x86-cpufeatures-add-x86_feature_rds.patch
-	patches.suse/08-x86-bugs-provide-boot-parameters-for-the-spec_store_bypass_disable-mitigation.patch
-	patches.suse/09-x86-bugs-intel-set-proper-cpu-features-and-setup-rds.patch
-	patches.suse/10-x86-bugs-whitelist-allowed-spec_ctrl-msr-values.patch
-	patches.suse/11-x86-bugs-amd-add-support-to-disable-rds-on-famh-if-requested.patch
-	patches.suse/12-x86-kvm-vmx-expose-spec_ctrl-bit2-to-the-guest.patch
-	patches.suse/13-x86-speculation-create-spec-ctrl-h-to-avoid-include-hell.patch
-	patches.suse/14-prctl-add-speculation-control-prctls.patch
-	patches.suse/15-x86-process-allow-runtime-control-of-speculative-store-bypass.patch
-	patches.suse/16-x86-speculation-add-prctl-for-speculative-store-bypass-mitigation.patch
-	patches.suse/17-nospec-allow-getting-setting-on-non-current-task.patch
-	patches.suse/18-proc-provide-details-on-speculation-flaw-mitigations.patch
-	patches.suse/19-seccomp-enable-speculation-flaw-mitigations.patch
-	patches.suse/20-x86-bugs-make-boot-modes-_ro_after_init.patch
-	patches.suse/21-prctl-add-force-disable-speculation.patch
-	patches.suse/22-seccomp-use-pr_spec_force_disable.patch
-	patches.suse/23-seccomp-add-filter-flag-to-opt-out-of-ssb-mitigation.patch
-	patches.suse/24-seccomp-move-speculation-migitation-control-to-arch-code.patch
-	patches.suse/25-x86-speculation-make-seccomp-the-default-mode-for-speculative-store-bypass.patch
-	patches.suse/26-x86-bugs-rename-rds-to-ssbd.patch
-	patches.suse/27-proc-use-underscores-for-ssbd-in-status.patch
-	patches.arch/KVM--SVM--Move-spec-control-call-after-restore-of-GS.patch
-	patches.suse/bpf-prevent-memory-disambiguation-attack.patch
 patches.kabi/bpf-prevent-memory-disambiguation-attack.patch
-	patches.arch/28-x86-bugs-fix-_ssb_select_mitigation-return-type.patch
-	patches.arch/29-x86-bugs-make-cpu_show_common-static.patch
-	patches.arch/30-x86-bugs-fix-the-parameters-alignment-and-missing-void.patch
-	patches.arch/31-x86-speculation-use-synthetic-bits-for-ibrs-ibpb-stibp.patch
-	patches.arch/32-x86-cpufeatures-disentangle-msr_spec_ctrl-enumeration-from-ibrs.patch
-	patches.arch/33-x86-cpufeatures-disentangle-ssbd-enumeration.patch
-	patches.arch/34-x86-cpufeatures-add-feature_zen.patch
-	patches.arch/35-x86-speculation-handle-ht-correctly-on-amd.patch
-	patches.arch/36-x86-bugs-kvm-extend-speculation-control-for-virt_spec_ctrl.patch
-	patches.arch/37-x86-speculation-add-virtualized-speculative-store-bypass-disable-support.patch
-	patches.arch/38-x86-speculation-rework-speculative_store_bypass_update.patch
-	patches.arch/39-x86-bugs-unify-x86_spec_ctrl_-set_guest-restore_host.patch
-	patches.arch/40-x86-bugs-expose-x86_spec_ctrl_base-directly.patch
-	patches.arch/41-x86-bugs-remove-x86_spec_ctrl_set.patch
-	patches.arch/42-x86-bugs-rework-spec_ctrl-base-and-mask-logic.patch
-	patches.arch/43-x86-speculation-kvm-implement-support-for-virt_spec_ctrl-ls_cfg.patch
-	patches.arch/44-kvm-svm-implement-virt_spec_ctrl-support-for-ssbd.patch
-	patches.arch/45-x86-bugs-rename-ssbd_no-to-ssb_no.patch
-	patches.arch/46-kvm-x86-ia32_arch_capabilities-is-always-supported.patch
-	patches.arch/47-kvm-vmx-expose-ssbd-properly-to-guests.patch
 patches.kabi/fix-kvm-kabi.patch
-	patches.arch/x86-pti-xenpv-dont-report-as-vulnerable.patch
 patches.kabi/mm-swap-fix-race-between-swap-count-continuation-operation-kabi.patch
@@ -18970,95 +19041,11 @@
 patches.kabi/kvm-x86-kABI-fix-for-vm_alloc-vm_free-changes.patch
 patches.kabi/kABI-Hide-get_msr_feature-in-kvm_x86_ops.patch
-	######
-	# L1TF
-	######
-	# bare metal
-	patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
-	patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
-	patches.arch/x86-l1tf-03-protect-swap-entries.patch
-	patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
-	patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
-	patches.arch/x86-l1tf-06-add-sysfs-report.patch
-	patches.arch/x86-l1tf-07-limit-swap-file-size.patch
-	patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
-	# smt=off
-	patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch
-	patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch
-	patches.arch/04-x86-topology-provide-topology_smt_supported.patch
-	patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch
-	patches.arch/06-cpu-hotplug-split-do_cpu_down.patch
-	patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch
-	patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch
-	patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
-	patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch
-	patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch
-	patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
-	patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
-	patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
-	patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
-	patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
-	patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
-	# KVM
-	patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
-	patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
-	patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
-	patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
-	patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
-	patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
-	patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
-	patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
-	patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
-	patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
-	patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
-	# L1TF runtime control
-	patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
-	patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
-	patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
-	patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
-	patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
-	patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
-	patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
-	patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
-	patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
-	patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
-	patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
-	patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
-	# fixes
-	patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
-	patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch
-	patches.arch/x86-speculation-Protect-against-userspace-userspace-.patch
-
-	patches.arch/x86-speculation-l1tf-extend-64bit-swap-file-size-limit
-	patches.arch/x86-speculation-l1tf-fix-overflow-in-l1tf_pfn_limit-on-32bit.patch
-	patches.arch/0001-x86-speculation-l1tf-Fix-off-by-one-error-when-warni.patch
-	patches.arch/0001-x86-speculation-l1tf-Suggest-what-to-do-on-systems-w.patch
-	patches.arch/0001-x86-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
 patches.kabi/0001-x86-kabi-speculation-l1tf-Increase-l1tf-memory-limit-for-.patch
-	patches.arch/0001-x86-init-fix-build-with-CONFIG_SWAP-n.patch
-	patches.arch/x86-speculation-use-arch_capabilities-to-skip-l1d-flush-on-vmentry
-	patches.arch/kvm-vmx-fixes-for-vmentry_l1d_flush-module-parameter
 patches.kabi/KVM-VMX-Work-around-kABI-breakage-in-enum-vmx_l1d_fl.patch
-	# bsc#1110006
-	patches.arch/x86-speculation-l1tf-protect-pae-swap-entries-against-l1tf
-	patches.arch/x86-speculation-l1tf-fix-up-pte-pfn-conversion-for-pae
-	patches.arch/x86-kvm-vmx-don-t-set-l1tf_flush_l1d-to-true-from-vmx_l1d_flush
-	patches.arch/x86-kvm-vmx-replace-vmx_l1d_flush_always-with-vmx_l1d_flush_cond
-	patches.arch/x86-kvm-vmx-move-the-l1tf_flush_l1d-test-to-vmx_l1d_flush
-	patches.arch/x86-kvm-vmx-don-t-set-l1tf_flush_l1d-from-vmx_handle_external_intr
-	patches.arch/x86-speculation-simplify-sysfs-report-of-vmx-l1tf-vulnerability
-	patches.arch/cpu-hotplug-fix-smt-supported-evaluation
-	patches.arch/x86-speculation-l1tf-invert-all-not-present-mappings
-	patches.arch/x86-speculation-l1tf-make-pmd-pud_mknotpresent-invert
-	patches.arch/x86-mm-pat-make-set_memory_np-l1tf-safe
-	patches.arch/x86-mm-kmmio-make-the-tracer-robust-against-l1tf
-	patches.arch/x86-microcode-allow-late-microcode-loading-with-smt-disabled
-	patches.arch/x86-speculation-l1tf-exempt-zeroed-ptes-from-inversion
-	patches.arch/x86-kvm-vmx-remove-duplicate-l1d-flush-definitions
-
 ########################################################
 # You'd better have a good reason for adding a patch
 # below here.