From cc902761f065dfadae8d8592af932e9b51756d60 Mon Sep 17 00:00:00 2001
From: Denis Kirjanov
Date: Jul 21 2022 14:31:28 +0000
Subject: Fix 1201644, 1201664, 1201672, 1201673, 1201676


All are reports of the same problem - the IBRS_* regs push/popping was
wrong, and fixing it needs 1b331eeea7b8 ("x86/entry: Remove skip_r11rcx")
too.

Signed-off-by: Borislav Petkov

---

diff --git a/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch b/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch
deleted file mode 100644
index fe50b8d..0000000
--- a/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch
+++ /dev/null
@@ -1,334 +0,0 @@
-From: Peter Zijlstra
-Date: Tue, 14 Jun 2022 23:15:53 +0200
-Subject: x86/entry: Add kernel IBRS implementation
-Git-commit: 2dbb887e875b1de3ca8f40ddf26bcfe55798c609
-Patch-mainline: Queued in tip for 5.19
-Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
-References: bsc#1199657 CVE-2022-29900 CVE-2022-29901
-
-Implement Kernel IBRS - currently the only known option to mitigate RSB
-underflow speculation issues on Skylake hardware.
-
-Note: since IBRS_ENTER requires fuller context established than
-UNTRAIN_RET, it must be placed after it. However, since UNTRAIN_RET
-itself implies a RET, it must come after IBRS_ENTER. This means
-IBRS_ENTER needs to also move UNTRAIN_RET.
-
-Note 2: KERNEL_IBRS is sub-optimal for XenPV.
-
-Signed-off-by: Peter Zijlstra (Intel)
-Signed-off-by: Borislav Petkov
-Reviewed-by: Josh Poimboeuf
-Signed-off-by: Borislav Petkov
----
- arch/x86/entry/calling.h           | 78 +++++++++++++++++++++++++++++++++++++
- arch/x86/entry/entry_64.S          | 45 ++++++++++++++++++---
- arch/x86/entry/entry_64_compat.S   |  8 ++-
- arch/x86/include/asm/cpufeatures.h |  2
- 4 files changed, 125 insertions(+), 8 deletions(-)
-
---- a/arch/x86/entry/calling.h
-+++ b/arch/x86/entry/calling.h
-@@ -7,6 +7,8 @@
- #include
- #include
- #include
-+#include
-+#include
-
- /*
-
-@@ -310,6 +312,82 @@ For 32-bit we have the following convent
- #endif
-
- /*
-+ * IBRS kernel mitigation for Spectre_v2.
-+ *
-+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
-+ * the regs it uses (AX, CX, DX). Must be called before the first RET
-+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
-+ *
-+ * The optional argument is used to save/restore the current value,
-+ * which is used on the paranoid paths.
-+ *
-+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
-+ */
-+.macro IBRS_ENTER save_reg
-+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
-+
-+ push %rax
-+ push %rcx
-+ push %rdx
-+
-+ movl $MSR_IA32_SPEC_CTRL, %ecx
-+
-+.ifnb \save_reg
-+ rdmsr
-+ shl $32, %rdx
-+ or %rdx, %rax
-+ mov %rax, \save_reg
-+ test $SPEC_CTRL_IBRS, %eax
-+ jz .Ldo_wrmsr_\@
-+ lfence
-+ jmp .Lend_\@
-+.Ldo_wrmsr_\@:
-+.endif
-+
-+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
-+ movl %edx, %eax
-+ shr $32, %rdx
-+ wrmsr
-+
-+ pop %rdx
-+ pop %rcx
-+ pop %rax
-+
-+.Lend_\@:
-+.endm
-+
-+/*
-+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
-+ * regs. Must be called after the last RET.
-+ */
-+.macro IBRS_EXIT save_reg
-+ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
-+
-+ push %rax
-+ push %rcx
-+ push %rdx
-+
-+ movl $MSR_IA32_SPEC_CTRL, %ecx
-+
-+.ifnb \save_reg
-+ mov \save_reg, %rdx
-+.else
-+ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
-+ andl $(~SPEC_CTRL_IBRS), %edx
-+.endif
-+
-+ movl %edx, %eax
-+ shr $32, %rdx
-+ wrmsr
-+
-+ pop %rdx
-+ pop %rcx
-+ pop %rax
-+
-+.Lend_\@:
-+.endm
-+
-+/*
- * Mitigate Spectre v1 for conditional swapgs code paths.
- *
- * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
---- a/arch/x86/entry/entry_64_compat.S
-+++ b/arch/x86/entry/entry_64_compat.S
-@@ -4,7 +4,6 @@
- *
- * Copyright 2000-2002 Andi Kleen, SuSE Labs.
- */
--#include "calling.h"
- #include
- #include
- #include
-@@ -18,6 +17,8 @@
- #include
- #include
-
-+#include "calling.h"
-+
- .section .entry.text, "ax"
-
- /*
-@@ -107,6 +108,8 @@ ENTRY(entry_SYSENTER_compat)
- xorl %r15d, %r15d /* nospec r15 */
- cld
-
-+
-+ IBRS_ENTER
- UNTRAIN_RET
-
- /*
-@@ -213,7 +216,6 @@ ENTRY(entry_SYSCALL_compat)
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
- SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
-- UNTRAIN_RET
-
- /* Construct struct pt_regs on stack */
- pushq $__USER32_DS /* pt_regs->ss */
-@@ -273,6 +275,8 @@ sysret32_from_system_call:
- */
- STACKLEAK_ERASE
- TRACE_IRQS_ON /* User mode traces as IRQs on. */
-+ IBRS_EXIT
-+
- movq RBX(%rsp), %rbx /* pt_regs->rbx */
- movq RBP(%rsp), %rbp /* pt_regs->rbp */
- movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
---- a/arch/x86/entry/entry_64.S
-+++ b/arch/x86/entry/entry_64.S
-@@ -158,7 +158,6 @@ ENTRY(entry_SYSCALL_64)
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
- GLOBAL(entry_SYSCALL_64_safe_stack)
-- UNTRAIN_RET
-
- /* Construct struct pt_regs on stack */
- pushq $__USER_DS /* pt_regs->ss */
-@@ -176,6 +175,11 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
- /* IRQs are off. */
- movq %rax, %rdi
- movq %rsp, %rsi
-+
-+ /* clobbers %rax, make sure it is after saving the syscall nr */
-+ IBRS_ENTER
-+ UNTRAIN_RET
-+
- call do_syscall_64 /* returns with IRQs disabled */
-
- TRACE_IRQS_IRETQ /* we're about to change IF */
-@@ -252,6 +256,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
- * perf profiles. Nothing jumps here.
- */
- syscall_return_via_sysret:
-+ IBRS_EXIT
- POP_REGS pop_rdi=0
-
- /*
-@@ -712,6 +717,7 @@ ret_from_intr:
- TRACE_IRQS_IRETQ
-
- GLOBAL(swapgs_restore_regs_and_return_to_usermode)
-+ IBRS_EXIT
- #ifdef CONFIG_DEBUG_ENTRY
- /* Assert that pt_regs indicates user mode. */
- testb $3, CS(%rsp)
-@@ -1316,6 +1322,9 @@ idtentry_vc X86_TRAP_VC asm_vmm_communic
- * 1 -> no SWAPGS on exit
- *
- * Y GSBASE value at entry, must be restored in paranoid_exit
-+ *
-+ * R14 - old CR3
-+ * R15 - old SPEC_CTRL
- */
- SYM_CODE_START_LOCAL(paranoid_entry)
- UNWIND_HINT_FUNC
-@@ -1339,7 +1348,6 @@ SYM_CODE_START_LOCAL(paranoid_entry)
- * be retrieved from a kernel internal table.
- */
- SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
-- UNTRAIN_RET
-
- /*
- * Handling GSBASE depends on the availability of FSGSBASE.
-@@ -1360,7 +1368,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
- * mispredicted GSBASE. No extra FENCE required.
- */
- SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
-- RET
-+ jmp .Lparanoid_gsbase_done
-
- .Lparanoid_entry_checkgs:
- /* EBX = 1 -> kernel GSBASE active, no restore required */
-@@ -1379,8 +1387,16 @@ SYM_CODE_START_LOCAL(paranoid_entry)
- xorl %ebx, %ebx
- SWAPGS
- .Lparanoid_kernel_gsbase:
--
- FENCE_SWAPGS_KERNEL_ENTRY
-+.Lparanoid_gsbase_done:
-+
-+ /*
-+ * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
-+ * CR3 above, keep the old value in a callee saved register.
-+ */
-+ IBRS_ENTER save_reg=%r15
-+ UNTRAIN_RET
-+
- RET
- SYM_CODE_END(paranoid_entry)
-
-@@ -1402,12 +1418,22 @@ SYM_CODE_END(paranoid_entry)
- * 1 -> no SWAPGS on exit
- *
- * Y User space GSBASE, must be restored unconditionally
-+ *
-+ * R14 - old CR3
-+ * R15 - old SPEC_CTRL
- */
- SYM_CODE_START_LOCAL(paranoid_exit)
- UNWIND_HINT_REGS
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF_DEBUG
- TRACE_IRQS_IRETQ
-+
-+ /*
-+ * Must restore IBRS state before both CR3 and %GS since we need access
-+ * to the per-CPU x86_spec_ctrl_shadow variable.
-+ */
-+ IBRS_EXIT save_reg=%r15
-+
- /*
- * The order of operations is important. RESTORE_CR3 requires
- * kernel GSBASE.
-@@ -1455,9 +1481,11 @@ SYM_CODE_START_LOCAL(error_entry)
- FENCE_SWAPGS_USER_ENTRY
- /* We have user CR3. Change to kernel CR3. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
-+ IBRS_ENTER
- UNTRAIN_RET
-
- .Lerror_entry_from_usermode_after_swapgs:
-+
- /* Put us onto the real thread stack. */
- popq %r12 /* save return addr in %12 */
- movq %rsp, %rdi /* arg0 = pt_regs pointer */
-@@ -1511,6 +1539,8 @@ SYM_CODE_START_LOCAL(error_entry)
- SWAPGS
- FENCE_SWAPGS_USER_ENTRY
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
-+ IBRS_ENTER
-+ UNTRAIN_RET
-
- /*
- * Pretend that the exception came from user mode: set up pt_regs
-@@ -1606,7 +1636,6 @@ ENTRY(nmi)
- movq %rsp, %rdx
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- UNWIND_HINT_IRET_REGS base=%rdx offset=8
-- UNTRAIN_RET
- pushq 5*8(%rdx) /* pt_regs->ss */
- pushq 4*8(%rdx) /* pt_regs->rsp */
- pushq 3*8(%rdx) /* pt_regs->flags */
-@@ -1617,6 +1646,9 @@ ENTRY(nmi)
- PUSH_AND_CLEAR_REGS rdx=(%rdx)
- ENCODE_FRAME_POINTER
-
-+ IBRS_ENTER
-+ UNTRAIN_RET
-+
- /*
- * At this point we no longer need to worry about stack damage
- * due to nesting -- we're on the normal thread stack and we're
-@@ -1840,6 +1872,9 @@ end_repeat_nmi:
- movq $-1, %rsi
- call do_nmi
-
-+ /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
-+ IBRS_EXIT save_reg=%r15
-+
- /* Always restore stashed CR3 value (see paranoid_entry) */
- RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
-
---- a/arch/x86/include/asm/cpufeatures.h
-+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -203,7 +203,7 @@
- #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
- #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
- #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
---/* FREE! ( 7*32+12) */
--+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
- /* FREE! ( 7*32+13) */
- #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
- #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
diff --git a/patches.suse/x86-entry-add-kernel-ibrs-implementation.patch b/patches.suse/x86-entry-add-kernel-ibrs-implementation.patch
new file mode 100644
index 0000000..41e7186
--- /dev/null
+++ b/patches.suse/x86-entry-add-kernel-ibrs-implementation.patch
@@ -0,0 +1,325 @@
+From: Peter Zijlstra
+Date: Tue, 14 Jun 2022 23:15:53 +0200
+Subject: x86/entry: Add kernel IBRS implementation
+Git-commit: 2dbb887e875b1de3ca8f40ddf26bcfe55798c609
+Patch-mainline: v5.19-rc7
+References: bsc#1199657 CVE-2022-29900 CVE-2022-29901
+
+Implement Kernel IBRS - currently the only known option to mitigate RSB
+underflow speculation issues on Skylake hardware.
+
+Note: since IBRS_ENTER requires fuller context established than
+UNTRAIN_RET, it must be placed after it. However, since UNTRAIN_RET
+itself implies a RET, it must come after IBRS_ENTER. This means
+IBRS_ENTER needs to also move UNTRAIN_RET.
+
+Note 2: KERNEL_IBRS is sub-optimal for XenPV.
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Borislav Petkov
+Reviewed-by: Josh Poimboeuf
+Signed-off-by: Borislav Petkov
+---
+ arch/x86/entry/calling.h           | 58 +++++++++++++++++++++++++++++++++++++
+ arch/x86/entry/entry_64.S          | 43 ++++++++++++++++++++++++---
+ arch/x86/entry/entry_64_compat.S   | 15 ++++++++-
+ arch/x86/include/asm/cpufeatures.h |  2 -
+ 4 files changed, 110 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -7,6 +7,8 @@
+ #include
+ #include
+ #include
++#include
++#include
+
+ /*
+
+@@ -310,6 +312,62 @@ For 32-bit we have the following convent
+ #endif
+
+ /*
++ * IBRS kernel mitigation for Spectre_v2.
++ *
++ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
++ * the regs it uses (AX, CX, DX). Must be called before the first RET
++ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
++ *
++ * The optional argument is used to save/restore the current value,
++ * which is used on the paranoid paths.
++ *
++ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
++ */
++.macro IBRS_ENTER save_reg
++ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
++ movl $MSR_IA32_SPEC_CTRL, %ecx
++
++.ifnb \save_reg
++ rdmsr
++ shl $32, %rdx
++ or %rdx, %rax
++ mov %rax, \save_reg
++ test $SPEC_CTRL_IBRS, %eax
++ jz .Ldo_wrmsr_\@
++ lfence
++ jmp .Lend_\@
++.Ldo_wrmsr_\@:
++.endif
++
++ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
++ movl %edx, %eax
++ shr $32, %rdx
++ wrmsr
++.Lend_\@:
++.endm
++
++/*
++ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
++ * regs. Must be called after the last RET.
++ */
++.macro IBRS_EXIT save_reg
++ ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
++ movl $MSR_IA32_SPEC_CTRL, %ecx
++
++.ifnb \save_reg
++ mov \save_reg, %rdx
++.else
++ movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
++ andl $(~SPEC_CTRL_IBRS), %edx
++.endif
++
++ movl %edx, %eax
++ shr $32, %rdx
++ wrmsr
++.Lend_\@:
++.endm
++
++/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -4,7 +4,6 @@
+ *
+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
+ */
+-#include "calling.h"
+ #include
+ #include
+ #include
+@@ -18,6 +17,8 @@
+ #include
+ #include
+
++#include "calling.h"
++
+ .section .entry.text, "ax"
+
+ /*
+@@ -107,6 +108,8 @@ ENTRY(entry_SYSENTER_compat)
+ xorl %r15d, %r15d /* nospec r15 */
+ cld
+
++
++ IBRS_ENTER
+ UNTRAIN_RET
+
+ /*
+@@ -213,7 +216,6 @@ ENTRY(entry_SYSCALL_compat)
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+ SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
+- UNTRAIN_RET
+
+ /* Construct struct pt_regs on stack */
+ pushq $__USER32_DS /* pt_regs->ss */
+@@ -259,6 +261,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
+ */
+ TRACE_IRQS_OFF
+
++ IBRS_ENTER
++ UNTRAIN_RET
++
+ movq %rsp, %rdi
+ call do_fast_syscall_32
+ /* XEN PV guests always use IRET path */
+@@ -273,6 +278,9 @@ sysret32_from_system_call:
+ */
+ STACKLEAK_ERASE
+ TRACE_IRQS_ON /* User mode traces as IRQs on. */
++
++ IBRS_EXIT
++
+ movq RBX(%rsp), %rbx /* pt_regs->rbx */
+ movq RBP(%rsp), %rbp /* pt_regs->rbp */
+ movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
+@@ -415,6 +423,9 @@ ENTRY(entry_INT80_compat)
+ */
+ TRACE_IRQS_OFF
+
++ IBRS_ENTER
++ UNTRAIN_RET
++
+ movq %rsp, %rdi
+ call do_int80_syscall_32
+ .Lsyscall_32_done:
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -158,7 +158,6 @@ ENTRY(entry_SYSCALL_64)
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+ GLOBAL(entry_SYSCALL_64_safe_stack)
+- UNTRAIN_RET
+
+ /* Construct struct pt_regs on stack */
+ pushq $__USER_DS /* pt_regs->ss */
+@@ -176,6 +175,11 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ /* IRQs are off. */
+ movq %rax, %rdi
+ movq %rsp, %rsi
++
++ /* clobbers %rax, make sure it is after saving the syscall nr */
++ IBRS_ENTER
++ UNTRAIN_RET
++
+ call do_syscall_64 /* returns with IRQs disabled */
+
+ TRACE_IRQS_IRETQ /* we're about to change IF */
+@@ -252,6 +256,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
+ * perf profiles. Nothing jumps here.
+ */
+ syscall_return_via_sysret:
++ IBRS_EXIT
+ POP_REGS pop_rdi=0
+
+ /*
+@@ -712,6 +717,7 @@ ret_from_intr:
+ TRACE_IRQS_IRETQ
+
+ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
++ IBRS_EXIT
+ #ifdef CONFIG_DEBUG_ENTRY
+ /* Assert that pt_regs indicates user mode. */
+ testb $3, CS(%rsp)
+@@ -1316,6 +1322,9 @@ idtentry_vc X86_TRAP_VC asm_vmm_communic
+ * 1 -> no SWAPGS on exit
+ *
+ * Y GSBASE value at entry, must be restored in paranoid_exit
++ *
++ * R14 - old CR3
++ * R15 - old SPEC_CTRL
+ */
+ SYM_CODE_START_LOCAL(paranoid_entry)
+ UNWIND_HINT_FUNC
+@@ -1339,7 +1348,6 @@ SYM_CODE_START_LOCAL(paranoid_entry)
+ * be retrieved from a kernel internal table.
+ */
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+- UNTRAIN_RET
+
+ /*
+ * Handling GSBASE depends on the availability of FSGSBASE.
+@@ -1360,7 +1368,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
+ * mispredicted GSBASE. No extra FENCE required.
+ */
+ SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
+- RET
++ jmp .Lparanoid_gsbase_done
+
+ .Lparanoid_entry_checkgs:
+ /* EBX = 1 -> kernel GSBASE active, no restore required */
+@@ -1379,8 +1387,16 @@ SYM_CODE_START_LOCAL(paranoid_entry)
+ xorl %ebx, %ebx
+ SWAPGS
+ .Lparanoid_kernel_gsbase:
+-
+ FENCE_SWAPGS_KERNEL_ENTRY
++.Lparanoid_gsbase_done:
++
++ /*
++ * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
++ * CR3 above, keep the old value in a callee saved register.
++ */
++ IBRS_ENTER save_reg=%r15
++ UNTRAIN_RET
++
+ RET
+ SYM_CODE_END(paranoid_entry)
+
+@@ -1402,12 +1418,22 @@ SYM_CODE_END(paranoid_entry)
+ * 1 -> no SWAPGS on exit
+ *
+ * Y User space GSBASE, must be restored unconditionally
++ *
++ * R14 - old CR3
++ * R15 - old SPEC_CTRL
+ */
+ SYM_CODE_START_LOCAL(paranoid_exit)
+ UNWIND_HINT_REGS
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ TRACE_IRQS_OFF_DEBUG
+ TRACE_IRQS_IRETQ
++
++ /*
++ * Must restore IBRS state before both CR3 and %GS since we need access
++ * to the per-CPU x86_spec_ctrl_shadow variable.
++ */
++ IBRS_EXIT save_reg=%r15
++
+ /*
+ * The order of operations is important. RESTORE_CR3 requires
+ * kernel GSBASE.
+@@ -1455,9 +1481,11 @@ SYM_CODE_START_LOCAL(error_entry)
+ FENCE_SWAPGS_USER_ENTRY
+ /* We have user CR3. Change to kernel CR3. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
++ IBRS_ENTER
+ UNTRAIN_RET
+
+ .Lerror_entry_from_usermode_after_swapgs:
++
+ /* Put us onto the real thread stack. */
+ popq %r12 /* save return addr in %12 */
+ movq %rsp, %rdi /* arg0 = pt_regs pointer */
+@@ -1606,7 +1634,6 @@ ENTRY(nmi)
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ UNWIND_HINT_IRET_REGS base=%rdx offset=8
+- UNTRAIN_RET
+ pushq 5*8(%rdx) /* pt_regs->ss */
+ pushq 4*8(%rdx) /* pt_regs->rsp */
+ pushq 3*8(%rdx) /* pt_regs->flags */
+@@ -1617,6 +1644,9 @@ ENTRY(nmi)
+ PUSH_AND_CLEAR_REGS rdx=(%rdx)
+ ENCODE_FRAME_POINTER
+
++ IBRS_ENTER
++ UNTRAIN_RET
++
+ /*
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+@@ -1840,6 +1870,9 @@ end_repeat_nmi:
+ movq $-1, %rsi
+ call do_nmi
+
++ /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
++ IBRS_EXIT save_reg=%r15
++
+ /* Always restore stashed CR3 value (see paranoid_entry) */
+ RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -203,7 +203,7 @@
+ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+-/* FREE! ( 7*32+12) */
++#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+ /* FREE! ( 7*32+13) */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
diff --git a/series.conf b/series.conf
index d5b67c0..951580a 100644
--- a/series.conf
+++ b/series.conf
@@ -56093,7 +56093,7 @@
 patches.suse/x86-bugs-Add-AMD-retbleed-boot-parameter.patch
 patches.suse/x86-bugs-Enable-STIBP-for-JMP2RET.patch
 patches.suse/x86-bugs-Keep-a-per-CPU-IA32_SPEC_CTRL-value.patch
-patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch
+patches.suse/x86-entry-add-kernel-ibrs-implementation.patch
 patches.suse/x86-bugs-Optimize-SPEC_CTRL-MSR-writes.patch
 patches.suse/x86-speculation-Add-spectre_v2-ibrs-option-to-support-Kern.patch
 patches.suse/x86-bugs-Split-spectre_v2_select_mitigation-and-spectre_v2.patch
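---

For reference: rdmsr/wrmsr transfer the 64-bit MSR value split across
EDX:EAX, which is why both IBRS_ENTER and IBRS_EXIT do the shl/or and
shr dance around the MSR access, and why the macros clobber AX, CX and
DX. A minimal C sketch of the same pattern, not part of the commit - the
helper names below are made up for illustration; only MSR_IA32_SPEC_CTRL
and SPEC_CTRL_IBRS match the kernel's definitions:

#include <stdint.h>

#define MSR_IA32_SPEC_CTRL	0x00000048
#define SPEC_CTRL_IBRS		(1ULL << 0)	/* Indirect Branch Restricted Speculation */

/* wrmsr takes the MSR number in ECX, the low half of the value in EAX
 * and the high half in EDX. */
static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" : /* no outputs */
		     : "c" (msr), "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

/* rdmsr returns the value split the same way; join EDX:EAX back together,
 * as IBRS_ENTER does with "shl $32, %rdx; or %rdx, %rax". */
static inline uint64_t rdmsr64(uint32_t msr)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
	return ((uint64_t)hi << 32) | lo;
}

/* What IBRS_EXIT computes on the non-paranoid paths: the current
 * SPEC_CTRL value with the IBRS bit cleared before returning to user
 * space. */
static inline void ibrs_exit(uint64_t spec_ctrl_current)
{
	wrmsr64(MSR_IA32_SPEC_CTRL, spec_ctrl_current & ~SPEC_CTRL_IBRS);
}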