diff --git a/blacklist.conf b/blacklist.conf index 2e599ad..2143862 100644 --- a/blacklist.conf +++ b/blacklist.conf @@ -2170,6 +2170,10 @@ e583b5c472bd23d450e06f148dc1f37be74f7666 # Theoretical problem on non-existent H e730558adffb88a52e562db089e969ee9510184a # Intrusive fix for obscure cornercase 54f0bad6686cdc50a3f4c5f7c4252c5018511459 # net: sungem_phy: fix code indentation ea8146c6845799142aa4ee2660741c215e340cdf # cxgb4: Fix the -Wmisleading-indentation warning +af8f3fb7fb077c9df9fed97113a031e792163def # net: stmmac: no such code +1c93fb45761e79b3c00080e71523886cefaf351c # no phy driver +e0fce6f945a26d4e953a147fe7ca11410322c9fe # there is no icmp_ndo_send helper +67c9a7e1e3ac491b5df018803639addc36f154ba # there is no icmp_ndo_send helper af8f3fb7fb077c9df9fed97113a031e792163def # net: stmmac: no such code 349bff48ae0f5f8aa2075d0bdc2091a30bd634f6 # relevant only if CONFIG_ACPI=n c8994b30d71d64d5dcc9bc0edbfdf367171aa96f # SPARC is not supported in SLE12 diff --git a/config/x86_64/default b/config/x86_64/default index fc7cbc4..8d8d99f 100644 --- a/config/x86_64/default +++ b/config/x86_64/default @@ -465,6 +465,9 @@ CONFIG_X86_X2APIC=y CONFIG_X86_MPPARSE=y # CONFIG_GOLDFISH is not set CONFIG_RETPOLINE=y +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_SLS=y CONFIG_X86_CPU_RESCTRL=y CONFIG_X86_EXTENDED_PLATFORM=y # CONFIG_X86_NUMACHIP is not set diff --git a/patches.suse/CVE-Mitigation-for-CVE-2022-29900-and-CVE-2022-29901.patch b/patches.suse/CVE-Mitigation-for-CVE-2022-29900-and-CVE-2022-29901.patch new file mode 100644 index 0000000..348abf6 --- /dev/null +++ b/patches.suse/CVE-Mitigation-for-CVE-2022-29900-and-CVE-2022-29901.patch @@ -0,0 +1,164 @@ +From: Joerg Roedel +Date: Fri, 8 Jul 2022 16:15:16 +0200 +Subject: [PATCH] CVE Mitigation for CVE-2022-29900 and CVE-2022-29901 +Patch-mainline: Never, downstream CVE mitigation +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Necessary changes to implement changes from upstream patches: + + KVM: VMX: Prevent RSB underflow before vmenter + x86/speculation: Fill RSB on vmexit for IBRS + KVM: VMX: Fix IBRS handling after vmexit + KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS + KVM: VMX: Convert launched argument to flags + KVM: VMX: Flatten __vmx_vcpu_run() + +Into the SLE12-SP5 code base. 
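+
+As orientation for reviewers, a rough sketch of the ordering the series
+establishes on the vmexit path (simplified, with a hypothetical helper
+name; this is not the backported code itself): the RSB is refilled and the
+host SPEC_CTRL value is written back before the first host RET can execute.
+
+  #include <asm/msr.h>
+  #include <asm/nospec-branch.h>
+
+  /* Illustration only: simplified post-vmexit cleanup ordering. */
+  static void vmexit_speculation_cleanup(u64 host_spec_ctrl)
+  {
+          /* Stuff the RSB so guest-poisoned return predictions cannot
+           * be consumed by the first host RET. */
+          vmexit_fill_RSB();
+
+          /* Write the host SPEC_CTRL value back with a plain WRMSR rather
+           * than hiding it behind a mispredictable compare. */
+          native_wrmsrl(MSR_IA32_SPEC_CTRL, host_spec_ctrl);
+  }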
+ +Signed-off-by: Joerg Roedel +--- + arch/x86/include/asm/spec-ctrl.h | 20 ++++++++++++++++++++ + arch/x86/kernel/cpu/bugs.c | 9 +-------- + arch/x86/kvm/vmx.c | 22 +++++++++++----------- + tools/lib/subcmd/subcmd-util.h | 11 ++--------- + 4 files changed, 34 insertions(+), 28 deletions(-) + +diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h +index 5393babc0598..87bd2dd863e9 100644 +--- a/arch/x86/include/asm/spec-ctrl.h ++++ b/arch/x86/include/asm/spec-ctrl.h +@@ -27,6 +27,17 @@ static inline + void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) + { + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true); ++ ++ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { ++ u64 guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); ++ if (hostval != guestval) { ++ u32 low = (u32)guest_spec_ctrl, high = (u32)(guest_spec_ctrl >> 32); ++ ++ asm volatile("wrmsr\n" : ++ : "c" (MSR_IA32_SPEC_CTRL), "a"(low), "d"(high) ++ : "memory"); ++ } ++ } + } + + /** +@@ -40,6 +51,15 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) + static inline + void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl) + { ++ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { ++ u64 hostval = spec_ctrl_current(); ++ u32 low = (u32)hostval, high = (u32)(hostval >> 32); ++ ++ asm volatile("wrmsr\n" : ++ : "c" (MSR_IA32_SPEC_CTRL), "a"(low), "d"(high) ++ : "memory"); ++ } ++ + x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false); + } + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 8997b466d803..323253e44c19 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -187,16 +187,9 @@ void __init check_bugs(void) + void + x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) + { +- u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); ++ u64 guestval = guest_spec_ctrl, hostval; + struct thread_info *ti = current_thread_info(); + +- if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { +- if (hostval != guestval) { +- msrval = setguest ? guestval : hostval; +- wrmsrl(MSR_IA32_SPEC_CTRL, msrval); +- } +- } +- + /* + * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update + * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported. +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 2352a6534a35..f03f15f72797 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -9910,14 +9910,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + + vmx_arm_hv_timer(vcpu); + +- /* +- * If this vCPU has touched SPEC_CTRL, restore the guest's value if +- * it's non-zero. Since vmentry is serialising on affected CPUs, there +- * is no need to worry about the conditional branch over the wrmsr +- * being speculatively taken. +- */ +- x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); +- + vmx->__launched = vmx->loaded_vmcs->launched; + + /* L1D Flush includes CPU buffer clear to mitigate MDS */ +@@ -9931,6 +9923,14 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + + vmx_disable_fb_clear(vmx); + ++ /* ++ * If this vCPU has touched SPEC_CTRL, restore the guest's value if ++ * it's non-zero. Since vmentry is serialising on affected CPUs, there ++ * is no need to worry about the conditional branch over the wrmsr ++ * being speculatively taken. 
++ */ ++ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); ++ + asm( + /* Store host registers */ + "push %%" _ASM_DX "; push %%" _ASM_BP ";" +@@ -10060,7 +10060,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + #endif + ); + +- vmx_enable_fb_clear(vmx); ++ /* Eliminate branch target predictions from guest mode */ ++ vmexit_fill_RSB(); + + /* + * We do not use IBRS in the kernel. If this vCPU has used the +@@ -10082,8 +10083,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) + + x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); + +- /* Eliminate branch target predictions from guest mode */ +- vmexit_fill_RSB(); ++ vmx_enable_fb_clear(vmx); + + /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ + if (debugctlmsr) +diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h +index 8fa5f036eff0..8a34305448d0 100644 +--- a/tools/lib/subcmd/subcmd-util.h ++++ b/tools/lib/subcmd/subcmd-util.h +@@ -49,15 +49,8 @@ static NORETURN inline void die(const char *err, ...) + static inline void *xrealloc(void *ptr, size_t size) + { + void *ret = realloc(ptr, size); +- if (!ret && !size) +- ret = realloc(ptr, 1); +- if (!ret) { +- ret = realloc(ptr, size); +- if (!ret && !size) +- ret = realloc(ptr, 1); +- if (!ret) +- die("Out of memory, realloc failed"); +- } ++ if (!ret) ++ die("Out of memory, realloc failed"); + return ret; + } + +-- +2.36.1 + diff --git a/patches.suse/bnxt_en-Remove-the-setting-of-dev_port.patch b/patches.suse/bnxt_en-Remove-the-setting-of-dev_port.patch new file mode 100644 index 0000000..88e6b71 --- /dev/null +++ b/patches.suse/bnxt_en-Remove-the-setting-of-dev_port.patch @@ -0,0 +1,35 @@ +From 5e89443deb93e9343a6bfd0350b2b48622f4f2e6 Mon Sep 17 00:00:00 2001 +From: Michael Chan +Date: Mon, 27 Jan 2020 04:56:15 -0500 +Subject: [PATCH] bnxt_en: Remove the setting of dev_port. +Git-commit: 1d86859fdf31a0d50cc82b5d0d6bfb5fe98f6c00 +References: git-fixes +Patch-mainline: v5.6-rc1 + +The dev_port is meant to distinguish the network ports belonging to +the same PCI function. Our devices only have one network port +associated with each PCI function and so we should not set it for +correctness. + +Signed-off-by: Michael Chan +Signed-off-by: David S. 
Miller +Signed-off-by: Denis Kirjanov +--- + drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 0f6ba7b412a0..b1dbacc8d6d5 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -6581,7 +6581,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) + + pf->fw_fid = le16_to_cpu(resp->fid); + pf->port_id = le16_to_cpu(resp->port_id); +- bp->dev->dev_port = pf->port_id; + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); + pf->first_vf_id = le16_to_cpu(resp->first_vf_id); + pf->max_vfs = le16_to_cpu(resp->max_vfs); +-- +2.16.4 + diff --git a/patches.suse/bonding-fix-bond_neigh_init.patch b/patches.suse/bonding-fix-bond_neigh_init.patch new file mode 100644 index 0000000..41ac810 --- /dev/null +++ b/patches.suse/bonding-fix-bond_neigh_init.patch @@ -0,0 +1,180 @@ +From 858367675a2cfdfebdf644c288f5f12d27b894e0 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Sat, 7 Dec 2019 14:10:34 -0800 +Subject: [PATCH] bonding: fix bond_neigh_init() +Git-commit: 9e99bfefdbce2e23ef37487a3bcb4adf90a791d1 +References: git-fixes +Patch-mainline: v5.5-rc3 + +1) syzbot reported an uninit-value in bond_neigh_setup() [1] + + bond_neigh_setup() uses a temporary on-stack 'struct neigh_parms parms', + but only clears parms.neigh_setup field. + + A stacked bonding device would then enter bond_neigh_setup() + and read garbage from parms->dev. + + If we get really unlucky and garbage is matching @dev, then we + could recurse and eventually crash. + + Let's make sure the whole structure is cleared to avoid surprises. + +2) bond_neigh_setup() can be called while another cpu manipulates + the master device, removing or adding a slave. + We need at least rcu protection to prevent use-after-free. + +Note: Prior code does not support a stack of bonding devices, + this patch does not attempt to fix this, and leave a comment instead. 
+ +[1] + +BUG: KMSAN: uninit-value in bond_neigh_setup+0xa4/0x110 drivers/net/bonding/bond_main.c:3655 +CPU: 0 PID: 11256 Comm: syz-executor.0 Not tainted 5.4.0-rc8-syzkaller #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 +Call Trace: + + __dump_stack lib/dump_stack.c:77 [inline] + dump_stack+0x1c9/0x220 lib/dump_stack.c:118 + kmsan_report+0x128/0x220 mm/kmsan/kmsan_report.c:108 + __msan_warning+0x57/0xa0 mm/kmsan/kmsan_instr.c:245 + bond_neigh_setup+0xa4/0x110 drivers/net/bonding/bond_main.c:3655 + bond_neigh_init+0x216/0x4b0 drivers/net/bonding/bond_main.c:3626 + ___neigh_create+0x169e/0x2c40 net/core/neighbour.c:613 + __neigh_create+0xbd/0xd0 net/core/neighbour.c:674 + ip6_finish_output2+0x149a/0x2670 net/ipv6/ip6_output.c:113 + __ip6_finish_output+0x83d/0x8f0 net/ipv6/ip6_output.c:142 + ip6_finish_output+0x2db/0x420 net/ipv6/ip6_output.c:152 + NF_HOOK_COND include/linux/netfilter.h:294 [inline] + ip6_output+0x5d3/0x720 net/ipv6/ip6_output.c:175 + dst_output include/net/dst.h:436 [inline] + NF_HOOK include/linux/netfilter.h:305 [inline] + mld_sendpack+0xebd/0x13d0 net/ipv6/mcast.c:1682 + mld_send_cr net/ipv6/mcast.c:1978 [inline] + mld_ifc_timer_expire+0x116b/0x1680 net/ipv6/mcast.c:2477 + call_timer_fn+0x232/0x530 kernel/time/timer.c:1404 + expire_timers kernel/time/timer.c:1449 [inline] + __run_timers+0xd60/0x1270 kernel/time/timer.c:1773 + run_timer_softirq+0x2d/0x50 kernel/time/timer.c:1786 + __do_softirq+0x4a1/0x83a kernel/softirq.c:293 + invoke_softirq kernel/softirq.c:375 [inline] + irq_exit+0x230/0x280 kernel/softirq.c:416 + exiting_irq+0xe/0x10 arch/x86/include/asm/apic.h:536 + smp_apic_timer_interrupt+0x48/0x70 arch/x86/kernel/apic/apic.c:1138 + apic_timer_interrupt+0x2e/0x40 arch/x86/entry/entry_64.S:835 + +RIP: 0010:kmsan_free_page+0x18d/0x1c0 mm/kmsan/kmsan_shadow.c:439 +Code: 4c 89 ff 44 89 f6 e8 82 0d ee ff 65 ff 0d 9f 26 3b 60 65 8b 05 98 26 3b 60 85 c0 75 24 e8 5b f6 35 ff 4c 89 6d d0 ff 75 d0 9d <48> 83 c4 10 5b 41 5c 41 5d 41 5e 41 5f 5d c3 0f 0b 0f 0b 0f 0b 0f +RSP: 0018:ffffb328034af818 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff13 +RAX: 0000000000000000 RBX: ffffe2d7471f8360 RCX: 0000000000000000 +RDX: ffffffffadea7000 RSI: 0000000000000004 RDI: ffff93496fcda104 +RBP: ffffb328034af850 R08: ffff934a47e86d00 R09: ffff93496fc41900 +R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000001 +R13: 0000000000000246 R14: 0000000000000000 R15: ffffe2d7472225c0 + free_pages_prepare mm/page_alloc.c:1138 [inline] + free_pcp_prepare mm/page_alloc.c:1230 [inline] + free_unref_page_prepare+0x1d9/0x770 mm/page_alloc.c:3025 + free_unref_page mm/page_alloc.c:3074 [inline] + free_the_page mm/page_alloc.c:4832 [inline] + __free_pages+0x154/0x230 mm/page_alloc.c:4840 + __vunmap+0xdac/0xf20 mm/vmalloc.c:2277 + __vfree mm/vmalloc.c:2325 [inline] + vfree+0x7c/0x170 mm/vmalloc.c:2355 + copy_entries_to_user net/ipv6/netfilter/ip6_tables.c:883 [inline] + get_entries net/ipv6/netfilter/ip6_tables.c:1041 [inline] + do_ip6t_get_ctl+0xfa4/0x1030 net/ipv6/netfilter/ip6_tables.c:1709 + nf_sockopt net/netfilter/nf_sockopt.c:104 [inline] + nf_getsockopt+0x481/0x4e0 net/netfilter/nf_sockopt.c:122 + ipv6_getsockopt+0x264/0x510 net/ipv6/ipv6_sockglue.c:1400 + tcp_getsockopt+0x1c6/0x1f0 net/ipv4/tcp.c:3688 + sock_common_getsockopt+0x13f/0x180 net/core/sock.c:3110 + __sys_getsockopt+0x533/0x7b0 net/socket.c:2129 + __do_sys_getsockopt net/socket.c:2144 [inline] + __se_sys_getsockopt+0xe1/0x100 net/socket.c:2141 + __x64_sys_getsockopt+0x62/0x80 
net/socket.c:2141 + do_syscall_64+0xb6/0x160 arch/x86/entry/common.c:291 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 +RIP: 0033:0x45d20a +Code: b8 34 01 00 00 0f 05 48 3d 01 f0 ff ff 0f 83 8d 8b fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 49 89 ca b8 37 00 00 00 0f 05 <48> 3d 01 f0 ff ff 0f 83 6a 8b fb ff c3 66 0f 1f 84 00 00 00 00 00 +RSP: 002b:0000000000a6f618 EFLAGS: 00000212 ORIG_RAX: 0000000000000037 +RAX: ffffffffffffffda RBX: 0000000000a6f640 RCX: 000000000045d20a +RDX: 0000000000000041 RSI: 0000000000000029 RDI: 0000000000000003 +RBP: 0000000000717cc0 R08: 0000000000a6f63c R09: 0000000000004000 +R10: 0000000000a6f740 R11: 0000000000000212 R12: 0000000000000003 +R13: 0000000000000000 R14: 0000000000000029 R15: 0000000000715b00 + +Local variable description: ----parms@bond_neigh_init +Variable was created at: + bond_neigh_init+0x8c/0x4b0 drivers/net/bonding/bond_main.c:3617 + bond_neigh_init+0x8c/0x4b0 drivers/net/bonding/bond_main.c:3617 + +Fixes: 9918d5bf329d ("bonding: modify only neigh_parms owned by us") +Fixes: 234bcf8a499e ("net/bonding: correctly proxy slave neigh param setup ndo function") +Signed-off-by: Eric Dumazet +Reported-by: syzbot +Cc: Jay Vosburgh +Cc: Veaceslav Falico +Cc: Andy Gospodarek +Signed-off-by: David S. Miller +Signed-off-by: Denis Kirjanov +--- + drivers/net/bonding/bond_main.c | 30 +++++++++++++++++++++--------- + 1 file changed, 21 insertions(+), 9 deletions(-) + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index a40d4d035777..023f15b4b205 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -3654,20 +3654,26 @@ static int bond_neigh_init(struct neighbour *n) + const struct net_device_ops *slave_ops; + struct neigh_parms parms; + struct slave *slave; +- int ret; ++ int ret = 0; + +- slave = bond_first_slave(bond); ++ rcu_read_lock(); ++ slave = bond_first_slave_rcu(bond); + if (!slave) +- return 0; ++ goto out; + slave_ops = slave->dev->netdev_ops; + if (!slave_ops->ndo_neigh_setup) +- return 0; ++ goto out; + +- parms.neigh_setup = NULL; +- parms.neigh_cleanup = NULL; ++ /* TODO: find another way [1] to implement this. ++ * Passing a zeroed structure is fragile, ++ * but at least we do not pass garbage. ++ * ++ * [1] One way would be that ndo_neigh_setup() never touch ++ * struct neigh_parms, but propagate the new neigh_setup() ++ * back to ___neigh_create() / neigh_parms_alloc() ++ */ ++ memset(&parms, 0, sizeof(parms)); + ret = slave_ops->ndo_neigh_setup(slave->dev, &parms); +- if (ret) +- return ret; + + /* Assign slave's neigh_cleanup to neighbour in case cleanup is called + * after the last slave has been detached. 
Assumes that all slaves +@@ -3678,8 +3684,14 @@ static int bond_neigh_init(struct neighbour *n) + + if (!parms.neigh_setup) + return 0; ++ if (ret) ++ goto out; + +- return parms.neigh_setup(n); ++ if (parms.neigh_setup) ++ ret = parms.neigh_setup(n); ++out: ++ rcu_read_unlock(); ++ return ret; + } + + /* The bonding ndo_neigh_setup is called at init time beofre any +-- +2.16.4 + diff --git a/patches.suse/intel_idle-Disable-IBRS-during-long-idle.patch b/patches.suse/intel_idle-Disable-IBRS-during-long-idle.patch new file mode 100644 index 0000000..13433c2 --- /dev/null +++ b/patches.suse/intel_idle-Disable-IBRS-during-long-idle.patch @@ -0,0 +1,199 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:58 +0200 +Subject: intel_idle: Disable IBRS during long idle +Git-commit: bf5835bcdb9635c97f85120dba9bfa21e111130f +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Having IBRS enabled while the SMT sibling is idle unnecessarily slows +down the running sibling. OTOH, disabling IBRS around idle takes two +MSR writes, which will increase the idle latency. + +Therefore, only disable IBRS around deeper idle states. Shallow idle +states are bounded by the tick in duration, since NOHZ is not allowed +for them by virtue of their short target residency. + +Only do this for mwait-driven idle, since that keeps interrupts disabled +across idle, which makes disabling IBRS vs IRQ-entry a non-issue. + +Note: C6 is a random threshold, most importantly C1 probably shouldn't +disable IBRS, benchmarking needed. + +Suggested-by: Tim Chen +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/nospec-branch.h | 1 + arch/x86/kernel/cpu/bugs.c | 6 +++ + drivers/idle/intel_idle.c | 64 +++++++++++++++++++++++++++++++---- + 3 files changed, 65 insertions(+), 6 deletions(-) + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -345,6 +345,7 @@ static inline void unrestrict_branch_spe + /* The Intel SPEC CTRL MSR base value cache */ + extern u64 x86_spec_ctrl_base; + extern void write_spec_ctrl_current(u64 val, bool force); ++extern u64 spec_ctrl_current(void); + + /* + * With retpoline, we must use IBRS to restrict branch prediction +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -76,6 +76,12 @@ void write_spec_ctrl_current(u64 val, bo + wrmsrl(MSR_IA32_SPEC_CTRL, val); + } + ++u64 spec_ctrl_current(void) ++{ ++ return this_cpu_read(x86_spec_ctrl_current); ++} ++EXPORT_SYMBOL_GPL(spec_ctrl_current); ++ + /* + * The vendor and possibly platform specific bits which can be modified in + * x86_spec_ctrl_base. +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -56,11 +56,13 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + #include ++#include + #include + #include + +@@ -101,6 +103,12 @@ static void intel_idle_freeze(struct cpu + static struct cpuidle_state *cpuidle_state_table; + + /* ++ * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE ++ * above. ++ */ ++#define CPUIDLE_FLAG_IBRS BIT(16) ++ ++/* + * Set this flag for states where the HW flushes the TLB for us + * and so we don't need cross-calls to keep it consistent. 
+ * If this flag is set, SW flushes the TLB, so even if the +@@ -616,7 +624,7 @@ static struct cpuidle_state skl_cstates[ + { + .name = "C6", + .desc = "MWAIT 0x20", +- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 85, + .target_residency = 200, + .enter = &intel_idle, +@@ -624,7 +632,7 @@ static struct cpuidle_state skl_cstates[ + { + .name = "C7s", + .desc = "MWAIT 0x33", +- .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 124, + .target_residency = 800, + .enter = &intel_idle, +@@ -632,7 +640,7 @@ static struct cpuidle_state skl_cstates[ + { + .name = "C8", + .desc = "MWAIT 0x40", +- .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 200, + .target_residency = 800, + .enter = &intel_idle, +@@ -640,7 +648,7 @@ static struct cpuidle_state skl_cstates[ + { + .name = "C9", + .desc = "MWAIT 0x50", +- .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 480, + .target_residency = 5000, + .enter = &intel_idle, +@@ -648,7 +656,7 @@ static struct cpuidle_state skl_cstates[ + { + .name = "C10", + .desc = "MWAIT 0x60", +- .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 890, + .target_residency = 5000, + .enter = &intel_idle, +@@ -677,7 +685,7 @@ static struct cpuidle_state skx_cstates[ + { + .name = "C6", + .desc = "MWAIT 0x20", +- .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, ++ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS, + .exit_latency = 133, + .target_residency = 600, + .enter = &intel_idle, +@@ -934,6 +942,46 @@ static __cpuidle int intel_idle(struct c + return index; + } + ++/* ++ * MWAIT takes an 8-bit "hint" in EAX "suggesting" ++ * the C-state (top nibble) and sub-state (bottom nibble) ++ * 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc. ++ * ++ * We store the hint at the top of our "flags" for each state. 
++ */ ++#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) ++#define MWAIT2flg(eax) ((eax & 0xFF) << 24) ++ ++static __always_inline int __intel_idle(struct cpuidle_device *dev, ++ struct cpuidle_driver *drv, int index) ++{ ++ struct cpuidle_state *state = &drv->states[index]; ++ unsigned long eax = flg2MWAIT(state->flags); ++ unsigned long ecx = 1; /* break on interrupt flag */ ++ ++ mwait_idle_with_hints(eax, ecx); ++ ++ return index; ++} ++ ++static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, ++ struct cpuidle_driver *drv, int index) ++{ ++ bool smt_active = sched_smt_active(); ++ u64 spec_ctrl = spec_ctrl_current(); ++ int ret; ++ ++ if (smt_active) ++ wrmsrl(MSR_IA32_SPEC_CTRL, 0); ++ ++ ret = __intel_idle(dev, drv, index); ++ ++ if (smt_active) ++ wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); ++ ++ return ret; ++} ++ + /** + * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle + * @dev: cpuidle_device +@@ -1366,6 +1414,10 @@ static void __init intel_idle_cpuidle_dr + continue; + } + ++ if (cpu_feature_enabled(X86_FEATURE_USE_IBRS) && ++ cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) { ++ drv->states[drv->state_count].enter = intel_idle_ibrs; ++ } + + if (((mwait_cstate + 1) > 2) && + !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) diff --git a/patches.suse/net-mlx5e-Replace-reciprocal_scale-in-TX-select-queu.patch b/patches.suse/net-mlx5e-Replace-reciprocal_scale-in-TX-select-queu.patch deleted file mode 100644 index baff035..0000000 --- a/patches.suse/net-mlx5e-Replace-reciprocal_scale-in-TX-select-queu.patch +++ /dev/null @@ -1,110 +0,0 @@ -From d150c5dc211190dd15813e5fbed238503f10bb53 Mon Sep 17 00:00:00 2001 -From: Shay Agroskin -Date: Sun, 28 Apr 2019 10:14:23 +0300 -Subject: [PATCH] net/mlx5e: Replace reciprocal_scale in TX select queue - function -Git-commit: 57c70d8740f740498a52f9c0c0d7295829b944de -References: git-fixes -Patch-mainline: v5.2-rc6 - -The TX queue index returned by the fallback function ranges -between [0,NUM CHANNELS - 1] if QoS isn't set and -[0, (NUM CHANNELS)*(NUM TCs) -1] otherwise. - -Our HW uses different TC mapping than the fallback function -(which is denoted as 'up', user priority) so we only need to extract -a channel number out of the returned value. - -Since (NUM CHANNELS)*(NUM TCs) is a relatively small number, using -reciprocal scale almost always returns zero. -We instead access the 'txq2sq' table to extract the sq (and with it the -channel number) associated with the tx queue, thus getting -a more evenly distributed channel number. - -Perf: - -Rx/Tx side with Intel(R) Xeon(R) Silver 4108 CPU @ 1.80GHz and ConnectX-5. -Used 'iperf' UDP traffic, 10 threads, and priority 5. - -Before: 0.566Mpps -After: 2.37Mpps - -As expected, releasing the existing bottleneck of steering all traffic -to TX queue zero significantly improves transmission rates. 
- -Fixes: 7ccdd0841b30 ("net/mlx5e: Fix select queue callback") -Signed-off-by: Shay Agroskin -Reviewed-by: Tariq Toukan -Signed-off-by: Saeed Mahameed -Signed-off-by: Denis Kirjanov ---- - drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + - drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 12 ++++++------ - 3 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h -index 7d9f8555fa20..24c84fa9fe51 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en.h -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h -@@ -393,6 +393,7 @@ struct mlx5e_txqsq { - /* control path */ - struct mlx5_wq_ctrl wq_ctrl; - struct mlx5e_channel *channel; -+ int ch_ix; - int txq_ix; - u32 rate_limit; - struct mlx5e_txqsq_recover { -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c -index ed862fa63c57..1ee9f0e07600 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c -@@ -1161,6 +1161,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, - sq->clock = &mdev->clock; - sq->mkey_be = c->mkey_be; - sq->channel = c; -+ sq->ch_ix = c->ix; - sq->txq_ix = txq_ix; - sq->uar_map = mdev->mlx5e_res.bfreg.map; - sq->min_inline_mode = params->tx_min_inline_mode; -diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c -index 88e0523268cf..ac366dacb7f1 100644 ---- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c -+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c -@@ -114,12 +114,12 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - select_queue_fallback_t fallback) - { - struct mlx5e_priv *priv = netdev_priv(dev); -- int channel_ix = fallback(dev, skb, NULL); -+ int txq_ix = fallback(dev, skb, NULL); - u16 num_channels; - int up = 0; - - if (!netdev_get_num_tc(dev)) -- return channel_ix; -+ return txq_ix; - - #ifdef CONFIG_MLX5_CORE_EN_DCB - if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) -@@ -129,14 +129,14 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - if (skb_vlan_tag_present(skb)) - up = skb->vlan_tci >> VLAN_PRIO_SHIFT; - -- /* channel_ix can be larger than num_channels since -+ /* txq_ix can be larger than num_channels since - * dev->num_real_tx_queues = num_channels * num_tc - */ - num_channels = priv->channels.params.num_channels; -- if (channel_ix >= num_channels) -- channel_ix = reciprocal_scale(channel_ix, num_channels); -+ if (txq_ix >= num_channels) -+ txq_ix = priv->txq2sq[txq_ix]->ch_ix; - -- return priv->channel_tc2txq[channel_ix][up]; -+ return priv->channel_tc2txq[txq_ix][up]; - } - - static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) --- -2.16.4 - diff --git a/patches.suse/x86-Add-magic-AMD-return-thunk.patch b/patches.suse/x86-Add-magic-AMD-return-thunk.patch new file mode 100644 index 0000000..63642de --- /dev/null +++ b/patches.suse/x86-Add-magic-AMD-return-thunk.patch @@ -0,0 +1,289 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:48 +0200 +Subject: x86: Add magic AMD return-thunk +Git-commit: a149180fbcf336e97ce4eb2cdc13672727feb94d +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Note: needs to be in a section distinct from Retpolines such that the +Retpoline RET 
substitution cannot possibly use immediate jumps. + +ORC unwinding for zen_untrain_ret() and __x86_return_thunk() is a +little tricky but works due to the fact that zen_untrain_ret() doesn't +have any stack ops and as such will emit a single ORC entry at the +start (+0x3f). + +Meanwhile, unwinding an IP, including the __x86_return_thunk() one +(+0x40) will search for the largest ORC entry smaller or equal to the +IP, these will find the one ORC entry (+0x3f) and all works. + + [ Alexandre: SVM part. ] + [ bp: Build fix, massages. ] + +Suggested-by: Andrew Cooper +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/entry/entry_64.S | 11 ++++++ + arch/x86/entry/entry_64_compat.S | 7 +++ + arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/nospec-branch.h | 17 +++++++++ + arch/x86/kernel/vmlinux.lds.S | 2 - + arch/x86/kvm/svm.c | 4 ++ + arch/x86/lib/retpoline.S | 64 +++++++++++++++++++++++++++++++++-- + 7 files changed, 103 insertions(+), 3 deletions(-) + +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -107,6 +108,8 @@ ENTRY(entry_SYSENTER_compat) + xorl %r15d, %r15d /* nospec r15 */ + cld + ++ UNTRAIN_RET ++ + /* + * SYSENTER doesn't filter flags, so we need to clear NT and AC + * ourselves. To save a few cycles, we can check whether +@@ -248,6 +251,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram + /* Restrict Indirect Branch Speculation. All registers are saved already */ + RESTRICT_IB_SPEC_CLOBBER + ++ UNTRAIN_RET ++ + /* User mode is traced as though IRQs are on, and SYSENTER + * turned them off. + */ +@@ -433,6 +438,8 @@ ENTRY(entry_INT80_compat) + */ + TRACE_IRQS_OFF + ++ UNTRAIN_RET ++ + movq %rsp, %rdi + call do_int80_syscall_32 + .Lsyscall_32_done: +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -233,6 +233,9 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) + + /* IRQs are off. */ + movq %rsp, %rdi ++ ++ UNTRAIN_RET ++ + call do_syscall_64 /* returns with IRQs disabled */ + + TRACE_IRQS_IRETQ /* we're about to change IF */ +@@ -716,6 +719,7 @@ native_irq_return_ldt: + pushq %rdi /* Stash user RDI */ + SWAPGS /* to kernel GS */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ ++ UNTRAIN_RET + + /* + * There is no point in disabling Indirect Branch Speculation +@@ -870,8 +874,11 @@ ENTRY(switch_to_thread_stack) + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi + movq %rsp, %rdi + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp ++ + /* Restrict Indirect Branch Speculation */ + RESTRICT_IB_SPEC ++ UNTRAIN_RET ++ + UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI + + pushq 7*8(%rdi) /* regs->ss */ +@@ -1250,6 +1257,7 @@ ENTRY(error_entry) + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + /* Restrict Indirect Branch Speculation */ + RESTRICT_IB_SPEC_CLOBBER ++ UNTRAIN_RET + + .Lerror_entry_from_usermode_after_swapgs: + /* Put us onto the real thread stack. 
*/ +@@ -1301,6 +1309,7 @@ ENTRY(error_entry) + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + /* Restrict Indirect Branch Speculation */ + RESTRICT_IB_SPEC_CLOBBER ++ UNTRAIN_RET + jmp .Lerror_entry_done + + .Lbstep_iret: +@@ -1318,6 +1327,7 @@ ENTRY(error_entry) + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax + /* Restrict Indirect Branch Speculation */ + RESTRICT_IB_SPEC ++ UNTRAIN_RET + + /* + * Pretend that the exception came from user mode: set up pt_regs +@@ -1415,6 +1425,7 @@ ENTRY(nmi) + + /* Restrict Indirect Branch Speculation */ + RESTRICT_IB_SPEC ++ UNTRAIN_RET + + UNWIND_HINT_IRET_REGS base=%rdx offset=8 + pushq 5*8(%rdx) /* pt_regs->ss */ +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -288,6 +288,7 @@ + #define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ + #define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ ++#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ + + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ + #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -150,6 +150,22 @@ + #endif + .endm + ++/* ++ * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the ++ * return thunk isn't mapped into the userspace tables (then again, AMD ++ * typically has NO_MELTDOWN). ++ * ++ * Doesn't clobber any registers but does require a stable stack. ++ * ++ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point ++ * where we have a stack but before any RET instruction. ++ */ ++.macro UNTRAIN_RET ++#ifdef CONFIG_RETPOLINE ++ ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET ++#endif ++.endm ++ + #else /* __ASSEMBLY__ */ + + #define ANNOTATE_NOSPEC_ALTERNATIVE \ +@@ -168,6 +184,7 @@ + #ifdef CONFIG_X86_64 + + extern void __x86_return_thunk(void); ++extern void zen_untrain_ret(void); + + /* + * Inline asm uses the %V modifier which is only in newer GCC +--- a/arch/x86/kernel/vmlinux.lds.S ++++ b/arch/x86/kernel/vmlinux.lds.S +@@ -127,7 +127,7 @@ SECTIONS + + #ifdef CONFIG_RETPOLINE + __indirect_thunk_start = .; +- *(.text.__x86.indirect_thunk) ++ *(.text.__x86.*) + __indirect_thunk_end = .; + #endif + +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -52,6 +52,7 @@ + #include + #include + #include ++#include + + #include + #include "trace.h" +@@ -5682,6 +5683,9 @@ static void svm_vcpu_run(struct kvm_vcpu + "mov %%r14, %c[r14](%[svm]) \n\t" + "mov %%r15, %c[r15](%[svm]) \n\t" + #endif ++ ++ ALTERNATIVE("", "call zen_untrain_ret", X86_FEATURE_UNRET) ++ + /* + * Clear host registers marked as clobbered to prevent + * speculative use. +--- a/arch/x86/lib/retpoline.S ++++ b/arch/x86/lib/retpoline.S +@@ -53,11 +53,71 @@ GENERATE_THUNK(r15) + * This function name is magical and is used by -mfunction-return=thunk-extern + * for the compiler to generate JMPs to it. + */ ++ ++ .section .text.__x86.return_thunk ++ ++/* ++ * Safety details here pertain to the AMD Zen{1,2} microarchitecture: ++ * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for ++ * alignment within the BTB. ++ * 2) The instruction at zen_untrain_ret must contain, and not ++ * end with, the 0xc3 byte of the RET. 
++ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread ++ * from re-poisioning the BTB prediction. ++ */ ++ .align 64 ++ .skip 63, 0xcc ++.globl zen_untrain_ret; ++zen_untrain_ret: ++ ++ /* ++ * As executed from zen_untrain_ret, this is: ++ * ++ * TEST $0xcc, %bl ++ * LFENCE ++ * JMP __x86_return_thunk ++ * ++ * Executing the TEST instruction has a side effect of evicting any BTB ++ * prediction (potentially attacker controlled) attached to the RET, as ++ * __x86_return_thunk + 1 isn't an instruction boundary at the moment. ++ */ ++ .byte 0xf6 ++ ++ /* ++ * As executed from __x86_return_thunk, this is a plain RET. ++ * ++ * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. ++ * ++ * We subsequently jump backwards and architecturally execute the RET. ++ * This creates a correct BTB prediction (type=ret), but in the ++ * meantime we suffer Straight Line Speculation (because the type was ++ * no branch) which is halted by the INT3. ++ * ++ * With SMT enabled and STIBP active, a sibling thread cannot poison ++ * RET's prediction to a type of its choice, but can evict the ++ * prediction due to competitive sharing. If the prediction is ++ * evicted, __x86_return_thunk will suffer Straight Line Speculation ++ * which will be contained safely by the INT3. ++ */ ++ + ENTRY(__x86_return_thunk) + ret + int3 +-ENDPROC(__x86_return_thunk) + +-__EXPORT_THUNK(__x86_return_thunk) ++ /* ++ * Ensure the TEST decoding / BTB invalidation is complete. ++ */ ++ lfence ++ ++ /* ++ * Jump back and execute the RET in the middle of the TEST instruction. ++ * INT3 is for SLS protection. ++ */ ++ jmp __x86_return_thunk ++ int3 ++ENDPROC(zen_untrain_ret) ++__EXPORT_THUNK(zen_untrain_ret) ++ ++EXPORT_SYMBOL(__x86_return_thunk) + #endif /* CONFIG_RETPOLINE */ + diff --git a/patches.suse/x86-Undo-return-thunk-damage.patch b/patches.suse/x86-Undo-return-thunk-damage.patch new file mode 100644 index 0000000..b81a31a --- /dev/null +++ b/patches.suse/x86-Undo-return-thunk-damage.patch @@ -0,0 +1,30 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:37 +0200 +Subject: x86: Undo return-thunk damage +Git-commit: 15e67227c49a57837108acfe1c80570e1bd9f962 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Introduce X86_FEATURE_RETHUNK for those afflicted with needing this. + + [ bp: Do only INT3 padding - simpler. ] + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -293,6 +293,7 @@ + /* FREE! 
(11*32+11) */ + #define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ ++#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ + + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ + #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ diff --git a/patches.suse/x86-Use-return-thunk-in-asm-code.patch b/patches.suse/x86-Use-return-thunk-in-asm-code.patch new file mode 100644 index 0000000..0e55b79 --- /dev/null +++ b/patches.suse/x86-Use-return-thunk-in-asm-code.patch @@ -0,0 +1,120 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:45 +0200 +Subject: x86: Use return-thunk in asm code +Git-commit: aa3d480315ba6c3025a60958e1981072ea37c3df +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Use the return thunk in asm code. If the thunk isn't needed, it will +get patched into a RET instruction during boot by apply_returns(). + +Since alternatives can't handle relocations outside of the first +instruction, putting a 'jmp __x86_return_thunk' in one is not valid, +therefore carve out the memmove ERMS path into a separate label and jump +to it. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf + + [ Add the -D__DISABLE_EXPORTS guard from + + 156ff4a544ae ("x86/ibt: Base IBT bits") + + so that the realmode/ trampoline doesn't see __x86_return_thunk and the linker + fails the build. ] + +Signed-off-by: Borislav Petkov +--- + arch/x86/Makefile | 2 +- + arch/x86/boot/compressed/Makefile | 1 + + arch/x86/entry/vdso/Makefile | 1 + + arch/x86/include/asm/linkage.h | 8 ++++++++ + arch/x86/lib/memmove_64.S | 7 ++++++- + 5 files changed, 17 insertions(+), 2 deletions(-) + +--- a/arch/x86/boot/compressed/Makefile ++++ b/arch/x86/boot/compressed/Makefile +@@ -34,6 +34,7 @@ KBUILD_CFLAGS += $(cflags-y) + KBUILD_CFLAGS += -mno-mmx -mno-sse + KBUILD_CFLAGS += $(call cc-option,-ffreestanding) + KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) ++KBUILD_CFLAGS += -D__DISABLE_EXPORTS + + KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ + GCOV_PROFILE := n +--- a/arch/x86/entry/vdso/Makefile ++++ b/arch/x86/entry/vdso/Makefile +@@ -77,6 +77,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS) + + $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) ++$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO + + # + # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 
+--- a/arch/x86/include/asm/linkage.h ++++ b/arch/x86/include/asm/linkage.h +@@ -21,19 +21,27 @@ + #define __ALIGN_STR __stringify(__ALIGN) + #endif + ++#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) ++#define RET jmp __x86_return_thunk ++#else /* CONFIG_RETPOLINE */ + #ifdef CONFIG_SLS + #define RET ret; int3 + #else + #define RET ret + #endif ++#endif /* CONFIG_RETPOLINE */ + + #else /* __ASSEMBLY__ */ + ++#if defined(CONFIG_RETPOLINE) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO) ++#define ASM_RET "jmp __x86_return_thunk\n\t" ++#else /* CONFIG_RETPOLINE */ + #ifdef CONFIG_SLS + #define ASM_RET "ret; int3\n\t" + #else + #define ASM_RET "ret\n\t" + #endif ++#endif /* CONFIG_RETPOLINE */ + + #endif /* __ASSEMBLY__ */ + +--- a/arch/x86/lib/memmove_64.S ++++ b/arch/x86/lib/memmove_64.S +@@ -42,7 +42,7 @@ ENTRY(__memmove) + jg 2f + + .Lmemmove_begin_forward: +- ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS ++ ALTERNATIVE "", "jmp .Lmemmove_erms", X86_FEATURE_ERMS + + /* + * movsq instruction have many startup latency +@@ -206,6 +206,11 @@ ENTRY(__memmove) + movb %r11b, (%rdi) + 13: + RET ++ ++.Lmemmove_erms: ++ movq %rdx, %rcx ++ rep movsb ++ RET + ENDPROC(__memmove) + ENDPROC(memmove) + EXPORT_SYMBOL(__memmove) +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -21,7 +21,7 @@ CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/ + M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) + + REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ +- -DDISABLE_BRANCH_PROFILING \ ++ -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ + -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ + -mno-mmx -mno-sse \ diff --git a/patches.suse/x86-add-straight-line-speculation-mitigation.patch b/patches.suse/x86-add-straight-line-speculation-mitigation.patch new file mode 100644 index 0000000..867f28e --- /dev/null +++ b/patches.suse/x86-add-straight-line-speculation-mitigation.patch @@ -0,0 +1,113 @@ +From: Peter Zijlstra +Date: Sat, 4 Dec 2021 14:43:44 +0100 +Subject: x86: Add straight-line-speculation mitigation +Git-commit: e463a09af2f0677b9485a7e8e4e70b396b2ffb6f +Patch-mainline: v5.17-rc1 +References: bsc#1201050 CVE-2021-26341 + +Make use of an upcoming GCC feature to mitigate +straight-line-speculation for x86: + + https://gcc.gnu.org/g:53a643f8568067d7700a9f2facc8ba39974973d3 + https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102952 + https://bugs.llvm.org/show_bug.cgi?id=52323 + +It's built tested on x86_64-allyesconfig using GCC-12 and GCC-11. + +Maintenance overhead of this should be fairly low due to objtool +validation. + +Size overhead of all these additional int3 instructions comes to: + + text data bss dec hex filename + 22267751 6933356 2011368 31212475 1dc43bb defconfig-build/vmlinux + 22804126 6933356 1470696 31208178 1dc32f2 defconfig-build/vmlinux.sls + +Or roughly 2.4% additional text. 
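+
+For illustration only (a hypothetical stub, not part of this change): any
+return emitted from a C string literal has to go through the shared ASM_RET
+macro rather than a hard-coded "ret", so that the int3 padding is applied
+whenever CONFIG_SLS=y. For example:
+
+  #include <linux/linkage.h>
+
+  /* Hypothetical out-of-line stub defined from file-scope inline asm. */
+  asm(".pushsection .text\n"
+      ".type example_ret_stub, @function\n"
+      "example_ret_stub:\n"
+      ASM_RET
+      ".popsection\n");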
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Link: https://lore.kernel.org/r/20211204134908.140103474@infradead.org +--- + arch/x86/Kconfig | 9 +++++++++ + arch/x86/Makefile | 4 ++++ + arch/x86/include/asm/linkage.h | 10 ++++++++++ + arch/x86/include/asm/nospec-branch.h | 2 +- + arch/x86/lib/memmove_64.S | 2 +- + 5 files changed, 25 insertions(+), 2 deletions(-) + +--- a/arch/x86/include/asm/linkage.h ++++ b/arch/x86/include/asm/linkage.h +@@ -21,9 +21,19 @@ + #define __ALIGN_STR __stringify(__ALIGN) + #endif + ++#ifdef CONFIG_SLS ++#define RET ret; int3 ++#else ++#define RET ret ++#endif ++ + #else /* __ASSEMBLY__ */ + ++#ifdef CONFIG_SLS ++#define ASM_RET "ret; int3\n\t" ++#else + #define ASM_RET "ret\n\t" ++#endif + + #endif /* __ASSEMBLY__ */ + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -119,7 +119,7 @@ + ANNOTATE_NOSPEC_ALTERNATIVE + ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ +- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg; int3), X86_FEATURE_RETPOLINE_AMD + #else + jmp *\reg + #endif +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -433,6 +433,15 @@ config RETPOLINE + branches. Requires a compiler with -mindirect-branch=thunk-extern + support for full protection. The kernel may run slower. + ++config SLS ++ bool "Mitigate Straight-Line-Speculation" ++ depends on X86_64 ++ default n ++ help ++ Compile the kernel with straight-line-speculation options to guard ++ against straight line speculation. The kernel image might be slightly ++ larger. ++ + config X86_CPU_RESCTRL + bool "x86 CPU resource control support" + depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) +--- a/arch/x86/lib/memmove_64.S ++++ b/arch/x86/lib/memmove_64.S +@@ -42,7 +42,7 @@ ENTRY(__memmove) + jg 2f + + .Lmemmove_begin_forward: +- ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; RET", X86_FEATURE_ERMS ++ ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS + + /* + * movsq instruction have many startup latency +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -233,6 +233,10 @@ ifdef CONFIG_RETPOLINE + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE + endif + ++ifdef CONFIG_SLS ++ KBUILD_CFLAGS += $(call cc-option,-mharden-sls=all) ++endif ++ + archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/x86/tools relocs + diff --git a/patches.suse/x86-bugs-Add-AMD-retbleed-boot-parameter.patch b/patches.suse/x86-bugs-Add-AMD-retbleed-boot-parameter.patch new file mode 100644 index 0000000..576310f --- /dev/null +++ b/patches.suse/x86-bugs-Add-AMD-retbleed-boot-parameter.patch @@ -0,0 +1,217 @@ +From: Alexandre Chartre +Date: Tue, 14 Jun 2022 23:15:50 +0200 +Subject: x86/bugs: Add AMD retbleed= boot parameter +Git-commit: 7fbf47c7ce50b38a64576b150e7011ae73d54669 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Add the "retbleed=" boot parameter to select a mitigation for +RETBleed. Possible values are "off", "auto" and "unret" +(JMP2RET mitigation). The default value is "auto". + +Currently, "retbleed=auto" will select the unret mitigation on +AMD and Hygon and no mitigation on Intel (JMP2RET is not effective on +Intel). 
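+
+Usage note, not part of the patch: the mitigation is chosen at boot, for
+example with "retbleed=unret", and the resulting state is reported through
+/sys/devices/system/cpu/vulnerabilities/retbleed. Inside the kernel, later
+code can key off the feature bits the selection logic force-sets; a minimal
+sketch (report_retbleed_state() is a hypothetical function):
+
+  #include <linux/printk.h>
+  #include <asm/cpufeature.h>
+
+  static void report_retbleed_state(void)
+  {
+          if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+                  pr_info("retbleed: return thunks patched in\n");
+          if (boot_cpu_has(X86_FEATURE_UNRET))
+                  pr_info("retbleed: Zen BTB-untraining return thunk in use\n");
+  }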
+ + [peterz: rebase; add hygon] + [jpoimboe: cleanups] + +Signed-off-by: Alexandre Chartre +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +[ Enable CC_HAS_RETURN_THUNK unconditionally as our compilers support it. ] +Signed-off-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 14 +++ + arch/x86/Kconfig | 3 + arch/x86/Makefile | 3 + arch/x86/kernel/cpu/bugs.c | 108 +++++++++++++++++++++++- + 4 files changed, 127 insertions(+), 1 deletion(-) + +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -442,6 +442,9 @@ config SLS + against straight line speculation. The kernel image might be slightly + larger. + ++config CC_HAS_RETURN_THUNK ++ def_bool y ++ + config X86_CPU_RESCTRL + bool "x86 CPU resource control support" + depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -35,6 +35,7 @@ + #include "cpu.h" + + static void __init spectre_v1_select_mitigation(void); ++static void __init retbleed_select_mitigation(void); + static void __init spectre_v2_select_mitigation(void); + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); +@@ -110,6 +111,12 @@ void __init check_bugs(void) + + /* Select the proper CPU mitigations before patching alternatives: */ + spectre_v1_select_mitigation(); ++ retbleed_select_mitigation(); ++ /* ++ * spectre_v2_select_mitigation() relies on the state set by ++ * retbleed_select_mitigation(); specifically the STIBP selection is ++ * forced for UNRET. ++ */ + spectre_v2_select_mitigation(); + ssb_select_mitigation(); + l1tf_select_mitigation(); +@@ -826,6 +833,100 @@ static int __init nospectre_v1_cmdline(c + early_param("nospectre_v1", nospectre_v1_cmdline); + + #undef pr_fmt ++#define pr_fmt(fmt) "RETBleed: " fmt ++ ++enum retbleed_mitigation { ++ RETBLEED_MITIGATION_NONE, ++ RETBLEED_MITIGATION_UNRET, ++}; ++ ++enum retbleed_mitigation_cmd { ++ RETBLEED_CMD_OFF, ++ RETBLEED_CMD_AUTO, ++ RETBLEED_CMD_UNRET, ++}; ++ ++const char * const retbleed_strings[] = { ++ [RETBLEED_MITIGATION_NONE] = "Vulnerable", ++ [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", ++}; ++ ++static enum retbleed_mitigation retbleed_mitigation __ro_after_init = ++ RETBLEED_MITIGATION_NONE; ++static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = ++ RETBLEED_CMD_AUTO; ++ ++static int __init retbleed_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) ++ ; ++ else if (!strcmp(str, "auto")) ++ retbleed_cmd = RETBLEED_CMD_AUTO; ++ else if (!strcmp(str, "unret")) ++ retbleed_cmd = RETBLEED_CMD_UNRET; ++ else ++ pr_err("Unknown retbleed option (%s). 
Defaulting to 'auto'\n", str); ++ ++ return 0; ++} ++early_param("retbleed", retbleed_parse_cmdline); ++ ++#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" ++#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n" ++ ++static void __init retbleed_select_mitigation(void) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) ++ return; ++ ++ switch (retbleed_cmd) { ++ case RETBLEED_CMD_OFF: ++ return; ++ ++ case RETBLEED_CMD_UNRET: ++ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ break; ++ ++ case RETBLEED_CMD_AUTO: ++ default: ++ if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) ++ break; ++ ++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) ++ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ break; ++ } ++ ++ switch (retbleed_mitigation) { ++ case RETBLEED_MITIGATION_UNRET: ++ ++ if (!IS_ENABLED(CONFIG_RETPOLINE) || ++ !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) { ++ pr_err(RETBLEED_COMPILER_MSG); ++ retbleed_mitigation = RETBLEED_MITIGATION_NONE; ++ break; ++ } ++ ++ setup_force_cpu_cap(X86_FEATURE_RETHUNK); ++ setup_force_cpu_cap(X86_FEATURE_UNRET); ++ ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && ++ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) ++ pr_err(RETBLEED_UNTRAIN_MSG); ++ break; ++ ++ default: ++ break; ++ } ++ ++ pr_info("%s\n", retbleed_strings[retbleed_mitigation]); ++} ++ ++#undef pr_fmt + #define pr_fmt(fmt) "Spectre V2 : " fmt + + static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = +@@ -1884,7 +1985,12 @@ static ssize_t srbds_show_state(char *bu + + static ssize_t retbleed_show_state(char *buf) + { +- return sprintf(buf, "Vulnerable\n"); ++ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET && ++ (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && ++ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)) ++ return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n"); ++ ++ return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); + } + + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -241,6 +241,9 @@ ifdef CONFIG_SLS + KBUILD_CFLAGS += $(call cc-option,-mharden-sls=all) + endif + ++# RETHUNK ++KBUILD_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern) ++ + archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/x86/tools relocs + +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3977,6 +3977,20 @@ + + retain_initrd [RAM] Keep initrd memory after extraction + ++ retbleed= [X86] Control mitigation of RETBleed (Arbitrary ++ Speculative Code Execution with Return Instructions) ++ vulnerability. ++ ++ auto - automatically select a migitation ++ unret - force enable untrained return thunks, ++ only effective on AMD Zen {1,2} ++ based systems. ++ ++ Selecting 'auto' will choose a mitigation method at run ++ time according to the CPU. ++ ++ Not specifying this option is equivalent to retbleed=auto. ++ + rfkill.default_state= + 0 "airplane mode". All wifi, bluetooth, wimax, gps, fm, + etc. communication is blocked by default. 
diff --git a/patches.suse/x86-bugs-Add-retbleed-ibpb.patch b/patches.suse/x86-bugs-Add-retbleed-ibpb.patch new file mode 100644 index 0000000..4dd3300 --- /dev/null +++ b/patches.suse/x86-bugs-Add-retbleed-ibpb.patch @@ -0,0 +1,259 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:16:02 +0200 +Subject: x86/bugs: Add retbleed=ibpb +Git-commit: 3ebc170068885b6fc7bedda6c667bb2c4d533159 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +jmp2ret mitigates the easy-to-attack case at relatively low overhead. +It mitigates the long speculation windows after a mispredicted RET, but +it does not mitigate the short speculation window from arbitrary +instruction boundaries. + +On Zen2, there is a chicken bit which needs setting, which mitigates +"arbitrary instruction boundaries" down to just "basic block boundaries". + +But there is no fix for the short speculation window on basic block +boundaries, other than to flush the entire BTB to evict all attacker +predictions. + +On the spectrum of "fast & blurry" -> "safe", there is (on top of STIBP +or no-SMT): + + 1) Nothing System wide open + 2) jmp2ret May stop a script kiddy + 3) jmp2ret+chickenbit Raises the bar rather further + 4) IBPB Only thing which can count as "safe". + +Tentative numbers put IBPB-on-entry at a 2.5x hit on Zen2, and a 10x hit +on Zen1 according to lmbench. + + [ bp: Fixup feature bit comments, document option, 32-bit build fix. ] + +Suggested-by: Andrew Cooper +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 3 + + arch/x86/entry/Makefile | 2 - + arch/x86/entry/entry.S | 32 +++++++++++++++++ + arch/x86/include/asm/cpufeatures.h | 2 - + arch/x86/include/asm/nospec-branch.h | 8 +++- + arch/x86/kernel/cpu/bugs.c | 43 ++++++++++++++++++------ + 6 files changed, 77 insertions(+), 13 deletions(-) + +--- /dev/null ++++ b/arch/x86/entry/entry.S +@@ -0,0 +1,32 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Common place for both 32- and 64-bit entry routines. ++ */ ++ ++#include ++#include ++#include ++ ++.pushsection .noinstr.text, "ax" ++ ++ENTRY(entry_ibpb) ++ ++ push %rax ++ push %rcx ++ push %rdx ++ ++ movl $MSR_IA32_PRED_CMD, %ecx ++ movl $PRED_CMD_IBPB, %eax ++ xorl %edx, %edx ++ wrmsr ++ ++ pop %rdx ++ pop %rcx ++ pop %rax ++ ++ RET ++END(entry_ibpb) ++/* For KVM */ ++EXPORT_SYMBOL_GPL(entry_ibpb); ++ ++.popsection +--- a/arch/x86/entry/Makefile ++++ b/arch/x86/entry/Makefile +@@ -6,7 +6,7 @@ OBJECT_FILES_NON_STANDARD_entry_64_compa + + CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) + CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) +-obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o ++obj-y := entry.o entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o + obj-y += common.o + + obj-y += vdso/ +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -283,7 +283,7 @@ + #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ + #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ + #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ +-/* FREE! (11*32+10) */ ++#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */ + /* FREE! 
(11*32+11) */ + #define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ + #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -155,14 +155,17 @@ + * return thunk isn't mapped into the userspace tables (then again, AMD + * typically has NO_MELTDOWN). + * +- * Doesn't clobber any registers but does require a stable stack. ++ * While zen_untrain_ret() doesn't clobber anything but requires stack, ++ * entry_ibpb() will clobber AX, CX, DX. + * + * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point + * where we have a stack but before any RET instruction. + */ + .macro UNTRAIN_RET + #ifdef CONFIG_RETPOLINE +- ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET ++ ALTERNATIVE_2 "", \ ++ "call zen_untrain_ret", X86_FEATURE_UNRET, \ ++ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB + #endif + .endm + +@@ -185,6 +188,7 @@ + + extern void __x86_return_thunk(void); + extern void zen_untrain_ret(void); ++extern void entry_ibpb(void); + + /* + * Inline asm uses the %V modifier which is only in newer GCC +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -878,6 +878,7 @@ static enum spectre_v2_mitigation spectr + enum retbleed_mitigation { + RETBLEED_MITIGATION_NONE, + RETBLEED_MITIGATION_UNRET, ++ RETBLEED_MITIGATION_IBPB, + RETBLEED_MITIGATION_IBRS, + RETBLEED_MITIGATION_EIBRS, + }; +@@ -886,11 +887,13 @@ enum retbleed_mitigation_cmd { + RETBLEED_CMD_OFF, + RETBLEED_CMD_AUTO, + RETBLEED_CMD_UNRET, ++ RETBLEED_CMD_IBPB, + }; + + const char * const retbleed_strings[] = { + [RETBLEED_MITIGATION_NONE] = "Vulnerable", + [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", ++ [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB", + [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", + [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", + }; +@@ -920,6 +923,8 @@ static int __init retbleed_parse_cmdline + retbleed_cmd = RETBLEED_CMD_AUTO; + } else if (!strcmp(str, "unret")) { + retbleed_cmd = RETBLEED_CMD_UNRET; ++ } else if (!strcmp(str, "ibpb")) { ++ retbleed_cmd = RETBLEED_CMD_IBPB; + } else if (!strcmp(str, "nosmt")) { + retbleed_nosmt = true; + } else { +@@ -934,11 +939,13 @@ static int __init retbleed_parse_cmdline + early_param("retbleed", retbleed_parse_cmdline); + + #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" +-#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n" ++#define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler; falling back to IBPB!\n" + #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" + + static void __init retbleed_select_mitigation(void) + { ++ bool mitigate_smt = false; ++ + if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) + return; + +@@ -950,11 +957,21 @@ static void __init retbleed_select_mitig + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; + break; + ++ case RETBLEED_CMD_IBPB: ++ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; ++ break; ++ + case RETBLEED_CMD_AUTO: + default: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || +- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) +- retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { ++ ++ if 
(IS_ENABLED(CONFIG_RETPOLINE) && ++ IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) ++ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ else ++ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; ++ } + + /* + * The Intel mitigation (IBRS) was already selected in +@@ -970,26 +987,34 @@ static void __init retbleed_select_mitig + if (!IS_ENABLED(CONFIG_RETPOLINE) || + !IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) { + pr_err(RETBLEED_COMPILER_MSG); +- retbleed_mitigation = RETBLEED_MITIGATION_NONE; +- break; ++ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; ++ goto retbleed_force_ibpb; + } + + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + +- if (!boot_cpu_has(X86_FEATURE_STIBP) && +- (retbleed_nosmt || cpu_mitigations_auto_nosmt())) +- cpu_smt_disable(false); +- + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + pr_err(RETBLEED_UNTRAIN_MSG); ++ ++ mitigate_smt = true; ++ break; ++ ++ case RETBLEED_MITIGATION_IBPB: ++retbleed_force_ibpb: ++ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); ++ mitigate_smt = true; + break; + + default: + break; + } + ++ if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) && ++ (retbleed_nosmt || cpu_mitigations_auto_nosmt())) ++ cpu_smt_disable(false); ++ + /* + * Let IBRS trump all on Intel without affecting the effects of the + * retbleed= cmdline option. +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3986,6 +3986,9 @@ + disabling SMT if necessary for + the full mitigation (only on Zen1 + and older without STIBP). ++ ibpb - mitigate short speculation windows on ++ basic block boundaries too. Safe, highest ++ perf impact. + unret - force enable untrained return thunks, + only effective on AMD f15h-f17h + based systems. diff --git a/patches.suse/x86-bugs-Do-IBPB-fallback-check-only-once.patch b/patches.suse/x86-bugs-Do-IBPB-fallback-check-only-once.patch new file mode 100644 index 0000000..5191fac --- /dev/null +++ b/patches.suse/x86-bugs-Do-IBPB-fallback-check-only-once.patch @@ -0,0 +1,48 @@ +From: Josh Poimboeuf +Date: Tue, 14 Jun 2022 15:07:19 -0700 +Subject: x86/bugs: Do IBPB fallback check only once +Git-commit: 0fe4aeea9c01baabecc8c3afc7889c809d939bc2 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +When booting with retbleed=auto, if the kernel wasn't built with +CONFIG_CC_HAS_RETURN_THUNK, the mitigation falls back to IBPB. Make +sure a warning is printed in that case. The IBPB fallback check is done +twice, but it really only needs to be done once. 
+ +Signed-off-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 15 +++++---------- + 1 file changed, 5 insertions(+), 10 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 04077d13b3ae..108bd74289c5 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -891,18 +891,13 @@ static void __init retbleed_select_mitigation(void) + case RETBLEED_CMD_AUTO: + default: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || +- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { +- +- if (IS_ENABLED(CONFIG_RETPOLINE) && +- IS_ENABLED(CONFIG_CC_HAS_RETURN_THUNK)) +- retbleed_mitigation = RETBLEED_MITIGATION_UNRET; +- else +- retbleed_mitigation = RETBLEED_MITIGATION_IBPB; +- } ++ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) ++ retbleed_mitigation = RETBLEED_MITIGATION_UNRET; + + /* +- * The Intel mitigation (IBRS) was already selected in +- * spectre_v2_select_mitigation(). ++ * The Intel mitigation (IBRS or eIBRS) was already selected in ++ * spectre_v2_select_mitigation(). 'retbleed_mitigation' will ++ * be set accordingly below. + */ + + break; + diff --git a/patches.suse/x86-bugs-Do-not-enable-IBPB-on-entry-when-IBPB-is-no.patch b/patches.suse/x86-bugs-Do-not-enable-IBPB-on-entry-when-IBPB-is-no.patch new file mode 100644 index 0000000..ac19510 --- /dev/null +++ b/patches.suse/x86-bugs-Do-not-enable-IBPB-on-entry-when-IBPB-is-no.patch @@ -0,0 +1,46 @@ +From: Thadeu Lima de Souza Cascardo +Date: Thu, 7 Jul 2022 13:41:52 -0300 +Subject: [PATCH] x86/bugs: Do not enable IBPB-on-entry when IBPB is not supported +Patch-mainline: Queued in tip for 5.19 +Git-commit: 31b74c1dfb6cb530920fdcd047614e2b5eb72f74 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +There are some VM configurations which have Skylake model but do not +support IBPB. In those cases, when using retbleed=ibpb, userspace is going +to be killed and kernel is going to panic. + +If the CPU does not support IBPB, warn and proceed with the auto option. Also, +do not fallback to IBPB on AMD/Hygon systems if it is not supported. 
+ +Fixes: 3ebc17006888 ("x86/bugs: Add retbleed=ibpb") +Signed-off-by: Thadeu Lima de Souza Cascardo +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -930,14 +930,21 @@ static void __init retbleed_select_mitig + break; + + case RETBLEED_CMD_IBPB: ++ if (!boot_cpu_has(X86_FEATURE_IBPB)) { ++ pr_err("WARNING: CPU does not support IBPB.\n"); ++ goto do_cmd_auto; ++ } + retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + break; + ++do_cmd_auto: + case RETBLEED_CMD_AUTO: + default: + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ else if (boot_cpu_has(X86_FEATURE_IBPB)) ++ retbleed_mitigation = RETBLEED_MITIGATION_IBPB; + + /* + * The Intel mitigation (IBRS or eIBRS) was already selected in diff --git a/patches.suse/x86-bugs-Enable-STIBP-for-JMP2RET.patch b/patches.suse/x86-bugs-Enable-STIBP-for-JMP2RET.patch new file mode 100644 index 0000000..652f968 --- /dev/null +++ b/patches.suse/x86-bugs-Enable-STIBP-for-JMP2RET.patch @@ -0,0 +1,125 @@ +From: Kim Phillips +Date: Tue, 14 Jun 2022 23:15:51 +0200 +Subject: x86/bugs: Enable STIBP for JMP2RET +Git-commit: e8ec1b6e08a2102d8755ccb06fa26d540f26a2fa +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +For untrained return thunks to be fully effective, STIBP must be enabled +or SMT disabled. + +Co-developed-by: Josh Poimboeuf +Signed-off-by: Josh Poimboeuf +Signed-off-by: Kim Phillips +Signed-off-by: Peter Zijlstra (Intel) + + [ bp: No SPECTRE_V2_USER_STRICT_PREFERRED ] + +Signed-off-by: Borislav Petkov +--- + Documentation/admin-guide/kernel-parameters.txt | 14 ++++-- + arch/x86/kernel/cpu/bugs.c | 50 ++++++++++++++++++------ + 2 files changed, 48 insertions(+), 16 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -856,19 +856,34 @@ static enum retbleed_mitigation retbleed + static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init = + RETBLEED_CMD_AUTO; + ++static int __ro_after_init retbleed_nosmt = false; ++ + static int __init retbleed_parse_cmdline(char *str) + { + if (!str) + return -EINVAL; + +- if (!strcmp(str, "off")) +- ; +- else if (!strcmp(str, "auto")) +- retbleed_cmd = RETBLEED_CMD_AUTO; +- else if (!strcmp(str, "unret")) +- retbleed_cmd = RETBLEED_CMD_UNRET; +- else +- pr_err("Unknown retbleed option (%s). 
Defaulting to 'auto'\n", str); ++ while (str) { ++ char *next = strchr(str, ','); ++ if (next) { ++ *next = 0; ++ next++; ++ } ++ ++ if (!strcmp(str, "off")) { ++ return -EINVAL; ++ } else if (!strcmp(str, "auto")) { ++ retbleed_cmd = RETBLEED_CMD_AUTO; ++ } else if (!strcmp(str, "unret")) { ++ retbleed_cmd = RETBLEED_CMD_UNRET; ++ } else if (!strcmp(str, "nosmt")) { ++ retbleed_nosmt = true; ++ } else { ++ pr_err("Ignoring unknown retbleed option (%s).", str); ++ } ++ ++ str = next; ++ } + + return 0; + } +@@ -914,6 +929,10 @@ static void __init retbleed_select_mitig + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + ++ if (!boot_cpu_has(X86_FEATURE_STIBP) && ++ (retbleed_nosmt || cpu_mitigations_auto_nosmt())) ++ cpu_smt_disable(false); ++ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + pr_err(RETBLEED_UNTRAIN_MSG); +@@ -1985,10 +2004,17 @@ static ssize_t srbds_show_state(char *bu + + static ssize_t retbleed_show_state(char *buf) + { +- if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET && +- (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && +- boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)) +- return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n"); ++ if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET) { ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && ++ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) ++ return sprintf(buf, "Vulnerable: untrained return thunk on non-Zen uarch\n"); ++ ++ return sprintf(buf, "%s; SMT %s\n", ++ retbleed_strings[retbleed_mitigation], ++ !sched_smt_active() ? "disabled" : ++ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ? ++ "enabled with STIBP protection" : "vulnerable"); ++ } + + return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); + } +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -3981,10 +3981,16 @@ + Speculative Code Execution with Return Instructions) + vulnerability. + +- auto - automatically select a migitation +- unret - force enable untrained return thunks, +- only effective on AMD Zen {1,2} +- based systems. ++ auto - automatically select a migitation ++ auto,nosmt - automatically select a mitigation, ++ disabling SMT if necessary for ++ the full mitigation (only on Zen1 ++ and older without STIBP). ++ unret - force enable untrained return thunks, ++ only effective on AMD f15h-f17h ++ based systems. ++ unret,nosmt - like unret, will disable SMT when STIBP ++ is not available. + + Selecting 'auto' will choose a mitigation method at run + time according to the CPU. diff --git a/patches.suse/x86-bugs-Keep-a-per-CPU-IA32_SPEC_CTRL-value.patch b/patches.suse/x86-bugs-Keep-a-per-CPU-IA32_SPEC_CTRL-value.patch new file mode 100644 index 0000000..304217c --- /dev/null +++ b/patches.suse/x86-bugs-Keep-a-per-CPU-IA32_SPEC_CTRL-value.patch @@ -0,0 +1,115 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:52 +0200 +Subject: x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value +Git-commit: caa0ff24d5d0e02abce5e65c3d2b7f20a6617be5 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Due to TIF_SSBD and TIF_SPEC_IB the actual IA32_SPEC_CTRL value can +differ from x86_spec_ctrl_base. As such, keep a per-CPU value +reflecting the current task's MSR content. 
+ + [jpoimboe: rename] + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/nospec-branch.h | 1 + + arch/x86/kernel/cpu/bugs.c | 28 +++++++++++++++++++++++----- + arch/x86/kernel/process.c | 2 +- + 3 files changed, 25 insertions(+), 6 deletions(-) + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -253,6 +253,7 @@ static inline void indirect_branch_predi + + /* The Intel SPEC CTRL MSR base value cache */ + extern u64 x86_spec_ctrl_base; ++extern void write_spec_ctrl_current(u64 val); + + /* + * With retpoline, we must use IBRS to restrict branch prediction +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -48,12 +48,30 @@ static void __init taa_select_mitigation + static void __init mmio_select_mitigation(void); + static void __init srbds_select_mitigation(void); + +-/* The base value of the SPEC_CTRL MSR that always has to be preserved. */ ++/* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; + EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); ++ ++/* The current value of the SPEC_CTRL MSR with task-specific bits set */ ++DEFINE_PER_CPU(u64, x86_spec_ctrl_current); ++EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); ++ + static DEFINE_MUTEX(spec_ctrl_mutex); + + /* ++ * Keep track of the SPEC_CTRL MSR value for the current task, which may differ ++ * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). ++ */ ++void write_spec_ctrl_current(u64 val) ++{ ++ if (this_cpu_read(x86_spec_ctrl_current) == val) ++ return; ++ ++ this_cpu_write(x86_spec_ctrl_current, val); ++ wrmsrl(MSR_IA32_SPEC_CTRL, val); ++} ++ ++/* + * The vendor and possibly platform specific bits which can be modified in + * x86_spec_ctrl_base. + */ +@@ -1235,7 +1253,7 @@ static void __init spectre_v2_select_mit + if (spectre_v2_in_eibrs_mode(mode)) { + /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; +- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base); + } + + switch (mode) { +@@ -1290,7 +1308,7 @@ static void __init spectre_v2_select_mit + + static void update_stibp_msr(void * __unused) + { +- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base); + } + + /* Update x86_spec_ctrl_base in case SMT state changed. 
*/ +@@ -1522,7 +1540,7 @@ static enum ssb_mitigation __init __ssb_ + x86_amd_ssb_disable(); + } else { + x86_spec_ctrl_base |= SPEC_CTRL_SSBD; +- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base); + } + } + +@@ -1740,7 +1758,7 @@ int arch_prctl_spec_ctrl_get(struct task + void x86_spec_ctrl_setup_ap(void) + { + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) +- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base); + + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) + x86_amd_ssb_disable(); +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -599,7 +599,7 @@ static __always_inline void __speculatio + } + + if (updmsr) +- wrmsrl(MSR_IA32_SPEC_CTRL, msr); ++ write_spec_ctrl_current(msr); + } + + static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) diff --git a/patches.suse/x86-bugs-Optimize-SPEC_CTRL-MSR-writes.patch b/patches.suse/x86-bugs-Optimize-SPEC_CTRL-MSR-writes.patch new file mode 100644 index 0000000..5de05a8 --- /dev/null +++ b/patches.suse/x86-bugs-Optimize-SPEC_CTRL-MSR-writes.patch @@ -0,0 +1,105 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:54 +0200 +Subject: x86/bugs: Optimize SPEC_CTRL MSR writes +Git-commit: c779bc1a9002fa474175b80e72b85c9bf628abb0 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +When changing SPEC_CTRL for user control, the WRMSR can be delayed +until return-to-user when KERNEL_IBRS has been enabled. + +This avoids an MSR write during context switch. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/nospec-branch.h | 2 +- + arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++------ + arch/x86/kernel/process.c | 2 +- + 3 files changed, 14 insertions(+), 8 deletions(-) + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -344,7 +344,7 @@ static inline void unrestrict_branch_spe + + /* The Intel SPEC CTRL MSR base value cache */ + extern u64 x86_spec_ctrl_base; +-extern void write_spec_ctrl_current(u64 val); ++extern void write_spec_ctrl_current(u64 val, bool force); + + /* + * With retpoline, we must use IBRS to restrict branch prediction +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -60,13 +60,19 @@ static DEFINE_MUTEX(spec_ctrl_mutex); + * Keep track of the SPEC_CTRL MSR value for the current task, which may differ + * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update(). + */ +-void write_spec_ctrl_current(u64 val) ++void write_spec_ctrl_current(u64 val, bool force) + { + if (this_cpu_read(x86_spec_ctrl_current) == val) + return; + + this_cpu_write(x86_spec_ctrl_current, val); +- wrmsrl(MSR_IA32_SPEC_CTRL, val); ++ ++ /* ++ * When KERNEL_IBRS this MSR is written on return-to-user, unless ++ * forced the update can be delayed until that time. 
++ */ ++ if (force || !cpu_feature_enabled(X86_FEATURE_USE_IBRS)) ++ wrmsrl(MSR_IA32_SPEC_CTRL, val); + } + + /* +@@ -1364,7 +1370,7 @@ static void __init spectre_v2_select_mit + if (spectre_v2_in_eibrs_mode(mode)) { + /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; +- write_spec_ctrl_current(x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base, true); + } + + switch (mode) { +@@ -1423,7 +1429,7 @@ specv2_set_mode: + + static void update_stibp_msr(void * __unused) + { +- write_spec_ctrl_current(x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base, true); + } + + /* Update x86_spec_ctrl_base in case SMT state changed. */ +@@ -1654,7 +1660,7 @@ static enum ssb_mitigation __init __ssb_ + x86_amd_ssb_disable(); + } else { + x86_spec_ctrl_base |= SPEC_CTRL_SSBD; +- write_spec_ctrl_current(x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base, true); + } + } + +@@ -1862,7 +1868,7 @@ int arch_prctl_spec_ctrl_get(struct task + void x86_spec_ctrl_setup_ap(void) + { + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) +- write_spec_ctrl_current(x86_spec_ctrl_base); ++ write_spec_ctrl_current(x86_spec_ctrl_base, true); + + if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) + x86_amd_ssb_disable(); +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -436,7 +436,7 @@ static __always_inline void __speculatio + } + + if (updmsr) +- write_spec_ctrl_current(msr); ++ write_spec_ctrl_current(msr, false); + } + + static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) diff --git a/patches.suse/x86-bugs-Report-AMD-retbleed-vulnerability.patch b/patches.suse/x86-bugs-Report-AMD-retbleed-vulnerability.patch new file mode 100644 index 0000000..1360e26 --- /dev/null +++ b/patches.suse/x86-bugs-Report-AMD-retbleed-vulnerability.patch @@ -0,0 +1,166 @@ +From: Alexandre Chartre +Date: Tue, 14 Jun 2022 23:15:49 +0200 +Subject: x86/bugs: Report AMD retbleed vulnerability +Git-commit: 6b80b59b3555706508008f1f127b5412c89c7fd8 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Report that AMD x86 CPUs are vulnerable to the RETBleed (Arbitrary +Speculative Code Execution with Return Instructions) attack. 
+ + [peterz: add hygon] + [kim: invert parity; fam15h] + +Co-developed-by: Kim Phillips +Signed-off-by: Kim Phillips +Signed-off-by: Alexandre Chartre +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/kernel/cpu/bugs.c | 13 +++++++++++++ + arch/x86/kernel/cpu/common.c | 19 +++++++++++++++++++ + drivers/base/cpu.c | 8 ++++++++ + include/linux/cpu.h | 2 ++ + 5 files changed, 43 insertions(+) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -407,5 +407,6 @@ + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ + #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ + #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ ++#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1882,6 +1882,11 @@ static ssize_t srbds_show_state(char *bu + return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); + } + ++static ssize_t retbleed_show_state(char *buf) ++{ ++ return sprintf(buf, "Vulnerable\n"); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -1929,6 +1934,9 @@ static ssize_t cpu_show_common(struct de + case X86_BUG_MMIO_STALE_DATA: + return mmio_stale_data_show_state(buf); + ++ case X86_BUG_RETBLEED: ++ return retbleed_show_state(buf); ++ + default: + break; + } +@@ -1985,4 +1993,9 @@ ssize_t cpu_show_mmio_stale_data(struct + { + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); + } ++ ++ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); ++} + #endif +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -997,16 +997,27 @@ static const __initconst struct x86_cpu_ + {} + }; + ++#define VULNBL(vendor, family, model, blacklist) \ ++ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist) ++ + #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ + X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ + INTEL_FAM6_##model, steppings, \ + X86_FEATURE_ANY, issues) + ++#define VULNBL_AMD(family, blacklist) \ ++ VULNBL(AMD, family, X86_MODEL_ANY, blacklist) ++ ++#define VULNBL_HYGON(family, blacklist) \ ++ VULNBL(HYGON, family, X86_MODEL_ANY, blacklist) ++ + #define SRBDS BIT(0) + /* CPU is affected by X86_BUG_MMIO_STALE_DATA */ + #define MMIO BIT(1) + /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ + #define MMIO_SBDS BIT(2) ++/* CPU is affected by RETbleed, speculating where you would not expect it */ ++#define RETBLEED BIT(3) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1032,6 +1043,11 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), ++ ++ VULNBL_AMD(0x15, RETBLEED), ++ VULNBL_AMD(0x16, RETBLEED), ++ VULNBL_AMD(0x17, RETBLEED), ++ VULNBL_HYGON(0x18, RETBLEED), + {} + }; + +@@ -1131,6 +1147,9 
@@ static void __init cpu_set_bug_bits(stru + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + ++ if (cpu_matches(cpu_vuln_blacklist, RETBLEED)) ++ setup_force_cpu_bug(X86_BUG_RETBLEED); ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -564,6 +564,12 @@ ssize_t __weak cpu_show_mmio_stale_data( + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_retbleed(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +@@ -574,6 +580,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444 + static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); + static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); + static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); ++static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -586,6 +593,7 @@ static struct attribute *cpu_root_vulner + &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, ++ &dev_attr_retbleed.attr, + NULL + }; + +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -66,6 +66,8 @@ extern ssize_t cpu_show_itlb_multihit(st + extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); ++extern ssize_t cpu_show_retbleed(struct device *dev, ++ struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/patches.suse/x86-bugs-Report-Intel-retbleed-vulnerability.patch b/patches.suse/x86-bugs-Report-Intel-retbleed-vulnerability.patch new file mode 100644 index 0000000..f004843 --- /dev/null +++ b/patches.suse/x86-bugs-Report-Intel-retbleed-vulnerability.patch @@ -0,0 +1,159 @@ +From: Peter Zijlstra +Date: Fri, 24 Jun 2022 13:48:58 +0200 +Subject: x86/bugs: Report Intel retbleed vulnerability +Git-commit: 6ad0ad2bf8a67e27d1f9d006a1dabb0e1c360cc3 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Skylake suffers from RSB underflow speculation issues; report this +vulnerability and it's mitigation (spectre_v2=ibrs). 
+ + [jpoimboe: cleanups, eibrs] + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/msr-index.h | 1 + + arch/x86/kernel/cpu/bugs.c | 39 +++++++++++++++++++++++++++++++++------ + arch/x86/kernel/cpu/common.c | 14 +++++++------- + 3 files changed, 41 insertions(+), 13 deletions(-) + +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -81,6 +81,7 @@ + #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a + #define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ + #define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ ++#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */ + #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ + #define ARCH_CAP_SSB_NO BIT(4) /* + * Not susceptible to Speculative Store Bypass +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -863,12 +863,17 @@ static int __init nospectre_v1_cmdline(c + } + early_param("nospectre_v1", nospectre_v1_cmdline); + ++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = ++ SPECTRE_V2_NONE; ++ + #undef pr_fmt + #define pr_fmt(fmt) "RETBleed: " fmt + + enum retbleed_mitigation { + RETBLEED_MITIGATION_NONE, + RETBLEED_MITIGATION_UNRET, ++ RETBLEED_MITIGATION_IBRS, ++ RETBLEED_MITIGATION_EIBRS, + }; + + enum retbleed_mitigation_cmd { +@@ -880,6 +885,8 @@ enum retbleed_mitigation_cmd { + const char * const retbleed_strings[] = { + [RETBLEED_MITIGATION_NONE] = "Vulnerable", + [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk", ++ [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS", ++ [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS", + }; + + static enum retbleed_mitigation retbleed_mitigation __ro_after_init = +@@ -922,6 +929,7 @@ early_param("retbleed", retbleed_parse_c + + #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n" + #define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n" ++#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n" + + static void __init retbleed_select_mitigation(void) + { +@@ -938,12 +946,15 @@ static void __init retbleed_select_mitig + + case RETBLEED_CMD_AUTO: + default: +- if (!boot_cpu_has_bug(X86_BUG_RETBLEED)) +- break; +- + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + retbleed_mitigation = RETBLEED_MITIGATION_UNRET; ++ ++ /* ++ * The Intel mitigation (IBRS) was already selected in ++ * spectre_v2_select_mitigation(). ++ */ ++ + break; + } + +@@ -973,15 +984,31 @@ static void __init retbleed_select_mitig + break; + } + ++ /* ++ * Let IBRS trump all on Intel without affecting the effects of the ++ * retbleed= cmdline option. 
++ */ ++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { ++ switch (spectre_v2_enabled) { ++ case SPECTRE_V2_IBRS: ++ retbleed_mitigation = RETBLEED_MITIGATION_IBRS; ++ break; ++ case SPECTRE_V2_EIBRS: ++ case SPECTRE_V2_EIBRS_RETPOLINE: ++ case SPECTRE_V2_EIBRS_LFENCE: ++ retbleed_mitigation = RETBLEED_MITIGATION_EIBRS; ++ break; ++ default: ++ pr_err(RETBLEED_INTEL_MSG); ++ } ++ } ++ + pr_info("%s\n", retbleed_strings[retbleed_mitigation]); + } + + #undef pr_fmt + #define pr_fmt(fmt) "Spectre V2 : " fmt + +-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = +- SPECTRE_V2_NONE; +- + static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = + SPECTRE_V2_USER_NONE; + static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1029,17 +1029,17 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | +- BIT(7) | BIT(0xB), MMIO), +- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), ++ BIT(7) | BIT(0xB), MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), +- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), +@@ -1147,7 +1147,7 @@ static void __init cpu_set_bug_bits(stru + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + +- if (cpu_matches(cpu_vuln_blacklist, RETBLEED)) ++ if ((cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))) + setup_force_cpu_bug(X86_BUG_RETBLEED); + + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) diff --git a/patches.suse/x86-bugs-Split-spectre_v2_select_mitigation-and-spectre_v2.patch b/patches.suse/x86-bugs-Split-spectre_v2_select_mitigation-and-spectre_v2.patch new file mode 100644 index 0000000..c204225 --- /dev/null +++ b/patches.suse/x86-bugs-Split-spectre_v2_select_mitigation-and-spectre_v2.patch @@ -0,0 +1,103 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:56 +0200 +Subject: x86/bugs: Split spectre_v2_select_mitigation() and + spectre_v2_user_select_mitigation() +Git-commit: 166115c08a9b0b846b783088808a27d739be6e8d +Patch-mainline: Queued in tip for 5.19 +Git-repo: 
git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +retbleed will depend on spectre_v2, while spectre_v2_user depends on +retbleed. Break this cycle. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 25 +++++++++++++++++-------- + 1 file changed, 17 insertions(+), 8 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 2d7896493220..c546a9e4ed17 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -37,8 +37,9 @@ + #include "cpu.h" + + static void __init spectre_v1_select_mitigation(void); +-static void __init retbleed_select_mitigation(void); + static void __init spectre_v2_select_mitigation(void); ++static void __init retbleed_select_mitigation(void); ++static void __init spectre_v2_user_select_mitigation(void); + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); + static void __init mds_select_mitigation(void); +@@ -145,13 +146,19 @@ void __init check_bugs(void) + + /* Select the proper CPU mitigations before patching alternatives: */ + spectre_v1_select_mitigation(); ++ spectre_v2_select_mitigation(); ++ /* ++ * retbleed_select_mitigation() relies on the state set by ++ * spectre_v2_select_mitigation(); specifically it wants to know about ++ * spectre_v2=ibrs. ++ */ + retbleed_select_mitigation(); + /* +- * spectre_v2_select_mitigation() relies on the state set by ++ * spectre_v2_user_select_mitigation() relies on the state set by + * retbleed_select_mitigation(); specifically the STIBP selection is + * forced for UNRET. + */ +- spectre_v2_select_mitigation(); ++ spectre_v2_user_select_mitigation(); + ssb_select_mitigation(); + l1tf_select_mitigation(); + md_clear_select_mitigation(); +@@ -1013,13 +1020,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure) + pr_info("spectre_v2_user=%s forced on command line.\n", reason); + } + ++static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; ++ + static enum spectre_v2_user_cmd __init +-spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) ++spectre_v2_parse_user_cmdline(void) + { + char arg[20]; + int ret, i; + +- switch (v2_cmd) { ++ switch (spectre_v2_cmd) { + case SPECTRE_V2_CMD_NONE: + return SPECTRE_V2_USER_CMD_NONE; + case SPECTRE_V2_CMD_FORCE: +@@ -1054,7 +1063,7 @@ static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) + } + + static void __init +-spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) ++spectre_v2_user_select_mitigation(void) + { + enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; + bool smt_possible = IS_ENABLED(CONFIG_SMP); +@@ -1067,7 +1076,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) + cpu_smt_control == CPU_SMT_NOT_SUPPORTED) + smt_possible = false; + +- cmd = spectre_v2_parse_user_cmdline(v2_cmd); ++ cmd = spectre_v2_parse_user_cmdline(); + switch (cmd) { + case SPECTRE_V2_USER_CMD_NONE: + goto set_mode; +@@ -1391,7 +1400,7 @@ static void __init spectre_v2_select_mitigation(void) + } + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ +- spectre_v2_user_select_mitigation(cmd); ++ spectre_v2_cmd = cmd; + } + + static void update_stibp_msr(void * __unused) + diff --git a/patches.suse/x86-common-Stamp-out-the-stepping-madness.patch 
b/patches.suse/x86-common-Stamp-out-the-stepping-madness.patch new file mode 100644 index 0000000..cfe5893 --- /dev/null +++ b/patches.suse/x86-common-Stamp-out-the-stepping-madness.patch @@ -0,0 +1,60 @@ +From: Peter Zijlstra +Date: Fri, 24 Jun 2022 14:03:25 +0200 +Subject: x86/common: Stamp out the stepping madness +Git-commit: 7a05bc95ed1c5a59e47aaade9fb4083c27de9e62 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +The whole MMIO/RETBLEED enumeration went overboard on steppings. Get +rid of all that and simply use ANY. + +If a future stepping of these models would not be affected, it had +better set the relevant ARCH_CAP_$FOO_NO bit in +IA32_ARCH_CAPABILITIES. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Acked-by: Dave Hansen +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/common.c | 25 ++++++++++--------------- + 1 file changed, 10 insertions(+), 15 deletions(-) + +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1024,24 +1024,19 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO), +- VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO), ++ VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO), ++ VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | +- BIT(7) | BIT(0xB), MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO | RETBLEED), +- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), +- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS | RETBLEED), +- VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO), +- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO), ++ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), + + VULNBL_AMD(0x15, RETBLEED), diff --git a/patches.suse/x86-cpu-amd-Add-Spectral-Chicken.patch 
b/patches.suse/x86-cpu-amd-Add-Spectral-Chicken.patch new file mode 100644 index 0000000..65561a2 --- /dev/null +++ b/patches.suse/x86-cpu-amd-Add-Spectral-Chicken.patch @@ -0,0 +1,107 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:16:04 +0200 +Subject: x86/cpu/amd: Add Spectral Chicken +Git-commit: d7caac991feeef1b871ee6988fd2c9725df09039 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Zen2 uarchs have an undocumented, unnamed, MSR that contains a chicken +bit for some speculation behaviour. It needs setting. + +Note: very belatedly AMD released naming; it's now officially called + MSR_AMD64_DE_CFG2 and MSR_AMD64_DE_CFG2_SUPPRESS_NOBR_PRED_BIT + but shall remain the SPECTRAL CHICKEN. + +Suggested-by: Andrew Cooper +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/msr-index.h | 3 +++ + arch/x86/kernel/cpu/amd.c | 26 +++++++++++++++++++++++++- + arch/x86/kernel/cpu/cpu.h | 2 ++ + arch/x86/kernel/cpu/hygon.c | 6 ++++++ + 4 files changed, 36 insertions(+), 1 deletion(-) + +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -425,6 +425,9 @@ + /* Fam 17h MSRs */ + #define MSR_F17H_IRPERF 0xc00000e9 + ++#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3 ++#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1) ++ + /* Fam 16h MSRs */ + #define MSR_F16H_L2I_PERF_CTL 0xc0010230 + #define MSR_F16H_L2I_PERF_CTR 0xc0010231 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -875,6 +875,26 @@ static void init_amd_bd(struct cpuinfo_x + clear_rdrand_cpuid_bit(c); + } + ++void init_spectral_chicken(struct cpuinfo_x86 *c) ++{ ++ u64 value; ++ ++ /* ++ * On Zen2 we offer this chicken (bit) on the altar of Speculation. ++ * ++ * This suppresses speculation from the middle of a basic block, i.e. it ++ * suppresses non-branch predictions. 
++ * ++ * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H ++ */ ++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { ++ if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { ++ value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; ++ wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); ++ } ++ } ++} ++ + static void init_amd_zn(struct cpuinfo_x86 *c) + { + set_cpu_cap(c, X86_FEATURE_ZEN); +@@ -920,7 +940,11 @@ static void init_amd(struct cpuinfo_x86 + case 0x12: init_amd_ln(c); break; + case 0x15: init_amd_bd(c); break; + case 0x16: init_amd_jg(c); break; +- case 0x17: init_amd_zn(c); break; ++ case 0x17: ++ init_spectral_chicken(c); ++ init_amd_zn(c); ++ break; ++ + } + + /* +--- a/arch/x86/kernel/cpu/cpu.h ++++ b/arch/x86/kernel/cpu/cpu.h +@@ -60,6 +60,8 @@ extern void tsx_disable(void); + static inline void tsx_init(void) { } + #endif /* CONFIG_CPU_SUP_INTEL */ + ++extern void init_spectral_chicken(struct cpuinfo_x86 *c); ++ + extern void get_cpu_cap(struct cpuinfo_x86 *c); + extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); + extern void x86_spec_ctrl_setup_ap(void); +--- a/arch/x86/kernel/cpu/hygon.c ++++ b/arch/x86/kernel/cpu/hygon.c +@@ -318,6 +318,12 @@ static void init_hygon(struct cpuinfo_x8 + /* get apicid instead of initial apic id from cpuid */ + c->apicid = hard_smp_processor_id(); + ++ /* ++ * XXX someone from Hygon needs to confirm this DTRT ++ * ++ init_spectral_chicken(c); ++ */ ++ + set_cpu_cap(c, X86_FEATURE_ZEN); + set_cpu_cap(c, X86_FEATURE_CPB); + diff --git a/patches.suse/x86-cpu-amd-Enumerate-BTC_NO.patch b/patches.suse/x86-cpu-amd-Enumerate-BTC_NO.patch new file mode 100644 index 0000000..3bef428 --- /dev/null +++ b/patches.suse/x86-cpu-amd-Enumerate-BTC_NO.patch @@ -0,0 +1,80 @@ +From: Andrew Cooper +Date: Fri, 24 Jun 2022 14:41:21 +0100 +Subject: x86/cpu/amd: Enumerate BTC_NO +Git-commit: 26aae8ccbc1972233afd08fb3f368947c0314265 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +BTC_NO indicates that hardware is not susceptible to Branch Type Confusion. + +Zen3 CPUs don't suffer BTC. + +Hypervisors are expected to synthesise BTC_NO when it is appropriate +given the migration pool, to prevent kernels using heuristics. + + [ bp: Massage. ] + +Signed-off-by: Andrew Cooper +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/kernel/cpu/amd.c | 21 +++++++++++++++------ + arch/x86/kernel/cpu/common.c | 6 ++++-- + 3 files changed, 20 insertions(+), 8 deletions(-) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -304,6 +304,7 @@ + #define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ + #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ + #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ ++#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ + + /* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ + #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -903,12 +903,21 @@ static void init_amd_zn(struct cpuinfo_x + node_reclaim_distance = 32; + #endif + +- /* +- * Fix erratum 1076: CPB feature bit not being set in CPUID. +- * Always set it, except when running under a hypervisor. 
+- */ +- if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB)) +- set_cpu_cap(c, X86_FEATURE_CPB); ++ /* Fix up CPUID bits, but only if not virtualised. */ ++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { ++ ++ /* Erratum 1076: CPB feature bit not being set in CPUID. */ ++ if (!cpu_has(c, X86_FEATURE_CPB)) ++ set_cpu_cap(c, X86_FEATURE_CPB); ++ ++ /* ++ * Zen3 (Fam19 model < 0x10) parts are not susceptible to ++ * Branch Type Confusion, but predate the allocation of the ++ * BTC_NO bit. ++ */ ++ if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) ++ set_cpu_cap(c, X86_FEATURE_BTC_NO); ++ } + } + + static void init_amd(struct cpuinfo_x86 *c) +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1142,8 +1142,10 @@ static void __init cpu_set_bug_bits(stru + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + +- if ((cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))) +- setup_force_cpu_bug(X86_BUG_RETBLEED); ++ if (!cpu_has(c, X86_FEATURE_BTC_NO)) { ++ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)) ++ setup_force_cpu_bug(X86_BUG_RETBLEED); ++ } + + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; diff --git a/patches.suse/x86-cpufeatures-Move-RETPOLINE-flags-to-word-11.patch b/patches.suse/x86-cpufeatures-Move-RETPOLINE-flags-to-word-11.patch new file mode 100644 index 0000000..967fc39 --- /dev/null +++ b/patches.suse/x86-cpufeatures-Move-RETPOLINE-flags-to-word-11.patch @@ -0,0 +1,44 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:33 +0200 +Subject: x86/cpufeatures: Move RETPOLINE flags to word 11 +Git-commit: a883d624aed463c84c22596006e5a96f5b44db31 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +Patch-mainline: Queued in tip for v5.19 +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +In order to extend the RETPOLINE features to 4, move them to word 11 +where there is still room. This mostly keeps DISABLE_RETPOLINE +simple. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/cpufeatures.h | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -203,8 +203,8 @@ + #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ + #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ + #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ +-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ +-#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */ ++/* FREE! ( 7*32+12) */ ++/* FREE! ( 7*32+13) */ + #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ + #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ + #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ +@@ -282,6 +282,10 @@ + #define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */ + #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ + #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ ++/* FREE! (11*32+10) */ ++/* FREE! 
(11*32+11) */ ++#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ ++#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ + + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ + #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ diff --git a/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch b/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch new file mode 100644 index 0000000..b7ed0fc --- /dev/null +++ b/patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch @@ -0,0 +1,126 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:53 +0200 +Subject: x86/entry: Add kernel IBRS implementation +Git-commit: 2dbb887e875b1de3ca8f40ddf26bcfe55798c609 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Implement Kernel IBRS - currently the only known option to mitigate RSB +underflow speculation issues on Skylake hardware. + +Note: since IBRS_ENTER requires fuller context established than +UNTRAIN_RET, it must be placed after it. However, since UNTRAIN_RET +itself implies a RET, it must come after IBRS_ENTER. This means +IBRS_ENTER needs to also move UNTRAIN_RET. + +Note 2: KERNEL_IBRS is sub-optimal for XenPV. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf + + [ bp: Use the IBRS implementation which is already present in the SLE kernel ] + +Signed-off-by: Borislav Petkov +--- + arch/x86/entry/entry_64.S | 14 ++++++++------ + arch/x86/entry/entry_64_compat.S | 8 ++++---- + 2 files changed, 12 insertions(+), 10 deletions(-) + +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -3,7 +3,6 @@ + * + * Copyright 2000-2002 Andi Kleen, SuSE Labs. + */ +-#include "calling.h" + #include + #include + #include +@@ -17,6 +16,8 @@ + #include + #include + ++#include "calling.h" ++ + .section .entry.text, "ax" + + /* +@@ -54,8 +55,6 @@ ENTRY(entry_SYSENTER_compat) + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp +- /* Restrict Indirect Branch Speculation */ +- RESTRICT_IB_SPEC + + /* + * User tracing code (ptrace or signal handlers) might assume that +@@ -108,6 +107,8 @@ ENTRY(entry_SYSENTER_compat) + xorl %r15d, %r15d /* nospec r15 */ + cld + ++ ++ RESTRICT_IB_SPEC + UNTRAIN_RET + + /* +@@ -250,7 +251,6 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram + + /* Restrict Indirect Branch Speculation. All registers are saved already */ + RESTRICT_IB_SPEC_CLOBBER +- + UNTRAIN_RET + + /* User mode is traced as though IRQs are on, and SYSENTER +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -215,8 +215,6 @@ ENTRY(entry_SYSCALL_64) + */ + movq %rsp, PER_CPU_VAR(rsp_scratch) + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp +- /* Restrict Indirect Branch Speculation */ +- RESTRICT_IB_SPEC + + /* Construct struct pt_regs on stack */ + pushq $__USER_DS /* pt_regs->ss */ +@@ -234,6 +232,9 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) + /* IRQs are off. 
*/ + movq %rsp, %rdi + ++ /* Restrict Indirect Branch Speculation */ ++ RESTRICT_IB_SPEC ++ + UNTRAIN_RET + + call do_syscall_64 /* returns with IRQs disabled */ +@@ -1192,6 +1193,7 @@ ENTRY(paranoid_entry) + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + /* Restrict Indirect Branch speculation */ + RESTRICT_IB_SPEC_SAVE_AND_CLOBBER save_reg=%r13d ++ UNTRAIN_RET + /* + * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an + * unconditional CR3 write, even in the PTI case. So do an lfence +@@ -1420,10 +1422,6 @@ ENTRY(nmi) + movq %rsp, %rdx + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp + +- /* Restrict Indirect Branch Speculation */ +- RESTRICT_IB_SPEC +- UNTRAIN_RET +- + UNWIND_HINT_IRET_REGS base=%rdx offset=8 + pushq 5*8(%rdx) /* pt_regs->ss */ + pushq 4*8(%rdx) /* pt_regs->rsp */ +@@ -1435,6 +1433,10 @@ ENTRY(nmi) + PUSH_AND_CLEAR_REGS rdx=(%rdx) + ENCODE_FRAME_POINTER + ++ /* Restrict Indirect Branch Speculation */ ++ RESTRICT_IB_SPEC ++ UNTRAIN_RET ++ + /* + * At this point we no longer need to worry about stack damage + * due to nesting -- we're on the normal thread stack and we're diff --git a/patches.suse/x86-kexec-Disable-RET-on-kexec.patch b/patches.suse/x86-kexec-Disable-RET-on-kexec.patch new file mode 100644 index 0000000..f4455be --- /dev/null +++ b/patches.suse/x86-kexec-Disable-RET-on-kexec.patch @@ -0,0 +1,144 @@ +From: Konrad Rzeszutek Wilk +Date: Fri, 8 Jul 2022 19:10:11 +0200 +Subject: [PATCH] x86/kexec: Disable RET on kexec +Git-commit: 4c5d5e03fbcc1ebfee05498edc7b47915921c76c +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +All the invocations unroll to __x86_return_thunk and this file +must be PIC independent. + +This fixes kexec on 64-bit AMD boxes. + +Reported-by: Edward Tran +Reported-by: Awais Tanveer +Suggested-by: Ankur Arora +Signed-off-by: Konrad Rzeszutek Wilk +Signed-off-by: Alexandre Chartre +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/relocate_kernel_32.S | 16 +++++++++++----- + arch/x86/kernel/relocate_kernel_64.S | 18 ++++++++++++------ + 2 files changed, 23 insertions(+), 11 deletions(-) + +--- a/arch/x86/kernel/relocate_kernel_32.S ++++ b/arch/x86/kernel/relocate_kernel_32.S +@@ -12,7 +12,8 @@ + #include + + /* +- * Must be relocatable PIC code callable as a C function ++ * Must be relocatable PIC code callable as a C function, in particular ++ * there must be a plain RET and not jump to return thunk. + */ + + #define PTR(x) (x << 2) +@@ -94,7 +95,8 @@ relocate_kernel: + movl %edi, %eax + addl $(identity_mapped - relocate_kernel), %eax + pushl %eax +- RET ++ ret ++ int3 + + identity_mapped: + /* set return address to 0 if not preserving context */ +@@ -161,12 +163,14 @@ identity_mapped: + xorl %edx, %edx + xorl %esi, %esi + xorl %ebp, %ebp +- RET ++ ret ++ int3 + 1: + popl %edx + movl CP_PA_SWAP_PAGE(%edi), %esp + addl $PAGE_SIZE, %esp + 2: ++ ANNOTATE_RETPOLINE_SAFE + call *%edx + + /* get the re-entry point of the peer system */ +@@ -209,7 +213,8 @@ virtual_mapped: + popl %edi + popl %esi + popl %ebx +- RET ++ ret ++ int3 + + /* Do the copies */ + swap_pages: +@@ -271,7 +276,8 @@ swap_pages: + popl %edi + popl %ebx + popl %ebp +- RET ++ ret ++ int3 + + .globl kexec_control_code_size + .set kexec_control_code_size, . 
- relocate_kernel +--- a/arch/x86/kernel/relocate_kernel_64.S ++++ b/arch/x86/kernel/relocate_kernel_64.S +@@ -13,7 +13,8 @@ + #include + + /* +- * Must be relocatable PIC code callable as a C function ++ * Must be relocatable PIC code callable as a C function, in particular ++ * there must be a plain RET and not jump to return thunk. + */ + + #define PTR(x) (x << 3) +@@ -104,7 +105,8 @@ relocate_kernel: + /* jump to identity mapped page */ + addq $(identity_mapped - relocate_kernel), %r8 + pushq %r8 +- RET ++ ret ++ int3 + + identity_mapped: + /* set return address to 0 if not preserving context */ +@@ -189,7 +191,8 @@ identity_mapped: + xorl %r14d, %r14d + xorl %r15d, %r15d + +- RET ++ ret ++ int3 + + 1: + popq %rdx +@@ -210,7 +213,8 @@ identity_mapped: + call swap_pages + movq $virtual_mapped, %rax + pushq %rax +- RET ++ ret ++ int3 + + virtual_mapped: + movq RSP(%r8), %rsp +@@ -229,7 +233,8 @@ virtual_mapped: + popq %r12 + popq %rbp + popq %rbx +- RET ++ ret ++ int3 + + /* Do the copies */ + swap_pages: +@@ -284,7 +289,8 @@ swap_pages: + lea PAGE_SIZE(%rax), %rsi + jmp 0b + 3: +- RET ++ ret ++ int3 + + .globl kexec_control_code_size + .set kexec_control_code_size, . - relocate_kernel diff --git a/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch b/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch new file mode 100644 index 0000000..ab5d1ba --- /dev/null +++ b/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch @@ -0,0 +1,93 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:42 +0200 +Subject: x86/kvm: Fix SETcc emulation for return thunks +Git-commit: af2e140f34208a5dfb6b7a8ad2d56bda88f0524d +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Prepare the SETcc fastop stuff for when RET can be larger still. + +The tricky bit here is that the expressions should not only be +constant C expressions, but also absolute GAS expressions. This means +no ?: and 'true' is ~0. + +Also ensure em_setcc() has the same alignment as the actual FOP_SETCC() +ops, this ensures there cannot be an alignment hole between em_setcc() +and the first op. + +Additionally, add a .skip directive to the FOP_SETCC() macro to fill +any remaining space with INT3 traps; however the primary purpose of +this directive is to generate AS warnings when the remaining space +goes negative. Which is a very good indication the alignment magic +went side-ways. 
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/kvm/emulate.c | 31 ++++++++++++++++++++++++++----- + 1 file changed, 26 insertions(+), 5 deletions(-) + +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -321,11 +321,18 @@ static int fastop(struct x86_emulate_ctx + + #define FOP_RET "ret \n\t" + +-#define FOP_START(op) \ ++#define __FOP_RET(name) \ ++ "11: " ASM_RET \ ++ ".size " name ", .-" name "\n\t" ++ ++#define __FOP_START(op, align) \ + extern void em_##op(struct fastop *fake); \ + asm(".pushsection .text, \"ax\" \n\t" \ + ".global em_" #op " \n\t" \ +- FOP_FUNC("em_" #op) ++ ".align " __stringify(align) " \n\t" \ ++ "em_" #op ":\n\t" ++ ++#define FOP_START(op) __FOP_START(op, FASTOP_SIZE) + + #define FOP_END \ + ".popsection") +@@ -419,20 +426,34 @@ static int fastop(struct x86_emulate_ctx + ON64(FOP3E(op##q, rax, rdx, cl)) \ + FOP_END + ++/* ++ * Depending on .config the SETcc functions look like: ++ * ++ * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT] ++ * SETcc %al [3 bytes] ++ * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE] ++ * INT3 [1 byte; CONFIG_SLS] ++ */ ++#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \ ++ IS_ENABLED(CONFIG_SLS)) ++#define SETCC_LENGTH (3 + RET_LENGTH) ++#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1)) ++ + /* Special case for SETcc - 1 instruction per cc */ + #define FOP_SETCC(op) \ +- ".align 4 \n\t" \ ++ ".align " __stringify(SETCC_ALIGN) " \n\t" \ + ".type " #op ", @function \n\t" \ + #op ": \n\t" \ + #op " %al \n\t" \ +- FOP_RET ++ __FOP_RET(#op) \ ++ ".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t" + + asm(".pushsection .fixup, \"ax\"\n" + ".global kvm_fastop_exception \n" + "kvm_fastop_exception: xor %esi, %esi; " ASM_RET + ".popsection"); + +-FOP_START(setcc) ++__FOP_START(setcc, SETCC_ALIGN) + FOP_SETCC(seto) + FOP_SETCC(setno) + FOP_SETCC(setc) diff --git a/patches.suse/x86-lib-atomic64_386_32-rename-things.patch b/patches.suse/x86-lib-atomic64_386_32-rename-things.patch new file mode 100644 index 0000000..4782bf6 --- /dev/null +++ b/patches.suse/x86-lib-atomic64_386_32-rename-things.patch @@ -0,0 +1,242 @@ +From: Peter Zijlstra +Date: Sat, 4 Dec 2021 14:43:39 +0100 +Subject: x86/lib/atomic64_386_32: Rename things +Git-commit: 22da5a07c75e1104caf6a42f189c97b83d070073 +Patch-mainline: v5.17-rc1 +References: bsc#1201050 CVE-2021-26341 + +Principally, in order to get rid of #define RET in this code to make +place for a new RET, but also to clarify the code, rename a bunch of +things: + + s/UNLOCK/IRQ_RESTORE/ + s/LOCK/IRQ_SAVE/ + s/BEGIN/BEGIN_IRQ_SAVE/ + s/\/RET_IRQ_RESTORE/ + s/RET_ENDP/\tRET_IRQ_RESTORE\rENDP/ + +which then leaves RET unused so it can be removed. 
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Link: https://lore.kernel.org/r/20211204134907.841623970@infradead.org +--- + arch/x86/lib/atomic64_386_32.S | 84 ++++++++++++++++++++++------------------- + 1 file changed, 46 insertions(+), 38 deletions(-) + +--- a/arch/x86/lib/atomic64_386_32.S ++++ b/arch/x86/lib/atomic64_386_32.S +@@ -9,81 +9,83 @@ + #include + + /* if you want SMP support, implement these with real spinlocks */ +-.macro LOCK reg ++.macro IRQ_SAVE reg + pushfl + cli + .endm + +-.macro UNLOCK reg ++.macro IRQ_RESTORE reg + popfl + .endm + +-#define BEGIN(op) \ ++#define BEGIN_IRQ_SAVE(op) \ + .macro endp; \ + ENDPROC(atomic64_##op##_386); \ + .purgem endp; \ + .endm; \ + ENTRY(atomic64_##op##_386); \ +- LOCK v; ++ IRQ_SAVE v; + + #define ENDP endp + +-#define RET \ +- UNLOCK v; \ ++#define RET_IRQ_RESTORE \ ++ IRQ_RESTORE v; \ + ret + +-#define RET_ENDP \ +- RET; \ +- ENDP +- + #define v %ecx +-BEGIN(read) ++BEGIN_IRQ_SAVE(read) + movl (v), %eax + movl 4(v), %edx +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(set) ++BEGIN_IRQ_SAVE(set) + movl %ebx, (v) + movl %ecx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(xchg) ++BEGIN_IRQ_SAVE(xchg) + movl (v), %eax + movl 4(v), %edx + movl %ebx, (v) + movl %ecx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %ecx +-BEGIN(add) ++BEGIN_IRQ_SAVE(add) + addl %eax, (v) + adcl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %ecx +-BEGIN(add_return) ++BEGIN_IRQ_SAVE(add_return) + addl (v), %eax + adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %ecx +-BEGIN(sub) ++BEGIN_IRQ_SAVE(sub) + subl %eax, (v) + sbbl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %ecx +-BEGIN(sub_return) ++BEGIN_IRQ_SAVE(sub_return) + negl %edx + negl %eax + sbbl $0, %edx +@@ -91,47 +93,52 @@ BEGIN(sub_return) + adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(inc) ++BEGIN_IRQ_SAVE(inc) + addl $1, (v) + adcl $0, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(inc_return) ++BEGIN_IRQ_SAVE(inc_return) + movl (v), %eax + movl 4(v), %edx + addl $1, %eax + adcl $0, %edx + movl %eax, (v) + movl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(dec) ++BEGIN_IRQ_SAVE(dec) + subl $1, (v) + sbbl $0, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(dec_return) ++BEGIN_IRQ_SAVE(dec_return) + movl (v), %eax + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx + movl %eax, (v) + movl %edx, 4(v) +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v + + #define v %esi +-BEGIN(add_unless) ++BEGIN_IRQ_SAVE(add_unless) + addl %eax, %ecx + adcl %edx, %edi + addl (v), %eax +@@ -143,7 +150,7 @@ BEGIN(add_unless) + movl %edx, 4(v) + movl $1, %eax + 2: +- RET ++ RET_IRQ_RESTORE + 3: + cmpl %edx, %edi + jne 1b +@@ -153,7 +160,7 @@ ENDP + #undef v + + #define v %esi +-BEGIN(inc_not_zero) ++BEGIN_IRQ_SAVE(inc_not_zero) + movl (v), %eax + movl 4(v), %edx + testl %eax, %eax +@@ -165,7 +172,7 @@ BEGIN(inc_not_zero) + movl %edx, 4(v) + movl $1, %eax + 2: +- RET ++ RET_IRQ_RESTORE + 3: + testl %edx, %edx + jne 1b +@@ -174,7 +181,7 @@ ENDP + #undef v + + #define v %esi +-BEGIN(dec_if_positive) ++BEGIN_IRQ_SAVE(dec_if_positive) + movl (v), %eax + movl 4(v), %edx + subl $1, %eax +@@ -183,5 +190,6 @@ 
BEGIN(dec_if_positive) + movl %eax, (v) + movl %edx, 4(v) + 1: +-RET_ENDP ++ RET_IRQ_RESTORE ++ENDP + #undef v diff --git a/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch b/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch new file mode 100644 index 0000000..85d8d2c --- /dev/null +++ b/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch @@ -0,0 +1,1881 @@ +From: Peter Zijlstra +Date: Sat, 4 Dec 2021 14:43:40 +0100 +Subject: x86: Prepare asm files for straight-line-speculation +Git-commit: f94909ceb1ed4bfdb2ada72f93236305e6d6951f +Patch-mainline: v5.17-rc1 +References: bsc#1201050 CVE-2021-26341 + +Replace all ret/retq instructions with RET in preparation of making +RET a macro. Since AS is case insensitive it's a big no-op without +RET defined. + + find arch/x86/ -name \*.S | while read file + do + sed -i 's/\/RET/' $file + done + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org +--- + arch/x86/boot/compressed/efi_thunk_64.S | 2 - + arch/x86/boot/compressed/mem_encrypt.S | 4 +- + arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 2 - + arch/x86/crypto/aesni-intel_asm.S | 42 +++++++++++++-------------- + arch/x86/crypto/blowfish-x86_64-asm_64.S | 12 +++---- + arch/x86/crypto/camellia-aesni-avx-asm_64.S | 14 ++++----- + arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 14 ++++----- + arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 12 +++---- + arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 10 +++--- + arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2 - + arch/x86/crypto/crct10dif-pcl-asm_64.S | 2 - + arch/x86/crypto/des3_ede-asm_64.S | 4 +- + arch/x86/crypto/ghash-clmulni-intel_asm.S | 6 +-- + arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 10 +++--- + arch/x86/crypto/serpent-avx2-asm_64.S | 10 +++--- + arch/x86/crypto/sha512-avx-asm.S | 2 - + arch/x86/crypto/sha512-avx2-asm.S | 2 - + arch/x86/crypto/sha512-ssse3-asm.S | 2 - + arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 10 +++--- + arch/x86/crypto/twofish-i586-asm_32.S | 4 +- + arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 6 +-- + arch/x86/crypto/twofish-x86_64-asm_64.S | 4 +- + arch/x86/entry/entry_32.S | 2 - + arch/x86/entry/entry_64.S | 8 ++--- + arch/x86/entry/thunk_64.S | 2 - + arch/x86/entry/vdso/vdso32/system_call.S | 2 - + arch/x86/entry/vsyscall/vsyscall_emu_64.S | 6 +-- + arch/x86/kernel/acpi/wakeup_32.S | 6 +-- + arch/x86/kernel/relocate_kernel_32.S | 10 +++--- + arch/x86/kernel/relocate_kernel_64.S | 10 +++--- + arch/x86/kernel/verify_cpu.S | 4 +- + arch/x86/lib/atomic64_386_32.S | 2 - + arch/x86/lib/atomic64_cx8_32.S | 16 +++++----- + arch/x86/lib/cmpxchg8b_emu.S | 4 +- + arch/x86/lib/copy_page_64.S | 4 +- + arch/x86/lib/copy_user_64.S | 8 ++--- + arch/x86/lib/getuser.S | 14 ++++----- + arch/x86/lib/hweight.S | 6 +-- + arch/x86/lib/memcpy_64.S | 12 +++---- + arch/x86/lib/memmove_64.S | 4 +- + arch/x86/lib/memset_64.S | 6 +-- + arch/x86/lib/msr-reg.S | 4 +- + arch/x86/math-emu/div_Xsig.S | 2 - + arch/x86/math-emu/mul_Xsig.S | 6 +-- + arch/x86/math-emu/polynom_Xsig.S | 2 - + arch/x86/math-emu/reg_norm.S | 6 +-- + arch/x86/math-emu/reg_round.S | 2 - + arch/x86/math-emu/reg_u_div.S | 2 - + arch/x86/math-emu/reg_u_mul.S | 2 - + arch/x86/math-emu/reg_u_sub.S | 2 - + arch/x86/math-emu/round_Xsig.S | 4 +- + arch/x86/math-emu/shr_Xsig.S | 8 ++--- + arch/x86/math-emu/wm_shrx.S | 16 +++++----- + arch/x86/mm/mem_encrypt_boot.S | 4 +- + arch/x86/platform/efi/efi_stub_32.S | 2 - + 
arch/x86/platform/efi/efi_stub_64.S | 2 - + arch/x86/platform/efi/efi_thunk_64.S | 2 - + arch/x86/platform/olpc/xo1-wakeup.S | 6 +-- + arch/x86/power/hibernate_asm_64.S | 4 +- + arch/x86/um/checksum_32.S | 4 +- + arch/x86/um/setjmp_32.S | 2 - + arch/x86/um/setjmp_64.S | 2 - + 62 files changed, 193 insertions(+), 193 deletions(-) + +--- a/arch/x86/boot/compressed/efi_thunk_64.S ++++ b/arch/x86/boot/compressed/efi_thunk_64.S +@@ -95,7 +95,7 @@ ENTRY(efi64_thunk) + addq $8, %rsp + pop %rbx + pop %rbp +- ret ++ RET + ENDPROC(efi64_thunk) + + ENTRY(efi_exit32) +--- a/arch/x86/boot/compressed/mem_encrypt.S ++++ b/arch/x86/boot/compressed/mem_encrypt.S +@@ -67,7 +67,7 @@ ENTRY(get_sev_encryption_bit) + + #endif /* CONFIG_AMD_MEM_ENCRYPT */ + +- ret ++ RET + ENDPROC(get_sev_encryption_bit) + + .code64 +@@ -95,7 +95,7 @@ ENTRY(get_sev_encryption_mask) + pop %rbp + #endif + +- ret ++ RET + ENDPROC(get_sev_encryption_mask) + + .data +--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S ++++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +@@ -534,7 +534,7 @@ ddq_add_8: + /* return updated IV */ + vpshufb xbyteswap, xcounter, xcounter + vmovdqu xcounter, (p_iv) +- ret ++ RET + .endm + + /* +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -1475,7 +1475,7 @@ _return_T_done_decrypt: + pop %r14 + pop %r13 + pop %r12 +- ret ++ RET + ENDPROC(aesni_gcm_dec) + + +@@ -1739,7 +1739,7 @@ _return_T_done_encrypt: + pop %r14 + pop %r13 + pop %r12 +- ret ++ RET + ENDPROC(aesni_gcm_enc) + + #endif +@@ -1756,7 +1756,7 @@ _key_expansion_256a: + pxor %xmm1, %xmm0 + movaps %xmm0, (TKEYP) + add $0x10, TKEYP +- ret ++ RET + ENDPROC(_key_expansion_128) + ENDPROC(_key_expansion_256a) + +@@ -1782,7 +1782,7 @@ _key_expansion_192a: + shufps $0b01001110, %xmm2, %xmm1 + movaps %xmm1, 0x10(TKEYP) + add $0x20, TKEYP +- ret ++ RET + ENDPROC(_key_expansion_192a) + + .align 4 +@@ -1802,7 +1802,7 @@ _key_expansion_192b: + + movaps %xmm0, (TKEYP) + add $0x10, TKEYP +- ret ++ RET + ENDPROC(_key_expansion_192b) + + .align 4 +@@ -1815,7 +1815,7 @@ _key_expansion_256b: + pxor %xmm1, %xmm2 + movaps %xmm2, (TKEYP) + add $0x10, TKEYP +- ret ++ RET + ENDPROC(_key_expansion_256b) + + /* +@@ -1930,7 +1930,7 @@ ENTRY(aesni_set_key) + popl KEYP + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_set_key) + + /* +@@ -1954,7 +1954,7 @@ ENTRY(aesni_enc) + popl KEYP + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_enc) + + /* +@@ -2012,7 +2012,7 @@ _aesni_enc1: + AESENC KEY STATE + movaps 0x70(TKEYP), KEY + AESENCLAST KEY STATE +- ret ++ RET + ENDPROC(_aesni_enc1) + + /* +@@ -2121,7 +2121,7 @@ _aesni_enc4: + AESENCLAST KEY STATE2 + AESENCLAST KEY STATE3 + AESENCLAST KEY STATE4 +- ret ++ RET + ENDPROC(_aesni_enc4) + + /* +@@ -2146,7 +2146,7 @@ ENTRY(aesni_dec) + popl KEYP + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_dec) + + /* +@@ -2204,7 +2204,7 @@ _aesni_dec1: + AESDEC KEY STATE + movaps 0x70(TKEYP), KEY + AESDECLAST KEY STATE +- ret ++ RET + ENDPROC(_aesni_dec1) + + /* +@@ -2313,7 +2313,7 @@ _aesni_dec4: + AESDECLAST KEY STATE2 + AESDECLAST KEY STATE3 + AESDECLAST KEY STATE4 +- ret ++ RET + ENDPROC(_aesni_dec4) + + /* +@@ -2373,7 +2373,7 @@ ENTRY(aesni_ecb_enc) + popl LEN + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_ecb_enc) + + /* +@@ -2434,7 +2434,7 @@ ENTRY(aesni_ecb_dec) + popl LEN + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_ecb_dec) + + /* +@@ -2478,7 +2478,7 @@ ENTRY(aesni_cbc_enc) + popl IVP + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_cbc_enc) + + /* +@@ -2571,7 +2571,7 @@ 
ENTRY(aesni_cbc_dec) + popl IVP + #endif + FRAME_END +- ret ++ RET + ENDPROC(aesni_cbc_dec) + + #ifdef __x86_64__ +@@ -2600,7 +2600,7 @@ _aesni_inc_init: + mov $1, TCTR_LOW + MOVQ_R64_XMM TCTR_LOW INC + MOVQ_R64_XMM CTR TCTR_LOW +- ret ++ RET + ENDPROC(_aesni_inc_init) + + /* +@@ -2629,7 +2629,7 @@ _aesni_inc: + .Linc_low: + movaps CTR, IV + PSHUFB_XMM BSWAP_MASK IV +- ret ++ RET + ENDPROC(_aesni_inc) + + /* +@@ -2692,7 +2692,7 @@ ENTRY(aesni_ctr_enc) + movups IV, (IVP) + .Lctr_enc_just_ret: + FRAME_END +- ret ++ RET + ENDPROC(aesni_ctr_enc) + + /* +@@ -2845,7 +2845,7 @@ ENTRY(aesni_xts_decrypt) + movups IV, (IVP) + + FRAME_END +- ret ++ RET + ENDPROC(aesni_xts_decrypt) + + #endif +--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S ++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S +@@ -150,10 +150,10 @@ ENTRY(__blowfish_enc_blk) + jnz .L__enc_xor; + + write_block(); +- ret; ++ RET; + .L__enc_xor: + xor_block(); +- ret; ++ RET; + ENDPROC(__blowfish_enc_blk) + + ENTRY(blowfish_dec_blk) +@@ -185,7 +185,7 @@ ENTRY(blowfish_dec_blk) + + movq %r11, %r12; + +- ret; ++ RET; + ENDPROC(blowfish_dec_blk) + + /********************************************************************** +@@ -337,14 +337,14 @@ ENTRY(__blowfish_enc_blk_4way) + + popq %rbx; + popq %r12; +- ret; ++ RET; + + .L__enc_xor4: + xor_block4(); + + popq %rbx; + popq %r12; +- ret; ++ RET; + ENDPROC(__blowfish_enc_blk_4way) + + ENTRY(blowfish_dec_blk_4way) +@@ -379,5 +379,5 @@ ENTRY(blowfish_dec_blk_4way) + popq %rbx; + popq %r12; + +- ret; ++ RET; + ENDPROC(blowfish_dec_blk_4way) +--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +@@ -232,7 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_ + roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, + %rcx, (%r9)); +- ret; ++ RET; + ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) + + .align 8 +@@ -240,7 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_ + roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, + %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, + %rax, (%r9)); +- ret; ++ RET; + ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) + + /* +@@ -830,7 +830,7 @@ __camellia_enc_blk32: + %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); + + FRAME_END +- ret; ++ RET; + + .align 8 + .Lenc_max32: +@@ -917,7 +917,7 @@ __camellia_dec_blk32: + %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); + + FRAME_END +- ret; ++ RET; + + .align 8 + .Ldec_max32: +@@ -962,7 +962,7 @@ ENTRY(camellia_ecb_enc_32way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ecb_enc_32way) + + ENTRY(camellia_ecb_dec_32way) +@@ -996,7 +996,7 @@ ENTRY(camellia_ecb_dec_32way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ecb_dec_32way) + + ENTRY(camellia_cbc_dec_32way) +@@ -1064,7 +1064,7 @@ ENTRY(camellia_cbc_dec_32way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_cbc_dec_32way) + + #define inc_le128(x, minus_one, tmp) \ +--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S ++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S +@@ -193,7 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_ + roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, + %rcx, (%r9)); +- ret; ++ RET; + ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) + + .align 8 +@@ -201,7 +201,7 @@ 
roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_ + roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, + %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, + %rax, (%r9)); +- ret; ++ RET; + ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) + + /* +@@ -787,7 +787,7 @@ __camellia_enc_blk16: + %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); + + FRAME_END +- ret; ++ RET; + + .align 8 + .Lenc_max32: +@@ -874,7 +874,7 @@ __camellia_dec_blk16: + %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); + + FRAME_END +- ret; ++ RET; + + .align 8 + .Ldec_max32: +@@ -915,7 +915,7 @@ ENTRY(camellia_ecb_enc_16way) + %xmm8, %rsi); + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ecb_enc_16way) + + ENTRY(camellia_ecb_dec_16way) +@@ -945,7 +945,7 @@ ENTRY(camellia_ecb_dec_16way) + %xmm8, %rsi); + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ecb_dec_16way) + + ENTRY(camellia_cbc_dec_16way) +@@ -996,7 +996,7 @@ ENTRY(camellia_cbc_dec_16way) + %xmm8, %rsi); + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_cbc_dec_16way) + + #define inc_le128(x, minus_one, tmp) \ +--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +@@ -294,7 +294,7 @@ __cast5_enc_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + +- ret; ++ RET; + ENDPROC(__cast5_enc_blk16) + + .align 16 +@@ -367,7 +367,7 @@ __cast5_dec_blk16: + outunpack_blocks(RR3, RL3, RTMP, RX, RKM); + outunpack_blocks(RR4, RL4, RTMP, RX, RKM); + +- ret; ++ RET; + + .L__skip_dec: + vpsrldq $4, RKR, RKR; +@@ -408,7 +408,7 @@ ENTRY(cast5_ecb_enc_16way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast5_ecb_enc_16way) + + ENTRY(cast5_ecb_dec_16way) +@@ -446,7 +446,7 @@ ENTRY(cast5_ecb_dec_16way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast5_ecb_dec_16way) + + ENTRY(cast5_cbc_dec_16way) +@@ -498,7 +498,7 @@ ENTRY(cast5_cbc_dec_16way) + popq %r15; + popq %r12; + FRAME_END +- ret; ++ RET; + ENDPROC(cast5_cbc_dec_16way) + + ENTRY(cast5_ctr_16way) +@@ -574,5 +574,5 @@ ENTRY(cast5_ctr_16way) + popq %r15; + popq %r12; + FRAME_END +- ret; ++ RET; + ENDPROC(cast5_ctr_16way) +--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +@@ -306,7 +306,7 @@ __cast6_enc_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + +- ret; ++ RET; + ENDPROC(__cast6_enc_blk8) + + .align 8 +@@ -353,7 +353,7 @@ __cast6_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); + +- ret; ++ RET; + ENDPROC(__cast6_dec_blk8) + + ENTRY(cast6_ecb_enc_8way) +@@ -376,7 +376,7 @@ ENTRY(cast6_ecb_enc_8way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_ecb_enc_8way) + + ENTRY(cast6_ecb_dec_8way) +@@ -399,7 +399,7 @@ ENTRY(cast6_ecb_dec_8way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_ecb_dec_8way) + + ENTRY(cast6_cbc_dec_8way) +@@ -425,7 +425,7 @@ ENTRY(cast6_cbc_dec_8way) + popq %r15; + popq %r12; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_cbc_dec_8way) + + ENTRY(cast6_ctr_8way) +--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S ++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +@@ -310,7 +310,7 @@ do_return: + popq %rsi + popq %rdi + popq %rbx +- ret ++ RET + ENDPROC(crc_pcl) + + .section .rodata, "a", @progbits +--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S ++++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S +@@ -367,7 +367,7 @@ _cleanup: + # scale the 
result back to 16 bits + shr $16, %eax + mov %rcx, %rsp +- ret ++ RET + + ######################################################################## + +--- a/arch/x86/crypto/des3_ede-asm_64.S ++++ b/arch/x86/crypto/des3_ede-asm_64.S +@@ -252,7 +252,7 @@ ENTRY(des3_ede_x86_64_crypt_blk) + popq %r12; + popq %rbx; + +- ret; ++ RET; + ENDPROC(des3_ede_x86_64_crypt_blk) + + /*********************************************************************** +@@ -537,7 +537,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) + popq %r12; + popq %rbx; + +- ret; ++ RET; + ENDPROC(des3_ede_x86_64_crypt_blk_3way) + + .section .rodata, "a", @progbits +--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S ++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S +@@ -89,7 +89,7 @@ __clmul_gf128mul_ble: + psrlq $1, T2 + pxor T2, T1 + pxor T1, DATA +- ret ++ RET + ENDPROC(__clmul_gf128mul_ble) + + /* void clmul_ghash_mul(char *dst, const u128 *shash) */ +@@ -103,7 +103,7 @@ ENTRY(clmul_ghash_mul) + PSHUFB_XMM BSWAP DATA + movups DATA, (%rdi) + FRAME_END +- ret ++ RET + ENDPROC(clmul_ghash_mul) + + /* +@@ -132,5 +132,5 @@ ENTRY(clmul_ghash_update) + movups DATA, (%rdi) + .Lupdate_just_ret: + FRAME_END +- ret ++ RET + ENDPROC(clmul_ghash_update) +--- a/arch/x86/crypto/serpent-avx2-asm_64.S ++++ b/arch/x86/crypto/serpent-avx2-asm_64.S +@@ -616,7 +616,7 @@ __serpent_enc_blk16: + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(__serpent_enc_blk16) + + .align 8 +@@ -670,7 +670,7 @@ __serpent_dec_blk16: + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(__serpent_dec_blk16) + + ENTRY(serpent_ecb_enc_16way) +@@ -692,7 +692,7 @@ ENTRY(serpent_ecb_enc_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ecb_enc_16way) + + ENTRY(serpent_ecb_dec_16way) +@@ -714,7 +714,7 @@ ENTRY(serpent_ecb_dec_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ecb_dec_16way) + + ENTRY(serpent_cbc_dec_16way) +@@ -737,7 +737,7 @@ ENTRY(serpent_cbc_dec_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_cbc_dec_16way) + + ENTRY(serpent_ctr_16way) +--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +@@ -620,7 +620,7 @@ __serpent_enc_blk8_avx: + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(__serpent_enc_blk8_avx) + + .align 8 +@@ -674,7 +674,7 @@ __serpent_dec_blk8_avx: + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(__serpent_dec_blk8_avx) + + ENTRY(serpent_ecb_enc_8way_avx) +@@ -692,7 +692,7 @@ ENTRY(serpent_ecb_enc_8way_avx) + store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ecb_enc_8way_avx) + + ENTRY(serpent_ecb_dec_8way_avx) +@@ -710,7 +710,7 @@ ENTRY(serpent_ecb_dec_8way_avx) + store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ecb_dec_8way_avx) + + ENTRY(serpent_cbc_dec_8way_avx) +@@ -728,7 +728,7 @@ ENTRY(serpent_cbc_dec_8way_avx) + store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_cbc_dec_8way_avx) + + ENTRY(serpent_ctr_8way_avx) +--- a/arch/x86/crypto/sha512-avx2-asm.S ++++ b/arch/x86/crypto/sha512-avx2-asm.S +@@ -681,7 +681,7 @@ done_hash: + + # Restore Stack Pointer + 
mov frame_RSPSAVE(%rsp), %rsp +- ret ++ RET + ENDPROC(sha512_transform_rorx) + + ######################################################################## +--- a/arch/x86/crypto/sha512-avx-asm.S ++++ b/arch/x86/crypto/sha512-avx-asm.S +@@ -364,7 +364,7 @@ updateblock: + mov frame_RSPSAVE(%rsp), %rsp + + nowork: +- ret ++ RET + ENDPROC(sha512_transform_avx) + + ######################################################################## +--- a/arch/x86/crypto/sha512-ssse3-asm.S ++++ b/arch/x86/crypto/sha512-ssse3-asm.S +@@ -363,7 +363,7 @@ updateblock: + mov frame_RSPSAVE(%rsp), %rsp + + nowork: +- ret ++ RET + ENDPROC(sha512_transform_ssse3) + + ######################################################################## +--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +@@ -287,7 +287,7 @@ __twofish_enc_blk8: + outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); + outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); + +- ret; ++ RET; + ENDPROC(__twofish_enc_blk8) + + .align 8 +@@ -327,7 +327,7 @@ __twofish_dec_blk8: + outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); + outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); + +- ret; ++ RET; + ENDPROC(__twofish_dec_blk8) + + ENTRY(twofish_ecb_enc_8way) +@@ -347,7 +347,7 @@ ENTRY(twofish_ecb_enc_8way) + store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_ecb_enc_8way) + + ENTRY(twofish_ecb_dec_8way) +@@ -367,7 +367,7 @@ ENTRY(twofish_ecb_dec_8way) + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_ecb_dec_8way) + + ENTRY(twofish_cbc_dec_8way) +@@ -392,7 +392,7 @@ ENTRY(twofish_cbc_dec_8way) + popq %r12; + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_cbc_dec_8way) + + ENTRY(twofish_ctr_8way) +--- a/arch/x86/crypto/twofish-i586-asm_32.S ++++ b/arch/x86/crypto/twofish-i586-asm_32.S +@@ -273,7 +273,7 @@ ENTRY(twofish_enc_blk) + pop %ebx + pop %ebp + mov $1, %eax +- ret ++ RET + ENDPROC(twofish_enc_blk) + + ENTRY(twofish_dec_blk) +@@ -330,5 +330,5 @@ ENTRY(twofish_dec_blk) + pop %ebx + pop %ebp + mov $1, %eax +- ret ++ RET + ENDPROC(twofish_dec_blk) +--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +@@ -273,7 +273,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %rbx; + popq %r12; + popq %r13; +- ret; ++ RET; + + .L__enc_xor3: + outunpack_enc3(xor); +@@ -281,7 +281,7 @@ ENTRY(__twofish_enc_blk_3way) + popq %rbx; + popq %r12; + popq %r13; +- ret; ++ RET; + ENDPROC(__twofish_enc_blk_3way) + + ENTRY(twofish_dec_blk_3way) +@@ -316,5 +316,5 @@ ENTRY(twofish_dec_blk_3way) + popq %rbx; + popq %r12; + popq %r13; +- ret; ++ RET; + ENDPROC(twofish_dec_blk_3way) +--- a/arch/x86/crypto/twofish-x86_64-asm_64.S ++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S +@@ -265,7 +265,7 @@ ENTRY(twofish_enc_blk) + + popq R1 + movl $1,%eax +- ret ++ RET + ENDPROC(twofish_enc_blk) + + ENTRY(twofish_dec_blk) +@@ -317,5 +317,5 @@ ENTRY(twofish_dec_blk) + + popq R1 + movl $1,%eax +- ret ++ RET + ENDPROC(twofish_dec_blk) +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -280,7 +280,7 @@ ENTRY(schedule_tail_wrapper) + popl %eax + + FRAME_END +- ret ++ RET + ENDPROC(schedule_tail_wrapper) + /* + * A newly forked process directly context switches into this address. 
+--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -868,7 +868,7 @@ ENTRY(switch_to_thread_stack) + UNWIND_HINT_FUNC + + movq (%rdi), %rdi +- ret ++ RET + END(switch_to_thread_stack) + + .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0 +@@ -1002,7 +1002,7 @@ ENTRY(native_load_gs_index) + SWAPGS + popfq + FRAME_END +- ret ++ RET + ENDPROC(native_load_gs_index) + EXPORT_SYMBOL(native_load_gs_index) + +@@ -1245,13 +1245,13 @@ ENTRY(error_entry) + */ + TRACE_IRQS_OFF + CALL_enter_from_user_mode +- ret ++ RET + + .Lerror_entry_done_lfence: + FENCE_SWAPGS_KERNEL_ENTRY + .Lerror_entry_done: + TRACE_IRQS_OFF +- ret ++ RET + + /* + * There are two places in the kernel that can potentially fault with +--- a/arch/x86/entry/thunk_64.S ++++ b/arch/x86/entry/thunk_64.S +@@ -68,6 +68,6 @@ + popq %rsi + popq %rdi + popq %rbp +- ret ++ RET + _ASM_NOKPROBE(.L_restore) + #endif +--- a/arch/x86/entry/vdso/vdso32/system_call.S ++++ b/arch/x86/entry/vdso/vdso32/system_call.S +@@ -77,7 +77,7 @@ GLOBAL(int80_landing_pad) + popl %ecx + CFI_RESTORE ecx + CFI_ADJUST_CFA_OFFSET -4 +- ret ++ RET + CFI_ENDPROC + + .size __kernel_vsyscall,.-__kernel_vsyscall +--- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S ++++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S +@@ -20,17 +20,17 @@ __vsyscall_page: + + mov $__NR_gettimeofday, %rax + syscall +- ret ++ RET + + .balign 1024, 0xcc + mov $__NR_time, %rax + syscall +- ret ++ RET + + .balign 1024, 0xcc + mov $__NR_getcpu, %rax + syscall +- ret ++ RET + + .balign 4096, 0xcc + +--- a/arch/x86/kernel/acpi/wakeup_32.S ++++ b/arch/x86/kernel/acpi/wakeup_32.S +@@ -59,7 +59,7 @@ save_registers: + popl saved_context_eflags + + movl $ret_point, saved_eip +- ret ++ RET + + + restore_registers: +@@ -69,7 +69,7 @@ restore_registers: + movl saved_context_edi, %edi + pushl saved_context_eflags + popfl +- ret ++ RET + + ENTRY(do_suspend_lowlevel) + call save_processor_state +@@ -85,7 +85,7 @@ ENTRY(do_suspend_lowlevel) + ret_point: + call restore_registers + call restore_processor_state +- ret ++ RET + + .data + ALIGN +--- a/arch/x86/kernel/relocate_kernel_32.S ++++ b/arch/x86/kernel/relocate_kernel_32.S +@@ -94,7 +94,7 @@ relocate_kernel: + movl %edi, %eax + addl $(identity_mapped - relocate_kernel), %eax + pushl %eax +- ret ++ RET + + identity_mapped: + /* set return address to 0 if not preserving context */ +@@ -161,7 +161,7 @@ identity_mapped: + xorl %edx, %edx + xorl %esi, %esi + xorl %ebp, %ebp +- ret ++ RET + 1: + popl %edx + movl CP_PA_SWAP_PAGE(%edi), %esp +@@ -192,7 +192,7 @@ identity_mapped: + movl %edi, %eax + addl $(virtual_mapped - relocate_kernel), %eax + pushl %eax +- ret ++ RET + + virtual_mapped: + movl CR4(%edi), %eax +@@ -209,7 +209,7 @@ virtual_mapped: + popl %edi + popl %esi + popl %ebx +- ret ++ RET + + /* Do the copies */ + swap_pages: +@@ -271,7 +271,7 @@ swap_pages: + popl %edi + popl %ebx + popl %ebp +- ret ++ RET + + .globl kexec_control_code_size + .set kexec_control_code_size, . 
- relocate_kernel +--- a/arch/x86/kernel/relocate_kernel_64.S ++++ b/arch/x86/kernel/relocate_kernel_64.S +@@ -104,7 +104,7 @@ relocate_kernel: + /* jump to identity mapped page */ + addq $(identity_mapped - relocate_kernel), %r8 + pushq %r8 +- ret ++ RET + + identity_mapped: + /* set return address to 0 if not preserving context */ +@@ -189,7 +189,7 @@ identity_mapped: + xorl %r14d, %r14d + xorl %r15d, %r15d + +- ret ++ RET + + 1: + popq %rdx +@@ -210,7 +210,7 @@ identity_mapped: + call swap_pages + movq $virtual_mapped, %rax + pushq %rax +- ret ++ RET + + virtual_mapped: + movq RSP(%r8), %rsp +@@ -229,7 +229,7 @@ virtual_mapped: + popq %r12 + popq %rbp + popq %rbx +- ret ++ RET + + /* Do the copies */ + swap_pages: +@@ -284,7 +284,7 @@ swap_pages: + lea PAGE_SIZE(%rax), %rsi + jmp 0b + 3: +- ret ++ RET + + .globl kexec_control_code_size + .set kexec_control_code_size, . - relocate_kernel +--- a/arch/x86/kernel/verify_cpu.S ++++ b/arch/x86/kernel/verify_cpu.S +@@ -134,9 +134,9 @@ ENTRY(verify_cpu) + .Lverify_cpu_no_longmode: + popf # Restore caller passed flags + movl $1,%eax +- ret ++ RET + .Lverify_cpu_sse_ok: + popf # Restore caller passed flags + xorl %eax, %eax +- ret ++ RET + ENDPROC(verify_cpu) +--- a/arch/x86/lib/atomic64_386_32.S ++++ b/arch/x86/lib/atomic64_386_32.S +@@ -34,7 +34,7 @@ ENTRY(atomic64_##op##_386); \ + + #define RET_IRQ_RESTORE \ + IRQ_RESTORE v; \ +- ret ++ RET + + #define v %ecx + BEGIN_IRQ_SAVE(read) +--- a/arch/x86/lib/atomic64_cx8_32.S ++++ b/arch/x86/lib/atomic64_cx8_32.S +@@ -22,7 +22,7 @@ + + ENTRY(atomic64_read_cx8) + read64 %ecx +- ret ++ RET + ENDPROC(atomic64_read_cx8) + + ENTRY(atomic64_set_cx8) +@@ -32,7 +32,7 @@ ENTRY(atomic64_set_cx8) + cmpxchg8b (%esi) + jne 1b + +- ret ++ RET + ENDPROC(atomic64_set_cx8) + + ENTRY(atomic64_xchg_cx8) +@@ -41,7 +41,7 @@ ENTRY(atomic64_xchg_cx8) + cmpxchg8b (%esi) + jne 1b + +- ret ++ RET + ENDPROC(atomic64_xchg_cx8) + + .macro addsub_return func ins insc +@@ -72,7 +72,7 @@ ENTRY(atomic64_\func\()_return_cx8) + popl %esi + popl %ebx + popl %ebp +- ret ++ RET + ENDPROC(atomic64_\func\()_return_cx8) + .endm + +@@ -97,7 +97,7 @@ ENTRY(atomic64_\func\()_return_cx8) + movl %ebx, %eax + movl %ecx, %edx + popl %ebx +- ret ++ RET + ENDPROC(atomic64_\func\()_return_cx8) + .endm + +@@ -122,7 +122,7 @@ ENTRY(atomic64_dec_if_positive_cx8) + movl %ebx, %eax + movl %ecx, %edx + popl %ebx +- ret ++ RET + ENDPROC(atomic64_dec_if_positive_cx8) + + ENTRY(atomic64_add_unless_cx8) +@@ -153,7 +153,7 @@ ENTRY(atomic64_add_unless_cx8) + addl $8, %esp + popl %ebx + popl %ebp +- ret ++ RET + 4: + cmpl %edx, 4(%esp) + jne 2b +@@ -180,5 +180,5 @@ ENTRY(atomic64_inc_not_zero_cx8) + movl $1, %eax + 3: + popl %ebx +- ret ++ RET + ENDPROC(atomic64_inc_not_zero_cx8) +--- a/arch/x86/lib/cmpxchg8b_emu.S ++++ b/arch/x86/lib/cmpxchg8b_emu.S +@@ -38,7 +38,7 @@ ENTRY(cmpxchg8b_emu) + movl %ecx, 4(%esi) + + popfl +- ret ++ RET + + .Lnot_same: + movl (%esi), %eax +@@ -46,7 +46,7 @@ ENTRY(cmpxchg8b_emu) + movl 4(%esi), %edx + + popfl +- ret ++ RET + + ENDPROC(cmpxchg8b_emu) + EXPORT_SYMBOL(cmpxchg8b_emu) +--- a/arch/x86/lib/copy_page_64.S ++++ b/arch/x86/lib/copy_page_64.S +@@ -16,7 +16,7 @@ ENTRY(copy_page) + ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD + movl $4096/8, %ecx + rep movsq +- ret ++ RET + ENDPROC(copy_page) + EXPORT_SYMBOL(copy_page) + +@@ -84,5 +84,5 @@ ENTRY(copy_page_regs) + movq (%rsp), %rbx + movq 1*8(%rsp), %r12 + addq $2*8, %rsp +- ret ++ RET + ENDPROC(copy_page_regs) +--- a/arch/x86/lib/copy_user_64.S ++++ 
b/arch/x86/lib/copy_user_64.S +@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled) + jnz 21b + 23: xor %eax,%eax + ASM_CLAC +- ret ++ RET + + .section .fixup,"ax" + 30: shll $6,%ecx +@@ -148,7 +148,7 @@ ENTRY(copy_user_generic_string) + movsb + xorl %eax,%eax + ASM_CLAC +- ret ++ RET + + .section .fixup,"ax" + 11: leal (%rdx,%rcx,8),%ecx +@@ -182,7 +182,7 @@ ENTRY(copy_user_enhanced_fast_string) + movsb + xorl %eax,%eax + ASM_CLAC +- ret ++ RET + + .section .fixup,"ax" + 12: movl %ecx,%edx /* ecx is zerorest also */ +@@ -299,7 +299,7 @@ ENTRY(__copy_user_nocache) + xorl %eax,%eax + ASM_CLAC + sfence +- ret ++ RET + + .section .fixup,"ax" + .L_fixup_4x8b_copy: +--- a/arch/x86/lib/getuser.S ++++ b/arch/x86/lib/getuser.S +@@ -45,7 +45,7 @@ ENTRY(__get_user_1) + 1: movzbl (%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC +- ret ++ RET + ENDPROC(__get_user_1) + EXPORT_SYMBOL(__get_user_1) + +@@ -61,7 +61,7 @@ ENTRY(__get_user_2) + 2: movzwl -1(%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC +- ret ++ RET + ENDPROC(__get_user_2) + EXPORT_SYMBOL(__get_user_2) + +@@ -77,7 +77,7 @@ ENTRY(__get_user_4) + 3: movl -3(%_ASM_AX),%edx + xor %eax,%eax + ASM_CLAC +- ret ++ RET + ENDPROC(__get_user_4) + EXPORT_SYMBOL(__get_user_4) + +@@ -94,7 +94,7 @@ ENTRY(__get_user_8) + 4: movq -7(%_ASM_AX),%rdx + xor %eax,%eax + ASM_CLAC +- ret ++ RET + #else + add $7,%_ASM_AX + jc bad_get_user_8 +@@ -108,7 +108,7 @@ ENTRY(__get_user_8) + 5: movl -3(%_ASM_AX),%ecx + xor %eax,%eax + ASM_CLAC +- ret ++ RET + #endif + ENDPROC(__get_user_8) + EXPORT_SYMBOL(__get_user_8) +@@ -118,7 +118,7 @@ bad_get_user: + xor %edx,%edx + mov $(-EFAULT),%_ASM_AX + ASM_CLAC +- ret ++ RET + END(bad_get_user) + + #ifdef CONFIG_X86_32 +@@ -127,7 +127,7 @@ bad_get_user_8: + xor %ecx,%ecx + mov $(-EFAULT),%_ASM_AX + ASM_CLAC +- ret ++ RET + END(bad_get_user_8) + #endif + +--- a/arch/x86/lib/hweight.S ++++ b/arch/x86/lib/hweight.S +@@ -31,7 +31,7 @@ ENTRY(__sw_hweight32) + imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101 + shrl $24, %eax # w = w_tmp >> 24 + __ASM_SIZE(pop,) %__ASM_REG(dx) +- ret ++ RET + ENDPROC(__sw_hweight32) + EXPORT_SYMBOL(__sw_hweight32) + +@@ -64,7 +64,7 @@ ENTRY(__sw_hweight64) + + popq %rdx + popq %rdi +- ret ++ RET + #else /* CONFIG_X86_32 */ + /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */ + pushl %ecx +@@ -76,7 +76,7 @@ ENTRY(__sw_hweight64) + addl %ecx, %eax # result + + popl %ecx +- ret ++ RET + #endif + ENDPROC(__sw_hweight64) + EXPORT_SYMBOL(__sw_hweight64) +--- a/arch/x86/lib/memcpy_64.S ++++ b/arch/x86/lib/memcpy_64.S +@@ -39,7 +39,7 @@ ENTRY(memcpy) + rep movsq + movl %edx, %ecx + rep movsb +- ret ++ RET + ENDPROC(memcpy) + ENDPROC(__memcpy) + EXPORT_SYMBOL(memcpy) +@@ -53,7 +53,7 @@ ENTRY(memcpy_erms) + movq %rdi, %rax + movq %rdx, %rcx + rep movsb +- ret ++ RET + ENDPROC(memcpy_erms) + + ENTRY(memcpy_orig) +@@ -137,7 +137,7 @@ ENTRY(memcpy_orig) + movq %r9, 1*8(%rdi) + movq %r10, -2*8(%rdi, %rdx) + movq %r11, -1*8(%rdi, %rdx) +- retq ++ RET + .p2align 4 + .Lless_16bytes: + cmpl $8, %edx +@@ -149,7 +149,7 @@ ENTRY(memcpy_orig) + movq -1*8(%rsi, %rdx), %r9 + movq %r8, 0*8(%rdi) + movq %r9, -1*8(%rdi, %rdx) +- retq ++ RET + .p2align 4 + .Lless_8bytes: + cmpl $4, %edx +@@ -162,7 +162,7 @@ ENTRY(memcpy_orig) + movl -4(%rsi, %rdx), %r8d + movl %ecx, (%rdi) + movl %r8d, -4(%rdi, %rdx) +- retq ++ RET + .p2align 4 + .Lless_3bytes: + subl $1, %edx +@@ -180,7 +180,7 @@ ENTRY(memcpy_orig) + movb %cl, (%rdi) + + .Lend: +- retq ++ RET + ENDPROC(memcpy_orig) + + #ifndef CONFIG_UML +--- 
a/arch/x86/lib/memmove_64.S ++++ b/arch/x86/lib/memmove_64.S +@@ -42,7 +42,7 @@ ENTRY(__memmove) + jg 2f + + .Lmemmove_begin_forward: +- ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS ++ ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; RET", X86_FEATURE_ERMS + + /* + * movsq instruction have many startup latency +@@ -205,7 +205,7 @@ ENTRY(__memmove) + movb (%rsi), %r11b + movb %r11b, (%rdi) + 13: +- retq ++ RET + ENDPROC(__memmove) + ENDPROC(memmove) + EXPORT_SYMBOL(__memmove) +--- a/arch/x86/lib/memset_64.S ++++ b/arch/x86/lib/memset_64.S +@@ -41,7 +41,7 @@ ENTRY(__memset) + movl %edx,%ecx + rep stosb + movq %r9,%rax +- ret ++ RET + ENDPROC(memset) + ENDPROC(__memset) + EXPORT_SYMBOL(memset) +@@ -64,7 +64,7 @@ ENTRY(memset_erms) + movq %rdx,%rcx + rep stosb + movq %r9,%rax +- ret ++ RET + ENDPROC(memset_erms) + + ENTRY(memset_orig) +@@ -126,7 +126,7 @@ ENTRY(memset_orig) + + .Lende: + movq %r10,%rax +- ret ++ RET + + .Lbad_alignment: + cmpq $7,%rdx +--- a/arch/x86/lib/msr-reg.S ++++ b/arch/x86/lib/msr-reg.S +@@ -34,7 +34,7 @@ ENTRY(\op\()_safe_regs) + movl %edi, 28(%r10) + popq %r12 + popq %rbx +- ret ++ RET + 3: + movl $-EIO, %r11d + jmp 2b +@@ -76,7 +76,7 @@ ENTRY(\op\()_safe_regs) + popl %esi + popl %ebp + popl %ebx +- ret ++ RET + 3: + movl $-EIO, 4(%esp) + jmp 2b +--- a/arch/x86/math-emu/div_Xsig.S ++++ b/arch/x86/math-emu/div_Xsig.S +@@ -340,7 +340,7 @@ L_exit: + popl %esi + + leave +- ret ++ RET + + + #ifdef PARANOID +--- a/arch/x86/math-emu/mul_Xsig.S ++++ b/arch/x86/math-emu/mul_Xsig.S +@@ -61,7 +61,7 @@ ENTRY(mul32_Xsig) + + popl %esi + leave +- ret ++ RET + + + ENTRY(mul64_Xsig) +@@ -113,7 +113,7 @@ ENTRY(mul64_Xsig) + + popl %esi + leave +- ret ++ RET + + + +@@ -172,5 +172,5 @@ ENTRY(mul_Xsig_Xsig) + + popl %esi + leave +- ret ++ RET + +--- a/arch/x86/math-emu/polynom_Xsig.S ++++ b/arch/x86/math-emu/polynom_Xsig.S +@@ -132,4 +132,4 @@ L_accum_done: + popl %edi + popl %esi + leave +- ret ++ RET +--- a/arch/x86/math-emu/reg_norm.S ++++ b/arch/x86/math-emu/reg_norm.S +@@ -71,7 +71,7 @@ L_exit_valid: + L_exit: + popl %ebx + leave +- ret ++ RET + + + L_zero: +@@ -136,7 +136,7 @@ L_exit_nuo_valid: + + popl %ebx + leave +- ret ++ RET + + L_exit_nuo_zero: + movl TAG_Zero,%eax +@@ -144,4 +144,4 @@ L_exit_nuo_zero: + + popl %ebx + leave +- ret ++ RET +--- a/arch/x86/math-emu/reg_round.S ++++ b/arch/x86/math-emu/reg_round.S +@@ -436,7 +436,7 @@ fpu_Arith_exit: + popl %edi + popl %esi + leave +- ret ++ RET + + + /* +--- a/arch/x86/math-emu/reg_u_div.S ++++ b/arch/x86/math-emu/reg_u_div.S +@@ -467,5 +467,5 @@ L_exit: + popl %esi + + leave +- ret ++ RET + #endif /* PARANOID */ +--- a/arch/x86/math-emu/reg_u_mul.S ++++ b/arch/x86/math-emu/reg_u_mul.S +@@ -143,6 +143,6 @@ L_exit: + popl %edi + popl %esi + leave +- ret ++ RET + #endif /* PARANOID */ + +--- a/arch/x86/math-emu/reg_u_sub.S ++++ b/arch/x86/math-emu/reg_u_sub.S +@@ -269,4 +269,4 @@ L_exit: + popl %edi + popl %esi + leave +- ret ++ RET +--- a/arch/x86/math-emu/round_Xsig.S ++++ b/arch/x86/math-emu/round_Xsig.S +@@ -77,7 +77,7 @@ L_exit: + popl %esi + popl %ebx + leave +- ret ++ RET + + + +@@ -137,5 +137,5 @@ L_n_exit: + popl %esi + popl %ebx + leave +- ret ++ RET + +--- a/arch/x86/math-emu/shr_Xsig.S ++++ b/arch/x86/math-emu/shr_Xsig.S +@@ -44,7 +44,7 @@ ENTRY(shr_Xsig) + popl %ebx + popl %esi + leave +- ret ++ RET + + L_more_than_31: + cmpl $64,%ecx +@@ -60,7 +60,7 @@ L_more_than_31: + movl $0,8(%esi) + popl %esi + leave +- ret ++ RET + + L_more_than_63: + cmpl $96,%ecx +@@ -75,7 +75,7 @@ L_more_than_63: + movl 
%edx,8(%esi) + popl %esi + leave +- ret ++ RET + + L_more_than_95: + xorl %eax,%eax +@@ -84,4 +84,4 @@ L_more_than_95: + movl %eax,8(%esi) + popl %esi + leave +- ret ++ RET +--- a/arch/x86/math-emu/wm_shrx.S ++++ b/arch/x86/math-emu/wm_shrx.S +@@ -54,7 +54,7 @@ ENTRY(FPU_shrx) + popl %ebx + popl %esi + leave +- ret ++ RET + + L_more_than_31: + cmpl $64,%ecx +@@ -69,7 +69,7 @@ L_more_than_31: + movl $0,4(%esi) + popl %esi + leave +- ret ++ RET + + L_more_than_63: + cmpl $96,%ecx +@@ -83,7 +83,7 @@ L_more_than_63: + movl %edx,4(%esi) + popl %esi + leave +- ret ++ RET + + L_more_than_95: + xorl %eax,%eax +@@ -91,7 +91,7 @@ L_more_than_95: + movl %eax,4(%esi) + popl %esi + leave +- ret ++ RET + + + /*---------------------------------------------------------------------------+ +@@ -144,7 +144,7 @@ ENTRY(FPU_shrxs) + popl %ebx + popl %esi + leave +- ret ++ RET + + /* Shift by [0..31] bits */ + Ls_less_than_32: +@@ -161,7 +161,7 @@ Ls_less_than_32: + popl %ebx + popl %esi + leave +- ret ++ RET + + /* Shift by [64..95] bits */ + Ls_more_than_63: +@@ -187,7 +187,7 @@ Ls_more_than_63: + popl %ebx + popl %esi + leave +- ret ++ RET + + Ls_more_than_95: + /* Shift by [96..inf) bits */ +@@ -201,4 +201,4 @@ Ls_more_than_95: + popl %ebx + popl %esi + leave +- ret ++ RET +--- a/arch/x86/mm/mem_encrypt_boot.S ++++ b/arch/x86/mm/mem_encrypt_boot.S +@@ -68,7 +68,7 @@ ENTRY(sme_encrypt_execute) + movq %rbp, %rsp /* Restore original stack pointer */ + pop %rbp + +- ret ++ RET + ENDPROC(sme_encrypt_execute) + + ENTRY(__enc_copy) +@@ -154,6 +154,6 @@ ENTRY(__enc_copy) + pop %r12 + pop %r15 + +- ret ++ RET + .L__enc_copy_end: + ENDPROC(__enc_copy) +--- a/arch/x86/platform/efi/efi_stub_32.S ++++ b/arch/x86/platform/efi/efi_stub_32.S +@@ -112,7 +112,7 @@ ENTRY(efi_call_phys) + leal saved_return_addr, %edx + movl (%edx), %ecx + pushl %ecx +- ret ++ RET + ENDPROC(efi_call_phys) + .previous + +--- a/arch/x86/platform/efi/efi_stub_64.S ++++ b/arch/x86/platform/efi/efi_stub_64.S +@@ -53,5 +53,5 @@ ENTRY(efi_call) + addq $48, %rsp + RESTORE_XMM + popq %rbp +- ret ++ RET + ENDPROC(efi_call) +--- a/arch/x86/platform/efi/efi_thunk_64.S ++++ b/arch/x86/platform/efi/efi_thunk_64.S +@@ -58,7 +58,7 @@ ENTRY(efi64_thunk) + movq efi_saved_sp(%rip), %rsp + pop %rbx + pop %rbp +- retq ++ RET + ENDPROC(efi64_thunk) + + /* +--- a/arch/x86/platform/olpc/xo1-wakeup.S ++++ b/arch/x86/platform/olpc/xo1-wakeup.S +@@ -76,7 +76,7 @@ save_registers: + pushfl + popl saved_context_eflags + +- ret ++ RET + + restore_registers: + movl saved_context_ebp, %ebp +@@ -87,7 +87,7 @@ restore_registers: + pushl saved_context_eflags + popfl + +- ret ++ RET + + ENTRY(do_olpc_suspend_lowlevel) + call save_processor_state +@@ -108,7 +108,7 @@ ret_point: + + call restore_registers + call restore_processor_state +- ret ++ RET + + .data + saved_gdt: .long 0,0 +--- a/arch/x86/power/hibernate_asm_64.S ++++ b/arch/x86/power/hibernate_asm_64.S +@@ -50,7 +50,7 @@ ENTRY(swsusp_arch_suspend) + FRAME_BEGIN + call swsusp_save + FRAME_END +- ret ++ RET + ENDPROC(swsusp_arch_suspend) + + ENTRY(restore_image) +@@ -142,5 +142,5 @@ ENTRY(restore_registers) + /* tell the hibernation core that we've just restored the memory */ + movq %rax, in_suspend(%rip) + +- ret ++ RET + ENDPROC(restore_registers) +--- a/arch/x86/um/checksum_32.S ++++ b/arch/x86/um/checksum_32.S +@@ -114,7 +114,7 @@ csum_partial: + 7: + popl %ebx + popl %esi +- ret ++ RET + + #else + +@@ -212,7 +212,7 @@ csum_partial: + 80: + popl %ebx + popl %esi +- ret ++ RET + + #endif + EXPORT_SYMBOL(csum_partial) +--- 
a/arch/x86/um/setjmp_32.S ++++ b/arch/x86/um/setjmp_32.S +@@ -33,7 +33,7 @@ setjmp: + movl %esi,12(%edx) + movl %edi,16(%edx) + movl %ecx,20(%edx) # Return address +- ret ++ RET + + .size setjmp,.-setjmp + +--- a/arch/x86/um/setjmp_64.S ++++ b/arch/x86/um/setjmp_64.S +@@ -32,7 +32,7 @@ setjmp: + movq %r14,40(%rdi) + movq %r15,48(%rdi) + movq %rsi,56(%rdi) # Return address +- ret ++ RET + + .size setjmp,.-setjmp + diff --git a/patches.suse/x86-prepare-inline-asm-for-straight-line-speculation.patch b/patches.suse/x86-prepare-inline-asm-for-straight-line-speculation.patch new file mode 100644 index 0000000..04ede02 --- /dev/null +++ b/patches.suse/x86-prepare-inline-asm-for-straight-line-speculation.patch @@ -0,0 +1,111 @@ +From: Peter Zijlstra +Date: Sat, 4 Dec 2021 14:43:41 +0100 +Subject: x86: Prepare inline-asm for straight-line-speculation +Git-commit: b17c2baa305cccbd16bafa289fd743cc2db77966 +Patch-mainline: v5.17-rc1 +References: bsc#1201050 CVE-2021-26341 + +Replace all ret/retq instructions with ASM_RET in preparation of +making it more than a single instruction. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Link: https://lore.kernel.org/r/20211204134907.964635458@infradead.org +--- + arch/x86/include/asm/linkage.h | 4 ++++ + arch/x86/include/asm/paravirt.h | 2 +- + arch/x86/include/asm/qspinlock_paravirt.h | 4 ++-- + arch/x86/kernel/alternative.c | 2 +- + arch/x86/kernel/kprobes/core.c | 2 +- + arch/x86/kernel/paravirt.c | 2 +- + arch/x86/kvm/emulate.c | 2 +- + 7 files changed, 11 insertions(+), 7 deletions(-) + +--- a/arch/x86/include/asm/linkage.h ++++ b/arch/x86/include/asm/linkage.h +@@ -21,6 +21,10 @@ + #define __ALIGN_STR __stringify(__ALIGN) + #endif + ++#else /* __ASSEMBLY__ */ ++ ++#define ASM_RET "ret\n\t" ++ + #endif /* __ASSEMBLY__ */ + + #endif /* _ASM_X86_LINKAGE_H */ +--- a/arch/x86/include/asm/paravirt.h ++++ b/arch/x86/include/asm/paravirt.h +@@ -793,7 +793,7 @@ static __always_inline bool pv_vcpu_is_p + "call " #func ";" \ + PV_RESTORE_ALL_CALLER_REGS \ + FRAME_END \ +- "ret;" \ ++ ASM_RET \ + ".popsection") + + /* Get a reference to a callee-save function */ +--- a/arch/x86/include/asm/qspinlock_paravirt.h ++++ b/arch/x86/include/asm/qspinlock_paravirt.h +@@ -47,7 +47,7 @@ asm (".pushsection .text;" + "jne .slowpath;" + "pop %rdx;" + FRAME_END +- "ret;" ++ ASM_RET + ".slowpath: " + "push %rsi;" + "movzbl %al,%esi;" +@@ -55,7 +55,7 @@ asm (".pushsection .text;" + "pop %rsi;" + "pop %rdx;" + FRAME_END +- "ret;" ++ ASM_RET + ".size " PV_UNLOCK ", .-" PV_UNLOCK ";" + ".popsection"); + +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -635,7 +635,7 @@ asm ( + " .type int3_magic, @function\n" + "int3_magic:\n" + " movl $1, (%" _ASM_ARG1 ")\n" +-" ret\n" ++ ASM_RET + " .size int3_magic, .-int3_magic\n" + " .popsection\n" + ); +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -736,7 +736,7 @@ asm( + RESTORE_REGS_STRING + " popf\n" + #endif +- " ret\n" ++ ASM_RET + ".size kretprobe_trampoline, .-kretprobe_trampoline\n" + ); + NOKPROBE_SYMBOL(kretprobe_trampoline); +--- a/arch/x86/kernel/paravirt.c ++++ b/arch/x86/kernel/paravirt.c +@@ -50,7 +50,7 @@ extern void _paravirt_nop(void); + asm (".pushsection .entry.text, \"ax\"\n" + ".global _paravirt_nop\n" + "_paravirt_nop:\n\t" +- "ret\n\t" ++ ASM_RET + ".size _paravirt_nop, . 
- _paravirt_nop\n\t" + ".type _paravirt_nop, @function\n\t" + ".popsection"); +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -429,7 +429,7 @@ static int fastop(struct x86_emulate_ctx + + asm(".pushsection .fixup, \"ax\"\n" + ".global kvm_fastop_exception \n" +- "kvm_fastop_exception: xor %esi, %esi; ret\n" ++ "kvm_fastop_exception: xor %esi, %esi; " ASM_RET + ".popsection"); + + FOP_START(setcc) diff --git a/patches.suse/x86-retpoline-Use-mfunction-return.patch b/patches.suse/x86-retpoline-Use-mfunction-return.patch new file mode 100644 index 0000000..33c5451 --- /dev/null +++ b/patches.suse/x86-retpoline-Use-mfunction-return.patch @@ -0,0 +1,85 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:36 +0200 +Subject: x86/retpoline: Use -mfunction-return +Git-commit: 0b53c374b9eff2255a386f1f1cfb9a928e52a5ae +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +Patch-mainline: Queued in tip for v5.19 +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Utilize -mfunction-return=thunk-extern when available to have the +compiler replace RET instructions with direct JMPs to the symbol +__x86_return_thunk. This does not affect assembler (.S) sources, only C +sources. + +-mfunction-return=thunk-extern has been available since gcc 7.3 and +clang 15. + +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Nick Desaulniers +Reviewed-by: Josh Poimboeuf +Tested-by: Nick Desaulniers +Signed-off-by: Borislav Petkov +--- + arch/x86/Makefile | 6 +++++- + arch/x86/entry/vdso/Makefile | 1 + + arch/x86/include/asm/nospec-branch.h | 2 ++ + arch/x86/lib/retpoline.S | 10 ++++++++++ + 4 files changed, 18 insertions(+), 1 deletion(-) + +--- a/arch/x86/entry/vdso/Makefile ++++ b/arch/x86/entry/vdso/Makefile +@@ -66,6 +66,7 @@ endef + $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE + $(call if_changed,vdso2c) + ++RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register) + # + # Don't omit frame pointers for ease of userspace debugging, but do + # optimize sibling calls. +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -167,6 +167,8 @@ + #ifdef CONFIG_RETPOLINE + #ifdef CONFIG_X86_64 + ++extern void __x86_return_thunk(void); ++ + /* + * Inline asm uses the %V modifier which is only in newer GCC + * which is ensured when CONFIG_RETPOLINE is defined. +--- a/arch/x86/lib/retpoline.S ++++ b/arch/x86/lib/retpoline.S +@@ -49,5 +49,15 @@ GENERATE_THUNK(r14) + GENERATE_THUNK(r15) + #endif + ++/* ++ * This function name is magical and is used by -mfunction-return=thunk-extern ++ * for the compiler to generate JMPs to it. 
++ */ ++ENTRY(__x86_return_thunk) ++ ret ++ int3 ++ENDPROC(__x86_return_thunk) ++ ++__EXPORT_THUNK(__x86_return_thunk) + #endif /* CONFIG_RETPOLINE */ + +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -230,9 +230,13 @@ KBUILD_AFLAGS += $(mflags-y) + # Avoid indirect branches in kernel to deal with Spectre + ifdef CONFIG_RETPOLINE + RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) +- KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE ++ RETPOLINE_CFLAGS += $(call cc-option,-mfunction-return=thunk-extern) ++ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE + endif + ++# for vdso Makefile to exclude ++export RETPOLINE_CFLAGS ++ + ifdef CONFIG_SLS + KBUILD_CFLAGS += $(call cc-option,-mharden-sls=all) + endif diff --git a/patches.suse/x86-sev-Avoid-using-__x86_return_thunk.patch b/patches.suse/x86-sev-Avoid-using-__x86_return_thunk.patch new file mode 100644 index 0000000..eac41e7 --- /dev/null +++ b/patches.suse/x86-sev-Avoid-using-__x86_return_thunk.patch @@ -0,0 +1,44 @@ +From: Kim Phillips +Date: Tue, 14 Jun 2022 23:15:44 +0200 +Subject: x86/sev: Avoid using __x86_return_thunk +Git-commit: 0ee9073000e8791f8b134a8ded31bcc767f7f232 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Specifically, it's because __enc_copy() encrypts the kernel after +being relocated outside the kernel in sme_encrypt_execute(), and the +RET macro's jmp offset isn't amended prior to execution. + +Signed-off-by: Kim Phillips +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/mm/mem_encrypt_boot.S | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/arch/x86/mm/mem_encrypt_boot.S ++++ b/arch/x86/mm/mem_encrypt_boot.S +@@ -65,7 +65,9 @@ ENTRY(sme_encrypt_execute) + movq %rbp, %rsp /* Restore original stack pointer */ + pop %rbp + +- RET ++ /* Offset to __x86_return_thunk would be wrong here */ ++ ret ++ int3 + ENDPROC(sme_encrypt_execute) + + ENTRY(__enc_copy) +@@ -151,6 +153,8 @@ ENTRY(__enc_copy) + pop %r12 + pop %r15 + +- RET ++ /* Offset to __x86_return_thunk would be wrong here */ ++ ret ++ int3 + .L__enc_copy_end: + ENDPROC(__enc_copy) diff --git a/patches.suse/x86-speculation-Add-spectre_v2-ibrs-option-to-support-Kern.patch b/patches.suse/x86-speculation-Add-spectre_v2-ibrs-option-to-support-Kern.patch new file mode 100644 index 0000000..60152fa --- /dev/null +++ b/patches.suse/x86-speculation-Add-spectre_v2-ibrs-option-to-support-Kern.patch @@ -0,0 +1,148 @@ +From: Pawan Gupta +Date: Tue, 14 Jun 2022 23:15:55 +0200 +Subject: x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS +Git-commit: 7c693f54c873691a4b7da05c7e0f74e67745d144 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +Extend spectre_v2= boot option with Kernel IBRS. 
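+
+[ Not from the upstream commit message -- a usage sketch, assuming the
+  option string matches the Subject: with this change the kernel can be
+  booted with
+
+    spectre_v2=ibrs
+
+  on the command line; per the checks added below, that selection is only
+  honoured on Intel CPUs that enumerate IBRS and not when running as a
+  Xen PV guest, otherwise it falls back to the auto selection. ]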
+ + [jpoimboe: no STIBP with IBRS] + +Signed-off-by: Pawan Gupta +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 59 ++++++++++++++++++++++++++++++++++----------- + 1 file changed, 45 insertions(+), 14 deletions(-) + +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1117,11 +1117,12 @@ spectre_v2_parse_user_cmdline(enum spect + return SPECTRE_V2_USER_CMD_AUTO; + } + +-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) ++static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) + { +- return (mode == SPECTRE_V2_EIBRS || +- mode == SPECTRE_V2_EIBRS_RETPOLINE || +- mode == SPECTRE_V2_EIBRS_LFENCE); ++ return mode == SPECTRE_V2_IBRS || ++ mode == SPECTRE_V2_EIBRS || ++ mode == SPECTRE_V2_EIBRS_RETPOLINE || ++ mode == SPECTRE_V2_EIBRS_LFENCE; + } + + static void __init +@@ -1186,12 +1187,12 @@ spectre_v2_user_select_mitigation(enum s + } + + /* +- * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not +- * required. ++ * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible, ++ * STIBP is not required. + */ + if (!boot_cpu_has(X86_FEATURE_STIBP) || + !smt_possible || +- spectre_v2_in_eibrs_mode(spectre_v2_enabled)) ++ spectre_v2_in_ibrs_mode(spectre_v2_enabled)) + return; + + spectre_v2_user_stibp = mode; +@@ -1288,6 +1289,24 @@ static enum spectre_v2_mitigation_cmd __ + return SPECTRE_V2_CMD_AUTO; + } + ++ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { ++ pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", ++ mitigation_options[i].option); ++ return SPECTRE_V2_CMD_AUTO; ++ } ++ ++ if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { ++ pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", ++ mitigation_options[i].option); ++ return SPECTRE_V2_CMD_AUTO; ++ } ++ ++ if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) { ++ pr_err("%s selected but running as XenPV guest. 
Switching to AUTO select\n", ++ mitigation_options[i].option); ++ return SPECTRE_V2_CMD_AUTO; ++ } ++ + spec_v2_print_cond(mitigation_options[i].option, + mitigation_options[i].secure); + return cmd; +@@ -1335,6 +1354,14 @@ static void __init spectre_v2_select_mit + break; + } + ++ if (boot_cpu_has_bug(X86_BUG_RETBLEED) && ++ retbleed_cmd != RETBLEED_CMD_OFF && ++ boot_cpu_has(X86_FEATURE_IBRS) && ++ boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { ++ mode = SPECTRE_V2_IBRS; ++ break; ++ } ++ + mode = spectre_v2_select_retpoline(); + break; + +@@ -1367,7 +1394,7 @@ static void __init spectre_v2_select_mit + if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + +- if (spectre_v2_in_eibrs_mode(mode)) { ++ if (spectre_v2_in_ibrs_mode(mode)) { + /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + write_spec_ctrl_current(x86_spec_ctrl_base, true); +@@ -1378,6 +1405,10 @@ static void __init spectre_v2_select_mit + case SPECTRE_V2_EIBRS: + break; + ++ case SPECTRE_V2_IBRS: ++ setup_force_cpu_cap(X86_FEATURE_USE_IBRS); ++ break; ++ + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_EIBRS_LFENCE: + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); +@@ -1408,17 +1439,17 @@ specv2_set_mode: + pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); + + /* +- * Retpoline means the kernel is safe because it has no indirect +- * branches. Enhanced IBRS protects firmware too, so, enable restricted +- * speculation around firmware calls only when Enhanced IBRS isn't +- * supported. ++ * Retpoline protects the kernel, but doesn't protect firmware. IBRS ++ * and Enhanced IBRS protect firmware too, so enable IBRS around ++ * firmware calls only when IBRS / Enhanced IBRS aren't otherwise ++ * enabled. + * + * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because + * the user might select retpoline on the kernel command line and if + * the CPU supports Enhanced IBRS, kernel might un-intentionally not + * enable IBRS around firmware calls. + */ +- if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) { ++ if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { + setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); + pr_info("Enabling Restricted Speculation for firmware calls\n"); + } +@@ -1972,7 +2003,7 @@ static ssize_t mmio_stale_data_show_stat + + static char *stibp_state(void) + { +- if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) ++ if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) + return ""; + + switch (spectre_v2_user_stibp) { diff --git a/patches.suse/x86-speculation-Fix-SPEC_CTRL-write-on-SMT-state-change.patch b/patches.suse/x86-speculation-Fix-SPEC_CTRL-write-on-SMT-state-change.patch new file mode 100644 index 0000000..5ea97f5 --- /dev/null +++ b/patches.suse/x86-speculation-Fix-SPEC_CTRL-write-on-SMT-state-change.patch @@ -0,0 +1,33 @@ +From: Josh Poimboeuf +Date: Tue, 14 Jun 2022 23:16:07 +0200 +Subject: x86/speculation: Fix SPEC_CTRL write on SMT state change +Git-commit: 56aa4d221f1ee2c3a49b45b800778ec6e0ab73c5 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +If the SMT state changes, SSBD might get accidentally disabled. Fix +that. 
+ +Signed-off-by: Josh Poimboeuf +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 108bd74289c5..9b80059c4e02 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1458,7 +1458,8 @@ static void __init spectre_v2_select_mitigation(void) + + static void update_stibp_msr(void * __unused) + { +- write_spec_ctrl_current(x86_spec_ctrl_base, true); ++ u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); ++ write_spec_ctrl_current(val, true); + } + + /* Update x86_spec_ctrl_base in case SMT state changed. */ + diff --git a/patches.suse/x86-speculation-Fix-firmware-entry-SPEC_CTRL-handling.patch b/patches.suse/x86-speculation-Fix-firmware-entry-SPEC_CTRL-handling.patch new file mode 100644 index 0000000..c1b5e65 --- /dev/null +++ b/patches.suse/x86-speculation-Fix-firmware-entry-SPEC_CTRL-handling.patch @@ -0,0 +1,45 @@ +From: Josh Poimboeuf +Date: Tue, 14 Jun 2022 23:16:06 +0200 +Subject: x86/speculation: Fix firmware entry SPEC_CTRL handling +Git-commit: e6aa13622ea8283cc699cac5d018cc40a2ba2010 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +The firmware entry code may accidentally clear STIBP or SSBD. Fix that. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +--- + arch/x86/include/asm/nospec-branch.h | 10 ++++------ + 1 file changed, 4 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 08b03c12e6c2..dee9ef77af13 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -285,18 +285,16 @@ extern u64 spec_ctrl_current(void); + */ + #define firmware_restrict_branch_speculation_start() \ + do { \ +- u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \ +- \ + preempt_disable(); \ +- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ ++ spec_ctrl_current() | SPEC_CTRL_IBRS, \ + X86_FEATURE_USE_IBRS_FW); \ + } while (0) + + #define firmware_restrict_branch_speculation_end() \ + do { \ +- u64 val = x86_spec_ctrl_base; \ +- \ +- alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ ++ spec_ctrl_current(), \ + X86_FEATURE_USE_IBRS_FW); \ + preempt_enable(); \ + } while (0) + diff --git a/patches.suse/x86-speculation-Remove-x86_spec_ctrl_mask.patch b/patches.suse/x86-speculation-Remove-x86_spec_ctrl_mask.patch new file mode 100644 index 0000000..e3aad1e --- /dev/null +++ b/patches.suse/x86-speculation-Remove-x86_spec_ctrl_mask.patch @@ -0,0 +1,87 @@ +From: Josh Poimboeuf +Date: Fri, 17 Jun 2022 12:12:48 -0700 +Subject: x86/speculation: Remove x86_spec_ctrl_mask +Git-commit: acac5e98ef8d638a411cfa2ee676c87e1973f126 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +This mask has been made redundant by kvm_spec_ctrl_test_value(). And it +doesn't even work when MSR interception is disabled, as the guest can +just write to SPEC_CTRL directly. 
+ +Signed-off-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +Reviewed-by: Paolo Bonzini +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 31 +------------------------------ + 1 file changed, 1 insertion(+), 30 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index f884f9497666..efff8b9f1bbd 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -85,12 +85,6 @@ u64 spec_ctrl_current(void) + } + EXPORT_SYMBOL_GPL(spec_ctrl_current); + +-/* +- * The vendor and possibly platform specific bits which can be modified in +- * x86_spec_ctrl_base. +- */ +-static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; +- + /* + * AMD specific MSR info for Speculative Store Bypass control. + * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu(). +@@ -146,10 +140,6 @@ void __init check_bugs(void) + if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) + rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + +- /* Allow STIBP in MSR_SPEC_CTRL if supported */ +- if (boot_cpu_has(X86_FEATURE_STIBP)) +- x86_spec_ctrl_mask |= SPEC_CTRL_STIBP; +- + /* Select the proper CPU mitigations before patching alternatives: */ + spectre_v1_select_mitigation(); + spectre_v2_select_mitigation(); +@@ -208,19 +198,10 @@ void __init check_bugs(void) + void + x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) + { +- u64 msrval, guestval, hostval = spec_ctrl_current(); ++ u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current(); + struct thread_info *ti = current_thread_info(); + +- /* Is MSR_SPEC_CTRL implemented ? */ + if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) { +- /* +- * Restrict guest_spec_ctrl to supported values. Clear the +- * modifiable bits in the host base value and or the +- * modifiable bits from the guest value. +- */ +- guestval = hostval & ~x86_spec_ctrl_mask; +- guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; +- + if (hostval != guestval) { + msrval = setguest ? guestval : hostval; + wrmsrl(MSR_IA32_SPEC_CTRL, msrval); +@@ -1665,16 +1646,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void) + break; + } + +- /* +- * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper +- * bit in the mask to allow guests to use the mitigation even in the +- * case where the host does not enable it. +- */ +- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) { +- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD; +- } +- + /* + * We have three CPU feature flags that are in play here: + * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. + diff --git a/patches.suse/x86-speculation-Use-cached-host-SPEC_CTRL-value-for-guest-.patch b/patches.suse/x86-speculation-Use-cached-host-SPEC_CTRL-value-for-guest-.patch new file mode 100644 index 0000000..986cc12 --- /dev/null +++ b/patches.suse/x86-speculation-Use-cached-host-SPEC_CTRL-value-for-guest-.patch @@ -0,0 +1,56 @@ +From: Josh Poimboeuf +Date: Tue, 14 Jun 2022 23:16:08 +0200 +Subject: x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit +Git-commit: bbb69e8bee1bd882784947095ffb2bfe0f7c9470 +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +There's no need to recalculate the host value for every entry/exit. +Just use the cached value in spec_ctrl_current(). 
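+
+The idea, as a standalone sketch (the names mirror the per-CPU
+x86_spec_ctrl_current cache and its helpers from earlier patches in this
+series, but this is plain user-space C, not the kernel implementation):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Stand-in for the per-CPU x86_spec_ctrl_current variable. */
+  static uint64_t spec_ctrl_cache;
+
+  /* Cache the last value written so readers never need to recompute it. */
+  static void write_spec_ctrl(uint64_t val)
+  {
+      if (val == spec_ctrl_cache)
+          return;                 /* MSR already holds this value */
+      spec_ctrl_cache = val;
+      printf("wrmsr SPEC_CTRL <- %#llx\n", (unsigned long long)val);
+  }
+
+  /* What spec_ctrl_current() boils down to: read the cache. */
+  static uint64_t spec_ctrl_current_sketch(void)
+  {
+      return spec_ctrl_cache;
+  }
+
+  int main(void)
+  {
+      write_spec_ctrl(0x6);       /* context switch sets STIBP|SSBD */
+      /* vmentry/vmexit: no TIF recomputation, the host value is the cache. */
+      printf("hostval = %#llx\n",
+             (unsigned long long)spec_ctrl_current_sketch());
+      return 0;
+  }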
+ +Signed-off-by: Josh Poimboeuf +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +--- + arch/x86/kernel/cpu/bugs.c | 12 +----------- + 1 file changed, 1 insertion(+), 11 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 9b80059c4e02..f884f9497666 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -208,7 +208,7 @@ void __init check_bugs(void) + void + x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) + { +- u64 msrval, guestval, hostval = x86_spec_ctrl_base; ++ u64 msrval, guestval, hostval = spec_ctrl_current(); + struct thread_info *ti = current_thread_info(); + + /* Is MSR_SPEC_CTRL implemented ? */ +@@ -221,15 +221,6 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) + guestval = hostval & ~x86_spec_ctrl_mask; + guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; + +- /* SSBD controlled in MSR_SPEC_CTRL */ +- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || +- static_cpu_has(X86_FEATURE_AMD_SSBD)) +- hostval |= ssbd_tif_to_spec_ctrl(ti->flags); +- +- /* Conditional STIBP enabled? */ +- if (static_branch_unlikely(&switch_to_cond_stibp)) +- hostval |= stibp_tif_to_spec_ctrl(ti->flags); +- + if (hostval != guestval) { + msrval = setguest ? guestval : hostval; + wrmsrl(MSR_IA32_SPEC_CTRL, msrval); +@@ -1397,7 +1388,6 @@ static void __init spectre_v2_select_mitigation(void) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + + if (spectre_v2_in_ibrs_mode(mode)) { +- /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + write_spec_ctrl_current(x86_spec_ctrl_base, true); + } + diff --git a/patches.suse/x86-speculation-rename-retpoline_amd-to-retpoline_lfence.patch b/patches.suse/x86-speculation-rename-retpoline_amd-to-retpoline_lfence.patch index c89c13b..4c9187a 100644 --- a/patches.suse/x86-speculation-rename-retpoline_amd-to-retpoline_lfence.patch +++ b/patches.suse/x86-speculation-rename-retpoline_amd-to-retpoline_lfence.patch @@ -44,8 +44,8 @@ Reviewed-by: Thomas Gleixner ANNOTATE_NOSPEC_ALTERNATIVE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ -- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD -+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_LFENCE +- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg; int3), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg; int3), X86_FEATURE_RETPOLINE_LFENCE #else jmp *\reg #endif diff --git a/patches.suse/x86-vsyscall_emu-64-Don-t-use-RET-in-vsyscall-emulation.patch b/patches.suse/x86-vsyscall_emu-64-Don-t-use-RET-in-vsyscall-emulation.patch new file mode 100644 index 0000000..11ef0b5 --- /dev/null +++ b/patches.suse/x86-vsyscall_emu-64-Don-t-use-RET-in-vsyscall-emulation.patch @@ -0,0 +1,47 @@ +From: Peter Zijlstra +Date: Tue, 14 Jun 2022 23:15:43 +0200 +Subject: x86/vsyscall_emu/64: Don't use RET in vsyscall emulation +Git-commit: 15583e514eb16744b80be85dea0774ece153177d +Patch-mainline: Queued in tip for 5.19 +Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git +References: bsc#1199657 CVE-2022-29900 CVE-2022-29901 + +This is userspace code and doesn't play by the normal kernel rules. 
+ +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov +Reviewed-by: Josh Poimboeuf +Signed-off-by: Borislav Petkov +--- + arch/x86/entry/vsyscall/vsyscall_emu_64.S | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S +index 15e35159ebb6..ef2dd1827243 100644 +--- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S ++++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S +@@ -19,17 +19,20 @@ __vsyscall_page: + + mov $__NR_gettimeofday, %rax + syscall +- RET ++ ret ++ int3 + + .balign 1024, 0xcc + mov $__NR_time, %rax + syscall +- RET ++ ret ++ int3 + + .balign 1024, 0xcc + mov $__NR_getcpu, %rax + syscall +- RET ++ ret ++ int3 + + .balign 4096, 0xcc + + diff --git a/series.conf b/series.conf index b865d0b..53c879b 100644 --- a/series.conf +++ b/series.conf @@ -50328,7 +50328,6 @@ patches.suse/can-purge-socket-error-queue-on-sock-destruct.patch patches.suse/net-mlx5-Update-pci-error-handler-entries-and-comman.patch patches.suse/net-mlx5-Avoid-reloading-already-removed-devices.patch - patches.suse/net-mlx5e-Replace-reciprocal_scale-in-TX-select-queu.patch patches.suse/ibmvnic-Do-not-close-unopened-driver-during-reset.patch patches.suse/ibmvnic-Refresh-device-multicast-list-after-reset.patch patches.suse/ibmvnic-Fix-unchecked-return-codes-of-memory-allocat.patch @@ -54048,6 +54047,7 @@ patches.suse/s390-unwind-stop-gracefully-at-user-mode-pt_regs-in-irq-stack.patch patches.suse/s390-ftrace-save-traced-function-caller.patch patches.suse/tracing-have-the-histogram-compare-functions-convert-to-u64-first.patch + patches.suse/bonding-fix-bond_neigh_init.patch patches.suse/sctp-fully-initialize-v4-addr-in-some-functions.patch patches.suse/netfilter-ctnetlink-netns-exit-must-wait-for-callbac.patch patches.suse/netfilter-nf_queue-enqueue-skbs-with-NULL-dst.patch @@ -54438,6 +54438,7 @@ patches.suse/Revert-ath10k-fix-DMA-related-firmware-crashes-on-mu.patch patches.suse/ath10k-Correct-the-DMA-direction-for-management-tx-b.patch patches.suse/ar5523-Add-USB-ID-of-SMCWUSBT-G2-wireless-adapter.patch + patches.suse/bnxt_en-Remove-the-setting-of-dev_port.patch patches.suse/ALSA-control-remove-useless-assignment-in-.info-call.patch patches.suse/ALSA-usx2y-Adjust-indentation-in-snd_usX2Y_hwdep_dsp.patch patches.suse/ALSA-ctl-allow-TLV-read-operation-for-callback-type-.patch @@ -61060,6 +61061,10 @@ patches.suse/usb-ftdi-elan-fix-memory-leak-on-device-disconnect.patch patches.suse/USB-Fix-slab-out-of-bounds-Write-bug-in-usb_hcd_poll.patch patches.suse/USB-core-Fix-bug-in-resuming-hub-s-handling-of-wakeu.patch + patches.suse/x86-lib-atomic64_386_32-rename-things.patch + patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch + patches.suse/x86-prepare-inline-asm-for-straight-line-speculation.patch + patches.suse/x86-add-straight-line-speculation-mitigation.patch patches.suse/RDMA-bnxt_re-Scan-the-whole-bitmap-when-checking-if-.patch patches.suse/RDMA-hns-Validate-the-pkey-index.patch patches.suse/RDMA-core-Let-ib_find_gid-continue-search-even-after.patch @@ -61603,6 +61608,38 @@ patches.suse/x86-speculation-mmio-Reuse-SRBDS-mitigation-for-SBDS.patch patches.suse/KVM-x86-speculation-Disable-Fill-buffer-clear-within-guests.patch + # tip + patches.suse/x86-cpufeatures-Move-RETPOLINE-flags-to-word-11.patch + patches.suse/x86-retpoline-Use-mfunction-return.patch + patches.suse/x86-Undo-return-thunk-damage.patch + patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch + 
patches.suse/x86-vsyscall_emu-64-Don-t-use-RET-in-vsyscall-emulation.patch + patches.suse/x86-sev-Avoid-using-__x86_return_thunk.patch + patches.suse/x86-Use-return-thunk-in-asm-code.patch + patches.suse/x86-Add-magic-AMD-return-thunk.patch + patches.suse/x86-bugs-Report-AMD-retbleed-vulnerability.patch + patches.suse/x86-bugs-Add-AMD-retbleed-boot-parameter.patch + patches.suse/x86-bugs-Enable-STIBP-for-JMP2RET.patch + patches.suse/x86-bugs-Keep-a-per-CPU-IA32_SPEC_CTRL-value.patch + patches.suse/x86-entry-Add-kernel-IBRS-implementation.patch + patches.suse/x86-bugs-Optimize-SPEC_CTRL-MSR-writes.patch + patches.suse/x86-speculation-Add-spectre_v2-ibrs-option-to-support-Kern.patch + patches.suse/x86-bugs-Split-spectre_v2_select_mitigation-and-spectre_v2.patch + patches.suse/x86-bugs-Report-Intel-retbleed-vulnerability.patch + patches.suse/intel_idle-Disable-IBRS-during-long-idle.patch + patches.suse/x86-bugs-Add-retbleed-ibpb.patch + patches.suse/x86-bugs-Do-IBPB-fallback-check-only-once.patch + patches.suse/x86-cpu-amd-Add-Spectral-Chicken.patch + patches.suse/x86-speculation-Fix-firmware-entry-SPEC_CTRL-handling.patch + patches.suse/x86-speculation-Fix-SPEC_CTRL-write-on-SMT-state-change.patch + patches.suse/x86-speculation-Use-cached-host-SPEC_CTRL-value-for-guest-.patch + patches.suse/x86-speculation-Remove-x86_spec_ctrl_mask.patch + patches.suse/x86-common-Stamp-out-the-stepping-madness.patch + patches.suse/x86-cpu-amd-Enumerate-BTC_NO.patch + patches.suse/x86-bugs-Do-not-enable-IBPB-on-entry-when-IBPB-is-no.patch + patches.suse/CVE-Mitigation-for-CVE-2022-29900-and-CVE-2022-29901.patch + patches.suse/x86-kexec-Disable-RET-on-kexec.patch + ######################################################## # S/390 ########################################################