From 25ace36e1694545f145cf67844050cd2ddbb3fac Mon Sep 17 00:00:00 2001
From: Kernel Build Daemon
Date: Jul 31 2022 10:47:17 +0000
Subject: Merge branch 'users/jdelvare/SLE15-SP2-LTSS/for-next' into SLE15-SP2-LTSS

---

diff --git a/patches.suse/fsnotify-invalidate-dcache-before-IN_DELETE-event.patch b/patches.suse/fsnotify-invalidate-dcache-before-IN_DELETE-event.patch
new file mode 100644
index 0000000..434cece
--- /dev/null
+++ b/patches.suse/fsnotify-invalidate-dcache-before-IN_DELETE-event.patch
@@ -0,0 +1,173 @@
+From a37d9a17f099072fe4d3a9048b0321978707a918 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein
+Date: Thu, 20 Jan 2022 23:53:04 +0200
+Subject: [PATCH] fsnotify: invalidate dcache before IN_DELETE event
+Git-commit: a37d9a17f099072fe4d3a9048b0321978707a918
+Patch-mainline: v5.17-rc2
+References: bsc#1195478 bsc#1200905
+
+Apparently, there are some applications that use the IN_DELETE event as
+an invalidation mechanism and expect that if they try to open a file
+with the name reported with the delete event, it should not contain the
+content of the deleted file.
+
+Commit 49246466a989 ("fsnotify: move fsnotify_nameremove() hook out of
+d_delete()") moved the fsnotify delete hook before d_delete() so fsnotify
+will have access to a positive dentry.
+
+This allowed a race where opening the deleted file via cached dentry
+is now possible after receiving the IN_DELETE event.
+
+To fix the regression, create a new hook fsnotify_delete() that takes
+the unlinked inode as an argument and use a helper d_delete_notify() to
+pin the inode, so we can pass it to fsnotify_delete() after d_delete().
+
+Backporting hint: this regression is from v5.3. Although the patch will
+apply with only trivial conflicts to v5.4 and v5.10, it won't build,
+because the fsnotify_delete() implementation is different in each of
+those versions (see fsnotify_link()).
+
+A follow-up patch will fix the fsnotify_unlink/rmdir() calls in pseudo
+filesystems that do not need to call d_delete().
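+
+For illustration, a minimal sketch of the consumer pattern this ordering
+guarantee protects (hypothetical user-space code, not part of the upstream
+commit; the watched path and the omitted error handling are placeholders):
+
+	#include <sys/inotify.h>
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char buf[4096] __attribute__((aligned(8)));
+		int in = inotify_init1(IN_CLOEXEC);
+		int dfd = open("/tmp/watched", O_DIRECTORY | O_RDONLY);
+
+		inotify_add_watch(in, "/tmp/watched", IN_DELETE);
+		ssize_t n = read(in, buf, sizeof(buf));	/* blocks for an event */
+		struct inotify_event *ev = (struct inotify_event *)buf;
+		if (n > 0 && (ev->mask & IN_DELETE) && ev->len) {
+			/* With this fix, the unlinked name can no longer
+			 * resolve through a stale positive dentry, so this
+			 * open is expected to fail with ENOENT unless the
+			 * name was legitimately recreated. */
+			int fd = openat(dfd, ev->name, O_RDONLY);
+			printf("openat(%s) = %d\n", ev->name, fd);
+			if (fd >= 0)
+				close(fd);
+		}
+		return 0;
+	}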
+ +Link: https://lore.kernel.org/r/20220120215305.282577-1-amir73il@gmail.com +Reported-by: Ivan Delalande +Link: https://lore.kernel.org/linux-fsdevel/YeNyzoDM5hP5LtGW@visor/ +Fixes: 49246466a989 ("fsnotify: move fsnotify_nameremove() hook out of d_delete()") +Cc: stable@vger.kernel.org # v5.3+ +Signed-off-by: Amir Goldstein +Signed-off-by: Jan Kara +Acked-by: Jan Kara + +--- + fs/btrfs/ioctl.c | 6 +---- + fs/namei.c | 10 ++++----- + include/linux/fsnotify.h | 48 +++++++++++++++++++++++++++++++++++++++++------ + 3 files changed, 49 insertions(+), 15 deletions(-) + +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -2956,10 +2956,8 @@ static noinline int btrfs_ioctl_snap_des + inode_lock(inode); + err = btrfs_delete_subvolume(dir, dentry); + inode_unlock(inode); +- if (!err) { +- fsnotify_rmdir(dir, dentry); +- d_delete(dentry); +- } ++ if (!err) ++ d_delete_notify(dir, dentry); + + out_dput: + dput(dentry); +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -3895,13 +3895,12 @@ int vfs_rmdir(struct inode *dir, struct + dentry->d_inode->i_flags |= S_DEAD; + dont_mount(dentry); + detach_mounts(dentry); +- fsnotify_rmdir(dir, dentry); + + out: + inode_unlock(dentry->d_inode); + dput(dentry); + if (!error) +- d_delete(dentry); ++ d_delete_notify(dir, dentry); + return error; + } + EXPORT_SYMBOL(vfs_rmdir); +@@ -4014,7 +4013,6 @@ int vfs_unlink(struct inode *dir, struct + if (!error) { + dont_mount(dentry); + detach_mounts(dentry); +- fsnotify_unlink(dir, dentry); + } + } + } +@@ -4022,9 +4020,11 @@ out: + inode_unlock(target); + + /* We don't d_delete() NFS sillyrenamed files--they still exist. */ +- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { ++ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) { ++ fsnotify_unlink(dir, dentry); ++ } else if (!error) { + fsnotify_link_count(target); +- d_delete(dentry); ++ d_delete_notify(dir, dentry); + } + + return error; +--- a/include/linux/fsnotify.h ++++ b/include/linux/fsnotify.h +@@ -192,16 +192,52 @@ static inline void fsnotify_link(struct + } + + /* ++ * fsnotify_delete - @dentry was unlinked and unhashed ++ * ++ * Caller must make sure that dentry->d_name is stable. ++ * ++ * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode ++ * as this may be called after d_delete() and old_dentry may be negative. ++ */ ++static inline void fsnotify_delete(struct inode *dir, struct inode *inode, ++ struct dentry *dentry) ++{ ++ __u32 mask = FS_DELETE; ++ ++ if (S_ISDIR(inode->i_mode)) ++ mask |= FS_ISDIR; ++ ++ fsnotify(dir, mask, inode, FSNOTIFY_EVENT_INODE, &dentry->d_name, 0); ++} ++ ++/** ++ * d_delete_notify - delete a dentry and call fsnotify_delete() ++ * @dentry: The dentry to delete ++ * ++ * This helper is used to guaranty that the unlinked inode cannot be found ++ * by lookup of this name after fsnotify_delete() event has been delivered. ++ */ ++static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) ++{ ++ struct inode *inode = d_inode(dentry); ++ ++ ihold(inode); ++ d_delete(dentry); ++ fsnotify_delete(dir, inode, dentry); ++ iput(inode); ++} ++ ++/* + * fsnotify_unlink - 'name' was unlinked + * + * Caller must make sure that dentry->d_name is stable. 
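++ *
++ * (Illustrative note added in this series, not part of the mainline
++ * hunk: a negative dentry now makes the hook return early rather than
++ * only warn, because fsnotify_delete() below needs a valid
++ * d_inode(dentry).)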
+ */ + static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) + { +- /* Expected to be called before d_delete() */ +- WARN_ON_ONCE(d_is_negative(dentry)); ++ if (WARN_ON_ONCE(d_is_negative(dentry))) ++ return; + +- fsnotify_dirent(dir, dentry, FS_DELETE); ++ fsnotify_delete(dir, d_inode(dentry), dentry); + } + + /* +@@ -221,10 +257,10 @@ static inline void fsnotify_mkdir(struct + */ + static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) + { +- /* Expected to be called before d_delete() */ +- WARN_ON_ONCE(d_is_negative(dentry)); ++ if (WARN_ON_ONCE(d_is_negative(dentry))) ++ return; + +- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); ++ fsnotify_delete(dir, d_inode(dentry), dentry); + } + + /* diff --git a/patches.suse/kvm-emulate-do-not-adjust-size-of-fastop-and-setcc-subroutines.patch b/patches.suse/kvm-emulate-do-not-adjust-size-of-fastop-and-setcc-subroutines.patch new file mode 100644 index 0000000..4264105 --- /dev/null +++ b/patches.suse/kvm-emulate-do-not-adjust-size-of-fastop-and-setcc-subroutines.patch @@ -0,0 +1,63 @@ +From: Paolo Bonzini +Date: Fri, 15 Jul 2022 07:34:55 -0400 +Subject: KVM: emulate: do not adjust size of fastop and setcc subroutines +Git-commit: 79629181607e801c0b41b8790ac4ee2eb5d7bc3e +Patch-mainline: v5.19-rc7 +References: bsc#1201930 + +Instead of doing complicated calculations to find the size of the subroutines +(which are even more complicated because they need to be stringified into +an asm statement), just hardcode to 16. + +It is less dense for a few combinations of IBT/SLS/retbleed, but it has +the advantage of being really simple. + +Cc: stable@vger.kernel.org # 5.15.x: 84e7051c0bc1: x86/kvm: fix FASTOP_SIZE when return thunks are enabled +Cc: stable@vger.kernel.org +Suggested-by: Linus Torvalds +Signed-off-by: Paolo Bonzini +Acked-by: Borislav Petkov +--- + arch/x86/kvm/emulate.c | 15 +++++++-------- + 1 file changed, 7 insertions(+), 8 deletions(-) + +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -188,9 +188,6 @@ + #define X8(x...) X4(x), X4(x) + #define X16(x...) X8(x), X8(x) + +-#define NR_FASTOP (ilog2(sizeof(ulong)) + 1) +-#define FASTOP_SIZE 8 +- + /* + * fastop functions have a special calling convention: + * +@@ -206,8 +203,14 @@ + * + * fastop functions are declared as taking a never-defined fastop parameter, + * so they can't be called from C directly. ++ * ++ * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR ++ * and 1 for the straight line speculation INT3, leaves 7 bytes for the ++ * body of the function. Currently none is larger than 4. 
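++ *
++ * (Illustrative note added in this series, not part of the mainline
++ * comment: fastop() dispatches on a fixed stride, e.g.
++ *
++ *   fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
++ *
++ * so every stub must fit in, and be laid out on, exactly FASTOP_SIZE
++ * bytes no matter which of the IBT/SLS/retbleed paddings the .config
++ * enables.)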
+ */ + ++#define FASTOP_SIZE 16 ++ + struct fastop; + + struct opcode { +@@ -443,11 +446,7 @@ static int fastop(struct x86_emulate_ctx + * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE] + * INT3 [1 byte; CONFIG_SLS] + */ +-#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \ +- IS_ENABLED(CONFIG_SLS)) +-#define SETCC_LENGTH (3 + RET_LENGTH) +-#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1)) +-static_assert(SETCC_LENGTH <= SETCC_ALIGN); ++#define SETCC_ALIGN 16 + + /* Special case for SETcc - 1 instruction per cc */ + #define FOP_SETCC(op) \ diff --git a/patches.suse/kvm-emulate-fix-setcc-emulation-function-offsets-with-sls.patch b/patches.suse/kvm-emulate-fix-setcc-emulation-function-offsets-with-sls.patch new file mode 100644 index 0000000..54febad --- /dev/null +++ b/patches.suse/kvm-emulate-fix-setcc-emulation-function-offsets-with-sls.patch @@ -0,0 +1,107 @@ +From: Borislav Petkov +Date: Wed, 16 Mar 2022 22:05:52 +0100 +Subject: kvm/emulate: Fix SETcc emulation function offsets with SLS +Git-commit: fe83f5eae432ccc8e90082d6ed506d5233547473 +Patch-mainline: v5.17 +References: bsc#1201930 + +The commit in Fixes started adding INT3 after RETs as a mitigation +against straight-line speculation. + +The fastop SETcc implementation in kvm's insn emulator uses macro magic +to generate all possible SETcc functions and to jump to them when +emulating the respective instruction. + +However, it hardcodes the size and alignment of those functions to 4: a +three-byte SETcc insn and a single-byte RET. BUT, with SLS, there's an +INT3 that gets slapped after the RET, which brings the whole scheme out +of alignment: + + 15: 0f 90 c0 seto %al + 18: c3 ret + 19: cc int3 + 1a: 0f 1f 00 nopl (%rax) + 1d: 0f 91 c0 setno %al + 20: c3 ret + 21: cc int3 + 22: 0f 1f 00 nopl (%rax) + 25: 0f 92 c0 setb %al + 28: c3 ret + 29: cc int3 + +and this explodes like this: + + int3: 0000 [#1] PREEMPT SMP PTI + CPU: 0 PID: 2435 Comm: qemu-system-x86 Not tainted 5.17.0-rc8-sls #1 + Hardware name: Dell Inc. Precision WorkStation T3400 /0TP412, BIOS A14 04/30/2012 + RIP: 0010:setc+0x5/0x8 [kvm] + Code: 00 00 0f 1f 00 0f b6 05 43 24 06 00 c3 cc 0f 1f 80 00 00 00 00 0f 90 c0 c3 cc 0f \ + 1f 00 0f 91 c0 c3 cc 0f 1f 00 0f 92 c0 c3 cc <0f> 1f 00 0f 93 c0 c3 cc 0f 1f 00 \ + 0f 94 c0 c3 cc 0f 1f 00 0f 95 c0 + Call Trace: + + ? x86_emulate_insn [kvm] + ? x86_emulate_instruction [kvm] + ? vmx_handle_exit [kvm_intel] + ? kvm_arch_vcpu_ioctl_run [kvm] + ? kvm_vcpu_ioctl [kvm] + ? __x64_sys_ioctl + ? do_syscall_64 + ? entry_SYSCALL_64_after_hwframe + + +Raise the alignment value when SLS is enabled and use a macro for that +instead of hard-coding naked numbers. + +Fixes: e463a09af2f0 ("x86: Add straight-line-speculation mitigation") +Reported-by: Jamie Heilman +Signed-off-by: Borislav Petkov +Acked-by: Peter Zijlstra (Intel) +Tested-by: Jamie Heilman +Link: https://lore.kernel.org/r/YjGzJwjrvxg5YZ0Z@audible.transient.net +[Add a comment and a bit of safety checking, since this is going to be changed + again for IBT support. 
- Paolo] +Signed-off-by: Paolo Bonzini +--- + arch/x86/kvm/emulate.c | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) + +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 5719d8cfdbd9..e86d610dc6b7 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -429,8 +429,23 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); + FOP_END + + /* Special case for SETcc - 1 instruction per cc */ ++ ++/* ++ * Depending on .config the SETcc functions look like: ++ * ++ * SETcc %al [3 bytes] ++ * RET [1 byte] ++ * INT3 [1 byte; CONFIG_SLS] ++ * ++ * Which gives possible sizes 4 or 5. When rounded up to the ++ * next power-of-two alignment they become 4 or 8. ++ */ ++#define SETCC_LENGTH (4 + IS_ENABLED(CONFIG_SLS)) ++#define SETCC_ALIGN (4 << IS_ENABLED(CONFIG_SLS)) ++static_assert(SETCC_LENGTH <= SETCC_ALIGN); ++ + #define FOP_SETCC(op) \ +- ".align 4 \n\t" \ ++ ".align " __stringify(SETCC_ALIGN) " \n\t" \ + ".type " #op ", @function \n\t" \ + #op ": \n\t" \ + #op " %al \n\t" \ +@@ -1047,7 +1062,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt) + static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) + { + u8 rc; +- void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf); ++ void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf); + + flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; + asm("push %[flags]; popf; " CALL_NOSPEC + diff --git a/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch b/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch index 4ba6172..25725e1 100644 --- a/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch +++ b/patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch @@ -26,8 +26,8 @@ Signed-off-by: Borislav Petkov Reviewed-by: Josh Poimboeuf Signed-off-by: Borislav Petkov --- - arch/x86/kvm/emulate.c | 27 ++++++++++++++++++++++----- - 1 file changed, 22 insertions(+), 5 deletions(-) + arch/x86/kvm/emulate.c | 30 ++++++++++++++++-------------- + 1 file changed, 16 insertions(+), 14 deletions(-) --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -49,28 +49,37 @@ Signed-off-by: Borislav Petkov #define FOP_END \ ".popsection") -@@ -433,20 +435,35 @@ static int fastop(struct x86_emulate_ctx +@@ -433,35 +435,35 @@ static int fastop(struct x86_emulate_ctx ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END -+/* -+ * Depending on .config the SETcc functions look like: -+ * +-/* Special case for SETcc - 1 instruction per cc */ +- + /* + * Depending on .config the SETcc functions look like: + * +- * SETcc %al [3 bytes] +- * RET [1 byte] +- * INT3 [1 byte; CONFIG_SLS] +- * +- * Which gives possible sizes 4 or 5. When rounded up to the +- * next power-of-two alignment they become 4 or 8. 
+ * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT] + * SETcc %al [3 bytes] + * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETPOLINE] + * INT3 [1 byte; CONFIG_SLS] -+ */ + */ +-#define SETCC_LENGTH (4 + IS_ENABLED(CONFIG_SLS)) +-#define SETCC_ALIGN (4 << IS_ENABLED(CONFIG_SLS)) +#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETPOLINE)) + \ + IS_ENABLED(CONFIG_SLS)) +#define SETCC_LENGTH (3 + RET_LENGTH) +#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1)) -+static_assert(SETCC_LENGTH <= SETCC_ALIGN); -+ - /* Special case for SETcc - 1 instruction per cc */ + static_assert(SETCC_LENGTH <= SETCC_ALIGN); + ++/* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) \ -- ".align 4 \n\t" \ -+ ".align " __stringify(SETCC_ALIGN) " \n\t" \ + ".align " __stringify(SETCC_ALIGN) " \n\t" \ ".type " #op ", @function \n\t" \ #op ": \n\t" \ #op " %al \n\t" \ diff --git a/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch b/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch index 2f5cef7..bbb0c65 100644 --- a/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch +++ b/patches.suse/x86-prepare-asm-files-for-straight-line-speculation.patch @@ -22,13 +22,15 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org arch/x86/boot/compressed/head_64.S | 2 - arch/x86/boot/compressed/mem_encrypt.S | 4 +- arch/x86/crypto/aegis128-aesni-asm.S | 48 +++++++++++++-------------- + arch/x86/crypto/aes-x86_64-asm_64.S | 2 - arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 2 - arch/x86/crypto/aesni-intel_asm.S | 48 +++++++++++++-------------- arch/x86/crypto/blowfish-x86_64-asm_64.S | 12 +++--- - arch/x86/crypto/camellia-aesni-avx-asm_64.S | 14 +++---- - arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 14 +++---- + arch/x86/crypto/camellia-aesni-avx-asm_64.S | 18 +++++----- + arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 18 +++++----- + arch/x86/crypto/camellia-x86_64-asm_64.S | 12 +++--- arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 12 +++--- - arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 10 ++--- + arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 16 ++++----- arch/x86/crypto/chacha-avx2-x86_64.S | 6 +-- arch/x86/crypto/chacha-avx512vl-x86_64.S | 6 +-- arch/x86/crypto/chacha-ssse3-x86_64.S | 8 ++-- @@ -37,12 +39,14 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org arch/x86/crypto/des3_ede-asm_64.S | 4 +- arch/x86/crypto/ghash-clmulni-intel_asm.S | 6 +-- arch/x86/crypto/nh-sse2-x86_64.S | 2 - - arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 10 ++--- - arch/x86/crypto/serpent-avx2-asm_64.S | 10 ++--- + arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 16 ++++----- + arch/x86/crypto/serpent-avx2-asm_64.S | 16 ++++----- + arch/x86/crypto/serpent-sse2-i586-asm_32.S | 6 +-- + arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 6 +-- arch/x86/crypto/sha512-avx-asm.S | 2 - arch/x86/crypto/sha512-avx2-asm.S | 2 - arch/x86/crypto/sha512-ssse3-asm.S | 2 - - arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 10 ++--- + arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 16 ++++----- arch/x86/crypto/twofish-i586-asm_32.S | 4 +- arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 6 +-- arch/x86/crypto/twofish-x86_64-asm_64.S | 4 +- @@ -58,6 +62,7 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org arch/x86/kvm/vmx/vmenter.S | 10 ++--- arch/x86/lib/atomic64_386_32.S | 2 - arch/x86/lib/atomic64_cx8_32.S | 16 ++++----- + arch/x86/lib/checksum_32.S | 8 ++-- arch/x86/lib/cmpxchg8b_emu.S | 4 +- arch/x86/lib/copy_page_64.S | 
4 +- arch/x86/lib/copy_user_64.S | 10 ++--- @@ -87,7 +92,7 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org arch/x86/um/checksum_32.S | 4 +- arch/x86/um/setjmp_32.S | 2 - arch/x86/um/setjmp_64.S | 2 - - 69 files changed, 237 insertions(+), 237 deletions(-) + 74 files changed, 270 insertions(+), 270 deletions(-) --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -564,6 +569,17 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(aesni_ctr_enc) /* +--- a/arch/x86/crypto/aes-x86_64-asm_64.S ++++ b/arch/x86/crypto/aes-x86_64-asm_64.S +@@ -74,7 +74,7 @@ + movl r6 ## E,4(r9); \ + movl r7 ## E,8(r9); \ + movl r8 ## E,12(r9); \ +- ret; \ ++ RET; \ + ENDPROC(FUNC); + + #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \ --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -135,10 +135,10 @@ ENTRY(__blowfish_enc_blk) @@ -677,6 +693,24 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ +@@ -1199,7 +1199,7 @@ ENTRY(camellia_ctr_32way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ctr_32way) + + #define gf128mul_x_ble(iv, mask, tmp) \ +@@ -1366,7 +1366,7 @@ camellia_xts_crypt_32way: + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_xts_crypt_32way) + + ENTRY(camellia_xts_enc_32way) --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -193,7 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_ @@ -742,6 +776,75 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ +@@ -1109,7 +1109,7 @@ ENTRY(camellia_ctr_16way) + %xmm8, %rsi); + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_ctr_16way) + + #define gf128mul_x_ble(iv, mask, tmp) \ +@@ -1253,7 +1253,7 @@ camellia_xts_crypt_16way: + %xmm8, %rsi); + + FRAME_END +- ret; ++ RET; + ENDPROC(camellia_xts_crypt_16way) + + ENTRY(camellia_xts_enc_16way) +--- a/arch/x86/crypto/camellia-x86_64-asm_64.S ++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S +@@ -213,13 +213,13 @@ ENTRY(__camellia_enc_blk) + enc_outunpack(mov, RT1); + + movq RR12, %r12; +- ret; ++ RET; + + .L__enc_xor: + enc_outunpack(xor, RT1); + + movq RR12, %r12; +- ret; ++ RET; + ENDPROC(__camellia_enc_blk) + + ENTRY(camellia_dec_blk) +@@ -257,7 +257,7 @@ ENTRY(camellia_dec_blk) + dec_outunpack(); + + movq RR12, %r12; +- ret; ++ RET; + ENDPROC(camellia_dec_blk) + + /********************************************************************** +@@ -448,14 +448,14 @@ ENTRY(__camellia_enc_blk_2way) + + movq RR12, %r12; + popq %rbx; +- ret; ++ RET; + + .L__enc2_xor: + enc_outunpack2(xor, RT2); + + movq RR12, %r12; + popq %rbx; +- ret; ++ RET; + ENDPROC(__camellia_enc_blk_2way) + + ENTRY(camellia_dec_blk_2way) +@@ -495,5 +495,5 @@ ENTRY(camellia_dec_blk_2way) + + movq RR12, %r12; + movq RXOR, %rbx; +- ret; ++ RET; + ENDPROC(camellia_dec_blk_2way) --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -279,7 +279,7 @@ __cast5_enc_blk16: @@ -843,6 +946,31 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(cast6_cbc_dec_8way) ENTRY(cast6_ctr_8way) +@@ -438,7 +438,7 @@ ENTRY(cast6_ctr_8way) + popq %r15; + popq %r12; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_ctr_8way) + + ENTRY(cast6_xts_enc_8way) +@@ -465,7 +465,7 @@ 
ENTRY(cast6_xts_enc_8way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_xts_enc_8way) + + ENTRY(cast6_xts_dec_8way) +@@ -492,5 +492,5 @@ ENTRY(cast6_xts_dec_8way) + + popq %r15; + FRAME_END +- ret; ++ RET; + ENDPROC(cast6_xts_dec_8way) --- a/arch/x86/crypto/chacha-avx2-x86_64.S +++ b/arch/x86/crypto/chacha-avx2-x86_64.S @@ -193,7 +193,7 @@ ENTRY(chacha_2block_xor_avx2) @@ -1064,6 +1192,31 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(serpent_cbc_dec_16way) ENTRY(serpent_ctr_16way) +@@ -757,7 +757,7 @@ ENTRY(serpent_ctr_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ctr_16way) + + ENTRY(serpent_xts_enc_16way) +@@ -783,7 +783,7 @@ ENTRY(serpent_xts_enc_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_xts_enc_16way) + + ENTRY(serpent_xts_dec_16way) +@@ -809,5 +809,5 @@ ENTRY(serpent_xts_dec_16way) + vzeroupper; + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_xts_dec_16way) --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -605,7 +605,7 @@ __serpent_enc_blk8_avx: @@ -1111,6 +1264,80 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(serpent_cbc_dec_8way_avx) ENTRY(serpent_ctr_8way_avx) +@@ -733,7 +733,7 @@ ENTRY(serpent_ctr_8way_avx) + store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_ctr_8way_avx) + + ENTRY(serpent_xts_enc_8way_avx) +@@ -755,7 +755,7 @@ ENTRY(serpent_xts_enc_8way_avx) + store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_xts_enc_8way_avx) + + ENTRY(serpent_xts_dec_8way_avx) +@@ -777,5 +777,5 @@ ENTRY(serpent_xts_dec_8way_avx) + store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); + + FRAME_END +- ret; ++ RET; + ENDPROC(serpent_xts_dec_8way_avx) +--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S ++++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S +@@ -553,12 +553,12 @@ ENTRY(__serpent_enc_blk_4way) + + write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); + +- ret; ++ RET; + + .L__enc_xor4: + xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); + +- ret; ++ RET; + ENDPROC(__serpent_enc_blk_4way) + + ENTRY(serpent_dec_blk_4way) +@@ -612,5 +612,5 @@ ENTRY(serpent_dec_blk_4way) + movl arg_dst(%esp), %eax; + write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); + +- ret; ++ RET; + ENDPROC(serpent_dec_blk_4way) +--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S ++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +@@ -675,13 +675,13 @@ ENTRY(__serpent_enc_blk_8way) + write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + +- ret; ++ RET; + + .L__enc_xor8: + xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); + xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(__serpent_enc_blk_8way) + + ENTRY(serpent_dec_blk_8way) +@@ -735,5 +735,5 @@ ENTRY(serpent_dec_blk_8way) + write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); + write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); + +- ret; ++ RET; + ENDPROC(serpent_dec_blk_8way) --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -681,7 +681,7 @@ done_hash: @@ -1191,6 +1418,31 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org ENDPROC(twofish_cbc_dec_8way) ENTRY(twofish_ctr_8way) +@@ -404,7 +404,7 @@ ENTRY(twofish_ctr_8way) + popq %r12; + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_ctr_8way) + + 
ENTRY(twofish_xts_enc_8way) +@@ -428,7 +428,7 @@ ENTRY(twofish_xts_enc_8way) + store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_xts_enc_8way) + + ENTRY(twofish_xts_dec_8way) +@@ -452,5 +452,5 @@ ENTRY(twofish_xts_dec_8way) + store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); + + FRAME_END +- ret; ++ RET; + ENDPROC(twofish_xts_dec_8way) --- a/arch/x86/crypto/twofish-i586-asm_32.S +++ b/arch/x86/crypto/twofish-i586-asm_32.S @@ -260,7 +260,7 @@ ENTRY(twofish_enc_blk) @@ -1593,6 +1845,44 @@ Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org - ret + RET ENDPROC(atomic64_inc_not_zero_cx8) +--- a/arch/x86/lib/checksum_32.S ++++ b/arch/x86/lib/checksum_32.S +@@ -127,7 +127,7 @@ ENTRY(csum_partial) + 8: + popl %ebx + popl %esi +- ret ++ RET + ENDPROC(csum_partial) + + #else +@@ -245,7 +245,7 @@ ENTRY(csum_partial) + 90: + popl %ebx + popl %esi +- ret ++ RET + ENDPROC(csum_partial) + + #endif +@@ -397,7 +397,7 @@ DST( movb %cl, (%edi) ) + popl %esi + popl %edi + popl %ecx # equivalent to addl $4,%esp +- ret ++ RET + ENDPROC(csum_partial_copy_generic) + + #else +@@ -482,7 +482,7 @@ DST( movb %dl, (%edi) ) + popl %esi + popl %edi + popl %ebx +- ret ++ RET + ENDPROC(csum_partial_copy_generic) + + #undef ROUND --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -32,7 +32,7 @@ ENTRY(cmpxchg8b_emu) diff --git a/series.conf b/series.conf index e0343b0..fb3e363 100644 --- a/series.conf +++ b/series.conf @@ -22961,6 +22961,7 @@ patches.suse/gve-Fix-GFP-flags-when-allocing-pages.patch patches.suse/udf-Fix-NULL-ptr-deref-when-converting-from-inline-f.patch patches.suse/udf-Restore-i_lenAlloc-when-inode-expansion-fails.patch + patches.suse/fsnotify-invalidate-dcache-before-IN_DELETE-event.patch patches.suse/msft-hv-2513-video-hyperv_fb-Fix-validation-of-screen-resolution.patch patches.suse/blk-mq-Fix-wrong-wakeup-batch-configuration-which-wi.patch patches.suse/cgroup-v1-Require-capabilities-to-set-release_agent.patch @@ -23000,6 +23001,7 @@ patches.suse/xen-netfront-react-properly-to-failing-gnttab_end_fo.patch patches.suse/esp-Fix-possible-buffer-overflow-in-ESP-transformati.patch patches.suse/af_key-add-__GFP_ZERO-flag-for-compose_sadb_supporte.patch + patches.suse/kvm-emulate-fix-setcc-emulation-function-offsets-with-sls.patch patches.suse/exec-Force-single-empty-string-when-argv-is-empty.patch patches.suse/ALSA-pcm-Fix-races-among-concurrent-hw_params-and-hw.patch patches.suse/ALSA-pcm-Fix-races-among-concurrent-read-write-and-b.patch @@ -23102,6 +23104,7 @@ patches.suse/x86-cpu-amd-Enumerate-BTC_NO.patch patches.suse/x86-bugs-Do-not-enable-IBPB-on-entry-when-IBPB-is-not-supp.patch patches.suse/x86-kexec-disable-ret-on-kexec.patch + patches.suse/kvm-emulate-do-not-adjust-size-of-fastop-and-setcc-subroutines.patch patches.suse/x86-bugs-remove-apostrophe-typo.patch patches.suse/lockdown-Fix-kexec-lockdown-bypass-with-ima-policy.patch patches.suse/netfilter-nf_queue-do-not-allow-packet-truncation-be.patch
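
For reference, the SETCC_ALIGN arithmetic used above (in
patches.suse/x86-kvm-Fix-SETcc-emulation-for-return-thunks.patch) can be
checked in isolation. A minimal user-space sketch, not part of any patch
above; the helper name setcc_align is hypothetical and the CONFIG_RETPOLINE/
CONFIG_SLS knobs are modeled as plain 0/1 ints:

	#include <assert.h>
	#include <stdio.h>

	/* Mirrors RET_LENGTH/SETCC_LENGTH/SETCC_ALIGN from the patch. */
	static unsigned int setcc_align(int retpoline, int sls)
	{
		unsigned int ret_len = 1 + 4 * retpoline + sls;	/* RET or JMP thunk, plus INT3 */
		unsigned int len = 3 + ret_len;			/* 3-byte SETcc + return sequence */
		/* Rounds 1..4 -> 4, 5..8 -> 8, 9..16 -> 16. */
		unsigned int align = 4 << ((len > 4) & 1) << ((len > 8) & 1);

		assert(len <= align);	/* the patch's static_assert */
		return align;
	}

	int main(void)
	{
		for (int r = 0; r <= 1; r++)
			for (int s = 0; s <= 1; s++)
				printf("RETPOLINE=%d SLS=%d: len=%u align=%u\n",
				       r, s, 4 + 4 * r + s, setcc_align(r, s));
		return 0;
	}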