From 7e8de6e0c259613e111deb6c57d34448569e3c5b Mon Sep 17 00:00:00 2001
From: Kernel Build Daemon
Date: Nov 23 2021 06:00:35 +0000
Subject: Merge branch 'SLE12-SP5' into SLE12-SP5-AZURE

---

diff --git a/blacklist.conf b/blacklist.conf
index 34e6708..3476142 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -2033,3 +2033,4 @@ ed65df63a39a3f6ed04f7258de8b6789e5021c18 # not worth it, the problem should be r
 dcb713d53e2eadf42b878c12a471e74dc6ed3145 # obsoletes the above
 ff363f480e5997051dd1de949121ffda3b753741 # would need a more involved backport
 0c0e37dc11671384e53ba6ede53a4d91162a2cc5 # ditto
+8520e224f547cd070c7c8f97b1fc6d58cff7ccaa # kABI breaker, see also bsc#1191279
diff --git a/patches.suse/btrfs-fix-memory-ordering-between-normal-and-ordered-work-functions.patch b/patches.suse/btrfs-fix-memory-ordering-between-normal-and-ordered-work-functions.patch
new file mode 100644
index 0000000..4c8d0e5
--- /dev/null
+++ b/patches.suse/btrfs-fix-memory-ordering-between-normal-and-ordered-work-functions.patch
@@ -0,0 +1,83 @@
+From: Nikolay Borisov
+Date: Tue, 2 Nov 2021 14:49:16 +0200
+Subject: btrfs: fix memory ordering between normal and ordered work functions
+Git-commit: 45da9c1767ac31857df572f0a909fbe88fd5a7e9
+Patch-mainline: v5.17 or v5.16-rc2 (next release)
+References: git-fixes
+
+Ordered work functions aren't guaranteed to be handled by the same thread
+which executed the normal work functions. The only way execution between
+normal/ordered functions is synchronized is via the WORK_DONE_BIT;
+unfortunately, the bitops used don't guarantee any ordering whatsoever.
+
+This manifested as seemingly inexplicable crashes on ARM64, where
+async_chunk::inode is seen as non-NULL in async_cow_submit, which causes
+submit_compressed_extents to be called, and the crash occurs because
+async_chunk::inode suddenly becomes NULL. The call trace was similar to:
+
+    pc : submit_compressed_extents+0x38/0x3d0
+    lr : async_cow_submit+0x50/0xd0
+    sp : ffff800015d4bc20
+
+    Call trace:
+     submit_compressed_extents+0x38/0x3d0
+     async_cow_submit+0x50/0xd0
+     run_ordered_work+0xc8/0x280
+     btrfs_work_helper+0x98/0x250
+     process_one_work+0x1f0/0x4ac
+     worker_thread+0x188/0x504
+     kthread+0x110/0x114
+     ret_from_fork+0x10/0x18
+
+Fix this by adding the respective barrier calls, which ensure that all
+accesses preceding the setting of WORK_DONE_BIT are strictly ordered before
+the flag is set. At the same time, add a read barrier after reading
+WORK_DONE_BIT in run_ordered_work, which ensures all subsequent loads
+are strictly ordered after reading the bit. This in turn ensures that all
+accesses before WORK_DONE_BIT are strictly ordered before any access that
+can occur in ordered_func.
+
+Reported-by: Chris Murphy
+Fixes: 08a9ff326418 ("btrfs: Added btrfs_workqueue_struct implemented ordered execution based on kernel workqueue")
+CC: stable@vger.kernel.org # 4.4+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2011928
+Reviewed-by: Josef Bacik
+Tested-by: Chris Murphy
+Signed-off-by: Nikolay Borisov
+Signed-off-by: David Sterba
+---
+ fs/btrfs/async-thread.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -282,6 +282,13 @@ static void run_ordered_work(struct __bt
+                                   ordered_list);
+                 if (!test_bit(WORK_DONE_BIT, &work->flags))
+                         break;
++                /*
++                 * Orders all subsequent loads after reading WORK_DONE_BIT;
++                 * paired with the smp_mb__before_atomic in btrfs_work_helper,
++                 * this guarantees that the ordered function will see all
++                 * updates from the ordinary work function.
++                 */
++                smp_rmb();
+
+                 /*
+                  * we are going to call the ordered done function, but
+@@ -336,6 +343,13 @@ static void normal_work_helper(struct bt
+         thresh_exec_hook(wq);
+         work->func(work);
+         if (need_order) {
++                /*
++                 * Ensures all memory accesses done in the work function are
++                 * ordered before setting the WORK_DONE_BIT. Ensuring the thread
++                 * which is going to execute the ordered work sees them.
++                 * Pairs with the smp_rmb in run_ordered_work.
++                 */
++                smp_mb__before_atomic();
+                 set_bit(WORK_DONE_BIT, &work->flags);
+                 run_ordered_work(wq);
+         }
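
A rough user-space analogue of the barrier pairing added by the btrfs patch
above may help readers of this backport: one thread publishes its data,
orders those stores before setting a done flag, and the other thread orders
its later loads after observing the flag. This is only a sketch; the C11
fences merely approximate smp_mb__before_atomic()/smp_rmb(), and every name
in it (work_data, work_done, normal_side, ordered_side) is illustrative, not
a btrfs symbol.

/*
 * Stand-alone C11 sketch of the done-bit handoff: a release fence before
 * setting the flag pairs with an acquire fence after reading it, so the
 * "ordered" side is guaranteed to see the "normal" side's stores.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int work_data;             /* payload written by the normal side */
static atomic_int work_done;      /* stands in for WORK_DONE_BIT */

static void *normal_side(void *arg)
{
        (void)arg;
        work_data = 42;                                   /* the work function's stores */
        atomic_thread_fence(memory_order_release);        /* ~ smp_mb__before_atomic() */
        atomic_store_explicit(&work_done, 1, memory_order_relaxed); /* ~ set_bit() */
        return NULL;
}

static void *ordered_side(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&work_done, memory_order_relaxed))
                ;                                         /* ~ test_bit() check */
        atomic_thread_fence(memory_order_acquire);        /* ~ smp_rmb() */
        printf("ordered side sees work_data = %d\n", work_data); /* always 42 */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, normal_side, NULL);
        pthread_create(&b, NULL, ordered_side, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

Build with something like cc -std=c11 -pthread. Without the two fences, a
weakly ordered CPU such as arm64 could legally let the reader observe the
flag before the payload, which is the failure mode the patch describes.
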
diff --git a/patches.suse/mm-hugetlb-initialize-hugetlb_usage-in-mm_init.patch b/patches.suse/mm-hugetlb-initialize-hugetlb_usage-in-mm_init.patch
new file mode 100644
index 0000000..2be07b6
--- /dev/null
+++ b/patches.suse/mm-hugetlb-initialize-hugetlb_usage-in-mm_init.patch
@@ -0,0 +1,76 @@
+From: Liu Zixian
+Date: Wed, 8 Sep 2021 18:10:05 -0700
+Subject: mm/hugetlb: initialize hugetlb_usage in mm_init
+Git-commit: 13db8c50477d83ad3e3b9b0ae247e5cd833a7ae4
+Patch-mainline: v5.15-rc1
+References: bsc#1192906
+
+After fork, the child process will get an incorrect (2x) hugetlb_usage. If
+a process uses 5 2MB hugetlb pages in an anonymous mapping,
+
+    HugetlbPages:    10240 kB
+
+and then forks, the child will show:
+
+    HugetlbPages:    20480 kB
+
+The amount is doubled because hugetlb_usage is copied from the parent and
+then increased again when we copy page tables from parent to child, so the
+child ends up with 2x the actual usage.
+
+Fix this by adding hugetlb_count_init in mm_init.
+
+Link: https://lkml.kernel.org/r/20210826071742.877-1-liuzixian4@huawei.com
+Fixes: 5d317b2b6536 ("mm: hugetlb: proc: add HugetlbPages field to /proc/PID/status")
+Signed-off-by: Liu Zixian
+Reviewed-by: Naoya Horiguchi
+Reviewed-by: Mike Kravetz
+Cc:
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Acked-by: Michal Koutný
+---
+ include/linux/hugetlb.h | 9 +++++++++
+ kernel/fork.c           | 1 +
+ 2 files changed, 10 insertions(+)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index f7ca1a3870ea..1faebe1cd0ed 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+
+ void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+
++static inline void hugetlb_count_init(struct mm_struct *mm)
++{
++        atomic_long_set(&mm->hugetlb_usage, 0);
++}
++
+ static inline void hugetlb_count_add(long l, struct mm_struct *mm)
+ {
+         atomic_long_add(l, &mm->hugetlb_usage);
+@@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+         return &mm->page_table_lock;
+ }
+
++static inline void hugetlb_count_init(struct mm_struct *mm)
++{
++}
++
+ static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
+ {
+ }
+diff --git a/kernel/fork.c b/kernel/fork.c
+index ff5be23800af..38681ad44c76 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1063,6 +1063,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+         mm->pmd_huge_pte = NULL;
+ #endif
+         mm_init_uprobes_state(mm);
++        hugetlb_count_init(mm);
+
+         if (current->mm) {
+                 mm->flags = current->mm->flags & MMF_INIT_MASK;
+
diff --git a/series.conf b/series.conf
index fdfe2ea..07cc297 100644
--- a/series.conf
+++ b/series.conf
@@ -60229,6 +60229,7 @@
     patches.suse/profiling-fix-shift-out-of-bounds-bugs.patch
     patches.suse/prctl-allow-to-setup-brk-for-et_dyn-executables.patch
     patches.suse/SUNRPC-improve-error-response-to-over-size-gss-crede.patch
+    patches.suse/mm-hugetlb-initialize-hugetlb_usage-in-mm_init.patch
     patches.suse/s390-unwind-use-current_frame_address-to-unwind-current-task.patch
     patches.suse/time-Handle-negative-seconds-correctly-in-timespec64.patch
     patches.suse/drivers-base-cacheinfo-Get-rid-of-DEFINE_SMP_CALL_CA.patch
@@ -60341,6 +60342,7 @@
     patches.suse/xen-Fix-implicit-type-conversion.patch
     patches.suse/soc-fsl-dpio-replace-smp_processor_id-with-raw_smp_p.patch
     patches.suse/arm64-pgtable-make-__pte_to_phys-__phys_to_pte_val-i.patch
+    patches.suse/btrfs-fix-memory-ordering-between-normal-and-ordered-work-functions.patch

 # dhowells/linux-fs keys-uefi
     patches.suse/0001-KEYS-Allow-unrestricted-boot-time-addition-of-keys-t.patch
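
The double-counting fixed by the mm/hugetlb patch above can be sketched in
plain C: the child's counter is first inherited wholesale when the parent mm
is duplicated and then incremented again while the mappings are copied, so
it has to be zeroed in between. Everything below (fake_mm, fake_dup_mm,
fake_copy_page_tables) is a hypothetical stand-in for mm_struct, dup_mm()
and copy_hugetlb_page_range(), not the kernel API.

/*
 * Minimal user-space sketch of the hugetlb_usage double-counting and the
 * mm_init-time reset that fixes it.
 */
#include <stdio.h>
#include <string.h>

struct fake_mm {
        long hugetlb_usage;     /* stands in for mm->hugetlb_usage */
};

/* dup_mm() starts by copying the parent mm, counter included. */
static void fake_dup_mm(struct fake_mm *child, const struct fake_mm *parent)
{
        memcpy(child, parent, sizeof(*child));
        child->hugetlb_usage = 0;   /* the fix: hugetlb_count_init() in mm_init() */
}

/* Copying the page tables re-accounts each mapped hugepage in the child. */
static void fake_copy_page_tables(struct fake_mm *child, long mapped_pages)
{
        child->hugetlb_usage += mapped_pages;   /* ~ hugetlb_count_add() */
}

int main(void)
{
        struct fake_mm parent = { .hugetlb_usage = 5 }, child;

        fake_dup_mm(&child, &parent);
        fake_copy_page_tables(&child, 5);
        /* Without the reset in fake_dup_mm() this would print 10, i.e. 2x. */
        printf("child hugetlb_usage: %ld\n", child.hugetlb_usage);
        return 0;
}

Dropping the reset line reproduces the doubled HugetlbPages value described
in the commit message; with it, the child ends up with the correct count of 5.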