From 49bf8b854dc000c77c293db44acb58157b61d64e Mon Sep 17 00:00:00 2001
From: Pedro Falcato <pfalcato@suse.de>
Date: Jun 02 2025 19:56:10 +0000
Subject: mm/huge_memory: fix dereferencing invalid pmd migration entry (CVE-2025-37958 bsc#1243539).

---

diff --git a/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch b/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
new file mode 100644
index 0000000..9f11c8b
--- /dev/null
+++ b/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
@@ -0,0 +1,99 @@
+From: Gavin Guo <gavinguo@igalia.com>
+Date: Mon, 21 Apr 2025 19:35:36 +0800
+Subject: mm/huge_memory: fix dereferencing invalid pmd migration entry
+Git-commit: be6e843fc51a584672dfd9c4a6a24c8cb81d5fb7
+Patch-mainline: v6.15-rc6
+References: CVE-2025-37958 bsc#1243539
+
+When migrating a THP, concurrent access to the PMD migration entry during
+a deferred split scan can lead to an invalid address access, as
+illustrated below. To prevent this invalid access, it is necessary to
+check the PMD migration entry and return early. In this context, there is
+no need to use pmd_to_swp_entry and pfn_swap_entry_to_page to verify the
+equality of the target folio. Since the PMD migration entry is locked, it
+cannot be served as the target.
+
+Mailing list discussion and explanation from Hugh Dickins: "An anon_vma
+lookup points to a location which may contain the folio of interest, but
+might instead contain another folio: and weeding out those other folios is
+precisely what the "folio != pmd_folio((*pmd)" check (and the "risk of
+replacing the wrong folio" comment a few lines above it) is for."
+
+BUG: unable to handle page fault for address: ffffea60001db008
+CPU: 0 UID: 0 PID: 2199114 Comm: tee Not tainted 6.14.0+ #4 NONE
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:split_huge_pmd_locked+0x3b5/0x2b60
+Call Trace:
+<TASK>
+try_to_migrate_one+0x28c/0x3730
+rmap_walk_anon+0x4f6/0x770
+unmap_folio+0x196/0x1f0
+split_huge_page_to_list_to_order+0x9f6/0x1560
+deferred_split_scan+0xac5/0x12a0
+shrinker_debugfs_scan_write+0x376/0x470
+full_proxy_write+0x15c/0x220
+vfs_write+0x2fc/0xcb0
+ksys_write+0x146/0x250
+do_syscall_64+0x6a/0x120
+entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The bug is found by syzkaller on an internal kernel, then confirmed on
+upstream.
+
+Link: https://lkml.kernel.org/r/20250421113536.3682201-1-gavinguo@igalia.com
+Link: https://lore.kernel.org/all/20250414072737.1698513-1-gavinguo@igalia.com/
+Link: https://lore.kernel.org/all/20250418085802.2973519-1-gavinguo@igalia.com/
+Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
+Signed-off-by: Gavin Guo <gavinguo@igalia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Gavin Shan <gshan@redhat.com>
+Cc: Florent Revest <revest@google.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Pedro Falcato <pfalcato@suse.de>
+[pfalcato@suse.de: Adapted patch to 6.4. Special care needs to be taken
+ to perform the check under the pmd lock]
+---
+ mm/huge_memory.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2274,6 +2274,7 @@ static void __split_huge_pmd_locked(stru
+ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ 		unsigned long address, bool freeze, struct folio *folio)
+ {
++	bool pmd_migration;
+ 	spinlock_t *ptl;
+ 	struct mmu_notifier_range range;
+ 
+@@ -2282,6 +2283,7 @@ void __split_huge_pmd(struct vm_area_str
+ 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+ 	mmu_notifier_invalidate_range_start(&range);
+ 	ptl = pmd_lock(vma->vm_mm, pmd);
++	pmd_migration = is_pmd_migration_entry(*pmd);
+ 
+ 	/*
+ 	 * If caller asks to setup a migration entry, we need a folio to check
+@@ -2290,13 +2292,12 @@ void __split_huge_pmd(struct vm_area_str
+ 	VM_BUG_ON(freeze && !folio);
+ 	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
+ 
+-	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+-	    is_pmd_migration_entry(*pmd)) {
++	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
+ 		/*
+-		 * It's safe to call pmd_page when folio is set because it's
+-		 * guaranteed that pmd is present.
++		 * It's safe to call pmd_page when folio is set (except when pmd_migration)
++		 * because it's guaranteed that pmd is present.
+ 		 */
+-		if (folio && folio != page_folio(pmd_page(*pmd)))
++		if (folio && (pmd_migration || folio != page_folio(pmd_page(*pmd))))
+ 			goto out;
+ 		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+ 	}
diff --git a/series.conf b/series.conf
index 6413fb0..ce10246 100644
--- a/series.conf
+++ b/series.conf
@@ -31825,6 +31825,7 @@
 	patches.suse/usb-usbtmc-Fix-erroneous-wait_srq-ioctl-return.patch
 	patches.suse/usb-usbtmc-Fix-erroneous-generic_read-ioctl-return.patch
 	patches.suse/ocfs2-fix-the-issue-with-discontiguous-allocation-in.patch
+	patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
 	patches.suse/Input-cyttsp5-ensure-minimum-reset-pulse-width.patch
 	patches.suse/Input-xpad-add-support-for-8BitDo-Ultimate-2-Wireles.patch
 	patches.suse/Input-mtk-pmic-keys-fix-possible-null-pointer-derefe.patch
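
Note for reviewers: the following is a minimal userspace sketch of the locking rule this backport enforces, not kernel code. The names pmd_slot, split_locked(), is_migration_entry and struct page here are invented for illustration; the only behaviour carried over from the patch is that the migration-entry state is sampled while the lock is held and the folio comparison short-circuits before anything derived from the entry is dereferenced.

/*
 * Illustrative analogue of the patched __split_huge_pmd() check.
 * Assumption: a PMD, its folio and the pmd lock are modelled by a
 * struct, a plain pointer and a pthread mutex.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct page { int id; };                /* stand-in for a page/folio        */

struct pmd_slot {
	pthread_mutex_t lock;           /* models the pmd lock              */
	bool is_migration_entry;        /* models is_pmd_migration_entry()  */
	struct page *page;              /* only meaningful when present     */
};

/*
 * Models the patched flow: sample the migration state under the lock and
 * bail out before trusting slot->page when a migration entry is installed.
 */
static void split_locked(struct pmd_slot *slot, struct page *expected)
{
	pthread_mutex_lock(&slot->lock);
	bool migration = slot->is_migration_entry; /* sampled under the lock */

	if (expected && (migration || slot->page != expected))
		goto out;               /* mid-migration or not our page    */

	printf("splitting page %d\n", slot->page->id);
out:
	pthread_mutex_unlock(&slot->lock);
}

int main(void)
{
	struct page p = { .id = 1 };
	struct pmd_slot slot = { .is_migration_entry = true, .page = NULL };

	pthread_mutex_init(&slot.lock, NULL);

	split_locked(&slot, &p);        /* migration entry: returns early   */

	slot.is_migration_entry = false;
	slot.page = &p;
	split_locked(&slot, &p);        /* present and matching: "splits"   */

	pthread_mutex_destroy(&slot.lock);
	return 0;
}

Built with cc -pthread sketch.c (hypothetical file name), the first call returns without touching slot->page, mirroring the new early goto out for pmd_migration, while the second call proceeds as before.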