From 155a0a926a69a7ff22ba08c6a7df53f3c7afdf25 Mon Sep 17 00:00:00 2001
From: Tony Jones <tonyj@suse.de>
Date: Jun 02 2025 22:43:07 +0000
Subject: Merge remote-tracking branch 'origin/users/pfalcato/SLE15-SP6/for-next' into SLE15-SP6


---

diff --git a/patches.kabi/icmp-prevent-possible-NULL-dereferences-from-icmp_bu.patch b/patches.kabi/icmp-prevent-possible-NULL-dereferences-from-icmp_bu.patch
index df57dfb..0a023db 100644
--- a/patches.kabi/icmp-prevent-possible-NULL-dereferences-from-icmp_bu.patch
+++ b/patches.kabi/icmp-prevent-possible-NULL-dereferences-from-icmp_bu.patch
@@ -6,21 +6,19 @@ Subject: [PATCH] Kabi workaround for icmp: prevent possible NULL dereferences fr
 icmp_build_probe()
 
 Signed-off-by: Davide Benini <davide.benini@suse.com>
-Signed-off-by: Pedro Falcato <pfalcato@suse.de>
 ---
  net/ipv4/icmp.c |    2 ++
  1 file changed, 2 insertions(+)
 
 --- a/net/ipv4/icmp.c
 +++ b/net/ipv4/icmp.c
-@@ -92,8 +92,10 @@
+@@ -92,7 +92,9 @@
  #include <net/inet_common.h>
  #include <net/ip_fib.h>
  #include <net/l3mdev.h>
 +#ifndef __GENKSYMS__
  #include <net/addrconf.h>
- #include <net/inet_dscp.h>
 +#endif
+ #include <net/inet_dscp.h>
  /*
   *	Build xmit assembly blocks
-  */
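
For reference, a minimal sketch of the kabi technique the hunk above adjusts
(header names taken from the patch itself; this is context, not part of the
change): genksyms computes symbol CRCs from a preprocessor pass run with
__GENKSYMS__ defined, so an include hidden behind the guard is invisible to
the checksum calculation while the ordinary compile pass still sees it.

  /* Sketch only: the CRC pass preprocesses with -D__GENKSYMS__, so this
   * include cannot perturb the recorded kABI checksums; the real build
   * does not define __GENKSYMS__ and still pulls the header in. */
  #ifndef __GENKSYMS__
  #include <net/addrconf.h>
  #endif
  #include <net/inet_dscp.h>   /* left outside the guard, per the hunk */
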
diff --git a/patches.suse/memblock-Accept-allocated-memory-before-use-in-memblock_do.patch b/patches.suse/memblock-Accept-allocated-memory-before-use-in-memblock_do.patch
new file mode 100644
index 0000000..01e875a
--- /dev/null
+++ b/patches.suse/memblock-Accept-allocated-memory-before-use-in-memblock_do.patch
@@ -0,0 +1,72 @@
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Thu, 8 May 2025 12:24:10 -0500
+Subject: memblock: Accept allocated memory before use in
+ memblock_double_array()
+Git-commit: da8bf5daa5e55a6af2b285ecda460d6454712ff4
+Patch-mainline: v6.15-rc6
+References: CVE-2025-37960 bsc#1243519
+
+When increasing the array size in memblock_double_array() and the slab
+is not yet available, a call to memblock_find_in_range() is used to
+reserve/allocate memory. However, the range returned may not have been
+accepted, which can result in a crash when booting an SNP guest:
+
+  RIP: 0010:memcpy_orig+0x68/0x130
+  Code: ...
+  RSP: 0000:ffffffff9cc03ce8 EFLAGS: 00010006
+  RAX: ff11001ff83e5000 RBX: 0000000000000000 RCX: fffffffffffff000
+  RDX: 0000000000000bc0 RSI: ffffffff9dba8860 RDI: ff11001ff83e5c00
+  RBP: 0000000000002000 R08: 0000000000000000 R09: 0000000000002000
+  R10: 000000207fffe000 R11: 0000040000000000 R12: ffffffff9d06ef78
+  R13: ff11001ff83e5000 R14: ffffffff9dba7c60 R15: 0000000000000c00
+  memblock_double_array+0xff/0x310
+  memblock_add_range+0x1fb/0x2f0
+  memblock_reserve+0x4f/0xa0
+  memblock_alloc_range_nid+0xac/0x130
+  memblock_alloc_internal+0x53/0xc0
+  memblock_alloc_try_nid+0x3d/0xa0
+  swiotlb_init_remap+0x149/0x2f0
+  mem_init+0xb/0xb0
+  mm_core_init+0x8f/0x350
+  start_kernel+0x17e/0x5d0
+  x86_64_start_reservations+0x14/0x30
+  x86_64_start_kernel+0x92/0xa0
+  secondary_startup_64_no_verify+0x194/0x19b
+
+Mitigate this by calling accept_memory() on the memory range returned
+before the slab is available.
+
+Prior to v6.12, the accept_memory() interface used 'start' and 'end'
+parameters instead of 'start' and 'size'; therefore, the accept_memory()
+call must be adjusted to specify 'start + size' for 'end' when applying
+to kernels prior to v6.12.
+
+Cc: stable@vger.kernel.org # see patch description, needs adjustments for <= 6.11
+Fixes: dcdfdd40fa82 ("mm: Add support for unaccepted memory")
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lore.kernel.org/r/da1ac73bf4ded761e21b4e4bb5178382a580cd73.1746725050.git.thomas.lendacky@amd.com
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Acked-by: Pedro Falcato <pfalcato@suse.de>
+[pfalcato@suse.de: Adapt the accept_memory() call to the old [start, end] interface]
+---
+ mm/memblock.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -455,7 +455,14 @@ static int __init_memblock memblock_doub
+ 				min(new_area_start, memblock.current_limit),
+ 				new_alloc_size, PAGE_SIZE);
+ 
+-		new_array = addr ? __va(addr) : NULL;
++		if (addr) {
++			/* The memory may not have been accepted, yet. */
++			accept_memory(addr, addr + new_alloc_size);
++
++			new_array = __va(addr);
++		} else {
++			new_array = NULL;
++		}
+ 	}
+ 	if (!addr) {
+ 		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
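
To make the backport note above concrete, a hedged sketch of the two call
forms described in the patch message (the size-based prototype is the v6.12+
mainline one; the [start, end] form is the one this kernel carries):

  /* mainline, v6.12 and later: accept_memory(start, size) */
  accept_memory(addr, new_alloc_size);

  /* kernels up to v6.11, as in the hunk above: accept_memory(start, end) */
  accept_memory(addr, addr + new_alloc_size);

Both calls cover the same physical range; only the meaning of the second
argument differs.
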
diff --git a/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch b/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
new file mode 100644
index 0000000..9f11c8b
--- /dev/null
+++ b/patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
@@ -0,0 +1,99 @@
+From: Gavin Guo <gavinguo@igalia.com>
+Date: Mon, 21 Apr 2025 19:35:36 +0800
+Subject: mm/huge_memory: fix dereferencing invalid pmd migration entry
+Git-commit: be6e843fc51a584672dfd9c4a6a24c8cb81d5fb7
+Patch-mainline: v6.15-rc6
+References: CVE-2025-37958 bsc#1243539
+
+When migrating a THP, concurrent access to the PMD migration entry during
+a deferred split scan can lead to an invalid address access, as
+illustrated below.  To prevent this invalid access, it is necessary to
+check the PMD migration entry and return early.  In this context, there is
+no need to use pmd_to_swp_entry and pfn_swap_entry_to_page to verify the
+equality of the target folio.  Since the PMD migration entry is locked, it
+cannot serve as the target.
+
+Mailing list discussion and explanation from Hugh Dickins: "An anon_vma
+lookup points to a location which may contain the folio of interest, but
+might instead contain another folio: and weeding out those other folios is
+precisely what the "folio != pmd_folio(*pmd)" check (and the "risk of
+replacing the wrong folio" comment a few lines above it) is for."
+
+BUG: unable to handle page fault for address: ffffea60001db008
+CPU: 0 UID: 0 PID: 2199114 Comm: tee Not tainted 6.14.0+ #4 NONE
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:split_huge_pmd_locked+0x3b5/0x2b60
+Call Trace:
+<TASK>
+try_to_migrate_one+0x28c/0x3730
+rmap_walk_anon+0x4f6/0x770
+unmap_folio+0x196/0x1f0
+split_huge_page_to_list_to_order+0x9f6/0x1560
+deferred_split_scan+0xac5/0x12a0
+shrinker_debugfs_scan_write+0x376/0x470
+full_proxy_write+0x15c/0x220
+vfs_write+0x2fc/0xcb0
+ksys_write+0x146/0x250
+do_syscall_64+0x6a/0x120
+entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The bug was found by syzkaller on an internal kernel, then confirmed on
+upstream.
+
+Link: https://lkml.kernel.org/r/20250421113536.3682201-1-gavinguo@igalia.com
+Link: https://lore.kernel.org/all/20250414072737.1698513-1-gavinguo@igalia.com/
+Link: https://lore.kernel.org/all/20250418085802.2973519-1-gavinguo@igalia.com/
+Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path")
+Signed-off-by: Gavin Guo <gavinguo@igalia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Gavin Shan <gshan@redhat.com>
+Cc: Florent Revest <revest@google.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Miaohe Lin <linmiaohe@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Pedro Falcato <pfalcato@suse.de>
+[pfalcato@suse.de: Adapted patch to 6.4. Special care must be taken
+to perform the check under the pmd lock]
+---
+ mm/huge_memory.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2274,6 +2274,7 @@ static void __split_huge_pmd_locked(stru
+ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ 		unsigned long address, bool freeze, struct folio *folio)
+ {
++	bool pmd_migration;
+ 	spinlock_t *ptl;
+ 	struct mmu_notifier_range range;
+ 
+@@ -2282,6 +2283,7 @@ void __split_huge_pmd(struct vm_area_str
+ 				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
+ 	mmu_notifier_invalidate_range_start(&range);
+ 	ptl = pmd_lock(vma->vm_mm, pmd);
++	pmd_migration = is_pmd_migration_entry(*pmd);
+ 
+ 	/*
+ 	 * If caller asks to setup a migration entry, we need a folio to check
+@@ -2290,13 +2292,12 @@ void __split_huge_pmd(struct vm_area_str
+ 	VM_BUG_ON(freeze && !folio);
+ 	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
+ 
+-	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
+-	    is_pmd_migration_entry(*pmd)) {
++	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
+ 		/*
+-		 * It's safe to call pmd_page when folio is set because it's
+-		 * guaranteed that pmd is present.
++		 * It's safe to call pmd_page when folio is set (except when pmd_migration)
++		 * because it's guaranteed that pmd is present.
+ 		 */
+-		if (folio && folio != page_folio(pmd_page(*pmd)))
++		if (folio && (pmd_migration || folio != page_folio(pmd_page(*pmd))))
+ 			goto out;
+ 		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
+ 	}
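
A condensed sketch of the resulting control flow in __split_huge_pmd(),
pieced together from the hunks above: the migration-entry status is sampled
once under the pmd lock, and a locked migration entry short-circuits the
folio comparison, since pmd_page() is only meaningful for a present pmd.

  ptl = pmd_lock(vma->vm_mm, pmd);
  pmd_migration = is_pmd_migration_entry(*pmd);  /* sampled under ptl */

  if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) {
          /* A locked migration entry cannot be the folio we are after,
           * and pmd_page(*pmd) must not be used on a non-present pmd. */
          if (folio && (pmd_migration ||
                        folio != page_folio(pmd_page(*pmd))))
                  goto out;
          __split_huge_pmd_locked(vma, pmd, range.start, freeze);
  }
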
diff --git a/series.conf b/series.conf
index 4804999..c385577 100644
--- a/series.conf
+++ b/series.conf
@@ -31832,6 +31832,8 @@
 	patches.suse/usb-usbtmc-Fix-erroneous-wait_srq-ioctl-return.patch
 	patches.suse/usb-usbtmc-Fix-erroneous-generic_read-ioctl-return.patch
 	patches.suse/ocfs2-fix-the-issue-with-discontiguous-allocation-in.patch
+	patches.suse/mm-huge_memory-fix-dereferencing-invalid-pmd-migration-ent.patch
+	patches.suse/memblock-Accept-allocated-memory-before-use-in-memblock_do.patch
 	patches.suse/Input-cyttsp5-ensure-minimum-reset-pulse-width.patch
 	patches.suse/Input-xpad-add-support-for-8BitDo-Ultimate-2-Wireles.patch
 	patches.suse/Input-mtk-pmic-keys-fix-possible-null-pointer-derefe.patch