diff --git a/patches.suse/net-mlx5-DR-Fix-NULL-vs-IS_ERR-checking-in-dr_domain.patch b/patches.suse/net-mlx5-DR-Fix-NULL-vs-IS_ERR-checking-in-dr_domain.patch
new file mode 100644
index 0000000..c0c5988
--- /dev/null
+++ b/patches.suse/net-mlx5-DR-Fix-NULL-vs-IS_ERR-checking-in-dr_domain.patch
@@ -0,0 +1,40 @@
+From: Miaoqian Lin
+Date: Wed, 22 Dec 2021 06:54:53 +0000
+Subject: net/mlx5: DR, Fix NULL vs IS_ERR checking in dr_domain_init_resources
+Patch-mainline: v5.16-rc8
+Git-commit: 6b8b42585886c59a008015083282aae434349094
+References: bsc#1208845 CVE-2023-23006
+
+The mlx5_get_uars_page() function returns error pointers.
+Using IS_ERR() to check the return value to fix this.
+
+Fixes: 4ec9e7b02697 ("net/mlx5: DR, Expose steering domain functionality")
+Signed-off-by: Miaoqian Lin
+Signed-off-by: Saeed Mahameed
+Acked-by: Thomas Bogendoerfer
+---
+ drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2019 Mellanox Technologies. */
+ 
+ #include 
++#include 
+ #include "dr_types.h"
+ 
+ static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+@@ -64,9 +65,9 @@ static int dr_domain_init_resources(stru
+ 	}
+ 
+ 	dmn->uar = mlx5_get_uars_page(dmn->mdev);
+-	if (!dmn->uar) {
++	if (IS_ERR(dmn->uar)) {
+ 		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+-		ret = -ENOMEM;
++		ret = PTR_ERR(dmn->uar);
+ 		goto clean_pd;
+ 	}
+ 
diff --git a/patches.suse/x86-mm-Randomize-per-cpu-entry-area.patch b/patches.suse/x86-mm-Randomize-per-cpu-entry-area.patch
new file mode 100644
index 0000000..295c5bb
--- /dev/null
+++ b/patches.suse/x86-mm-Randomize-per-cpu-entry-area.patch
@@ -0,0 +1,157 @@
+From: Peter Zijlstra
+Date: Thu, 27 Oct 2022 14:54:41 -0700
+Subject: x86/mm: Randomize per-cpu entry area
+Git-commit: 97e3d26b5e5f371b3ee223d94dd123e6c442ba80
+Patch-mainline: v6.2-rc1
+References: bsc#1207845 CVE-2023-0597
+
+Seth found that the CPU-entry-area; the piece of per-cpu data that is
+mapped into the userspace page-tables for kPTI is not subject to any
+randomization -- irrespective of kASLR settings.
+
+On x86_64 a whole P4D (512 GB) of virtual address space is reserved for
+this structure, which is plenty large enough to randomize things a
+little.
+
+As such, use a straight forward randomization scheme that avoids
+duplicates to spread the existing CPUs over the available space.
+
+  [ bp: Fix le build. ]
+
+Reported-by: Seth Jenkins
+Reviewed-by: Kees Cook
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Dave Hansen
+Signed-off-by: Borislav Petkov
+[mkoutny: v5.14 backport: init_cea_offsets() is called way before
+ prandom_init_early() initcall, prandom_u32_max() is not properly
+ seeded yet. Use KASLR seed and local state to generate CPU entry
+ areas offsets, this is based on the approach in
+ kernel_randomize_memory() and should provide same randomness
+ guarantees -- beware we don't get cryptographically secure random
+ offsets. This reduces effective entropy in exfiltrating *all* CPU
+ entry areas by log2(nr_cpus) bits, entropy for *any* CPU is
+ unaffected.
+ This was chosen instead of backporting f62384995e4c ("random: split
+ initialization into early step and later step") and crng related
+ reworks.]
+[mkoutny: v5.3 backport: dropped hw_breakpoint hunk without 24ae0c91cbc5
+ ("x86/hw_breakpoint: Prevent data breakpoints on cpu_entry_area"),
+ adjusted context for missing doublefault_stack on 32b]
+Acked-by: Michal Koutný
+---
+ arch/x86/include/asm/cpu_entry_area.h | 13 ++++-----
+ arch/x86/mm/cpu_entry_area.c | 48 ++++++++++++++++++++++++++++++++--
+ 2 files changed, 53 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/cpu_entry_area.h
++++ b/arch/x86/include/asm/cpu_entry_area.h
+@@ -111,10 +111,6 @@ struct cpu_entry_area {
+ };
+ 
+ #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
+-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+-
+-/* Total size includes the readonly IDT mapping page as well: */
+-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
+ 
+ DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
+@@ -128,8 +124,13 @@ extern void cea_set_pte(void *cea_vaddr,
+ 
+ #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
+ 
+-#define CPU_ENTRY_AREA_MAP_SIZE			\
+-	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
++#ifdef CONFIG_X86_32
++#define CPU_ENTRY_AREA_MAP_SIZE		(CPU_ENTRY_AREA_PER_CPU +		\
++					 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
++					 CPU_ENTRY_AREA_BASE)
++#else
++#define CPU_ENTRY_AREA_MAP_SIZE	P4D_SIZE
++#endif
+ 
+ extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+ 
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -4,6 +4,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ 
+ #include 
+ #include 
+@@ -15,11 +16,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struc
+ #ifdef CONFIG_X86_64
+ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+ DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
++
++static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
++
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++	return per_cpu(_cea_offset, cpu);
++}
++
++static __init void init_cea_offsets(void)
++{
++	struct rnd_state rand_state;
++	unsigned int max_cea, rand;
++	unsigned int i, j;
++
++	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
++	prandom_seed_state(&rand_state, kaslr_get_random_long("CPU entry"));
++
++	/* O(sodding terrible) */
++	for_each_possible_cpu(i) {
++		unsigned int cea;
++
++again:
++		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
++		cea = rand % max_cea;
++
++		for_each_possible_cpu(j) {
++			if (cea_offset(j) == cea)
++				goto again;
++
++			if (i == j)
++				break;
++		}
++
++		per_cpu(_cea_offset, i) = cea;
++	}
++}
++#else /* !X86_64 */
++static __always_inline unsigned int cea_offset(unsigned int cpu)
++{
++	return cpu;
++}
++static inline void init_cea_offsets(void) { }
+ #endif
+ 
+ struct cpu_entry_area *get_cpu_entry_area(int cpu)
+ {
+-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
++	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
+ 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+ 
+ 	return (struct cpu_entry_area *) va;
+@@ -180,7 +223,6 @@ static __init void setup_cpu_entry_area_
+ 
+ 	/* The +1 is for the readonly IDT: */
+ 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
+ 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+ 
+ 	start = CPU_ENTRY_AREA_BASE;
+@@ -196,6 +238,8 @@ void __init setup_cpu_entry_areas(void)
+ {
+ 	unsigned int cpu;
+ 
++	init_cea_offsets();
++
+ 	setup_cpu_entry_area_ptes();
+ 
+ 	for_each_possible_cpu(cpu)
diff --git a/series.conf b/series.conf
index 764775a..338da24 100644
--- a/series.conf
+++ b/series.conf
@@ -22973,6 +22973,7 @@
 	patches.suse/tee-handle-lookup-of-shm-with-reference-count-0.patch
 	patches.suse/x86-pkey-fix-undefined-behaviour-with-pkru_wd_bit.patch
 	patches.suse/recordmcount.pl-fix-typo-in-s390-mcount-regex.patch
+	patches.suse/net-mlx5-DR-Fix-NULL-vs-IS_ERR-checking-in-dr_domain.patch
 	patches.suse/sctp-use-call_rcu-to-free-endpoint.patch
 	patches.suse/net-ena-Fix-undefined-state-when-tx-request-id-is-ou.patch
 	patches.suse/net-ena-Fix-wrong-rx-request-id-by-resetting-device.patch
@@ -23337,6 +23338,7 @@
 	patches.suse/xen-netback-don-t-call-kfree_skb-with-interrupts-dis.patch
 	patches.suse/0001-drm-vmwgfx-Validate-the-box-size-for-the-snooped-cur.patch
 	patches.suse/net-mana-Assign-interrupts-to-CPUs-based-on-NUMA-nod.patch
+	patches.suse/x86-mm-Randomize-per-cpu-entry-area.patch
 	patches.suse/x86-bugs-Flush-IBP-in-ib_prctl_set.patch
 	patches.suse/net-sched-atm-dont-intepret-cls-results-when-asked-t.patch
 	patches.suse/net-sched-cbq-dont-intepret-cls-results-when-asked-t.patch