diff --git a/patches.suse/bpf-x86-Validate-computation-of-branch-displacements.patch b/patches.suse/bpf-x86-Validate-computation-of-branch-displacements.patch new file mode 100644 index 0000000..6e231fc --- /dev/null +++ b/patches.suse/bpf-x86-Validate-computation-of-branch-displacements.patch @@ -0,0 +1,63 @@ +From: Piotr Krysiuk +Date: Mon, 5 Apr 2021 22:52:15 +0100 +Subject: bpf, x86: Validate computation of branch displacements for x86-64 +Patch-mainline: v5.12-rc7 +Git-commit: e4d4d456436bfb2fe412ee2cd489f7658449b098 +References: bsc#1184391 CVE-2021-29154 + +The branch displacement logic in the BPF JIT compilers for x86 assumes +that, for any generated branch instruction, the distance cannot +increase between optimization passes. + +But this assumption can be violated due to how the distances are +computed. Specifically, whenever a backward branch is processed in +do_jit(), the distance is computed by subtracting the positions in the +machine code from different optimization passes. This is because part +of addrs[] is already updated for the current optimization pass, before +the branch instruction is visited. + +And so the optimizer can expand blocks of machine code in some cases. + +This can confuse the optimizer logic, where it assumes that a fixed +point has been reached for all machine code blocks once the total +program size stops changing. And then the JIT compiler can output +abnormal machine code containing incorrect branch displacements. + +To mitigate this issue, we assert that a fixed point is reached while +populating the output image. This rejects any problematic programs. +The issue affects both x86-32 and x86-64. We mitigate separately to +ease backporting. + +Signed-off-by: Piotr Krysiuk +Reviewed-by: Daniel Borkmann +Signed-off-by: Daniel Borkmann +Acked-by: Gary Lin + +NOTE from Gary: + * x86-64 and x86-32 use the same jit code in this branch, so there is no + need to backport the x86-32 fix (26f55a59dc65ff77cd1c4b37991e26497fc68049). 
+ +--- + arch/x86/net/bpf_jit_comp.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +--- a/arch/x86/net/bpf_jit_comp.c ++++ b/arch/x86/net/bpf_jit_comp.c +@@ -1060,7 +1060,16 @@ common_load: + } + + if (image) { +- if (unlikely(proglen + ilen > oldproglen)) { ++ /* ++ * When populating the image, assert that: ++ * ++ * i) We do not write beyond the allocated space, and ++ * ii) addrs[i] did not change from the prior run, in order ++ * to validate assumptions made for computing branch ++ * displacements. ++ */ ++ if (unlikely(proglen + ilen > oldproglen || ++ proglen + ilen != addrs[i])) { + pr_err("bpf_jit: fatal error\n"); + return -EFAULT; + } diff --git a/patches.suse/xen-events-fix-setting-irq-affinity.patch b/patches.suse/xen-events-fix-setting-irq-affinity.patch new file mode 100644 index 0000000..5b1cf38 --- /dev/null +++ b/patches.suse/xen-events-fix-setting-irq-affinity.patch @@ -0,0 +1,49 @@ +Patch-mainline: Submitted, 2021/04/21 - stable@vger.kernel.org +From: Juergen Gross +Date: Mon, 12 Apr 2021 07:50:03 +0200 +References: bsc#1184583 XSA-332 CVE-2020-27673 +Subject: [PATCH] xen/events: fix setting irq affinity + +The backport of upstream patch 25da4618af240fbec61 ("xen/events: don't +unmask an event channel when an eoi is pending") introduced a +regression for stable kernels 5.10 and older: setting IRQ affinity for +IRQs related to interdomain events would no longer work, as moving the +IRQ to its new cpu was not included in the irq_ack callback for those +events. + +Fix that by adding the needed call. + +Note that kernels 5.11 and later don't need the explicit moving of the +IRQ to the target cpu in the irq_ack callback, due to a rework of the +affinity setting in kernel 5.11. 
+ +Signed-off-by: Juergen Gross +--- + drivers/xen/events/events_base.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c +index 7bd03f6e0422..ee5269331406 100644 +--- a/drivers/xen/events/events_base.c ++++ b/drivers/xen/events/events_base.c +@@ -1809,7 +1809,7 @@ static void lateeoi_ack_dynirq(struct irq_data *data) + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EOI_PENDING); +- event_handler_exit(info); ++ ack_dynirq(data); + } + } + +@@ -1820,7 +1820,7 @@ static void lateeoi_mask_ack_dynirq(struct irq_data *data) + + if (VALID_EVTCHN(evtchn)) { + do_mask(info, EVT_MASK_REASON_EXPLICIT); +- event_handler_exit(info); ++ ack_dynirq(data); + } + } + +-- +2.26.2 + diff --git a/series.conf b/series.conf index 2f57ac4..09d6865 100644 --- a/series.conf +++ b/series.conf @@ -58366,6 +58366,7 @@ patches.suse/nfc-fix-refcount-leak-in-llcp_sock_connect.patch patches.suse/nfc-fix-memory-leak-in-llcp_sock_connect.patch patches.suse/nfc-Avoid-endless-loops-caused-by-repeated-llcp_sock.patch + patches.suse/bpf-x86-Validate-computation-of-branch-displacements.patch # netdev/net-next patches.suse/ibmvnic-Use-skb_frag_address-instead-of-hand-coding-.patch @@ -59159,6 +59160,8 @@ patches.suse/xen-disable_hotplug_cpu.patch patches.suse/0001-xen-don-t-reschedule-in-preemption-off-sections.patch + patches.suse/xen-events-fix-setting-irq-affinity.patch + # XSA-349 cleanup patches patches.suse/0001-xen-revert-Disallow-pending-watch-messages.patch patches.suse/0002-xen-revert-Count-pending-messages-for-each-watch.patch