diff --git a/patches.suse/powerpc-64-signal-Fix-regression-in-__kernel_sigtram.patch b/patches.suse/powerpc-64-signal-Fix-regression-in-__kernel_sigtram.patch
index a7919b4..dd7569b 100644
--- a/patches.suse/powerpc-64-signal-Fix-regression-in-__kernel_sigtram.patch
+++ b/patches.suse/powerpc-64-signal-Fix-regression-in-__kernel_sigtram.patch
@@ -66,26 +66,15 @@ index bbf68cd01088..2d4067561293 100644
  	li	r0,__NR_rt_sigreturn
  	sc
 diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
+index 6164d1a1ba11..2f3c359cacd3 100644
 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
 +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
-@@ -150,7 +150,7 @@ VERSION
- 		__kernel_get_tbfreq;
- 		__kernel_sync_dicache;
- 		__kernel_sync_dicache_p5;
--		__kernel_sigtramp_rt64;
-+		__kernel_start_sigtramp_rt64;
- 		__kernel_getcpu;
- 		__kernel_time;
- 
-diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
---- a/arch/powerpc/kernel/vdso.c
-+++ b/arch/powerpc/kernel/vdso.c
-@@ -503,7 +503,7 @@ static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
- 	 */
- 
- #ifdef CONFIG_PPC64
--	vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
-+	vdso64_rt_sigtramp = find_function64(v64, "__kernel_start_sigtramp_rt64");
- #endif
- 	vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
- 	vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
+@@ -131,4 +131,4 @@ VERSION
+ /*
+  * Make the sigreturn code visible to the kernel.
+  */
+-VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
++VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
+--
+2.26.2
+
diff --git a/patches.suse/powerpc-vdso-Replace-vdso_base-by-vdso.patch b/patches.suse/powerpc-vdso-Replace-vdso_base-by-vdso.patch
new file mode 100644
index 0000000..40b5cb2
--- /dev/null
+++ b/patches.suse/powerpc-vdso-Replace-vdso_base-by-vdso.patch
@@ -0,0 +1,231 @@
+From c102f07667486dc4a6ae1e3fe7aa67135cb40e3e Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Sun, 27 Sep 2020 09:16:29 +0000
+Subject: [PATCH] powerpc/vdso: Replace vdso_base by vdso
+
+References: bsc#1183002
+Patch-mainline: v5.11-rc1
+Git-commit: c102f07667486dc4a6ae1e3fe7aa67135cb40e3e
+
+All architectures other than powerpc and s390 use a void pointer
+named 'vdso' to reference the VDSO mapping.
+
+In a following patch, the VDSO data page will be put in front of
+the text, so vdso_base will then no longer point to the VDSO text.
+
+To avoid confusion between vdso_base and VDSO text, rename vdso_base
+into vdso and make it a void __user *.
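+
+To illustrate the shape of the conversion (an editor's sketch distilled
+from the hunks below, not extra code), a typical user of the field
+changes like this:
+
+	/* before: the mapping address is carried as an unsigned long */
+	regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+
+	/* after: it is a user pointer, cast where arithmetic is needed */
+	regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;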
+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/8e6cefe474aa4ceba028abb729485cd46c140990.1601197618.git.christophe.leroy@csgroup.eu
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/include/asm/book3s/32/mmu-hash.h | 2 +-
+ arch/powerpc/include/asm/book3s/64/mmu.h | 2 +-
+ arch/powerpc/include/asm/elf.h | 2 +-
+ arch/powerpc/include/asm/mmu_context.h | 6 ++++--
+ arch/powerpc/include/asm/nohash/32/mmu-40x.h | 2 +-
+ arch/powerpc/include/asm/nohash/32/mmu-44x.h | 2 +-
+ arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 2 +-
+ arch/powerpc/include/asm/nohash/mmu-book3e.h | 2 +-
+ arch/powerpc/kernel/signal_32.c | 8 ++++----
+ arch/powerpc/kernel/signal_64.c | 4 ++--
+ arch/powerpc/kernel/vdso.c | 8 ++++----
+ arch/powerpc/perf/callchain_32.c | 8 ++++----
+ arch/powerpc/perf/callchain_64.c | 4 ++--
+ 13 files changed, 27 insertions(+), 25 deletions(-)
+
+--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
++++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+@@ -90,7 +90,7 @@ struct hash_pte {
+ 
+ typedef struct {
+ 	unsigned long id;
+-	unsigned long vdso_base;
++	void __user *vdso;
+ } mm_context_t;
+ 
+ void update_bats(void);
+--- a/arch/powerpc/include/asm/book3s/64/mmu.h
++++ b/arch/powerpc/include/asm/book3s/64/mmu.h
+@@ -123,7 +123,7 @@ typedef struct {
+ 
+ 	struct hash_mm_context *hash_context;
+ 
+-	unsigned long vdso_base;
++	void __user *vdso;
+ 	/*
+ 	 * pagetable fragment support
+ 	 */
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -171,7 +171,7 @@ do { \
+ 	NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
+ 	NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
+ 	NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
+-	VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
++	VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
+ 	ARCH_DLINFO_CACHE_GEOMETRY; \
+ } while (0)
+ 
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -234,8 +234,10 @@ extern void arch_exit_mmap(struct mm_str
+ static inline void arch_unmap(struct mm_struct *mm,
+ 			      unsigned long start, unsigned long end)
+ {
+-	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+-		mm->context.vdso_base = 0;
++	unsigned long vdso_base = (unsigned long)mm->context.vdso;
++
++	if (start <= vdso_base && vdso_base < end)
++		mm->context.vdso = NULL;
+ }
+ 
+ static inline void arch_bprm_mm_init(struct mm_struct *mm,
+--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
++++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+@@ -57,7 +57,7 @@
+ typedef struct {
+ 	unsigned int id;
+ 	unsigned int active;
+-	unsigned long vdso_base;
++	void __user *vdso;
+ } mm_context_t;
+ 
+ #endif /* !__ASSEMBLY__ */
+--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
++++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
+ typedef struct {
+ 	unsigned int id;
+ 	unsigned int active;
+-	unsigned long vdso_base;
++	void __user *vdso;
+ } mm_context_t;
+ 
+ /* patch sites */
+--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
++++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+@@ -209,7 +209,7 @@ struct slice_mask {
+ typedef struct {
+ 	unsigned int id;
+ 	unsigned int active;
+-	unsigned long vdso_base;
++	void __user *vdso;
+ #ifdef CONFIG_PPC_MM_SLICES
+ 	u16 user_psize; /* page size index */
+ 	unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
+--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
++++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
+@@ -239,7 +239,7 @@ extern unsigned int tlbcam_index;
+ typedef struct {
+ 	unsigned int id;
+ 	unsigned int active;
+-	unsigned long vdso_base;
++	void __user *vdso;
+ } mm_context_t;
+ 
+ /* Page size definitions, common between 32 and 64-bit
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -855,8 +855,8 @@ int handle_rt_signal64(struct ksignal *k
+ 		tsk->thread.fp_state.fpscr = 0;
+ 
+ 	/* Set up to return from userspace. */
+-	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
+-		regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
++	if (vdso64_rt_sigtramp && tsk->mm->context.vdso) {
++		regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;
+ 	} else {
+ 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+ 		if (err)
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -177,7 +177,7 @@ int arch_setup_additional_pages(struct l
+ 		vdso_base = VDSO32_MBASE;
+ #endif
+ 
+-	current->mm->context.vdso_base = 0;
++	current->mm->context.vdso = (void __user *)0;
+ 
+ 	/* vDSO has a problem and was disabled, just don't "enable" it for the
+ 	 * process
+@@ -212,7 +212,7 @@ int arch_setup_additional_pages(struct l
+ 	 * install_special_mapping or the perf counter mmap tracking code
+ 	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ 	 */
+-	current->mm->context.vdso_base = vdso_base;
++	mm->context.vdso = (void __user *)vdso_base;
+ 
+ 	/*
+ 	 * our vma flags don't have VM_WRITE so by default, the process isn't
+@@ -229,7 +229,7 @@ int arch_setup_additional_pages(struct l
+ 			 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ 			 vdso_pagelist);
+ 	if (rc) {
+-		current->mm->context.vdso_base = 0;
++		current->mm->context.vdso = NULL;
+ 		goto fail_mmapsem;
+ 	}
+ 
+@@ -243,7 +243,7 @@ int arch_setup_additional_pages(struct l
+ 
+ const char *arch_vma_name(struct vm_area_struct *vma)
+ {
+-	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
++	if (vma->vm_mm && vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
+ 		return "[vdso]";
+ 	return NULL;
+ }
+--- a/arch/powerpc/perf/callchain_32.c
++++ b/arch/powerpc/perf/callchain_32.c
+@@ -77,8 +77,8 @@ static int is_sigreturn_32_address(unsig
+ {
+ 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
+ 		return 1;
+-	if (vdso32_sigtramp && current->mm->context.vdso_base &&
+-	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
++	if (vdso32_sigtramp && current->mm->context.vdso &&
++	    nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp)
+ 		return 1;
+ 	return 0;
+ }
+@@ -88,8 +88,8 @@ static int is_rt_sigreturn_32_address(un
+ 	if (nip == fp + offsetof(struct rt_signal_frame_32,
+ 				uc.uc_mcontext.mc_pad))
+ 		return 1;
+-	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
+-	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
++	if (vdso32_rt_sigtramp && current->mm->context.vdso &&
++	    nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp)
+ 		return 1;
+ 	return 0;
+ }
+--- a/arch/powerpc/perf/callchain_64.c
++++ b/arch/powerpc/perf/callchain_64.c
+@@ -95,8 +95,8 @@ static int is_sigreturn_64_address(unsig
+ {
+ 	if (nip == fp + offsetof(struct signal_frame_64, tramp))
+ 		return 1;
+-	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
+-	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
++	if (vdso64_rt_sigtramp && current->mm->context.vdso &&
++	    nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp)
+ 		return 1;
+ 	return 0;
+ }
+--- a/arch/powerpc/include/asm/mm-arch-hooks.h
++++ b/arch/powerpc/include/asm/mm-arch-hooks.h
+@@ -17,8 +17,8 @@ static inline void arch_remap(struct mm_struct *mm,
+ 	 * mremap() doesn't allow moving multiple vmas so we can limit the
+ 	 * check to old_start == vdso_base.
+ 	 */
+-	if (old_start == mm->context.vdso_base)
+-		mm->context.vdso_base = new_start;
++	if (old_start == (unsigned long)mm->context.vdso)
++		mm->context.vdso = (void __user *)new_start;
+ }
+ #define arch_remap arch_remap
+ 
diff --git a/patches.suse/powerpc-vdso-Retrieve-sigtramp-offsets-at-buildtime.patch b/patches.suse/powerpc-vdso-Retrieve-sigtramp-offsets-at-buildtime.patch
new file mode 100644
index 0000000..de294eb
--- /dev/null
+++ b/patches.suse/powerpc-vdso-Retrieve-sigtramp-offsets-at-buildtime.patch
@@ -0,0 +1,253 @@
+From 91bf695596f594e42d69d70deb2ae53cafecf77c Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Sun, 27 Sep 2020 09:16:33 +0000
+Subject: [PATCH] powerpc/vdso: Retrieve sigtramp offsets at buildtime
+
+References: bsc#1183002
+Patch-mainline: v5.11-rc1
+Git-commit: 91bf695596f594e42d69d70deb2ae53cafecf77c
+
+This is copied from arm64.
+
+Instead of using runtime generated signal trampoline offsets,
+get the offsets at buildtime.
+
+If the said trampoline doesn't exist, the build will fail, so
+there is no need to check at runtime whether the trampoline
+exists in the VDSO.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/f8bfd6812c3e3678b1cdb4d55a52f9eb022b40d3.1601197618.git.christophe.leroy@csgroup.eu
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/Makefile | 15 +++++++++++++++
+ arch/powerpc/include/asm/vdso.h | 12 ++++++++++++
+ arch/powerpc/kernel/signal_32.c | 8 ++++----
+ arch/powerpc/kernel/signal_64.c | 4 ++--
+ arch/powerpc/kernel/vdso32/Makefile | 8 ++++++++
+ arch/powerpc/kernel/vdso32/gen_vdso_offsets.sh | 16 ++++++++++++++++
+ arch/powerpc/kernel/vdso32/vdso32.lds.S | 6 ++++++
+ arch/powerpc/kernel/vdso64/Makefile | 8 ++++++++
+ arch/powerpc/kernel/vdso64/gen_vdso_offsets.sh | 16 ++++++++++++++++
+ arch/powerpc/kernel/vdso64/vdso64.lds.S | 5 +++++
+ arch/powerpc/perf/callchain_32.c | 8 ++++----
+ arch/powerpc/perf/callchain_64.c | 4 ++--
+ 12 files changed, 98 insertions(+), 12 deletions(-)
+ create mode 100755 arch/powerpc/kernel/vdso32/gen_vdso_offsets.sh
+ create mode 100755 arch/powerpc/kernel/vdso64/gen_vdso_offsets.sh
+
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 86c925bfbb76..fde3dbe57bda 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -409,6 +409,21 @@ install:
+ archclean:
+ 	$(Q)$(MAKE) $(clean)=$(boot)
+ 
++ifeq ($(KBUILD_EXTMOD),)
++# We need to generate vdso-offsets.h before compiling certain files in kernel/.
++# In order to do that, we should use the archprepare target, but we can't since
++# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
++# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
++# Therefore we need to generate the header after prepare0 has been made, hence
++# this hack.
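++# (In effect: "make prepare" now also runs vdso_prepare after prepare0,
++# which invokes the vdso32/vdso64 Makefiles below to emit
++# include/generated/vdso32-offsets.h and include/generated/vdso64-offsets.h.)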
++prepare: vdso_prepare
++vdso_prepare: prepare0
++	$(if $(CONFIG_VDSO32),$(Q)$(MAKE) \
++		$(build)=arch/powerpc/kernel/vdso32 include/generated/vdso32-offsets.h)
++	$(if $(CONFIG_PPC64),$(Q)$(MAKE) \
++		$(build)=arch/powerpc/kernel/vdso64 include/generated/vdso64-offsets.h)
++endif
++
+ archprepare: checkbin
+ 
+ archheaders:
+diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
+index 2ff884853f97..f5257b7f17d0 100644
+--- a/arch/powerpc/include/asm/vdso.h
++++ b/arch/powerpc/include/asm/vdso.h
+@@ -15,6 +15,18 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++#ifdef CONFIG_PPC64
++#include <generated/vdso64-offsets.h>
++#endif
++
++#ifdef CONFIG_VDSO32
++#include <generated/vdso32-offsets.h>
++#endif
++
++#define VDSO64_SYMBOL(base, name) ((unsigned long)(base) + (vdso64_offset_##name))
++
++#define VDSO32_SYMBOL(base, name) ((unsigned long)(base) + (vdso32_offset_##name))
++
+ /* Offsets relative to thread->vdso_base */
+ extern unsigned long vdso64_rt_sigtramp;
+ extern unsigned long vdso32_sigtramp;
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 68e850bd5ef7..f9e4a1ac440f 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -854,8 +854,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 		tsk->thread.fp_state.fpscr = 0;
+ 
+ 	/* Set up to return from userspace. */
+-	if (vdso64_rt_sigtramp && tsk->mm->context.vdso) {
+-		regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp;
++	if (tsk->mm->context.vdso) {
++		regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+ 	} else {
+ 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+ 		if (err)
+diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
+--- a/arch/powerpc/kernel/vdso32/Makefile
++++ b/arch/powerpc/kernel/vdso32/Makefile
+@@ -50,6 +50,14 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(obj-vdso32): %.o: %.S FORCE
+ 	$(call if_changed_dep,vdso32as)
+ 
++# Generate VDSO offsets using helper script
++gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
++quiet_cmd_vdsosym = VDSOSYM $@
++      cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
++
++include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
++	$(call if_changed,vdsosym)
++
+ # actual build commands
+ quiet_cmd_vdso32ld = VDSO32L $@
+       cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+diff --git a/arch/powerpc/kernel/vdso32/gen_vdso_offsets.sh b/arch/powerpc/kernel/vdso32/gen_vdso_offsets.sh
+new file mode 100755
+index 000000000000..c7b54a5dcd3e
+--- /dev/null
++++ b/arch/powerpc/kernel/vdso32/gen_vdso_offsets.sh
+@@ -0,0 +1,16 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++
++#
++# Match symbols in the DSO that look like VDSO_*; produce a header file
++# of constant offsets into the shared object.
++#
++# Doing this inside the Makefile will break the $(filter-out) function,
++# causing Kbuild to rebuild the vdso-offsets header file every time.
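++#
++# For example (illustrative symbol address, not from a real build), given
++# "nm vdso32.so.dbg" output such as
++#   000005f0 T VDSO_sigtramp32
++# this script emits
++#   #define vdso32_offset_sigtramp32	0x05f0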
++#
++# Author: Will Deacon <will.deacon@arm.com>
++#
++
++LC_ALL=C
++sed -n -e 's/^00*/0/' -e \
++'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso32_offset_\2\t0x\1/p'
+diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
+--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
++++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
+@@ -143,3 +143,9 @@ VERSION
+ 	local: *;
+ 	};
+ }
++
++/*
++ * Make the sigreturn code visible to the kernel.
++ */
++VDSO_sigtramp32 = __kernel_sigtramp32;
++VDSO_sigtramp_rt32 = __kernel_sigtramp_rt32;
+diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
+--- a/arch/powerpc/kernel/vdso64/Makefile
++++ b/arch/powerpc/kernel/vdso64/Makefile
+@@ -34,6 +34,14 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(obj-vdso64): %.o: %.S FORCE
+ 	$(call if_changed_dep,vdso64as)
+ 
++# Generate VDSO offsets using helper script
++gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
++quiet_cmd_vdsosym = VDSOSYM $@
++      cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
++
++include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE
++	$(call if_changed,vdsosym)
++
+ # actual build commands
+ quiet_cmd_vdso64ld = VDSO64L $@
+       cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+diff --git a/arch/powerpc/kernel/vdso64/gen_vdso_offsets.sh b/arch/powerpc/kernel/vdso64/gen_vdso_offsets.sh
+new file mode 100755
+index 000000000000..4bf15ffd5933
+--- /dev/null
++++ b/arch/powerpc/kernel/vdso64/gen_vdso_offsets.sh
+@@ -0,0 +1,16 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++
++#
++# Match symbols in the DSO that look like VDSO_*; produce a header file
++# of constant offsets into the shared object.
++#
++# Doing this inside the Makefile will break the $(filter-out) function,
++# causing Kbuild to rebuild the vdso-offsets header file every time.
++#
++# Author: Will Deacon <will.deacon@arm.com>
++#
++
++LC_ALL=C
++sed -n -e 's/^00*/0/' -e \
++'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso64_offset_\2\t0x\1/p'
+diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
+--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
++++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
+@@ -127,3 +127,8 @@ VERSION
+ 	local: *;
+ 	};
+ }
++
++/*
++ * Make the sigreturn code visible to the kernel.
++ */
++VDSO_sigtramp_rt64 = __kernel_sigtramp_rt64;
+diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
+--- a/arch/powerpc/perf/callchain_32.c
++++ b/arch/powerpc/perf/callchain_32.c
+@@ -59,8 +59,8 @@ static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
+ {
+ 	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
+ 		return 1;
+-	if (vdso32_sigtramp && current->mm->context.vdso &&
+-	    nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp)
++	if (current->mm->context.vdso &&
++	    nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp32))
+ 		return 1;
+ 	return 0;
+ }
+@@ -70,8 +70,8 @@ static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
+ 	if (nip == fp + offsetof(struct rt_signal_frame_32,
+ 				uc.uc_mcontext.mc_pad))
+ 		return 1;
+-	if (vdso32_rt_sigtramp && current->mm->context.vdso &&
+-	    nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp)
++	if (current->mm->context.vdso &&
++	    nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp_rt32))
+ 		return 1;
+ 	return 0;
+ }
+diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
+index 6b9c06058c33..8d0df4226328 100644
+--- a/arch/powerpc/perf/callchain_64.c
++++ b/arch/powerpc/perf/callchain_64.c
+@@ -68,8 +68,8 @@ static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
+ {
+ 	if (nip == fp + offsetof(struct signal_frame_64, tramp))
+ 		return 1;
+-	if (vdso64_rt_sigtramp && current->mm->context.vdso &&
+-	    nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp)
++	if (current->mm->context.vdso &&
++	    nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
+ 		return 1;
+ 	return 0;
+ }
+--
+2.26.2
+
diff --git a/series.conf b/series.conf
index 50fb8fb..9f93b50 100644
--- a/series.conf
+++ b/series.conf
@@ -46131,6 +46131,8 @@
 	patches.suse/powerpc-powernv-memtrace-Fix-crashing-the-kernel-whe.patch
 	patches.suse/powerpc-perf-Fix-crash-with-is_sier_available-when-p.patch
 	patches.suse/powerpc-signal-Move-inline-functions-in-signal.h.patch
+	patches.suse/powerpc-vdso-Replace-vdso_base-by-vdso.patch
+	patches.suse/powerpc-vdso-Retrieve-sigtramp-offsets-at-buildtime.patch
 	patches.suse/powerpc-64-Fix-an-EMIT_BUG_ENTRY-in-head_64.S.patch
 	patches.suse/powerpc-perf-Invoke-per-CPU-variable-access-with-dis.patch
 	patches.suse/powerpc-Refactor-is_kvm_guest-declaration-to-new-hea.patch
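
An illustrative end-to-end sketch of the buildtime mechanism added above
(symbol address and offset are made up; the real values depend on the
built vdso64.so.dbg):

    $ nm vdso64.so.dbg
    00000000000006e0 T VDSO_sigtramp_rt64
    $ nm vdso64.so.dbg | ./gen_vdso_offsets.sh | LC_ALL=C sort
    #define vdso64_offset_sigtramp_rt64	0x06e0

The generated include/generated/vdso64-offsets.h then backs the vdso.h
macro, so signal delivery resolves the trampoline with no runtime symbol
lookup:

    /* #define VDSO64_SYMBOL(base, name) \
     *         ((unsigned long)(base) + (vdso64_offset_##name)) */
    regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);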