Commit df24e178 authored by Helge Deller's avatar Helge Deller
Browse files

parisc: Add vDSO support

Add minimal vDSO support, which provides the signal trampoline helpers,
but none of the userspace syscall helpers like time wrappers.

The big benefit of this vDSO implementation is, that we now don't need
an executable stack any longer. PA-RISC is one of the last
architectures where an executable stack was needed in order to implement
the signal trampolines by putting assembly instructions on the stack
which then gets executed. Instead the kernel will provide the relevant
code in the vDSO page and only put the pointers to the signal
information on the stack.

By dropping the need for executable stacks we avoid running into issues
with applications which want non-executable stacks for security reasons.
Additionally, alternative stacks on memory areas without exec
permissions are now supported as well.

This code is based on an initial implementation by Randolph Chung from 2006:
https://lore.kernel.org/linux-parisc/4544A34A.6080700@tausq.org/



I did the porting and lifted the code to current code base. Dave fixed
the unwind code so that gdb and glibc are able to backtrace through the
code. An additional patch to gdb will be pushed upstream by Dave.

Signed-off-by: default avatarHelge Deller <deller@gmx.de>
Signed-off-by: default avatarDave Anglin <dave.anglin@bell.net>
Cc: Randolph Chung <randolph@tausq.org>
Signed-off-by: default avatarHelge Deller <deller@gmx.de>
parent 14615ecc
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -10,6 +10,7 @@ config PARISC
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_NO_SG_CHAIN
	select ARCH_NO_SG_CHAIN
	select ARCH_SUPPORTS_HUGETLBFS if PA20
	select ARCH_SUPPORTS_HUGETLBFS if PA20
	select ARCH_SUPPORTS_MEMORY_FAILURE
	select ARCH_SUPPORTS_MEMORY_FAILURE
+30 −0
Original line number Original line Diff line number Diff line
@@ -44,6 +44,16 @@ endif


export LD_BFD
export LD_BFD


# Set default 32 bits cross compilers for vdso
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
CC_SUFFIXES  = linux linux-gnu unknown-linux-gnu
CROSS32_COMPILE := $(call cc-cross-prefix, \
	$(foreach a,$(CC_ARCHES_32), \
	$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
CROSS32CC := $(CROSS32_COMPILE)gcc
export CROSS32CC

# Set default cross compiler for kernel build
ifdef cross_compiling
ifdef cross_compiling
	ifeq ($(CROSS_COMPILE),)
	ifeq ($(CROSS_COMPILE),)
		CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
		CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
@@ -163,6 +173,26 @@ vmlinuz: vmlinux
	@$(KGZIP) -cf -9 $< > $@
	@$(KGZIP) -cf -9 $< > $@
endif
endif


ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(if $(CONFIG_64BIT),$(Q)$(MAKE) \
		$(build)=arch/parisc/kernel/vdso64 include/generated/vdso64-offsets.h)
	$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 include/generated/vdso32-offsets.h
endif

PHONY += vdso_install

vdso_install:
	$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso $@
	$(if $(CONFIG_COMPAT_VDSO), \
		$(Q)$(MAKE) $(build)=arch/parisc/kernel/vdso32 $@)
install:
install:
	$(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \
	$(CONFIG_SHELL) $(srctree)/arch/parisc/install.sh \
			$(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
			$(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
+15 −0
Original line number Original line Diff line number Diff line
@@ -359,4 +359,19 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk
#define arch_randomize_brk arch_randomize_brk



#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
					int executable_stack);
#define VDSO_AUX_ENT(a, b) NEW_AUX_ENT(a, b)
#define VDSO_CURRENT_BASE current->mm->context.vdso_base

#define ARCH_DLINFO						\
do {								\
	if (VDSO_CURRENT_BASE) {				\
		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);\
	}							\
} while (0)

#endif
#endif
+4 −2
Original line number Original line Diff line number Diff line
@@ -2,7 +2,9 @@
#ifndef _PARISC_MMU_H_
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
#define _PARISC_MMU_H_


/* On parisc, we store the space id here */
typedef struct {
typedef unsigned long mm_context_t;
	unsigned long space_id;
	unsigned long vdso_base;
} mm_context_t;


#endif /* _PARISC_MMU_H_ */
#endif /* _PARISC_MMU_H_ */
+8 −8
Original line number Original line Diff line number Diff line
@@ -20,7 +20,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);
	BUG_ON(atomic_read(&mm->mm_users) != 1);


	mm->context = alloc_sid();
	mm->context.space_id = alloc_sid();
	return 0;
	return 0;
}
}


@@ -28,22 +28,22 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline void
static inline void
destroy_context(struct mm_struct *mm)
destroy_context(struct mm_struct *mm)
{
{
	free_sid(mm->context);
	free_sid(mm->context.space_id);
	mm->context = 0;
	mm->context.space_id = 0;
}
}


static inline unsigned long __space_to_prot(mm_context_t context)
static inline unsigned long __space_to_prot(mm_context_t context)
{
{
#if SPACEID_SHIFT == 0
#if SPACEID_SHIFT == 0
	return context << 1;
	return context.space_id << 1;
#else
#else
	return context >> (SPACEID_SHIFT - 1);
	return context.space_id >> (SPACEID_SHIFT - 1);
#endif
#endif
}
}


static inline void load_context(mm_context_t context)
static inline void load_context(mm_context_t context)
{
{
	mtsp(context, 3);
	mtsp(context.space_id, 3);
	mtctl(__space_to_prot(context), 8);
	mtctl(__space_to_prot(context), 8);
}
}


@@ -89,8 +89,8 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)


	BUG_ON(next == &init_mm); /* Should never happen */
	BUG_ON(next == &init_mm); /* Should never happen */


	if (next->context == 0)
	if (next->context.space_id == 0)
	    next->context = alloc_sid();
		next->context.space_id = alloc_sid();


	switch_mm(prev,next,current);
	switch_mm(prev,next,current);
}
}
Loading