Commit 53630a1f authored by Catalin Marinas

Merge branch 'for-next/misc' into for-next/core

* for-next/misc:
  : Miscellaneous patches
  arm64/kprobe: Optimize the performance of patching single-step slot
  ARM64: reloc_test: add __init/__exit annotations to module init/exit funcs
  arm64/mm: fold check for KFENCE into can_set_direct_map()
  arm64: uaccess: simplify uaccess_mask_ptr()
  arm64: mte: move register initialization to C
  arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()
  arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()
  arm64: support huge vmalloc mappings
  arm64: spectre: increase parameters that can be used to turn off bhb mitigation individually
  arm64: run softirqs on the per-CPU IRQ stack
  arm64: compat: Implement misalignment fixups for multiword loads
parents c704cf27 a0caebbd
Documentation/admin-guide/kernel-parameters.txt  +6 −1
@@ -3207,6 +3207,7 @@
 					       spectre_v2_user=off [X86]
 					       spec_store_bypass_disable=off [X86,PPC]
 					       ssbd=force-off [ARM64]
+					       nospectre_bhb [ARM64]
 					       l1tf=off [X86]
 					       mds=off [X86]
 					       tsx_async_abort=off [X86]
@@ -3613,7 +3614,7 @@
 
 	nohugeiomap	[KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.
 
-	nohugevmalloc	[PPC] Disable kernel huge vmalloc mappings.
+	nohugevmalloc	[KNL,X86,PPC,ARM64] Disable kernel huge vmalloc mappings.
 
 	nosmt		[KNL,S390] Disable symmetric multithreading (SMT).
 			Equivalent to smt=1.
@@ -3631,6 +3632,10 @@
 			vulnerability. System may allow data leaks with this
 			option.
 
+	nospectre_bhb	[ARM64] Disable all mitigations for Spectre-BHB (branch
+			history injection) vulnerability. System may allow data leaks
+			with this option.
+
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
 
arch/arm64/Kconfig  +5 −0
@@ -149,6 +149,7 @@ config ARM64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_COMPILER_H
+	select HAVE_ARCH_HUGE_VMALLOC
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
@@ -230,6 +231,7 @@ config ARM64
 	select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
 	select TRACE_IRQFLAGS_SUPPORT
 	select TRACE_IRQFLAGS_NMI_SUPPORT
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -1575,6 +1577,9 @@ config THUMB2_COMPAT_VDSO
 	  Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
 	  otherwise with '-marm'.
 
+config COMPAT_ALIGNMENT_FIXUPS
+	bool "Fix up misaligned multi-word loads and stores in user space"
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on SYSCTL
arch/arm64/include/asm/exception.h  +1 −0
@@ -71,6 +71,7 @@ void do_sysinstr(unsigned long esr, struct pt_regs *regs);
 void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs);
 void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
 void do_cp15instr(unsigned long esr, struct pt_regs *regs);
+int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
arch/arm64/include/asm/mte.h  +5 −0
@@ -42,7 +42,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
+void mte_cpu_setup(void);
 void mte_suspend_enter(void);
+void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -72,6 +74,9 @@ static inline void mte_thread_switch(struct task_struct *next)
 static inline void mte_suspend_enter(void)
 {
 }
+static inline void mte_suspend_exit(void)
+{
+}
 static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	return 0;
arch/arm64/include/asm/uaccess.h  +11 −11
@@ -203,9 +203,11 @@ static inline void uaccess_enable_privileged(void)
 }
 
 /*
- * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
- * user address. In case the pointer is tagged (has the top byte set), untag
- * the pointer before checking.
+ * Sanitize a uaccess pointer such that it cannot reach any kernel address.
+ *
+ * Clearing bit 55 ensures the pointer cannot address any portion of the TTBR1
+ * address range (i.e. any kernel address), and either the pointer falls within
+ * the TTBR0 address range or must cause a fault.
  */
 #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
 static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -213,14 +215,12 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 	void __user *safe_ptr;
 
 	asm volatile(
-	"	bics	xzr, %3, %2\n"
-	"	csel	%0, %1, xzr, eq\n"
-	: "=&r" (safe_ptr)
-	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
-	  "r" (untagged_addr(ptr))
-	: "cc");
-
-	csdb();
+	"	bic	%0, %1, %2\n"
+	: "=r" (safe_ptr)
+	: "r" (ptr),
+	  "i" (BIT(55))
+	);
+
 	return safe_ptr;
 }
 
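As additional context for the __uaccess_mask_ptr() rewrite above (not part of this commit): the new comment relies on bit 55 being the bit that distinguishes the TTBR0 (user) and TTBR1 (kernel) halves of the arm64 address space. The sketch below shows the same bit-55 masking in plain C for a 64-bit build, with made-up example addresses; the kernel itself does it with the single BIC instruction shown in the diff and no longer needs the CSDB barrier of the old compare-and-select sequence.

/*
 * Stand-alone illustration of the bit-55 masking described in the new
 * __uaccess_mask_ptr() comment. Plain C rather than inline asm; the
 * addresses are made-up examples and this is not kernel code.
 */
#include <stdio.h>

#define BIT(n)	(1UL << (n))

static unsigned long mask_ptr(unsigned long ptr)
{
	/* Clearing bit 55 pushes any TTBR1 (kernel) address out of reach. */
	return ptr & ~BIT(55);
}

int main(void)
{
	unsigned long user_addr   = 0x0000aaaabbbb1000UL;	/* bit 55 clear: left unchanged */
	unsigned long kernel_addr = 0xffff800012345678UL;	/* bit 55 set: becomes a faulting address */

	printf("user:   %#lx -> %#lx\n", user_addr, mask_ptr(user_addr));
	printf("kernel: %#lx -> %#lx\n", kernel_addr, mask_ptr(kernel_addr));
	return 0;
}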