Commit 82821be8 authored by Linus Torvalds
Browse files
Pull arm64 fixes from Catalin Marinas:

 - Set the minimum GCC version to 5.1 for arm64 due to earlier compiler
   bugs.

 - Make atomic helpers __always_inline to avoid a section mismatch when
   compiling with clang.

 - Fix the CMA and crashkernel reservations to use ZONE_DMA (remove the
   arm64_dma32_phys_limit variable, no longer needed with a dynamic
   ZONE_DMA sizing in 5.11).

 - Remove redundant IRQ flag tracing that was leaving lockdep
   inconsistent with the hardware state.

 - Revert perf events based hard lockup detector that was causing
   smp_processor_id() to be called in preemptible context.

 - Some trivial cleanups - a spelling fix, renaming S_FRAME_SIZE to
   PT_REGS_SIZE, and adding missing function prototypes.

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: selftests: Fix spelling of 'Mismatch'
  arm64: syscall: include prototype for EL0 SVC functions
  compiler.h: Raise minimum version of GCC to 5.1 for arm64
  arm64: make atomic helpers __always_inline
  arm64: rename S_FRAME_SIZE to PT_REGS_SIZE
  Revert "arm64: Enable perf events based hard lockup detector"
  arm64: entry: remove redundant IRQ flag tracing
  arm64: Remove arm64_dma32_phys_limit and its uses
parents f288c895 3a57a643
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -174,8 +174,6 @@ config ARM64
	select HAVE_NMI
	select HAVE_PATA_PLATFORM
	select HAVE_PERF_EVENTS
	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
+5 −5
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@
#include <asm/lse.h>

#define ATOMIC_OP(op)							\
static inline void arch_##op(int i, atomic_t *v)			\
static __always_inline void arch_##op(int i, atomic_t *v)		\
{									\
	__lse_ll_sc_body(op, i, v);					\
}
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, op)					\
static inline int arch_##op##name(int i, atomic_t *v)			\
static __always_inline int arch_##op##name(int i, atomic_t *v)		\
{									\
	return __lse_ll_sc_body(op##name, i, v);			\
}
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
#undef ATOMIC_FETCH_OPS

#define ATOMIC64_OP(op)							\
static inline void arch_##op(long i, atomic64_t *v)			\
static __always_inline void arch_##op(long i, atomic64_t *v)		\
{									\
	__lse_ll_sc_body(op, i, v);					\
}
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, op)					\
static inline long arch_##op##name(long i, atomic64_t *v)		\
static __always_inline long arch_##op##name(long i, atomic64_t *v)	\
{									\
	return __lse_ll_sc_body(op##name, i, v);			\
}
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
+1 −2
Original line number Diff line number Diff line
@@ -94,8 +94,7 @@
#endif /* CONFIG_ARM64_FORCE_52BIT */

extern phys_addr_t arm64_dma_phys_limit;
extern phys_addr_t arm64_dma32_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+1 −1
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ int main(void)
  DEFINE(S_SDEI_TTBR1,		offsetof(struct pt_regs, sdei_ttbr1));
  DEFINE(S_PMR_SAVE,		offsetof(struct pt_regs, pmr_save));
  DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
  BLANK();
#ifdef CONFIG_COMPAT
  DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,		offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
+6 −6
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@
 */
	.macro  ftrace_regs_entry, allregs=0
	/* Make room for pt_regs, plus a callee frame */
	sub	sp, sp, #(S_FRAME_SIZE + 16)
	sub	sp, sp, #(PT_REGS_SIZE + 16)

	/* Save function arguments (and x9 for simplicity) */
	stp	x0, x1, [sp, #S_X0]
@@ -61,15 +61,15 @@
	.endif

	/* Save the callsite's SP and LR */
	add	x10, sp, #(S_FRAME_SIZE + 16)
	add	x10, sp, #(PT_REGS_SIZE + 16)
	stp	x9, x10, [sp, #S_LR]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #S_PC]

	/* Create a frame record for the callsite above pt_regs */
	stp	x29, x9, [sp, #S_FRAME_SIZE]
	add	x29, sp, #S_FRAME_SIZE
	stp	x29, x9, [sp, #PT_REGS_SIZE]
	add	x29, sp, #PT_REGS_SIZE

	/* Create our frame record within pt_regs. */
	stp	x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
	ldr	x9, [sp, #S_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #S_FRAME_SIZE + 16
	add	sp, sp, #PT_REGS_SIZE + 16

	ret	x9
SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
	ldr	x0, [sp, #S_PC]
	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
	ldr	x2, [sp, #S_FRAME_SIZE]	   	// parent fp (callsite's FP)
	ldr	x2, [sp, #PT_REGS_SIZE]	   	// parent fp (callsite's FP)
	bl	prepare_ftrace_return
	b	ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
Loading