Commit 63d1cb53 authored by Linus Torvalds
Pull powerpc fixes from Michael Ellerman:

 - Fix a regression in the conversion of the 64-bit BookE interrupt
   entry to C.

 - Fix KVM hosts running with the hash MMU since the recent KVM gfn
   changes.

 - Fix a deadlock in our paravirt spinlocks when hcall tracing is
   enabled.

 - Several fixes for oopses in our runtime code patching for security
   mitigations.

 - A couple of minor fixes for the recent conversion of 32-bit interrupt
   entry/exit to C.

 - Fix __get_user() causing spurious crashes in sigreturn due to a bad
   inline asm constraint, spotted with GCC 11.

 - A fix for the way we track IRQ masking state vs NMI interrupts when
   using the new scv system call entry path.

 - A couple more minor fixes.

Thanks to Cédric Le Goater, Christian Zigotzky, Christophe Leroy,
Naveen N. Rao, Nicholas Piggin, Paul Menzel, and Sean Christopherson.

* tag 'powerpc-5.13-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64e/interrupt: Fix nvgprs being clobbered
  powerpc/64s: Make NMI record implicitly soft-masked code as irqs disabled
  powerpc/64s: Fix stf mitigation patching w/strict RWX & hash
  powerpc/64s: Fix entry flush patching w/strict RWX & hash
  powerpc/64s: Fix crashes when toggling entry flush barrier
  powerpc/64s: Fix crashes when toggling stf barrier
  KVM: PPC: Book3S HV: Fix kvm_unmap_gfn_range_hv() for Hash MMU
  powerpc/legacy_serial: Fix UBSAN: array-index-out-of-bounds
  powerpc/signal: Fix possible build failure with unsafe_copy_fpr_{to/from}_user
  powerpc/uaccess: Fix __get_user() with CONFIG_CC_HAS_ASM_GOTO_OUTPUT
  powerpc/pseries: warn if recursing into the hcall tracing code
  powerpc/pseries: use notrace hcall variant for H_CEDE idle
  powerpc/pseries: Don't trace hcall tracing wrapper
  powerpc/pseries: Fix hcall tracing recursion in pv queued spinlocks
  powerpc/syscall: Calling kuap_save_and_lock() is wrong
  powerpc/interrupts: Fix kuep_unlock() call
parents c12a29ed c6ac667b
arch/powerpc/include/asm/hvcall.h +3 −0
@@ -448,6 +448,9 @@
  */
 long plpar_hcall_norets(unsigned long opcode, ...);
 
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
 /**
  * plpar_hcall: - Make a pseries hypervisor call
  * @opcode: The hypervisor call to make.
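
The declaration above is the raw primitive the rest of this series builds on. As a rough user-space sketch of the layering (the function bodies and the printf-based "tracepoints" are stand-ins, not the kernel's implementation): the traced wrapper brackets the raw call with trace hooks, and callers in contexts where tracing is unsafe call the raw variant directly.

#include <stdio.h>

/* Stand-ins for the hcall tracepoints; the real ones take locks and
 * rely on RCU, which is what makes them unsafe in some contexts. */
static void trace_hcall_entry(unsigned long opcode)
{
	printf("hcall entry: %lu\n", opcode);
}

static void trace_hcall_exit(unsigned long opcode, long ret)
{
	printf("hcall exit: %lu = %ld\n", opcode, ret);
}

/* The notrace primitive: performs the call and nothing else. */
static long hcall_norets_notrace(unsigned long opcode)
{
	(void)opcode;
	return 0;	/* pretend the hypervisor returned success */
}

/* The traced wrapper: the same call bracketed by trace hooks. */
static long hcall_norets(unsigned long opcode)
{
	long ret;

	trace_hcall_entry(opcode);
	ret = hcall_norets_notrace(opcode);
	trace_hcall_exit(opcode, ret);
	return ret;
}

int main(void)
{
	hcall_norets(1);		/* traced */
	hcall_norets_notrace(1);	/* silent, safe anywhere */
	return 0;
}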
arch/powerpc/include/asm/interrupt.h +7 −2
@@ -153,8 +153,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
  */
 static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
 {
-	if (user_mode(regs))
-		kuep_unlock();
 }
 
 static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -222,6 +220,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
+				regs->nip < (unsigned long)__end_interrupts) {
+		// Kernel code running below __end_interrupts is
+		// implicitly soft-masked.
+		regs->softe = IRQS_ALL_DISABLED;
+	}
+
 	/* Don't do any per-CPU operations until interrupt state is fixed */
 
 	if (nmi_disables_ftrace(regs)) {
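
The new block is a plain address-range test: a kernel-mode PC below __end_interrupts is running interrupt-entry code that is soft-masked by convention, even though no flag says so, and an NMI landing there must record that state. A minimal stand-alone sketch of the test (the fake text range is invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the interrupt-entry text; in the kernel the upper bound
 * is the linker symbol __end_interrupts. */
static const char fake_interrupt_text[64];
#define fake_end_interrupts (fake_interrupt_text + sizeof(fake_interrupt_text))

/* A kernel-mode PC below the bound runs implicitly soft-masked, so an
 * NMI there must record the state as IRQS_ALL_DISABLED. */
static bool implicitly_soft_masked(uintptr_t nip)
{
	return nip < (uintptr_t)fake_end_interrupts;
}

int main(void)
{
	uintptr_t nip = (uintptr_t)fake_interrupt_text + 8;
	printf("implicitly masked: %d\n", implicitly_soft_masked(nip));
	return 0;
}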
arch/powerpc/include/asm/paravirt.h +19 −3
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
 	return be32_to_cpu(yield_count);
 }
 
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it can not acquire the lock, and the unlock slowpath might prod if a waiter
+ * has yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
 static inline void yield_to_preempted(int cpu, u32 yield_count)
 {
-	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
 }
 
 static inline void prod_cpu(int cpu)
 {
-	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
 }
 
 static inline void yield_to_any(void)
 {
-	plpar_hcall_norets(H_CONFER, -1, 0);
+	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
 }
 #else
 static inline bool is_shared_processor(void)
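
The deadlock the comment describes can be reproduced in miniature. In this hedged user-space analogy (a pthread mutex stands in for the qspinlock and a printf hook for hcall tracing), the trace hook takes the same non-recursive lock its caller already holds, so the task queues up behind itself:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the hcall tracing path, which takes locks of its own. */
static void trace_hook(const char *event)
{
	pthread_mutex_lock(&lock);	/* recurses on a held lock: deadlock */
	printf("trace: %s\n", event);
	pthread_mutex_unlock(&lock);
}

static void hcall_traced(void)  { trace_hook("H_CONFER"); }
static void hcall_notrace(void) { /* no tracing: safe in lock slowpaths */ }

int main(void)
{
	pthread_mutex_lock(&lock);	/* we are "queued on the lock" */
	hcall_notrace();		/* fine */
	/* hcall_traced(); */		/* uncomment to hang right here */
	pthread_mutex_unlock(&lock);
	puts("no deadlock");
	return 0;
}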
arch/powerpc/include/asm/plpar_wrappers.h +5 −1
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
 
 static inline long cede_processor(void)
 {
-	return plpar_hcall_norets(H_CEDE);
+	/*
+	 * We cannot call tracepoints inside RCU idle regions which
+	 * means we must not trace H_CEDE.
+	 */
+	return plpar_hcall_norets_notrace(H_CEDE);
 }
 
 static inline long extended_cede_processor(unsigned long latency_hint)
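
The reason H_CEDE is special: cede_processor() runs on the idle path after RCU has stopped watching the CPU, and tracepoints depend on RCU. A small user-space analogy (the rcu_watching flag and the assertion are invented for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool rcu_watching = true;	/* stand-in for RCU's idle tracking */

static void tracepoint(const char *event)
{
	assert(rcu_watching);		/* tracepoints rely on RCU */
	printf("trace: %s\n", event);
}

static long hcall_traced(void)  { tracepoint("H_CEDE"); return 0; }
static long hcall_notrace(void) { return 0; }

int main(void)
{
	rcu_watching = false;		/* enter idle: RCU not watching */
	hcall_notrace();		/* safe */
	/* hcall_traced(); */		/* would trip the assertion */
	rcu_watching = true;
	puts("ok");
	return 0;
}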
arch/powerpc/include/asm/uaccess.h +1 −1
@@ -157,7 +157,7 @@ do { \
 		"2:	lwz%X1 %L0, %L1\n"			\
 		EX_TABLE(1b, %l2)				\
 		EX_TABLE(2b, %l2)				\
-		: "=r" (x)					\
+		: "=&r" (x)					\
 		: "m" (*addr)					\
 		:						\
 		: label)
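
The one-character fix deserves a word. On ppc32 a 64-bit __get_user() is two lwz instructions, and the first one writes half of the output register pair before the second has consumed the address operand. With plain "=r", GCC 11 is entitled to allocate the output pair on top of the register holding the address, which the first lwz then corrupts; the '&' earlyclobber forces the output into registers distinct from every input. A minimal ppc32-only sketch of the same sequence (stripped of the labels and EX_TABLE entries, so it is illustrative rather than the kernel macro):

#include <stdio.h>

/* Load a 64-bit value with two lwz instructions, as the fixed macro
 * does.  The first lwz writes %0 before the second has used %L1, so
 * the output must be earlyclobber ("=&r"); with the old "=r" the
 * output pair could share a register with the address of *addr. */
static unsigned long long load_u64(const unsigned long long *addr)
{
	unsigned long long x;

	asm("lwz%X1 %0, %1\n\t"
	    "lwz%X1 %L0, %L1"
	    : "=&r" (x)		/* earlyclobber: "=r" was the bug */
	    : "m" (*addr));
	return x;
}

int main(void)
{
	unsigned long long v = 0x1122334455667788ULL;
	printf("%llx\n", load_u64(&v));
	return 0;
}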