Commit 837c07cf authored by Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:
 "It's a bit of a big batch for rc6, but just because I didn't send any
  fixes the last week or two while I was on vacation, next week should
  be quieter:

   - Fix a few objtool warnings since we recently enabled objtool.

   - Fix a deadlock with the hash MMU vs perf record.

   - Fix perf profiling of asynchronous interrupt handlers.

   - Revert the IMC PMU nest_init_lock to being a mutex.

   - Two commits fixing problems with the kexec_file FDT size
     estimation.

   - Two commits fixing problems with strict RWX vs kernels running at a
     non-zero address.

   - Reconnect tlb_flush() to hash__tlb_flush()

  Thanks to Kajol Jain, Nicholas Piggin, Sachin Sant, Sathvika Vasireddy,
  and Sourabh Jain"

* tag 'powerpc-6.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s: Reconnect tlb_flush() to hash__tlb_flush()
  powerpc/kexec_file: Count hot-pluggable memory in FDT estimate
  powerpc/64s/radix: Fix RWX mapping with relocated kernel
  powerpc/64s/radix: Fix crash with unaligned relocated kernel
  powerpc/kexec_file: Fix division by zero in extra size estimation
  powerpc/imc-pmu: Revert nest_init_lock to being a mutex
  powerpc/64: Fix perf profiling asynchronous interrupt handlers
  powerpc/64s: Fix local irq disable when PMIs are disabled
  powerpc/kvm: Fix unannotated intra-function call warning
  powerpc/85xx: Fix unannotated intra-function call warning
parents 95078069 1665c027
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -97,6 +97,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);

	return hash__tlb_flush(tlb);
}

#ifdef CONFIG_SMP
+30 −13
Original line number Diff line number Diff line
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
	return flags;
}

static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags & ~mask);

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
@@ -192,7 +201,7 @@ static inline void arch_local_irq_enable(void)

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
	return irq_soft_mask_or_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(mfmsr() & MSR_EE);
	}

@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (!power_pmu_wants_prompt_pmi())
			return false;
		/*
		 * If PMIs are disabled then IRQs should be disabled as well,
		 * so we shouldn't see this condition, check for it just in
		 * case because we are about to enable PMIs.
		 */
		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
			return false;
	}

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)

/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 * This allows PMI interrupts to profile irq handlers.
 */
static inline void do_hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
		WARN_ON(mfmsr() & MSR_EE);
	}
	/*
	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
	 * There is no other reason to enable this way.
	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	return false;
}
+1 −1
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)

	ppc_msgsync();

	if (should_hard_irq_enable())
	if (should_hard_irq_enable(regs))
		do_hard_irq_enable();

	kvmppc_clear_host_ipi(smp_processor_id());
+2 −1
Original line number Diff line number Diff line
@@ -864,7 +864,7 @@ _GLOBAL(load_up_spe)
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
SYM_FUNC_START_LOCAL(KernelSPE)
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
@@ -881,6 +881,7 @@ KernelSPE:
#endif
	.align	4,0

SYM_FUNC_END(KernelSPE)
#endif /* CONFIG_SPE */

/*
+1 −1
Original line number Diff line number Diff line
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
	irq = static_call(ppc_get_irq)();

	/* We can hard enable interrupts now to allow perf interrupts */
	if (should_hard_irq_enable())
	if (should_hard_irq_enable(regs))
		do_hard_irq_enable();

	/* And finally process it */
Loading