Commit a3314262 authored by Michael Ellerman
Browse files

Merge branch 'fixes' into next

Merge our fixes branch into next.

That lets us resolve a conflict in arch/powerpc/sysdev/xive/common.c.

Between cbc06f05 ("powerpc/xive: Do not skip CPU-less nodes when
creating the IPIs"), which moved request_irq() out of xive_init_ipis(),
and 17df41fe ("powerpc: use IRQF_NO_DEBUG for IPIs") which added
IRQF_NO_DEBUG to that request_irq() call, which has now moved.
parents e432fe97 787c70f2
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -4,6 +4,8 @@

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
		return;

	update_user_segments(mfsr(0) | SR_NX);
	/*
	 * This isync() shouldn't be necessary as the kernel is not expected to
	 * run any instruction in userspace soon after the update of segments,
	 * but hash based cores (at least G3) seem to exhibit a random
	 * behaviour when the 'isync' is not there. 603 cores don't have this
	 * behaviour so don't do the 'isync' as it saves several CPU cycles.
	 */
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		isync();	/* Context sync required after mtsr() */
}

static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
		return;

	update_user_segments(mfsr(0) & ~SR_NX);
	/*
	 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
	 * to return to userspace, but hash based cores (at least G3) seem to
	 * exhibit a random behaviour when the 'isync' is not there. 603 cores
	 * don't have this behaviour so don't do the 'isync' as it saves several
	 * CPU cycles.
	 */
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		isync();	/* Context sync required after mtsr() */
}

#ifdef CONFIG_PPC_KUAP
+3 −0
Original line number Diff line number Diff line
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
+1 −1
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];

extern void do_IRQ(struct pt_regs *regs);
void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);

+16 −0
Original line number Diff line number Diff line
@@ -77,6 +77,22 @@ struct pt_regs
		unsigned long __pad[4];	/* Maintain 16 byte interrupt stack alignment */
	};
#endif
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
	struct { /* Must be a multiple of 16 bytes */
		unsigned long mas0;
		unsigned long mas1;
		unsigned long mas2;
		unsigned long mas3;
		unsigned long mas6;
		unsigned long mas7;
		unsigned long srr0;
		unsigned long srr1;
		unsigned long csrr0;
		unsigned long csrr1;
		unsigned long dsrr0;
		unsigned long dsrr1;
	};
#endif
};
#endif

+14 −17
Original line number Diff line number Diff line
@@ -302,24 +302,21 @@ int main(void)
	STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif

#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
	DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
	STACK_PT_REGS_OFFSET(MAS0, mas0);
	/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
	DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
	DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
	DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
	DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
	DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
	DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
	DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
	DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
	DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
	DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
	DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
	DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
#endif
	STACK_PT_REGS_OFFSET(MMUCR, mas0);
	STACK_PT_REGS_OFFSET(MAS1, mas1);
	STACK_PT_REGS_OFFSET(MAS2, mas2);
	STACK_PT_REGS_OFFSET(MAS3, mas3);
	STACK_PT_REGS_OFFSET(MAS6, mas6);
	STACK_PT_REGS_OFFSET(MAS7, mas7);
	STACK_PT_REGS_OFFSET(_SRR0, srr0);
	STACK_PT_REGS_OFFSET(_SRR1, srr1);
	STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
	STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
	STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
	STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
#endif

	/* About the CPU features table */
Loading