Commit dc5dac74 authored by Nicholas Piggin, committed by Michael Ellerman
Browse files

powerpc/64: Add support to build with prefixed instructions



Add an option to build the kernel and modules with prefixed instructions
if the CPU and toolchain support them.

This is not related to kernel support for userspace execution of
prefixed instructions.

Building with prefixed instructions breaks some extended inline asm
memory addressing, for example it will provide immediates that exceed
the range of simple load/store displacement. Whether this is a
toolchain or a kernel asm problem remains to be seen. For now, these
are replaced with simpler and less efficient direct register addressing
when compiling with prefixed.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230408021752.862660-4-npiggin@gmail.com
parent b270bebd
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -4,6 +4,9 @@ source "arch/powerpc/platforms/Kconfig.cputype"
# True when the 64-bit compiler accepts the ELFv2 ABI flag.
config CC_HAS_ELFV2
	def_bool PPC64 && $(cc-option, -mabi=elfv2)

# True when the toolchain can emit prefixed instructions.  -mprefixed is
# probed together with -mcpu=power10 because prefixed instructions are an
# ISA 3.1 (POWER10) feature and the option may be rejected otherwise.
config CC_HAS_PREFIXED
	def_bool PPC64 && $(cc-option, -mcpu=power10 -mprefixed)

config 32BIT
	bool
	default y if PPC32
+4 −0
Original line number Diff line number Diff line
@@ -180,7 +180,11 @@ ifdef CONFIG_476FPE_ERR46
endif

# No prefix or pcrel
# Allow the compiler to use prefixed instructions only when the kernel
# was explicitly configured for them; otherwise forbid them outright.
# cc-option drops either flag if the toolchain does not understand it.
ifdef CONFIG_PPC_KERNEL_PREFIXED
KBUILD_CFLAGS += $(call cc-option,-mprefixed)
else
KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
endif
# PC-relative addressing is disabled unconditionally, prefixed or not.
KBUILD_CFLAGS += $(call cc-option,-mno-pcrel)

# No AltiVec or VSX or MMA instructions when building kernel
+20 −4
Original line number Diff line number Diff line
@@ -27,6 +27,10 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
@@ -34,6 +38,10 @@ static __inline__ int arch_atomic_read(const atomic_t *v)

/*
 * arch_atomic_set() - write @i to the atomic counter with a single store.
 *
 * A plain stw is the whole operation; no barrier instructions are
 * emitted here.
 */
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * -mprefixed can generate offsets beyond range, fall back hack:
	 * force the address into a base register ("b" constraint — any
	 * GPR except r0) with an explicit zero displacement, so the
	 * compiler cannot substitute an out-of-range prefixed offset.
	 */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		/*
		 * %U0/%X0 emit the "u" (update) or "x" (indexed) suffix
		 * when the "m<>" memory operand requires that form,
		 * letting the compiler pick the addressing mode.
		 */
		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

@@ -197,6 +205,10 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
@@ -204,6 +216,10 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)

/*
 * arch_atomic64_set() - write @i to the 64-bit atomic counter with a
 * single std; no barrier instructions are emitted here.
 */
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	/*
	 * -mprefixed can generate offsets beyond range, fall back hack:
	 * keep the address in a base register ("b": any GPR but r0) with
	 * an explicit zero displacement instead of letting the compiler
	 * form a possibly out-of-range prefixed offset.
	 */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		/* %U0/%X0: update/indexed suffix as the "m<>" operand requires. */
		__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

+37 −0
Original line number Diff line number Diff line
@@ -97,6 +97,42 @@ extern bool isa_io_special;
 *
 */

/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
/*
 * MMIO read accessor using an indexed (X-form) load: the full address
 * is in a register, so no displacement — prefixed or otherwise — is
 * ever generated.  "sync" orders the load against prior accesses; the
 * never-trapping "twi 0,%0,0; isync" pair stalls until the loaded value
 * is available, so the device access completes before code that follows.
 */
#define DEF_MMIO_IN_X(name, size, insn)				\
static inline u##size name(const volatile u##size __iomem *addr)	\
{									\
	u##size ret;							\
	__asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync"	\
		: "=r" (ret) : "r" (addr) : "memory");			\
	return ret;							\
}

/*
 * MMIO write accessor using an indexed (X-form) store: "sync" orders
 * the store after all prior accesses; mmiowb_set_pending() flags that
 * an MMIO write is outstanding.
 */
#define DEF_MMIO_OUT_X(name, size, insn)				\
static inline void name(volatile u##size __iomem *addr, u##size val)	\
{									\
	__asm__ __volatile__("sync;"#insn" %1,0,%0"			\
		: : "r" (addr), "r" (val) : "memory");			\
	mmiowb_set_pending();						\
}

/*
 * MMIO read accessor, D-form load with a fixed zero displacement.
 * -mprefixed can generate offsets beyond range, so rather than an "m"
 * operand the address is forced into a base register ("b": any GPR
 * except r0) and the displacement hard-coded to 0.  Barrier pattern as
 * in DEF_MMIO_IN_X: sync before, twi/isync after the load.
 */
#define DEF_MMIO_IN_D(name, size, insn)				\
static inline u##size name(const volatile u##size __iomem *addr)	\
{									\
	u##size ret;							\
	__asm__ __volatile__("sync;"#insn" %0,0(%1);twi 0,%0,0;isync"\
		: "=r" (ret) : "b" (addr) : "memory");	\
	return ret;							\
}

/*
 * MMIO write accessor, D-form store with a fixed zero displacement and
 * the address pinned in a base register ("b"), avoiding out-of-range
 * prefixed offsets.  "sync" orders the store after prior accesses;
 * mmiowb_set_pending() flags the outstanding MMIO write.
 */
#define DEF_MMIO_OUT_D(name, size, insn)				\
static inline void name(volatile u##size __iomem *addr, u##size val)	\
{									\
	__asm__ __volatile__("sync;"#insn" %1,0(%0)"			\
		: : "b" (addr), "r" (val) : "memory");	\
	mmiowb_set_pending();						\
}
#else
#define DEF_MMIO_IN_X(name, size, insn)				\
static inline u##size name(const volatile u##size __iomem *addr)	\
{									\
@@ -130,6 +166,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
		: "=m<>" (*addr) : "r" (val) : "memory");	\
	mmiowb_set_pending();						\
}
#endif

DEF_MMIO_IN_D(in_8,     8, lbz);
DEF_MMIO_OUT_D(out_8,   8, stb);
+26 −2
Original line number Diff line number Diff line
@@ -71,6 +71,17 @@ __pu_failed: \
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
/*
 * Store @x to user address @addr with instruction @op; a fault at
 * label "1:" is redirected to @label via the EX_TABLE entry.
 * Prefixed-kernel variant: the "b" constraint keeps @addr in a base
 * register (any GPR but r0) with an explicit zero displacement, so
 * -mprefixed cannot emit an out-of-range offset.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op " %0,0(%1)	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "b" (addr)				\
		:						\
		: label)
#else
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
@@ -79,6 +90,7 @@ __pu_failed: \
		: "r" (x), "m<>" (*addr)			\
		:						\
		: label)
#endif

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
@@ -131,6 +143,17 @@ do { \

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* -mprefixed can generate offsets beyond range, fall back hack */
#ifdef CONFIG_PPC_KERNEL_PREFIXED
/*
 * Load from user address @addr into @x with instruction @op; a fault
 * at label "1:" jumps to @label via the EX_TABLE entry (asm-goto with
 * output "=r"(x)).  Prefixed-kernel variant: "b" pins @addr in a base
 * register with an explicit zero displacement so -mprefixed cannot
 * emit an out-of-range offset.
 */
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	"op" %0,0(%1)	# get_user\n"		\
		EX_TABLE(1b, %l2)				\
		: "=r" (x)					\
		: "b" (addr)					\
		:						\
		: label)
#else
#define __get_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	"op"%U1%X1 %0, %1	# get_user\n"	\
@@ -139,6 +162,7 @@ do { \
		: "m<>" (*addr)					\
		:						\
		: label)
#endif

#ifdef __powerpc64__
#define __get_user_asm2_goto(x, addr, label)			\
Loading