Unverified Commit 1f77ed94 authored by Palmer Dabbelt's avatar Palmer Dabbelt
Browse files

riscv: switch to relative extable and other improvements

Similarly to other architectures such as arm64 and x86, use
offsets relative to the exception table entry values rather than
absolute addresses for both the exception location and the fixup.
Recently, arm64 and x86 also removed anonymous out-of-line fixups; we
want to achieve the same result here.
parents dacef016 a2ceb8c4
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
# Headers provided by the generic-y kbuild mechanism (wrapper headers
# generated from include/asm-generic) rather than arch-specific files.
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += user.h
+65 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H

/*
 * Exception table entry type tags, stored in each entry's 16-bit
 * "type" field (see struct exception_table_entry in <asm/extable.h>).
 */
#define EX_TYPE_NONE			0
#define EX_TYPE_FIXUP			1
#define EX_TYPE_BPF			2
#define EX_TYPE_UACCESS_ERR_ZERO	3

#ifdef __ASSEMBLY__

/*
 * Emit one exception table entry from assembly. Both "insn" and
 * "fixup" are stored as 32-bit offsets relative to the entry itself
 * ("- ."), keeping the table position-independent.
 */
#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
	.pushsection	__ex_table, "a";		\
	.balign		4;				\
	.long		((insn) - .);			\
	.long		((fixup) - .);			\
	.short		(type);				\
	.short		(data);				\
	.popsection;

	/* Plain fixup entry: branch to \fixup when \insn faults. */
	.macro		_asm_extable, insn, fixup
	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
	.endm

#else /* __ASSEMBLY__ */

#include <linux/bits.h>
#include <linux/stringify.h>
#include <asm/gpr-num.h>

/*
 * Inline-asm (C) flavour of the entry emitter above; every argument
 * must already be a string of assembly text (callers stringify).
 */
#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
	".pushsection	__ex_table, \"a\"\n"		\
	".balign	4\n"				\
	".long		((" insn ") - .)\n"		\
	".long		((" fixup ") - .)\n"		\
	".short		(" type ")\n"			\
	".short		(" data ")\n"			\
	".popsection\n"

#define _ASM_EXTABLE(insn, fixup)	\
	__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")

/*
 * Layout of the "data" payload for EX_TYPE_UACCESS_ERR_ZERO entries:
 * bits [4:0] hold a GPR number for the error register and bits [9:5] a
 * GPR number for the register to be zeroed.  NOTE(review): presumably
 * the fixup handler writes -EFAULT into the ERR register and 0 into
 * the ZERO register — confirm against the arch extable handler.
 */
#define EX_DATA_REG_ERR_SHIFT	0
#define EX_DATA_REG_ERR		GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT	5
#define EX_DATA_REG_ZERO	GENMASK(9, 5)

/*
 * Encode the GPR named "gpr" into the ERR or ZERO sub-field, using the
 * .L__gpr_num_<name> assembler symbols from <asm/gpr-num.h> so the
 * register name is resolved to its ABI number at assembly time.
 */
#define EX_DATA_REG(reg, gpr)						\
	"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"

#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)		\
	__DEFINE_ASM_GPR_NUMS						\
	__ASM_EXTABLE_RAW(#insn, #fixup, 				\
			  __stringify(EX_TYPE_UACCESS_ERR_ZERO),	\
			  "("						\
			    EX_DATA_REG(ERR, err) " | "			\
			    EX_DATA_REG(ZERO, zero)			\
			  ")")

/* Error-only variant: "zero" is the hard-wired x0 register (number 0
 * per gpr-num.h), so no destination register is clobbered. */
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)			\
	_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)

#endif /* __ASSEMBLY__ */

#endif /* __ASM_ASM_EXTABLE_H */
+48 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_EXTABLE_H
#define _ASM_RISCV_EXTABLE_H

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

/* One table entry: 32-bit self-relative offsets for the faulting
 * instruction and its fixup, plus an EX_TYPE_* tag and a 16-bit
 * per-type payload (e.g. packed GPR numbers for uaccess fixups). */
struct exception_table_entry {
	int insn, fixup;
	short type, data;
};

/* Tell the generic extable code that insn/fixup are relative offsets,
 * not absolute addresses. */
#define ARCH_HAS_RELATIVE_EXTABLE

/*
 * Swap helper used when the table is sorted: since "fixup" is stored
 * relative to its own entry, the offset must be rebased by the
 * distance the entry moves (delta); type and data are copied verbatim.
 * NOTE(review): assumes the generic sort code handles the "insn"
 * field itself and supplies delta as the entries' distance — confirm
 * against the generic extable sort implementation.
 */
#define swap_ex_entry_fixup(a, b, tmp, delta)		\
do {							\
	(a)->fixup = (b)->fixup + (delta);		\
	(b)->fixup = (tmp).fixup - (delta);		\
	(a)->type = (b)->type;				\
	(b)->type = (tmp).type;				\
	(a)->data = (b)->data;				\
	(b)->data = (tmp).data;				\
} while (0)

/* Try to resolve a fault at regs->epc via the exception table;
 * returns true if an entry handled it. */
bool fixup_exception(struct pt_regs *regs);

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
#else
/* Stub when the BPF JIT is not built: never claims the fault. */
static inline bool
ex_handler_bpf(const struct exception_table_entry *ex,
	       struct pt_regs *regs)
{
	return false;
}
#endif

#endif
+7 −23
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>

/* We don't even really need the extable code, but for now keep it simple */
#ifndef CONFIG_MMU
@@ -20,23 +21,14 @@

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
{								\
	uintptr_t tmp;						\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
	"1:	" insn "				\n"	\
	"2:						\n"	\
	"	.section .fixup,\"ax\"			\n"	\
	"	.balign 4				\n"	\
	"3:	li %[r],%[e]				\n"	\
	"	jump 2b,%[t]				\n"	\
	"	.previous				\n"	\
	"	.section __ex_table,\"a\"		\n"	\
	"	.balign " RISCV_SZPTR "			\n"	\
	"	" RISCV_PTR " 1b, 3b			\n"	\
	"	.previous				\n"	\
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r])			\
	: [r] "+r" (ret), [ov] "=&r" (oldval),			\
	  [u] "+m" (*uaddr), [t] "=&r" (tmp)			\
	: [op] "Jr" (oparg), [e] "i" (-EFAULT)			\
	  [u] "+m" (*uaddr)					\
	: [op] "Jr" (oparg)					\
	: "memory");						\
	__disable_user_access();				\
}
@@ -98,18 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
	"2:	sc.w.aqrl %[t],%z[nv],%[u]		\n"
	"	bnez %[t],1b				\n"
	"3:						\n"
	"	.section .fixup,\"ax\"			\n"
	"	.balign 4				\n"
	"4:	li %[r],%[e]				\n"
	"	jump 3b,%[t]				\n"
	"	.previous				\n"
	"	.section __ex_table,\"a\"		\n"
	"	.balign " RISCV_SZPTR "			\n"
	"	" RISCV_PTR " 1b, 4b			\n"
	"	" RISCV_PTR " 2b, 4b			\n"
	"	.previous				\n"
		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
	: [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
	: "memory");
	__disable_user_access();

+77 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H

/*
 * Map RISC-V ABI register names to their encoded GPR numbers (x0-x31)
 * as assembler-local .L__gpr_num_<name> symbols, so inline asm and
 * assembly macros (e.g. the extable EX_DATA_REG encoding) can turn a
 * register name into its number at assembly time.
 */

#ifdef __ASSEMBLY__
	/* Direct definitions for use from .S files. */
	.equ	.L__gpr_num_zero,	0
	.equ	.L__gpr_num_ra,		1
	.equ	.L__gpr_num_sp,		2
	.equ	.L__gpr_num_gp,		3
	.equ	.L__gpr_num_tp,		4
	.equ	.L__gpr_num_t0,		5
	.equ	.L__gpr_num_t1,		6
	.equ	.L__gpr_num_t2,		7
	.equ	.L__gpr_num_s0,		8
	.equ	.L__gpr_num_s1,		9
	.equ	.L__gpr_num_a0,		10
	.equ	.L__gpr_num_a1,		11
	.equ	.L__gpr_num_a2,		12
	.equ	.L__gpr_num_a3,		13
	.equ	.L__gpr_num_a4,		14
	.equ	.L__gpr_num_a5,		15
	.equ	.L__gpr_num_a6,		16
	.equ	.L__gpr_num_a7,		17
	.equ	.L__gpr_num_s2,		18
	.equ	.L__gpr_num_s3,		19
	.equ	.L__gpr_num_s4,		20
	.equ	.L__gpr_num_s5,		21
	.equ	.L__gpr_num_s6,		22
	.equ	.L__gpr_num_s7,		23
	.equ	.L__gpr_num_s8,		24
	.equ	.L__gpr_num_s9,		25
	.equ	.L__gpr_num_s10,	26
	.equ	.L__gpr_num_s11,	27
	.equ	.L__gpr_num_t3,		28
	.equ	.L__gpr_num_t4,		29
	.equ	.L__gpr_num_t5,		30
	.equ	.L__gpr_num_t6,		31

#else /* __ASSEMBLY__ */

/*
 * Same definitions as a string of assembly, for pasting into inline
 * asm blocks before any use of .L__gpr_num_* symbols.
 */
#define __DEFINE_ASM_GPR_NUMS					\
"	.equ	.L__gpr_num_zero,	0\n"			\
"	.equ	.L__gpr_num_ra,		1\n"			\
"	.equ	.L__gpr_num_sp,		2\n"			\
"	.equ	.L__gpr_num_gp,		3\n"			\
"	.equ	.L__gpr_num_tp,		4\n"			\
"	.equ	.L__gpr_num_t0,		5\n"			\
"	.equ	.L__gpr_num_t1,		6\n"			\
"	.equ	.L__gpr_num_t2,		7\n"			\
"	.equ	.L__gpr_num_s0,		8\n"			\
"	.equ	.L__gpr_num_s1,		9\n"			\
"	.equ	.L__gpr_num_a0,		10\n"			\
"	.equ	.L__gpr_num_a1,		11\n"			\
"	.equ	.L__gpr_num_a2,		12\n"			\
"	.equ	.L__gpr_num_a3,		13\n"			\
"	.equ	.L__gpr_num_a4,		14\n"			\
"	.equ	.L__gpr_num_a5,		15\n"			\
"	.equ	.L__gpr_num_a6,		16\n"			\
"	.equ	.L__gpr_num_a7,		17\n"			\
"	.equ	.L__gpr_num_s2,		18\n"			\
"	.equ	.L__gpr_num_s3,		19\n"			\
"	.equ	.L__gpr_num_s4,		20\n"			\
"	.equ	.L__gpr_num_s5,		21\n"			\
"	.equ	.L__gpr_num_s6,		22\n"			\
"	.equ	.L__gpr_num_s7,		23\n"			\
"	.equ	.L__gpr_num_s8,		24\n"			\
"	.equ	.L__gpr_num_s9,		25\n"			\
"	.equ	.L__gpr_num_s10,	26\n"			\
"	.equ	.L__gpr_num_s11,	27\n"			\
"	.equ	.L__gpr_num_t3,		28\n"			\
"	.equ	.L__gpr_num_t4,		29\n"			\
"	.equ	.L__gpr_num_t5,		30\n"			\
"	.equ	.L__gpr_num_t6,		31\n"

#endif /* __ASSEMBLY__ */

#endif /* __ASM_GPR_NUM_H */
Loading