Commit 04324f44 authored by Thomas Bogendoerfer

MIPS: Remove get_fs/set_fs



All get_fs/set_fs calls in MIPS code are gone, so remove the
implementation.  With the clear separation of user/kernel space access
we no longer need the EVA special handling, so get rid of that, too.

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 45deb5fa
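
For context, the idiom this removal targets: set_fs(KERNEL_DS) let kernel code temporarily widen the per-thread address limit so the uaccess routines would accept kernel pointers. A minimal sketch of the old pattern and one typical modern replacement (kernel_read(); this snippet is illustrative, not code from this commit):

	/* Old: retarget uaccess at kernel memory, then restore.  A missed
	 * restore left the task able to pass kernel pointers to
	 * copy_from_user(), a classic source of security bugs. */
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	err = vfs_read(file, kbuf, len, &pos);	/* kbuf is a kernel buffer */
	set_fs(old_fs);

	/* New: call an interface that takes kernel pointers directly. */
	err = kernel_read(file, kbuf, len, &pos);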
arch/mips/Kconfig +0 −1

@@ -94,7 +94,6 @@ config MIPS
 	select PERF_USE_VMALLOC
 	select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
 	select RTC_LIB
-	select SET_FS
 	select SYSCTL_EXCEPTION_TRACE
 	select VIRT_TO_BUS
 	select ARCH_HAS_ELFCORE_COMPAT
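
SET_FS is the transitional opt-in from the tree-wide set_fs() removal: only architectures that still implement the address-limit machinery select it, and generic code compiles the mm_segment_t paths under CONFIG_SET_FS. Roughly the shape of include/linux/uaccess.h in that era (simplified sketch from memory, not its exact text):

	#ifdef CONFIG_SET_FS
	/* arch provides mm_segment_t, get_fs(), set_fs(), uaccess_kernel() */
	#else
	/* no address limit to flip: kernel-access special cases compile away */
	#define uaccess_kernel()	(false)
	#endif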
arch/mips/include/asm/processor.h +0 −4

@@ -221,10 +221,6 @@ struct nlm_cop2_state {
 #define COP2_INIT
 #endif
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 #ifdef CONFIG_CPU_HAS_MSA
 # define ARCH_MIN_TASKALIGN	16
 # define FPU_ALIGN		__aligned(16)
arch/mips/include/asm/thread_info.h +0 −6

@@ -28,11 +28,6 @@ struct thread_info {
 	unsigned long		tp_value;	/* thread pointer */
 	__u32			cpu;		/* current CPU */
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-	mm_segment_t		addr_limit;	/*
-						 * thread address space limit:
-						 * 0x7fffffff for user-thead
-						 * 0xffffffff for kernel-thread
-						 */
 	struct pt_regs		*regs;
 	long			syscall;	/* syscall number */
 };
@@ -46,7 +41,6 @@ struct thread_info {
 	.flags		= _TIF_FIXADE,		\
 	.cpu		= 0,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
-	.addr_limit	= KERNEL_DS,		\
 }
 
 /*
arch/mips/include/asm/uaccess.h +146 −303

@@ -16,13 +16,6 @@
 #include <asm/asm-eva.h>
 #include <asm/extable.h>
 
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not.  If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
 #ifdef CONFIG_32BIT
 
 #define __UA_LIMIT 0x80000000UL
@@ -49,38 +42,6 @@ extern u64 __ua_limit;
 
 #endif /* CONFIG_64BIT */
 
-/*
- * USER_DS is a bitmask that has the bits set that may not be set in a valid
- * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
- * the arithmetic we're doing only works if the limit is a power of two, so
- * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
- * address in this range it's the process's problem, not ours :-)
- */
-
-#define KERNEL_DS	((mm_segment_t) { 0UL })
-#define USER_DS		((mm_segment_t) { __UA_LIMIT })
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
-
-/*
- * eva_kernel_access() - determine whether kernel memory access on an EVA system
- *
- * Determines whether memory accesses should be performed to kernel memory
- * on a system using Extended Virtual Addressing (EVA).
- *
- * Return: true if a kernel memory access on an EVA system, else false.
- */
-static inline bool eva_kernel_access(void)
-{
-	if (!IS_ENABLED(CONFIG_EVA))
-		return false;
-
-	return uaccess_kernel();
-}
-
 /*
  * Is a address valid? This does a straightforward calculation rather
  * than tests.
@@ -118,7 +79,7 @@ static inline bool eva_kernel_access(void)
 static inline int __access_ok(const void __user *p, unsigned long size)
 {
 	unsigned long addr = (unsigned long)p;
-	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+	return (__UA_LIMIT & (addr | (addr + size) | __ua_size(size))) == 0;
 }
 
 #define access_ok(addr, size)					\
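
With set_fs() gone the user address limit is a constant again, so __access_ok() tests against __UA_LIMIT directly instead of the per-thread get_fs().seg (which was 0 for KERNEL_DS, making every address pass). The check relies on the limit being a single high bit: OR-ing the start address, the end address, and the size-overflow mask sets a limit bit exactly when the range touches or wraps into kernel space. A standalone demonstration of the 32-bit arithmetic (plain userspace C, not kernel code; __ua_size()'s clamping of oversized lengths is only noted in the comment):

	#include <stdio.h>
	#include <stdint.h>

	#define UA_LIMIT 0x80000000UL	/* the 32-bit __UA_LIMIT */

	static int demo_access_ok(uint32_t addr, uint32_t size)
	{
		/* A set top bit in addr or in addr + size means the range
		 * reaches kernel space; the real macro also folds in
		 * __ua_size(size), which catches huge sizes whose addition
		 * would wrap past zero. */
		return (UA_LIMIT & (addr | (addr + size))) == 0;
	}

	int main(void)
	{
		printf("%d\n", demo_access_ok(0x00400000, 0x1000)); /* 1: user range */
		printf("%d\n", demo_access_ok(0x7ffff000, 0x2000)); /* 0: ends above 2 GiB */
		printf("%d\n", demo_access_ok(0x80000000, 4));      /* 0: kernel address */
		return 0;
	}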
@@ -215,43 +176,6 @@ static inline int __access_ok(const void __user *p, unsigned long size)
 struct __large_struct { unsigned long buf[100]; };
 #define __m(x) (*(struct __large_struct __user *)(x))
 
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifndef CONFIG_EVA
-#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_user_asm for EVA.
- */
-#undef _loadd
-#undef _loadw
-#undef _loadh
-#undef _loadb
-#ifdef CONFIG_32BIT
-#define _loadd			_loadw
-#else
-#define _loadd(reg, addr)	"ld " reg ", " addr
-#endif
-#define _loadw(reg, addr)	"lw " reg ", " addr
-#define _loadh(reg, addr)	"lh " reg ", " addr
-#define _loadb(reg, addr)	"lb " reg ", " addr
-
-#define __get_kernel_common(val, size, ptr)				\
-do {									\
-	switch (size) {							\
-	case 1: __get_data_asm(val, _loadb, ptr); break;		\
-	case 2: __get_data_asm(val, _loadh, ptr); break;		\
-	case 4: __get_data_asm(val, _loadw, ptr); break;		\
-	case 8: __GET_DW(val, _loadd, ptr); break;			\
-	default: __get_user_unknown(); break;				\
-	}								\
-} while (0)
-#endif
-
 #ifdef CONFIG_32BIT
 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
 #endif
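
This block existed because of how EVA reuses the uaccess assembly: on EVA (Extended Virtual Addressing) systems, asm-eva.h routes the generic load helpers to the user-view instructions (lbe/lhe/lwe), which translate through the user address map and are wrong for kernel pointers, so __get_kernel_common re-pointed them at the plain encodings. The idea, as a simplified sketch (not the exact asm-eva.h text):

	#ifdef CONFIG_EVA
	/* user accesses must use the EVA user-view load instructions */
	# define user_lw(reg, addr)	"lwe " reg ", " addr
	#else
	/* without EVA, user and kernel loads are the same instruction */
	# define user_lw(reg, addr)	"lw " reg ", " addr
	#endif
	# define kernel_lw(reg, addr)	"lw " reg ", " addr

With separate kernel_lb/kernel_lh/kernel_lw/kernel_ld helpers available (used by the new __get_kernel_nofault below), the #undef workaround is no longer needed.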
@@ -276,12 +200,9 @@ do { \
 ({									\
 	int __gu_err;							\
 									\
-	if (eva_kernel_access()) {					\
-		__get_kernel_common((x), size, ptr);			\
-	} else {							\
 	__chk_user_ptr(ptr);						\
 	__get_user_common((x), size, ptr);				\
-	}								\
 									\
 	__gu_err;							\
 })
@@ -292,9 +213,6 @@ do { \
 									\
 	might_fault();							\
 	if (likely(access_ok(__gu_ptr, size))) {			\
-		if (eva_kernel_access())				\
-			__get_kernel_common((x), size, __gu_ptr);	\
-		else							\
 		__get_user_common((x), size, __gu_ptr);			\
 	} else								\
 		(x) = 0;						\
@@ -361,46 +279,31 @@ do { \
 do {									\
 	int __gu_err;							\
 									\
-	__get_kernel_common(*((type *)(dst)), sizeof(type),		\
-			    (__force type *)(src));			\
+	switch (sizeof(type)) {						\
+	case 1:								\
+		__get_data_asm(*(type *)(dst), kernel_lb,		\
+			       (__force type *)(src));			\
+		break;							\
+	case 2:								\
+		__get_data_asm(*(type *)(dst), kernel_lh,		\
+			       (__force type *)(src));			\
+		break;							\
+	case 4:								\
+		__get_data_asm(*(type *)(dst), kernel_lw,		\
+			       (__force type *)(src));			\
+		break;							\
+	case 8:								\
+		__GET_DW(*(type *)(dst), kernel_ld,			\
+			 (__force type *)(src));			\
+		break;							\
+	default:							\
+		__get_user_unknown();					\
+		break;							\
+	}								\
 	if (unlikely(__gu_err))						\
 		goto err_label;						\
 } while (0)
 
-#ifndef CONFIG_EVA
-#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_data_asm for EVA.
- */
-#undef _stored
-#undef _storew
-#undef _storeh
-#undef _storeb
-#ifdef CONFIG_32BIT
-#define _stored			_storew
-#else
-#define _stored(reg, addr)	"ld " reg ", " addr
-#endif
-
-#define _storew(reg, addr)	"sw " reg ", " addr
-#define _storeh(reg, addr)	"sh " reg ", " addr
-#define _storeb(reg, addr)	"sb " reg ", " addr
-
-#define __put_kernel_common(ptr, size)					\
-do {									\
-	switch (size) {							\
-	case 1: __put_data_asm(_storeb, ptr); break;			\
-	case 2: __put_data_asm(_storeh, ptr); break;			\
-	case 4: __put_data_asm(_storew, ptr); break;			\
-	case 8: __PUT_DW(_stored, ptr); break;				\
-	default: __put_user_unknown(); break;				\
-	}								\
-} while(0)
-#endif
-
 /*
  * Yuck.  We need two variants, one for 64bit operation and one
  * for 32 bit mode and old iron.
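
__get_kernel_nofault() is the arch hook behind copy_from_kernel_nofault(), which is what replaced probing kernel memory via set_fs(KERNEL_DS) + get_user(). The generic driver loop in mm/maccess.c looks roughly like this (paraphrased from that file):

	#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)	\
		while (len >= sizeof(type)) {					\
			__get_kernel_nofault(dst, src, type, err_label);	\
			dst += sizeof(type);					\
			src += sizeof(type);					\
			len -= sizeof(type);					\
		}

so the switch on sizeof(type) above is instantiated once per access width, with err_label providing the non-faulting bail-out.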
@@ -429,12 +332,9 @@ do { \
 	int __pu_err = 0;						\
 									\
 	__pu_val = (x);							\
-	if (eva_kernel_access()) {					\
-		__put_kernel_common(ptr, size);				\
-	} else {							\
 	__chk_user_ptr(ptr);						\
 	__put_user_common(ptr, size);					\
-	}								\
 									\
 	__pu_err;							\
 })
@@ -446,9 +346,6 @@ do { \
 									\
 	might_fault();							\
 	if (likely(access_ok(__pu_addr, size))) {			\
-		if (eva_kernel_access())				\
-			__put_kernel_common(__pu_addr, size);		\
-		else							\
 		__put_user_common(__pu_addr, size);			\
 	}								\
 									\
@@ -501,7 +398,23 @@ do { \
 	int __pu_err = 0;						\
 									\
 	__pu_val = *(__force type *)(src);				\
-	__put_kernel_common(((type *)(dst)), sizeof(type));		\
+	switch (sizeof(type)) {						\
+	case 1:								\
+		__put_data_asm(kernel_sb, (type *)(dst));		\
+		break;							\
+	case 2:								\
+		__put_data_asm(kernel_sh, (type *)(dst));		\
+		break;							\
+	case 4:								\
+		__put_data_asm(kernel_sw, (type *)(dst))		\
+		break;							\
+	case 8:								\
+		__PUT_DW(kernel_sd, (type *)(dst));			\
+		break;							\
+	default:							\
+		__put_user_unknown();					\
+		break;							\
+	}								\
 	if (unlikely(__pu_err))						\
 		goto err_label;						\
 } while (0)
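
The store side mirrors the load side: mm/maccess.c drives __put_kernel_nofault() from copy_to_kernel_nofault() with the analogous loop (again paraphrased):

	#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)	\
		while (len >= sizeof(type)) {					\
			__put_kernel_nofault(dst, src, type, err_label);	\
			dst += sizeof(type);					\
			src += sizeof(type);					\
			len -= sizeof(type);					\
		}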
@@ -529,109 +442,55 @@ do { \
 #define DADDI_SCRATCH "$0"
 #endif
 
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-
-#define __invoke_copy_from(func, to, from, n)				\
-({									\
-	register void *__cu_to_r __asm__("$4");				\
-	register const void __user *__cu_from_r __asm__("$5");		\
-	register long __cu_len_r __asm__("$6");				\
-									\
-	__cu_to_r = (to);						\
-	__cu_from_r = (from);						\
-	__cu_len_r = (n);						\
-	__asm__ __volatile__(						\
-	".set\tnoreorder\n\t"						\
-	__MODULE_JAL(func)						\
-	".set\tnoat\n\t"						\
-	__UA_ADDU "\t$1, %1, %2\n\t"					\
-	".set\tat\n\t"							\
-	".set\treorder"							\
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
-	:								\
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
-	  DADDI_SCRATCH, "memory");					\
-	__cu_len_r;							\
-})
-
-#define __invoke_copy_to(func, to, from, n)				\
-({									\
-	register void __user *__cu_to_r __asm__("$4");			\
-	register const void *__cu_from_r __asm__("$5");			\
-	register long __cu_len_r __asm__("$6");				\
-									\
-	__cu_to_r = (to);						\
-	__cu_from_r = (from);						\
-	__cu_len_r = (n);						\
-	__asm__ __volatile__(						\
-	__MODULE_JAL(func)						\
-	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
-	:								\
-	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
-	  DADDI_SCRATCH, "memory");					\
-	__cu_len_r;							\
-})
-
-#define __invoke_copy_from_kernel(to, from, n)				\
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_kernel(to, from, n)				\
-	__invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_kernel(to, from, n)				\
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#ifndef CONFIG_EVA
-#define __invoke_copy_from_user(to, from, n)				\
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n)				\
-	__invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n)				\
-	__invoke_copy_from(__copy_user, to, from, n)
-
-#else
-
-/* EVA specific functions */
-
-extern size_t __copy_from_user_eva(void *__to, const void *__from,
-				   size_t __n);
-extern size_t __copy_to_user_eva(void *__to, const void *__from,
-				 size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-
-/*
- * Source or destination address is in userland. We need to go through
- * the TLB
- */
-#define __invoke_copy_from_user(to, from, n)				\
-	__invoke_copy_from(__copy_from_user_eva, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n)				\
-	__invoke_copy_to(__copy_to_user_eva, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n)				\
-	__invoke_copy_from(__copy_in_user_eva, to, from, n)
-
-#endif /* CONFIG_EVA */
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (eva_kernel_access())
-		return __invoke_copy_to_kernel(to, from, n);
-	else
-		return __invoke_copy_to_user(to, from, n);
-}
-
-static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (eva_kernel_access())
-		return __invoke_copy_from_kernel(to, from, n);
-	else
-		return __invoke_copy_from_user(to, from, n);
-}
+extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	register void *__cu_to_r __asm__("$4");
+	register const void __user *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = to;
+	__cu_from_r = from;
+	__cu_len_r = n;
+
+	__asm__ __volatile__(
+		".set\tnoreorder\n\t"
+		__MODULE_JAL(__raw_copy_from_user)
+		".set\tnoat\n\t"
+		__UA_ADDU "\t$1, %1, %2\n\t"
+		".set\tat\n\t"
+		".set\treorder"
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+
+	return __cu_len_r;
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	register void __user *__cu_to_r __asm__("$4");
+	register const void *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = (to);
+	__cu_from_r = (from);
+	__cu_len_r = (n);
+
+	__asm__ __volatile__(
+		__MODULE_JAL(__raw_copy_to_user)
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+
+	return __cu_len_r;
+}
 
 #define INLINE_COPY_FROM_USER
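
The register __asm__("$4") declarations pin the C arguments to the MIPS argument registers $4-$6 (a0-a2), the ad-hoc calling convention of the handwritten copy routines; __MODULE_JAL expands to a plain jal or a long call depending on whether the caller may live in a module, and the routine returns the number of uncopied bytes in $6. The __UA_ADDU into $1 ($at) inside the noreorder sequence appears to hand the routine the end address for its exception fixups. The pinning trick in isolation, as a hypothetical sketch (my_copy_routine is made up):

	/* Call an asm routine expecting dst/src/len in $4/$5/$6 and
	 * returning the residual length in $6. */
	static unsigned long call_copy(void *to, const void *from, unsigned long n)
	{
		register void *a0 __asm__("$4") = to;
		register const void *a1 __asm__("$5") = from;
		register unsigned long a2 __asm__("$6") = n;

		__asm__ __volatile__(
			"jal	my_copy_routine"	/* hypothetical routine */
			: "+r" (a0), "+r" (a1), "+r" (a2)
			:
			: "$31", "memory");		/* jal clobbers ra */

		return a2;	/* bytes NOT copied; 0 on full success */
	}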
@@ -640,13 +499,28 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (eva_kernel_access())
-		return ___invoke_copy_in_kernel(to, from, n);
-	else
-		return ___invoke_copy_in_user(to, from,	n);
+	register void __user *__cu_to_r __asm__("$4");
+	register const void __user *__cu_from_r __asm__("$5");
+	register long __cu_len_r __asm__("$6");
+
+	__cu_to_r = to;
+	__cu_from_r = from;
+	__cu_len_r = n;
+
+	__asm__ __volatile__(
+		".set\tnoreorder\n\t"
+		__MODULE_JAL(__raw_copy_in_user)
+		".set\tnoat\n\t"
+		__UA_ADDU "\t$1, %1, %2\n\t"
+		".set\tat\n\t"
+		".set\treorder"
+		: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+		:
+		: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+		  DADDI_SCRATCH, "memory");
+
+	return __cu_len_r;
 }
 
-extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
 
 /*
@@ -672,17 +546,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
 #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
 #endif /* CONFIG_CPU_MICROMIPS */
 
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, $0\n\t"
-			"move\t$6, %2\n\t"
-			__MODULE_JAL(__bzero_kernel)
-			"move\t%0, $6"
-			: "=r" (res)
-			: "r" (addr), "r" (size)
-			: bzero_clobbers);
-	} else {
 	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
@@ -693,7 +556,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
 		: "=r" (res)
 		: "r" (addr), "r" (size)
 		: bzero_clobbers);
-	}
 
 	return res;
 }
@@ -707,7 +569,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
 	__cl_size;							\
 })
 
-extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
 
 /*
@@ -733,17 +594,9 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 {
 	long res;
 
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			"move\t$6, %3\n\t"
-			__MODULE_JAL(__strncpy_from_kernel_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (__to), "r" (__from), "r" (__len)
-			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
-	} else {
+	if (!access_ok(__from, __len))
+		return -EFAULT;
+
 	might_fault();
 	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
@@ -754,12 +607,10 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 		: "=r" (res)
 		: "r" (__to), "r" (__from), "r" (__len)
 		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
-	}
 
 	return res;
 }
 
-extern long __strnlen_kernel_asm(const char __user *s, long n);
 extern long __strnlen_user_asm(const char __user *s, long n);
 
 /*
@@ -779,17 +630,10 @@ static inline long strnlen_user(const char __user *s, long n)
 {
 	long res;
 
+	if (!access_ok(s, n))
+		return -0;
+
 	might_fault();
-	if (eva_kernel_access()) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			"move\t$5, %2\n\t"
-			__MODULE_JAL(__strnlen_kernel_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (s), "r" (n)
-			: "$2", "$4", "$5", __UA_t0, "$31");
-	} else {
	__asm__ __volatile__(
 		"move\t$4, %1\n\t"
 		"move\t$5, %2\n\t"
@@ -798,7 +642,6 @@ static inline long strnlen_user(const char __user *s, long n)
 		: "=r" (res)
 		: "r" (s), "r" (n)
 		: "$2", "$4", "$5", __UA_t0, "$31");
-	}
 
 	return res;
 }
arch/mips/kernel/asm-offsets.c +0 −1

@@ -98,7 +98,6 @@ void output_thread_info_defines(void)
 	OFFSET(TI_TP_VALUE, thread_info, tp_value);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
-	OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
 	OFFSET(TI_REGS, thread_info, regs);
 	DEFINE(_THREAD_SIZE, THREAD_SIZE);
 	DEFINE(_THREAD_MASK, THREAD_MASK);
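
asm-offsets.c is never linked into the kernel: it is compiled to assembly and post-processed into include/generated/asm-offsets.h, whose constants (like the TI_ADDR_LIMIT being deleted here) let assembly code index into C structures. The OFFSET()/DEFINE() helpers come from include/linux/kbuild.h and reduce to emitting marker strings that the build scripts rewrite into #defines (paraphrased):

	/* include/linux/kbuild.h: emit "->SYM value" markers into the
	 * generated assembly; a sed pass turns each into "#define SYM value". */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

With addr_limit gone from struct thread_info, no assembly code needs its offset, so the marker goes away too.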