Commit eda8dd12 authored by Max Filippov
Browse files

xtensa: use a14 instead of a15 in inline assembly



a15 is a frame pointer in the call0 xtensa ABI, don't use it explicitly
in the inline assembly. Use a14 instead, as it has the same properties
as a15 w.r.t. window overflow.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent e369953a
Loading
Loading
Loading
Loading
+13 −13
Original line number Diff line number Diff line
@@ -25,15 +25,15 @@
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    rsil a14, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    wsr  a14, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

@@ -185,15 +185,15 @@ static inline void arch_atomic_##op(int i, atomic_t * v) \
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			: "a14", "memory"				\
			);						\
}									\

@@ -203,15 +203,15 @@ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
			"       rsil    a14,"__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
@@ -223,16 +223,16 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
			"       rsil    a14,"__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[tmp], %[result], %[i]\n"	\
			"       s32i    %[tmp], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       wsr     a14, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			: "a14", "memory"				\
			);						\
									\
	return vval;							\
+8 −8
Original line number Diff line number Diff line
@@ -52,16 +52,16 @@ __cmpxchg_u32(volatile int *p, int old, int new)
	return new;
#else
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[old], %[mem]\n"
			"       bne     %[old], %[cmp], 1f\n"
			"       s32i    %[new], %[mem]\n"
			"1:\n"
			"       wsr     a15, ps\n"
			"       wsr     a14, ps\n"
			"       rsync\n"
			: [old] "=&a" (old), [mem] "+m" (*p)
			: [cmp] "a" (old), [new] "r" (new)
			: "a15", "memory");
			: "a14", "memory");
	return old;
#endif
}
@@ -116,10 +116,10 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * Note that a14 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * may not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * a14 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

@@ -157,14 +157,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#else
	unsigned long tmp;
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       rsil    a14, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[tmp], %[mem]\n"
			"       s32i    %[val], %[mem]\n"
			"       wsr     a15, ps\n"
			"       wsr     a14, ps\n"
			"       rsync\n"
			: [tmp] "=&a" (tmp), [mem] "+m" (*m)
			: [val] "a" (val)
			: "a15", "memory");
			: "a14", "memory");
	return tmp;
#endif
}