arch/parisc/lib/bitops.c (+6 −6)

--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -18,7 +18,7 @@ arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
 #endif
 
 #ifdef CONFIG_64BIT
-unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr)
+unsigned long notrace __xchg64(unsigned long x, volatile unsigned long *ptr)
 {
 	unsigned long temp, flags;
 
@@ -30,7 +30,7 @@ unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr)
 }
 #endif
 
-unsigned long __xchg32(int x, volatile int *ptr)
+unsigned long notrace __xchg32(int x, volatile int *ptr)
 {
 	unsigned long flags;
 	long temp;
@@ -43,7 +43,7 @@ unsigned long __xchg32(int x, volatile int *ptr)
 }
 
 
-unsigned long __xchg8(char x, volatile char *ptr)
+unsigned long notrace __xchg8(char x, volatile char *ptr)
 {
 	unsigned long flags;
 	long temp;
@@ -56,7 +56,7 @@ unsigned long __xchg8(char x, volatile char *ptr)
 }
 
 
-u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
+u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
 {
 	unsigned long flags;
 	u64 prev;
@@ -68,7 +68,7 @@ u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
 	return prev;
 }
 
-unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
+unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
 {
 	unsigned long flags;
 	unsigned int prev;
@@ -80,7 +80,7 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
 	return (unsigned long)prev;
 }
 
-u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
+u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
 {
 	unsigned long flags;
 	u8 prev;
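The change itself is mechanical: each of parisc's spinlock-based __xchg*/__cmpxchg_* helpers gains the kernel's notrace annotation. The diff carries no commit message here, but the usual motivation for notrace on low-level atomic helpers is that ftrace's own machinery may call them while tracing is being set up or while a trace event fires, so letting the function tracer instrument them risks recursion. As a hedged sketch of what the annotation does, assuming the standard generic definition (modulo minor spelling and per-arch differences across kernel versions) from include/linux/compiler_types.h:

/* `notrace` expands to a compiler attribute that suppresses function
 * instrumentation for the marked function. */
#define notrace __attribute__((__no_instrument_function__))

/* With CONFIG_FUNCTION_TRACER the kernel is compiled with -pg (or
 * -mfentry), so the compiler emits an mcount/__fentry__ call at the
 * entry of most functions, which ftrace patches at runtime. The
 * attribute above tells GCC/Clang to skip that call, so the function
 * tracer can never re-enter the marked function.
 *
 * example_xchg_helper is a hypothetical stand-in, not the parisc code:
 * the real helpers additionally wrap the access in
 * _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() on one
 * of the hashed __atomic_hash locks visible in the hunk context above. */
unsigned long notrace example_xchg_helper(unsigned long x,
					  volatile unsigned long *ptr)
{
	unsigned long old = *ptr;	/* no entry hook is emitted here */

	*ptr = x;
	return old;
}

The only thing the patch touches is the declarator: notrace sits between the return type and the function name, matching how the annotation is conventionally placed elsewhere in the kernel.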