arch/blackfin/include/asm/atomic.h +7 −102

#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__

#ifndef CONFIG_SMP
# include <asm-generic/atomic.h>
#else

#include <linux/types.h>
#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * Generally we do not concern about SMP BFIN systems, so we don't have
 * to deal with that.
 *
 * Tony Kou (tonyko@lineo.ca)   Lineo Inc.   2001
 */

#define ATOMIC_INIT(i)	{ (i) }
#define atomic_set(v, i)	(((v)->counter) = i)

#ifdef CONFIG_SMP
#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)

asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);

@@ -84,100 +81,6 @@ static inline int atomic_test_mask(int mask, atomic_t *v)

#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter += i;
	local_irq_restore_hw(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter -= i;
	local_irq_restore_hw(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp = 0;
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter += i;
	__temp = v->counter;
	local_irq_restore_hw(flags);

	return __temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp = 0;
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter -= i;
	__temp = v->counter;
	local_irq_restore_hw(flags);

	return __temp;
}

static inline void atomic_inc(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter++;
	local_irq_restore_hw(flags);
}

static inline void atomic_dec(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter--;
	local_irq_restore_hw(flags);
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter &= ~mask;
	local_irq_restore_hw(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save_hw(flags);
	v->counter |= mask;
	local_irq_restore_hw(flags);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))

@@ -210,4 +113,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)

#include <asm-generic/atomic-long.h>

#endif				/* __ARCH_BLACKFIN_ATOMIC __ */
#endif
#endif
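Note: every UP routine removed above follows the same pattern: local_irq_save_hw(), a plain C read-modify-write, local_irq_restore_hw(). On a uniprocessor the only concurrent observer is an interrupt handler, so masking interrupts is enough, and the asm-generic fallback now provides equivalent operations. As an illustration of the atomic_add_return() contract, here is a minimal userspace sketch using C11 atomics (the my_* names are invented; this is not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's atomic_t (which wraps a plain int). */
typedef struct { atomic_int counter; } my_atomic_t;

/* Same contract as atomic_add_return() above: add i, return the new value. */
static int my_atomic_add_return(int i, my_atomic_t *v)
{
	/* atomic_fetch_add returns the OLD value, so add i once more. */
	return atomic_fetch_add(&v->counter, i) + i;
}

int main(void)
{
	my_atomic_t v = { 5 };

	printf("%d\n", my_atomic_add_return(3, &v));	/* prints 8 */
	printf("%d\n", my_atomic_add_return(-8, &v));	/* prints 0 */
	return 0;
}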
arch/blackfin/include/asm/bitops.h +9 −189

#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef CONFIG_SMP
# include <asm-generic/bitops.h>
#else

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

#ifdef CONFIG_SMP

#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);

@@ -79,189 +75,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)

	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>		/* save_flags */

static inline void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	*a |= mask;
	local_irq_restore_hw(flags);
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	*a &= ~mask;
	local_irq_restore_hw(flags);
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save_hw(flags);
	*ADDR ^= mask;
	local_irq_restore_hw(flags);
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore_hw(flags);

	return retval;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore_hw(flags);

	return retval;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save_hw(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore_hw(flags);

	return retval;
}

#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

static inline int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
	return __test_bit(nr, addr);
}
#endif

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

@@ -272,10 +92,10 @@ static inline int test_bit(int nr, const void *addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* CONFIG_SMP */

#endif				/* _BLACKFIN_BITOPS_H */
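All of the bit helpers above share one addressing computation: bit nr of the map lives in 32-bit word nr >> 5 (nr / 32), at bit position nr & 0x1f (nr % 32). A standalone sketch of the non-atomic variants (illustration only; the my_* names are invented):

#include <stdio.h>

/* Bit nr sits in word nr >> 5 of the map, at position nr & 0x1f. */
static void my_set_bit(int nr, unsigned int *addr)
{
	addr[nr >> 5] |= 1u << (nr & 0x1f);
}

static int my_test_bit(int nr, const unsigned int *addr)
{
	return (addr[nr >> 5] >> (nr & 0x1f)) & 1;
}

int main(void)
{
	unsigned int bitmap[4] = { 0 };	/* 128 bits */

	my_set_bit(0, bitmap);
	my_set_bit(37, bitmap);	/* word 1, bit 5: bitmap[1] == 0x20 */
	printf("%d %d %d\n", my_test_bit(0, bitmap),
	       my_test_bit(37, bitmap), my_test_bit(38, bitmap)); /* 1 1 0 */
	return 0;
}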
arch/blackfin/include/asm/mutex.h +1 −1

@@ -10,7 +10,7 @@
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
-#include <asm-generic/mutex-dec.h>
+#include <asm-generic/mutex.h>
#else

static inline void
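This one-line change swaps the direct include of mutex-dec.h for the asm-generic/mutex.h wrapper. Assuming that wrapper simply pulls in mutex-dec.h (as it did in the asm-generic series of this era), behaviour is unchanged. The decrement-based fastpath that mutex-dec.h implements works roughly as in the following userspace sketch (C11 atomics, invented my_* names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* count: 1 = unlocked, 0 = locked, negative = locked with waiters.
 * The fastpath is a single atomic decrement; anything but a 1 -> 0
 * transition falls through to a slow path. */
struct my_mutex { atomic_int count; };

static void my_slowpath(struct my_mutex *m)
{
	(void)m;	/* a real implementation blocks or wakes waiters here */
}

static void my_mutex_lock(struct my_mutex *m)
{
	/* fetch_sub returns the old value: old == 1 means uncontended. */
	if (atomic_fetch_sub(&m->count, 1) != 1)
		my_slowpath(m);
}

static void my_mutex_unlock(struct my_mutex *m)
{
	/* old == 0 means nobody queued up while we held the lock. */
	if (atomic_fetch_add(&m->count, 1) != 0)
		my_slowpath(m);
}

int main(void)
{
	struct my_mutex m = { 1 };	/* starts unlocked */

	my_mutex_lock(&m);
	printf("count while held: %d\n", atomic_load(&m.count));	/* 0 */
	my_mutex_unlock(&m);
	return 0;
}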
arch/blackfin/include/asm/spinlock.h +6 −0

#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H

#ifndef CONFIG_SMP
# include <asm-generic/spinlock.h>
#else

#include <asm/atomic.h>

asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);

@@ -86,4 +90,6 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)

#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif

#endif /* !__BFIN_SPINLOCK_H */
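On SMP the real lock operations live in the __raw_*_asm assembly routines, and UP builds now fall back to asm-generic/spinlock.h. For intuition, the general shape such routines implement is a test-and-set loop with acquire/release ordering, sketched below in portable C11 (invented names, not the actual Blackfin assembly):

#include <stdatomic.h>
#include <stdio.h>

/* Test-and-set spinlock: the acquire side spins until it is the one
 * to flip the flag from clear to set. */
typedef struct { atomic_flag locked; } my_spinlock_t;

static void my_spin_lock(my_spinlock_t *l)
{
	while (atomic_flag_test_and_set_explicit(&l->locked,
						 memory_order_acquire))
		;	/* kernel code would call cpu_relax() here */
}

static int my_spin_trylock(my_spinlock_t *l)
{
	return !atomic_flag_test_and_set_explicit(&l->locked,
						  memory_order_acquire);
}

static void my_spin_unlock(my_spinlock_t *l)
{
	atomic_flag_clear_explicit(&l->locked, memory_order_release);
}

int main(void)
{
	my_spinlock_t l = { ATOMIC_FLAG_INIT };

	my_spin_lock(&l);
	printf("trylock while held: %d\n", my_spin_trylock(&l));	/* 0 */
	my_spin_unlock(&l);
	printf("trylock after unlock: %d\n", my_spin_trylock(&l));	/* 1 */
	return 0;
}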
arch/blackfin/include/asm/swab.h +1 −5

@@ -2,11 +2,7 @@
#define _BLACKFIN_SWAB_H

#include <linux/types.h>
#include <linux/compiler.h>

#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __SWAB_64_THRU_32__
#endif

#include <asm-generic/swab.h>

#ifdef __GNUC__
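The removed block defined __SWAB_64_THRU_32__ by hand; asm-generic/swab.h now makes that decision centrally (it sets the macro for 32-bit configurations). The macro tells the generic byte-swap code to synthesize the 64-bit swap from two 32-bit swaps, as in this sketch (local helpers, not the kernel's __swab32/__swab64):

#include <stdint.h>
#include <stdio.h>

static uint32_t my_swab32(uint32_t x)
{
	return (x << 24) | ((x & 0xff00) << 8) |
	       ((x >> 8) & 0xff00) | (x >> 24);
}

static uint64_t my_swab64(uint64_t x)
{
	/* Swap each 32-bit half, then exchange the halves. */
	return ((uint64_t)my_swab32((uint32_t)x) << 32) |
	       my_swab32((uint32_t)(x >> 32));
}

int main(void)
{
	printf("%016llx\n",
	       (unsigned long long)my_swab64(0x0102030405060708ULL));
	/* prints 0807060504030201 */
	return 0;
}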