arch/x86/include/asm/atomic_32.h  +36 −6

@@ -4,6 +4,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/processor.h>
+#include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 
 /*
@@ -145,8 +146,8 @@ static inline int atomic_inc_and_test(atomic_t *v)
 /**
  * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
  * @i: integer value to add
+ * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
@@ -164,8 +165,8 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 /**
  * atomic_add_return - add integer and return
- * @v: pointer of type atomic_t
  * @i: integer value to add
+ * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns @i + @v
  */
@@ -206,6 +207,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i, v);
 }
 
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
@@ -242,8 +246,33 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_inc_return(v)  (atomic_add_return(1, v))
-#define atomic_dec_return(v)  (atomic_sub_return(1, v))
+/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type short int
+ *
+ * Atomically adds 1 to @v
+ * Returns the new value of @v
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+	return *v;
+}
+
+#ifdef CONFIG_X86_64
+/**
+ * atomic_or_long - OR of two long integers
+ * @v1: pointer to type unsigned long
+ * @v2: pointer to type unsigned long
+ *
+ * Atomically ORs @v1 and @v2
+ * The result is stored in @v1
+ */
+static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+{
+	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
+}
+#endif
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr)				\
@@ -252,7 +281,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_set_mask(mask, addr)				\
 	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" (mask), "m" (*(addr)) : "memory")
+		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
+		     : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
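The two helpers added at the bottom of atomic_32.h are easiest to see in isolation. Below is a minimal user-space sketch of their semantics, assuming GCC on x86-64 and hard-coding LOCK_PREFIX to "lock; " (in the kernel it comes from <asm/alternative.h>, where the prefix is patched out on uniprocessor builds):

/* User-space stand-ins for atomic_inc_short() and atomic_or_long().
 * Build: gcc -O2 demo.c (x86-64 assumed, since orq is a 64-bit insn). */
#include <stdio.h>

#define LOCK_PREFIX "lock; "

static inline short int atomic_inc_short(short int *v)
{
        /* lock addw makes the increment itself atomic */
        asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
        return *v;
}

static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
        /* atomically OR v2 into the memory at v1 */
        asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}

int main(void)
{
        short int s = 41;
        unsigned long flags = 0x1;

        atomic_inc_short(&s);           /* s is now 42 */
        atomic_or_long(&flags, 0x4);    /* flags is now 0x5 */
        printf("s=%d flags=%#lx\n", s, flags);
        return 0;
}

Note that, exactly as in the kernel version, the "return *v" in atomic_inc_short() is a separate load after the locked add, not part of the atomic operation, so the returned value may already be stale under contention.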
arch/x86/include/asm/atomic_64.h  +53 −28

@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_ATOMIC_64_H
 #define _ASM_X86_ATOMIC_64_H
 
+#include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/processor.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
@@ -45,12 +47,12 @@ static inline void atomic_set(atomic_t *v, int i)
 static inline void atomic_add(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0"
-		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
- * atomic_sub - subtract the atomic variable
+ * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
@@ -59,8 +61,8 @@ static inline void atomic_add(int i, atomic_t *v)
 static inline void atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "subl %1,%0"
-		     : "=m" (v->counter)
-		     : "ir" (i), "m" (v->counter));
+		     : "+m" (v->counter)
+		     : "ir" (i));
 }
 
 /**
@@ -77,8 +79,8 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
@@ -91,8 +93,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 static inline void atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "+m" (v->counter));
 }
 
 /**
@@ -104,8 +105,7 @@ static inline void atomic_inc(atomic_t *v)
 static inline void atomic_dec(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "decl %0"
-		     : "=m" (v->counter)
-		     : "m" (v->counter));
+		     : "+m" (v->counter));
 }
 
 /**
@@ -121,8 +121,8 @@ static inline int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "decl %0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -139,8 +139,8 @@ static inline int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "incl %0; sete %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
 	return c != 0;
 }
 
@@ -158,13 +158,13 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
-		     : "=m" (v->counter), "=qm" (c)
-		     : "ir" (i), "m" (v->counter) : "memory");
+		     : "+m" (v->counter), "=qm" (c)
+		     : "ir" (i) : "memory");
 	return c;
 }
 
 /**
- * atomic_add_return - add and return
+ * atomic_add_return - add integer and return
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
@@ -172,13 +172,36 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i = i;
+	int __i;
+#ifdef CONFIG_M386
+	unsigned long flags;
+	if (unlikely(boot_cpu_data.x86 <= 3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor */
+	__i = i;
 	asm volatile(LOCK_PREFIX "xaddl %0, %1"
 		     : "+r" (i), "+m" (v->counter)
 		     : : "memory");
 	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_save(flags);
+	__i = atomic_read(v);
+	atomic_set(v, i + __i);
+	local_irq_restore(flags);
+	return i + __i;
+#endif
 }
 
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return atomic_add_return(-i, v);
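The rewritten atomic_add_return() saves the increment in __i because xaddl exchanges the register with the memory operand while adding: after the instruction, i holds the counter's previous value, so old value plus saved increment gives the post-add result. A compilable user-space sketch of just the xadd fast path (assuming GCC on x86, with LOCK_PREFIX hard-coded rather than taken from <asm/alternative.h>):

#include <stdio.h>

#define LOCK_PREFIX "lock; "

typedef struct { int counter; } atomic_t;

static inline int atomic_add_return(int i, atomic_t *v)
{
        int __i = i;    /* remember the increment */

        /* xaddl stores counter+i to memory and leaves the old
         * counter value in i; both operands are read and written,
         * hence the "+" on each constraint */
        asm volatile(LOCK_PREFIX "xaddl %0, %1"
                     : "+r" (i), "+m" (v->counter)
                     : : "memory");
        return i + __i; /* old value + increment = value after the add */
}

int main(void)
{
        atomic_t v = { 10 };

        printf("%d\n", atomic_add_return(5, &v));  /* prints 15 */
        printf("%d\n", v.counter);                 /* prints 15 */
        return 0;
}

The CONFIG_M386 path exists because the 386 lacks the xadd instruction (introduced with the 486); there the code falls back to disabling interrupts around a plain read-modify-write, which is sufficient on a UP-only 386.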
@@ -187,23 +210,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
 
-static inline long atomic_xchg(atomic_t *v, int new)
+static inline int atomic_xchg(atomic_t *v, int new)
 {
 	return xchg(&v->counter, new);
 }
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as it was not @u.
+ * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
@@ -236,6 +259,7 @@ static inline short int atomic_inc_short(short int *v)
 	return *v;
 }
 
+#ifdef CONFIG_X86_64
 /**
  * atomic_or_long - OR of two long integers
  * @v1: pointer to type unsigned long
@@ -248,6 +272,7 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
 {
 	asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
 }
+#endif
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr)				\
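The body of atomic_add_unless() is collapsed in this view; what its updated kernel-doc describes is the usual cmpxchg retry loop. A user-space sketch of that loop, substituting GCC's __sync_val_compare_and_swap() for the kernel's cmpxchg():

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c = v->counter;

        for (;;) {
                int old;

                if (c == u)             /* already at the excluded value */
                        break;
                old = __sync_val_compare_and_swap(&v->counter, c, c + a);
                if (old == c)           /* swap succeeded */
                        break;
                c = old;                /* lost a race; retry with fresh value */
        }
        return c != u;                  /* non-zero iff the add happened */
}

int main(void)
{
        atomic_t v = { 0 };

        printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter); /* 0 0 */
        v.counter = 3;
        printf("%d %d\n", atomic_add_unless(&v, 1, 0), v.counter); /* 1 4 */
        return 0;
}

This is also the pattern behind atomic_inc_not_zero(v), which the 32-bit header defines as atomic_add_unless((v), 1, 0).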