arch/alpha/include/asm/local.h  +0 −17

@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
-
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ALPHA_LOCAL_H */
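These are the "naive implementations" the removed comment refers to: each macro takes the per-cpu variable itself and applies &__get_cpu_var() internally. As a usage sketch only (the counter and caller below are hypothetical, not part of this patch), a per-CPU event counter built on this API would have looked like:

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-CPU counter; DEFINE_PER_CPU and local_t are real
 * kernel facilities, but this caller is illustrative only. */
static DEFINE_PER_CPU(local_t, nr_events);

static void count_event(void)
{
	/* Pass the variable, not its address: the macro expands to
	 * local_inc(&__get_cpu_var(nr_events)). */
	cpu_local_inc(nr_events);
}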
arch/m32r/include/asm/local.h  +0 −25

@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
  * a variable, not an address.
  */
 
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* __M32R_LOCAL_H */
arch/mips/include/asm/local.h  +0 −25

@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 
 #define __local_add(i, l)	((l)->a.counter+=(i))
 #define __local_sub(i, l)	((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_MIPS_LOCAL_H */
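The cpu_local_wrap_v()/cpu_local_wrap() pair removed here brackets an access in preempt_disable()/preempt_enable() using GCC statement expressions, so the value is sampled while the task cannot migrate to another CPU. A minimal userspace analogue of the pattern, with the preemption calls stubbed out (everything below is a stand-in except the wrapping technique itself):

#include <stdio.h>

/* Stubs: in the kernel these keep the task on its current CPU. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }

/* Same shape as the kernel macro: a GCC statement expression that
 * snapshots the expression's value with preemption disabled and
 * yields that snapshot as the result. */
#define cpu_local_wrap_v(l)		\
({					\
	long res__;			\
	preempt_disable();		\
	res__ = (l);			\
	preempt_enable();		\
	res__;				\
})

static long counter = 42;

int main(void)
{
	printf("%ld\n", cpu_local_wrap_v(counter));	/* prints 42 */
	return 0;
}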
arch/powerpc/include/asm/local.h  +0 −25

@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
 
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-	({ local_t res__;		\
-	   preempt_disable();		\
-	   res__ = (l);			\
-	   preempt_enable();		\
-	   res__; })
-#define cpu_local_wrap(l)		\
-	({ preempt_disable();		\
-	   l;				\
-	   preempt_enable(); })		\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l)	cpu_local_inc(l)
-#define __cpu_local_dec(l)	cpu_local_dec(l)
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ARCH_POWERPC_LOCAL_H */
arch/x86/include/asm/local.h  +0 −37

@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
 
 #define __local_add(i, l)	local_add((i), (l))
 #define __local_sub(i, l)	local_sub((i), (l))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations.  Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-({					\
-	local_t res__;			\
-	preempt_disable();		\
-	res__ = (l);			\
-	preempt_enable();		\
-	res__;				\
-})
-#define cpu_local_wrap(l)		\
-({					\
-	preempt_disable();		\
-	(l);				\
-	preempt_enable();		\
-})					\
-
-#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l)	cpu_local_inc((l))
-#define __cpu_local_dec(l)	cpu_local_dec((l))
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */
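The changelog is not shown in this excerpt, but the removed wrappers cover the same ground as the kernel's generic this_cpu_*() accessors, which provide preemption-safe access to a per-cpu variable by name. Assuming that is the intended migration path (an inference, not stated in this diff), the earlier counter sketch would reduce to:

/* Sketch only: assumes callers move to the generic this_cpu ops,
 * which handle preemption safety internally. */
static DEFINE_PER_CPU(long, nr_events);

static void count_event(void)
{
	this_cpu_inc(nr_events);	/* no explicit preempt bracket needed */
}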