Documentation/memory-barriers.txt  (+3 −3)

@@ -1662,7 +1662,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
     This assigns the value to the variable and then inserts a full memory
     barrier after it, depending on the function.  It isn't guaranteed to

@@ -1975,7 +1975,7 @@ after it has altered the task state:
	CPU 1
	===============================
	set_current_state();
-	  set_mb();
+	  smp_store_mb();
	    STORE current->state
	    <general barrier>
	LOAD event_indicated

@@ -2016,7 +2016,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
	CPU 1				CPU 2
	===============================	===============================
	set_current_state();		STORE event_indicated
-	  set_mb();			wake_up();
+	  smp_store_mb();		wake_up();
	    STORE current->state	<write barrier>
	    <general barrier>		STORE current->state
	LOAD event_indicated
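The documentation hunks above give the renamed primitive's contract: store the value, then a full memory barrier. As a minimal sketch of the sleeper/waker pattern the two diagrams encode (event_indicated, waiter() and waker() are illustrative names, not part of this patch; set_current_state() is itself defined in terms of smp_store_mb() after this series):

	#include <linux/sched.h>	/* set_current_state(), wake_up_process() */
	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */

	static int event_indicated;

	/* CPU 1: publish the new task state with a store + full barrier,
	 * then test the condition; the barrier keeps the state store
	 * ordered before the event_indicated load. */
	static void waiter(void)
	{
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(event_indicated))
			schedule();
		__set_current_state(TASK_RUNNING);
	}

	/* CPU 2: publish the event, then wake.  When it actually wakes
	 * the task, wake_up_process() supplies the write barrier shown
	 * in the right-hand column of the second diagram. */
	static void waker(struct task_struct *sleeper)
	{
		WRITE_ONCE(event_indicated, 1);
		wake_up_process(sleeper);
	}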
arch/alpha/include/asm/cmpxchg.h  (+0 −2)

@@ -66,6 +66,4 @@
 #undef __ASM__MB
 #undef ____cmpxchg
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #endif /* _ALPHA_CMPXCHG_H */
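Besides the rename, the series drops the __HAVE_ARCH_CMPXCHG feature-test macro here and in the avr32 header at the end, presumably because nothing tests it any more now that every architecture ends up with a cmpxchg() definition. A hypothetical consumer, sketched only to illustrate the preprocessor pattern being retired (slot, slot_lock and claim_slot() are invented names, not in-tree code):

	#include <linux/spinlock.h>

	static unsigned long slot;
	static DEFINE_SPINLOCK(slot_lock);

	/* Claim 'slot' if it is still zero; return the value seen there. */
	static unsigned long claim_slot(unsigned long new)
	{
	#ifdef __HAVE_ARCH_CMPXCHG
		/* Lock-free path where a native cmpxchg is advertised. */
		return cmpxchg(&slot, 0UL, new);
	#else
		unsigned long old;

		/* Fallback: emulate compare-and-swap under a spinlock. */
		spin_lock(&slot_lock);
		old = slot;
		if (old == 0UL)
			slot = new;
		spin_unlock(&slot_lock);
		return old;
	#endif
	}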
arch/arm/include/asm/barrier.h  (+1 −1)

@@ -81,7 +81,7 @@ do { \
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
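Note that this hunk is more than a rename: the plain assignment becomes WRITE_ONCE(), so the compiler may no longer tear the store or emit it more than once. The arm64 hunk below makes the identical substitution. A caller's-eye sketch of the new macro (publish_flag() and flag are invented names):

	#include <linux/compiler.h>	/* WRITE_ONCE() */
	#include <asm/barrier.h>	/* smp_store_mb(), smp_mb() */

	static int flag;

	/* Publish 'flag' and order the store before everything that follows. */
	static void publish_flag(void)
	{
		/* Expands to: WRITE_ONCE(flag, 1); smp_mb();
		 * WRITE_ONCE() stops the compiler from tearing or re-issuing
		 * the store; smp_mb() orders it against later loads/stores. */
		smp_store_mb(flag, 1);
	}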
arch/arm64/include/asm/barrier.h  (+1 −1)

@@ -114,7 +114,7 @@ do { \
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
-#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()		asm volatile("nop");
 
 #define smp_mb__before_atomic()	smp_mb()
arch/avr32/include/asm/cmpxchg.h  (+0 −2)

@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	   if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
 {