barrier.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb() barrier()
#endif

#ifndef rmb
#define rmb() mb()
#endif

#ifndef wmb
#define wmb() mb()
#endif

#ifndef dma_rmb
#define dma_rmb() rmb()
#endif

#ifndef dma_wmb
#define dma_wmb() wmb()
#endif
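
/*
 * Illustrative example (hypothetical names throughout): the producer/consumer
 * pairing dma_wmb()/dma_rmb() are meant for, with a descriptor in coherent
 * memory shared with a DMA-capable device. struct hyp_desc,
 * HYP_DESC_OWNED_BY_DEV and the helpers are made up for this sketch.
 */
#define HYP_DESC_OWNED_BY_DEV 0x1

struct hyp_desc {
	unsigned int addr;
	unsigned int len;
	unsigned int status;
};

static inline void hyp_post_buffer(struct hyp_desc *d,
				   unsigned int addr, unsigned int len)
{
	d->addr = addr;
	d->len  = len;
	/* make addr/len visible to the device before handing over ownership */
	dma_wmb();
	WRITE_ONCE(d->status, HYP_DESC_OWNED_BY_DEV);
}

static inline int hyp_reap_buffer(struct hyp_desc *d)
{
	if (READ_ONCE(d->status) & HYP_DESC_OWNED_BY_DEV)
		return 0;	/* device still owns the descriptor */
	/* don't read addr/len until ownership is seen to be returned */
	dma_rmb();
	return 1;
}
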
#ifndef __smp_mb
#define __smp_mb() mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb() __smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb() __smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb() __smp_wmb()
#endif

#else /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb() barrier()
#endif

#ifndef smp_rmb
#define smp_rmb() barrier()
#endif

#ifndef smp_wmb
#define smp_wmb() barrier()
#endif

#endif /* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	(typeof(*p))___p1; \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value) __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() __smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() __smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	(typeof(*p))___p1; \
})
#endif

#endif /* CONFIG_SMP */
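
/*
 * Illustrative example of the message-passing pattern smp_store_release()
 * and smp_load_acquire() are intended for. The hyp_msg_* variables and
 * helpers are hypothetical names for this sketch.
 */
static int hyp_msg_data;
static int hyp_msg_ready;

static inline void hyp_msg_publish(int data)
{
	hyp_msg_data = data;
	/* order the data store before the flag; pairs with the acquire below */
	smp_store_release(&hyp_msg_ready, 1);
}

static inline int hyp_msg_consume(int *data)
{
	if (!smp_load_acquire(&hyp_msg_ready))
		return 0;
	/* the paired release/acquire guarantees hyp_msg_data is up to date */
	*data = hyp_msg_data;
	return 1;
}
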
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
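
/*
 * Illustrative example, loosely modelled on a virtio-style ring: a guest
 * makes a descriptor slot visible to an SMP host before publishing the new
 * index. struct hyp_ring and hyp_ring_publish() are hypothetical.
 */
struct hyp_ring {
	unsigned int idx;
	unsigned int slots[16];
};

static inline void hyp_ring_publish(struct hyp_ring *r, unsigned int slot)
{
	r->slots[r->idx % 16] = slot;
	/* the host must observe the slot contents before the new index */
	virt_wmb();
	WRITE_ONCE(r->idx, r->idx + 1);
}
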
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order, so together they provide LOAD->{LOAD,STORE}
 * order, aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif
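
/*
 * Illustrative example: upgrading the control dependency of a busy-wait into
 * (load-)ACQUIRE ordering. Reuses the hypothetical hyp_msg_* variables from
 * the sketch above; cpu_relax() is assumed available, as it already is for
 * smp_cond_load_relaxed() below.
 */
static inline int hyp_wait_for_data(void)
{
	while (!READ_ONCE(hyp_msg_ready))
		cpu_relax();
	/* the loop alone orders LOAD->STORE; add LOAD->LOAD before reading data */
	smp_acquire__after_ctrl_dep();
	return hyp_msg_data;
}
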
/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
	typeof(ptr) __PTR = (ptr); \
	__unqual_scalar_typeof(*ptr) VAL; \
	for (;;) { \
		VAL = READ_ONCE(*__PTR); \
		if (cond_expr) \
			break; \
		cpu_relax(); \
	} \
	(typeof(*ptr))VAL; \
})
#endif
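
/*
 * Illustrative example: @cond_expr sees the freshly loaded value through the
 * pre-named VAL variable. hyp_wait_nonzero() is a hypothetical helper.
 */
static inline unsigned int hyp_wait_nonzero(unsigned int *p)
{
	/* spin until *p becomes non-zero; no ordering is implied */
	return smp_cond_load_relaxed(p, VAL != 0);
}
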
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
	__unqual_scalar_typeof(*ptr) _val; \
	_val = smp_cond_load_relaxed(ptr, cond_expr); \
	smp_acquire__after_ctrl_dep(); \
	(typeof(*ptr))_val; \
})
#endif
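
/*
 * Illustrative example: once the wait completes, everything written before
 * the matching smp_store_release() is visible. hyp_wait_for_publish() is a
 * hypothetical helper reusing the hyp_msg_* variables above.
 */
static inline int hyp_wait_for_publish(void)
{
	/* spin until the flag is set, then read the payload with ACQUIRE ordering */
	smp_cond_load_acquire(&hyp_msg_ready, VAL != 0);
	return hyp_msg_data;
}
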
/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have reached persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb() wmb()
#endif
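
/*
 * Illustrative example: pmem_wmb() sits after the (arch-specific) cache
 * writeback that pushes new contents towards persistent memory and before
 * anything that advertises the data as durable. hyp_pmem_commit() is a
 * hypothetical helper; arch_wb_cache_pmem() and size_t are assumed to be
 * available from the pmem/type support code.
 */
static inline void hyp_pmem_commit(void *addr, size_t size, int *done_flag)
{
	arch_wb_cache_pmem(addr, size);	/* write back the new contents */
	pmem_wmb();			/* ensure they reach the persistence domain */
	WRITE_ONCE(*done_flag, 1);	/* only now advertise completion */
}
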
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */