atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H

#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
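
/*
 * Illustrative sketch (not part of this header): handing data from one
 * CPU to another with the _release/_acquire variants. The names
 * 'payload' and 'flag' are hypothetical.
 *
 *	// CPU 0: publish payload, then release-store the flag.
 *	WRITE_ONCE(payload, 42);
 *	atomic_set_release(&flag, 1);
 *
 *	// CPU 1: acquire-load the flag; once it reads 1, the store to
 *	// payload above is guaranteed to be visible.
 *	while (!atomic_read_acquire(&flag))
 *		cpu_relax();
 */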

#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
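
/*
 * Usage sketch (illustrative): spin until an atomic_t becomes non-zero,
 * with ACQUIRE ordering against the store that satisfies the condition.
 * VAL names the freshly loaded value inside the condition expression;
 * 'v' stands in for any atomic_t.
 *
 *	atomic_cond_read_acquire(&v, VAL != 0);
 */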

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */

#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence		smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence	smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence	smp_mb__after_atomic
#endif
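
/*
 * An architecture whose atomics want a lighter-weight barrier than the
 * generic smp_mb__{before,after}_atomic() can override these fences
 * from its asm/atomic.h before this point. A rough sketch, modelled on
 * the riscv approach (the exact barrier is arch-specific and shown
 * purely as illustration):
 *
 *	#define __atomic_acquire_fence()			\
 *		__asm__ __volatile__("fence r, rw" ::: "memory")
 */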

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})
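
/*
 * Illustrative, simplified sketch of how the fallback headers below use
 * these helpers: given an arch-provided atomic_add_return_relaxed(),
 * the remaining orderings can be derived along the lines of
 *
 *	#define atomic_add_return_acquire(...)			\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *	#define atomic_add_return(...)				\
 *		__atomic_op_fence(atomic_add_return, __VA_ARGS__)
 *
 * See the generated code in linux/atomic-fallback.h for the real thing.
 */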

#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */