// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
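//
// For example (an illustrative sketch, not code from this file): if one
// thread writes a flag with NoBarrier_Store(&flag, 1), every other access to
// |flag| must also go through these atomicops functions; a plain read such as
// |if (flag)| elsewhere would be a non-atomic access to the same memory
// location.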
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able
//    to detect and optimize); see the example below.
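//
// For example (an illustrative sketch; |count| is a hypothetical variable,
// not something defined in this file):
//
//   Atomic32 count = 0;
//   Atomic32 result = NoBarrier_AtomicIncrement(&count, 1);
//   // result == 1, the post-incremented value; a bare fetch_add(1) on a
//   // std::atomic would have returned the previous value, 0.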
#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.
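//
// Concretely, every operation below follows the same pattern (a sketch,
// assuming the layout compatibility described above actually holds):
//
//   volatile Atomic32* ptr = ...;
//   ((AtomicLocation32)ptr)->store(1, std::memory_order_relaxed);
//
// i.e. the plain integer location is reinterpreted as a std::atomic and the
// corresponding member function is invoked on it.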
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

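// Note: compare_exchange_strong() updates |old_value| in place with the value
// actually observed when the exchange fails, and leaves it untouched on
// success, so returning |old_value| yields the contents of |*ptr| prior to
// the operation in both cases.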
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

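// fetch_add() defaults to std::memory_order_seq_cst, which provides the
// sequentially-consistent Barrier semantics described at the top of the file.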
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

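// std::memory_order_release is not a valid failure ordering for
// compare_exchange, so the failure case falls back to relaxed here.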
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

#if defined(ARCH_CPU_64_BITS)

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

#endif  // defined(ARCH_CPU_64_BITS)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_