spinning_mutex.h

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_

#include <algorithm>
#include <atomic>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/yield_processor.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/partition_alloc_base/win/windows_types.h"
#endif

#if BUILDFLAG(IS_POSIX)
#include <errno.h>
#include <pthread.h>
#endif

#if BUILDFLAG(IS_APPLE)
#include <os/lock.h>
#endif  // BUILDFLAG(IS_APPLE)

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/sync/mutex.h>
#endif

namespace partition_alloc::internal {
// The behavior of this class depends on whether PA_HAS_FAST_MUTEX is defined.
// 1. When it is defined:
//
// Simple spinning lock. It will spin in user space a set number of times
// before going into the kernel to sleep.
//
// This is intended to give "the best of both worlds" between a SpinLock and
// base::Lock:
// - SpinLock: Inlined fast path, no external function calls, just
//   compare-and-swap. Short waits do not go into the kernel. Good behavior in
//   low contention cases.
// - base::Lock: Good behavior in case of contention.
//
// We don't rely on base::Lock (which we could make spin by calling Try() in a
// loop), as its performance is below that of a custom spinlock, as seen on
// high-level benchmarks. Instead this implements a simple non-recursive mutex
// on top of the futex() syscall on Linux, SRWLock on Windows, os_unfair_lock
// on macOS, sync_mutex on Fuchsia, and pthread_mutex on other POSIX systems.
// The main difference between this and a libc implementation is that it only
// supports the simplest path: private (to a process), non-recursive mutexes
// with no priority inheritance and no timed waits.
//
// As a side-effect useful for the allocator, this code does not make any
// allocations; locks are small, with a constexpr constructor and no
// destructor.
//
// 2. Otherwise: this is a simple SpinLock, in the sense that it does not have
// any awareness of other threads' behavior.
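//
// Illustrative usage (a sketch, not a requirement of this header; callers in
// PartitionAlloc typically go through a scoped-lock wrapper rather than
// calling these methods directly):
//
//   SpinningMutex mutex;       // constexpr-constructible, no destructor.
//   mutex.Acquire();           // Spins briefly, then blocks if contended.
//   /* ... critical section; the lock itself never allocates ... */
//   mutex.Release();
//
//   if (mutex.Try()) {         // Non-blocking attempt.
//     mutex.Release();
//   }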
class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
 public:
  inline constexpr SpinningMutex();
  PA_ALWAYS_INLINE void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION();
  PA_ALWAYS_INLINE void Release() PA_UNLOCK_FUNCTION();
  PA_ALWAYS_INLINE bool Try() PA_EXCLUSIVE_TRYLOCK_FUNCTION(true);
  void AssertAcquired() const {}  // Not supported.
  void Reinit() PA_UNLOCK_FUNCTION();

 private:
  PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
  void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();

  // See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
  // cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz
  // would cost 150 * 64 / 3e9 ~= 3.2us.
  //
  // This applies to Linux kernels, on x86_64. On ARM we might want to spin
  // more.
  static constexpr int kSpinCount = 64;

#if defined(PA_HAS_FAST_MUTEX)
#if defined(PA_HAS_LINUX_KERNEL)
  void FutexWait();
  void FutexWake();

  static constexpr int kUnlocked = 0;
  static constexpr int kLockedUncontended = 1;
  static constexpr int kLockedContended = 2;

  std::atomic<int32_t> state_{kUnlocked};
#elif BUILDFLAG(IS_WIN)
  PA_CHROME_SRWLOCK lock_ = SRWLOCK_INIT;
#elif BUILDFLAG(IS_APPLE)
  os_unfair_lock unfair_lock_ = OS_UNFAIR_LOCK_INIT;
#elif BUILDFLAG(IS_POSIX)
  pthread_mutex_t lock_ = PTHREAD_MUTEX_INITIALIZER;
#elif BUILDFLAG(IS_FUCHSIA)
  sync_mutex lock_;
#endif
#else  // defined(PA_HAS_FAST_MUTEX)
  std::atomic<bool> lock_{false};

  // Spinlock-like, fallback.
  PA_ALWAYS_INLINE bool TrySpinLock();
  PA_ALWAYS_INLINE void ReleaseSpinLock();
  void LockSlowSpinLock();
#endif  // defined(PA_HAS_FAST_MUTEX)
};

PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
  // Not marked PA_LIKELY(), as:
  // 1. We don't know how much contention the lock would experience,
  // 2. This may lead to weird-looking code layout when inlined into a caller
  //    with PA_(UN)LIKELY() annotations.
  if (Try())
    return;

  return AcquireSpinThenBlock();
}
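
// For reference, a minimal sketch of what the out-of-line slow path declared
// above typically looks like. This is an assumption for illustration only;
// the real AcquireSpinThenBlock() is defined in the corresponding .cc file:
//
//   void SpinningMutex::AcquireSpinThenBlock() {
//     int tries = 0;
//     while (tries < kSpinCount) {
//       if (Try())
//         return;
//       // Hint to the CPU that this is a spin-wait loop.
//       PA_YIELD_PROCESSOR;
//       tries++;
//     }
//     // Still contended after kSpinCount attempts: block in the kernel.
//     LockSlow();
//   }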

inline constexpr SpinningMutex::SpinningMutex() = default;

#if defined(PA_HAS_FAST_MUTEX)

#if defined(PA_HAS_LINUX_KERNEL)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // Using the weak variant of compare_exchange(), which may fail spuriously.
  // On some architectures such as ARM, CAS is typically performed as a
  // LDREX/STREX pair, where the store may fail. In the strong version, there
  // is a loop inserted by the compiler to retry in these cases.
  //
  // Since the slow path (|LockSlow()|) retries anyway, there is no point
  // having two nested loops.
  int expected = kUnlocked;
  return (state_.load(std::memory_order_relaxed) == expected) &&
         state_.compare_exchange_weak(expected, kLockedUncontended,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed);
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  if (PA_UNLIKELY(state_.exchange(kUnlocked, std::memory_order_release) ==
                  kLockedContended)) {
    // |kLockedContended|: there is a waiter to wake up.
    //
    // Here there is a window where the lock is unlocked, since we just set it
    // to |kUnlocked| above. Meaning that another thread can grab the lock
    // in-between now and |FutexWake()| waking up a waiter. Aside from
    // potential fairness concerns, this is not an issue, as the newly-awakened
    // thread will check that the lock is still free.
    //
    // There is a small pessimization here though: if we have a single waiter,
    // then when it wakes up, the lock will be set to |kLockedContended|, so
    // when this waiter releases the lock, it will needlessly call
    // |FutexWake()|, even though there are no waiters. This is supported by
    // the kernel, and is what bionic (Android's libc) also does.
    FutexWake();
  }
}
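
// A minimal sketch of how the futex helpers declared above, and the
// LockSlow() path that uses them, typically fit together. These are
// assumptions for illustration; the real definitions live in the
// corresponding .cc file, and the exact futex flags are not part of this
// header's contract:
//
//   #include <linux/futex.h>
//   #include <sys/syscall.h>
//   #include <unistd.h>
//
//   void SpinningMutex::FutexWait() {
//     // Sleep only while |state_| still reads |kLockedContended|; spurious
//     // wakeups are fine, the caller re-checks the state in a loop.
//     syscall(SYS_futex, reinterpret_cast<int32_t*>(&state_),
//             FUTEX_WAIT_PRIVATE, kLockedContended, nullptr, nullptr, 0);
//   }
//
//   void SpinningMutex::FutexWake() {
//     // Wake at most one waiter; it retries the acquisition itself.
//     syscall(SYS_futex, reinterpret_cast<int32_t*>(&state_),
//             FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
//   }
//
//   void SpinningMutex::LockSlow() {
//     // Use exchange() rather than CAS: mark the lock contended whether or
//     // not it was free, then sleep until the previous holder hands it off.
//     while (state_.exchange(kLockedContended, std::memory_order_acquire) !=
//            kUnlocked) {
//       FutexWait();
//     }
//   }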

#elif BUILDFLAG(IS_WIN)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  return !!::TryAcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
}

#elif BUILDFLAG(IS_APPLE)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  return os_unfair_lock_trylock(&unfair_lock_);
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  return os_unfair_lock_unlock(&unfair_lock_);
}

#elif BUILDFLAG(IS_POSIX)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  int retval = pthread_mutex_trylock(&lock_);
  PA_DCHECK(retval == 0 || retval == EBUSY);
  return retval == 0;
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  int retval = pthread_mutex_unlock(&lock_);
  PA_DCHECK(retval == 0);
}

#elif BUILDFLAG(IS_FUCHSIA)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  return sync_mutex_trylock(&lock_) == ZX_OK;
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  sync_mutex_unlock(&lock_);
}

#endif

#else  // defined(PA_HAS_FAST_MUTEX)

PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // Possibly faster than CAS. The theory is that if the cacheline is shared,
  // then it can stay shared, for the contended case.
  return !lock_.load(std::memory_order_relaxed) &&
         !lock_.exchange(true, std::memory_order_acquire);
}

PA_ALWAYS_INLINE void SpinningMutex::Release() {
  lock_.store(false, std::memory_order_release);
}

PA_ALWAYS_INLINE void SpinningMutex::LockSlow() {
  return LockSlowSpinLock();
}
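
// For reference, a minimal sketch of the fallback slow path declared above.
// This is an assumption for illustration only; the real LockSlowSpinLock() is
// defined in the corresponding .cc file:
//
//   void SpinningMutex::LockSlowSpinLock() {
//     // No kernel-assisted blocking available here: keep yielding until the
//     // exchange in Try() succeeds.
//     while (!Try())
//       PA_YIELD_PROCESSOR;
//   }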

#endif  // defined(PA_HAS_FAST_MUTEX)

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPINNING_MUTEX_H_