spinning_mutex.cc

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/spinning_mutex.h"

#include <algorithm>  // For std::min.

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

#if BUILDFLAG(IS_POSIX)
#include <pthread.h>
#endif

#if defined(PA_HAS_LINUX_KERNEL)
#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#endif  // defined(PA_HAS_LINUX_KERNEL)

#if !defined(PA_HAS_FAST_MUTEX)
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"

#if BUILDFLAG(IS_POSIX)
#include <sched.h>
#define PA_YIELD_THREAD sched_yield()
#else  // Other OS
#warning "Thread yield not supported on this OS."
#define PA_YIELD_THREAD ((void)0)
#endif

#endif  // !defined(PA_HAS_FAST_MUTEX)
namespace partition_alloc::internal {

void SpinningMutex::Reinit() {
#if !BUILDFLAG(IS_APPLE)
  // On most platforms, no need to re-init the lock, can just unlock it.
  Release();
#else
  unfair_lock_ = OS_UNFAIR_LOCK_INIT;
#endif  // BUILDFLAG(IS_APPLE)
}
void SpinningMutex::AcquireSpinThenBlock() {
  int tries = 0;
  int backoff = 1;
  do {
    if (PA_LIKELY(Try()))
      return;
    // Note: Per the Intel optimization manual
    // (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
    // the "pause" instruction is more costly on Skylake Client than on
    // previous architectures. The latency is found to be 141 cycles there
    // (from ~10 on previous generations, a ~14x increase).
    //
    // According to Agner Fog's instruction tables, the latency is still >100
    // cycles on Ice Lake, and from other sources, seems to be high as well on
    // Alder Lake. Separately, it is (from
    // https://agner.org/optimize/instruction_tables.pdf) also high on AMD
    // Zen 3 (~65). So just assume that it's this way for most x86_64
    // architectures.
    //
    // Also, loop several times here, following the guidelines in section
    // 2.3.4 of the manual, "Pause latency in Skylake Client
    // Microarchitecture".
    for (int yields = 0; yields < backoff; yields++) {
      PA_YIELD_PROCESSOR;
      tries++;
    }
    constexpr int kMaxBackoff = 16;
    backoff = std::min(kMaxBackoff, backoff << 1);
  } while (tries < kSpinCount);

  LockSlow();
}
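
// Callers are not expected to invoke AcquireSpinThenBlock() directly; the
// inline fast path lives in the header. Assuming the usual split (Acquire()
// attempts Try() once, then falls back to this slow path), typical usage
// looks like:
//
//   SpinningMutex mutex;
//   mutex.Acquire();  // Inline Try(); AcquireSpinThenBlock() on contention.
//   /* ... critical section ... */
//   mutex.Release();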

#if defined(PA_HAS_FAST_MUTEX)

#if defined(PA_HAS_LINUX_KERNEL)

void SpinningMutex::FutexWait() {
  // Save and restore errno.
  int saved_errno = errno;
  // Don't check the return value, as we will not be woken up by a timeout,
  // since none is specified.
  //
  // Ignoring the return value doesn't impact correctness, as this acts as an
  // immediate wakeup. For completeness, the possible errors for FUTEX_WAIT
  // are:
  // - EACCES: state_ is not readable. Should not happen.
  // - EAGAIN: the value is not as expected, that is, not |kLockedContended|,
  //   in which case retrying the loop is the right behavior.
  // - EINTR: a signal was received; looping is the right behavior.
  // - EINVAL: invalid argument.
  //
  // Note: not checking the return value is the approach used in bionic and
  // glibc as well.
  //
  // Will return immediately if |state_| is no longer equal to
  // |kLockedContended|. Otherwise, sleeps and wakes up when |state_| may not
  // be |kLockedContended| anymore. Note that even without spurious wakeups,
  // the value of |state_| is not guaranteed when this returns, as another
  // thread may get the lock before we get to run.
  int err = syscall(SYS_futex, &state_, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                    kLockedContended, nullptr, nullptr, 0);

  if (err) {
    // These are programming errors; check for them.
    PA_DCHECK(errno != EACCES);
    PA_DCHECK(errno != EINVAL);
  }
  errno = saved_errno;
}
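
// For reference, the FUTEX_WAIT / FUTEX_WAKE pairing used here reduces to
// the following standalone sketch (illustrative only; |addr| and |expected|
// are hypothetical names, not part of this file):
//
//   // Atomically check that *addr still equals |expected|, and if so block
//   // until another thread issues a FUTEX_WAKE on the same address.
//   syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected,
//           nullptr, nullptr, 0);
//
//   // Wake at most one thread currently blocked on |addr|.
//   syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, nullptr,
//           nullptr, 0);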

void SpinningMutex::FutexWake() {
  int saved_errno = errno;
  long retval = syscall(SYS_futex, &state_, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
                        1 /* wake up a single waiter */, nullptr, nullptr, 0);
  PA_CHECK(retval != -1);
  errno = saved_errno;
}

void SpinningMutex::LockSlow() {
  // If this thread gets woken up but another one grabbed the lock first, go
  // back to sleep. See comments in |FutexWait()| to see why a loop is
  // required.
  while (state_.exchange(kLockedContended, std::memory_order_acquire) !=
         kUnlocked) {
    FutexWait();
  }
}
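
// The loop above is the wait half of the classic three-state futex mutex
// (see Drepper's "Futexes Are Tricky"). A minimal sketch of the full
// protocol, assuming state values kUnlocked == 0, kLockedUncontended == 1
// and kLockedContended == 2 (the actual constants live in the header; the
// values here are illustrative):
//
//   void Lock() {
//     int32_t expected = kUnlocked;
//     // Fast path: uncontended compare-and-swap.
//     if (state_.compare_exchange_strong(expected, kLockedUncontended,
//                                        std::memory_order_acquire,
//                                        std::memory_order_relaxed)) {
//       return;
//     }
//     // Slow path: advertise contention, then sleep until woken.
//     while (state_.exchange(kLockedContended, std::memory_order_acquire) !=
//            kUnlocked) {
//       FutexWait();
//     }
//   }
//
//   void Unlock() {
//     // Only pay for a wake syscall if a waiter recorded contention.
//     if (state_.exchange(kUnlocked, std::memory_order_release) ==
//         kLockedContended) {
//       FutexWake();
//     }
//   }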

#elif BUILDFLAG(IS_WIN)

void SpinningMutex::LockSlow() {
  ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_));
}

#elif BUILDFLAG(IS_APPLE)

void SpinningMutex::LockSlow() {
  return os_unfair_lock_lock(&unfair_lock_);
}

#elif BUILDFLAG(IS_POSIX)

void SpinningMutex::LockSlow() {
  int retval = pthread_mutex_lock(&lock_);
  PA_DCHECK(retval == 0);
}

#elif BUILDFLAG(IS_FUCHSIA)

void SpinningMutex::LockSlow() {
  sync_mutex_lock(&lock_);
}

#endif

#else  // defined(PA_HAS_FAST_MUTEX)

void SpinningMutex::LockSlowSpinLock() {
  int yield_thread_count = 0;
  do {
    if (yield_thread_count < 10) {
      PA_YIELD_THREAD;
      yield_thread_count++;
    } else {
      // At this point, it's likely that the lock is held by a lower priority
      // thread that is unable to finish its work because of the higher
      // priority threads spinning here. Sleeping should ensure that it makes
      // progress.
      base::PlatformThread::Sleep(base::Milliseconds(1));
    }
  } while (!TrySpinLock());
}
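
// The yield-then-sleep fallback above generalizes beyond this class. A
// self-contained sketch of the same pattern using std::atomic_flag
// (hypothetical example, not part of this file):
//
//   #include <atomic>
//   #include <chrono>
//   #include <thread>
//
//   std::atomic_flag flag = ATOMIC_FLAG_INIT;
//
//   void LockWithYieldThenSleep() {
//     int yields = 0;
//     while (flag.test_and_set(std::memory_order_acquire)) {
//       if (yields < 10) {
//         std::this_thread::yield();  // Cheap: give up this time slice.
//         yields++;
//       } else {
//         // Let a lower-priority lock holder actually get scheduled.
//         std::this_thread::sleep_for(std::chrono::milliseconds(1));
//       }
//     }
//   }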

#endif  // defined(PA_HAS_FAST_MUTEX)

}  // namespace partition_alloc::internal