partition_lock.h

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_

#include <atomic>
#include <type_traits>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/spinning_mutex.h"
#include "build/build_config.h"

namespace partition_alloc::internal {
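
// A non-recursive lock built on SpinningMutex. When PA_DCHECK_IS_ON, it also
// tracks the owning thread, so that reentrant acquisition (the allocator
// re-entering itself) crashes immediately instead of hanging.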
class PA_LOCKABLE Lock {
 public:
  inline constexpr Lock();

  void Acquire() PA_EXCLUSIVE_LOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // When PartitionAlloc is malloc(), it can easily become reentrant. For
    // instance, a DCHECK() triggers in external code (such as
    // base::Lock). DCHECK() error message formatting allocates, which
    // triggers PartitionAlloc, and then we get reentrancy, and in this case
    // infinite recursion.
    //
    // To avoid that, crash quickly when the code becomes reentrant.
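    //
    // A sketch of the reentrant call chain this guards against (assuming
    // PartitionAlloc is shimmed in as the process-wide malloc()):
    //
    //   malloc() -> PartitionAlloc -> Lock::Acquire()      [lock now held]
    //     -> a DCHECK() fires -> error message formatting allocates
    //     -> malloc() -> PartitionAlloc -> Lock::Acquire() on the same,
    //        already-held lock -> recursion instead of forward progress.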
    base::PlatformThreadRef current_thread = base::PlatformThread::CurrentRef();
    if (!lock_.Try()) {
      // The lock wasn't free when we tried to acquire it. This can be because
      // another thread or *this* thread was holding it.
      //
      // If it's this thread holding it, then it cannot have become free in
      // the meantime, and the current value of |owning_thread_ref_| is valid,
      // as it was set by this thread. Assuming that writes to
      // |owning_thread_ref_| are atomic, then if it's us, we are trying to
      // recursively acquire a non-recursive lock.
      //
      // Note that we don't rely on a DCHECK() in base::Lock(), as it would
      // itself allocate. Meaning that without this code, a reentrancy issue
      // hangs on Linux.
      if (PA_UNLIKELY(owning_thread_ref_.load(std::memory_order_acquire) ==
                      current_thread)) {
        // Trying to acquire the lock while it's held by this thread:
        // reentrancy issue.
        PA_IMMEDIATE_CRASH();
      }
      lock_.Acquire();
    }
    owning_thread_ref_.store(current_thread, std::memory_order_release);
#else
    lock_.Acquire();
#endif
  }

  void Release() PA_UNLOCK_FUNCTION() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Release();
  }

  void AssertAcquired() const PA_ASSERT_EXCLUSIVE_LOCK() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    PA_DCHECK(owning_thread_ref_.load(std::memory_order_acquire) ==
              base::PlatformThread::CurrentRef());
#endif
  }
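
  // Note on Reinit() below: it is presumably intended for cases where the
  // lock must be reset while formally held, e.g. in a child process after
  // fork(), where the owning thread no longer exists. (This rationale is an
  // assumption; it is not stated in this file.)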
  void Reinit() PA_UNLOCK_FUNCTION() {
    lock_.AssertAcquired();
#if BUILDFLAG(PA_DCHECK_IS_ON)
    owning_thread_ref_.store(base::PlatformThreadRef(),
                             std::memory_order_release);
#endif
    lock_.Reinit();
  }

 private:
  SpinningMutex lock_;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Should in theory be protected by |lock_|, but we need to read it to
  // detect recursive lock acquisition (and thus, the allocator becoming
  // reentrant).
  std::atomic<base::PlatformThreadRef> owning_thread_ref_{};
#endif
};

class PA_SCOPED_LOCKABLE ScopedGuard {
 public:
  explicit ScopedGuard(Lock& lock) PA_EXCLUSIVE_LOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Acquire();
  }
  ~ScopedGuard() PA_UNLOCK_FUNCTION() { lock_.Release(); }

 private:
  Lock& lock_;
};
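
// A minimal usage sketch, with hypothetical names (|g_lock|, Update()):
// ScopedGuard is an RAII guard in the style of std::lock_guard, tying
// Acquire()/Release() to a scope.
//
//   Lock g_lock;
//
//   void Update() {
//     ScopedGuard guard(g_lock);  // Acquire() in the constructor.
//     // ... critical section: |g_lock| is held here ...
//   }                             // Release() in the destructor.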

class PA_SCOPED_LOCKABLE ScopedUnlockGuard {
 public:
  explicit ScopedUnlockGuard(Lock& lock) PA_UNLOCK_FUNCTION(lock)
      : lock_(lock) {
    lock_.Release();
  }
  ~ScopedUnlockGuard() PA_EXCLUSIVE_LOCK_FUNCTION() { lock_.Acquire(); }

 private:
  Lock& lock_;
};
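
// Conversely, ScopedUnlockGuard temporarily drops a lock that the current
// thread already holds. A sketch with the same hypothetical names; the lock
// must be held when the guard is constructed:
//
//   void CallOutWhileLocked() {
//     // |g_lock| is held on entry.
//     ScopedUnlockGuard unlock(g_lock);  // Release() in the constructor.
//     // |g_lock| is NOT held here; safe to call code that acquires it.
//   }                                    // Acquire() re-taken on scope exit.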

constexpr Lock::Lock() = default;

// We want PartitionRoot to not have a global destructor, so this should not
// have one.
static_assert(std::is_trivially_destructible<Lock>::value, "");

}  // namespace partition_alloc::internal

namespace base {
namespace internal {
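
// Convenience aliases, presumably kept so existing //base code that uses the
// Partition* names keeps compiling during the migration to the
// partition_alloc namespace (an assumption; not stated in this file).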
using PartitionLock = ::partition_alloc::internal::Lock;
using PartitionAutoLock = ::partition_alloc::internal::ScopedGuard;

}  // namespace internal
}  // namespace base

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_LOCK_H_