// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_

#include <atomic>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#endif

namespace partition_alloc::internal {

#if BUILDFLAG(USE_BACKUP_REF_PTR)

namespace {
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
DoubleFreeOrCorruptionDetected() {
  PA_NO_CODE_FOLDING();
  PA_IMMEDIATE_CRASH();
}
}  // namespace

// Special-purpose atomic reference count class used by BackupRefPtrImpl.
// The least significant bit of the count is reserved for tracking the liveness
// state of an allocation: it's set when the allocation is created and cleared
// on free(). So the count can be:
//
//   1         for an allocation that is just returned from Alloc()
//   2 * k + 1 for a "live" allocation with k references
//   2 * k     for an allocation with k dangling references after Free()
//
// This protects against double-frees, as we check whether the reference count
// is odd in |ReleaseFromAllocator()|, and if not we have a double-free.
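//
// Illustrative walk-through (an addition, not part of the original header),
// assuming the scheme above, for a single raw_ptr<T>:
//
//   Alloc()                 // count == 1: live, 0 references
//   raw_ptr<T> created      // count == 3: live, 1 reference
//   Free()                  // count == 2: freed, 1 dangling reference
//   raw_ptr<T> destroyed    // count == 0: slot can be reclaimed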
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
 public:
  // This class holds an atomic bit field: `count_`. It holds up to 4 values:
  //
  // bits  name                   description
  // ----- ---------------------  ----------------------------------------
  // 0     is_allocated           Whether or not the memory is held by the
  //                              allocator.
  //                              - 1 at construction time.
  //                              - Decreased in ReleaseFromAllocator().
  //
  // 1-31  ptr_count              Number of raw_ptr<T>.
  //                              - Increased in Acquire().
  //                              - Decreased in Release().
  //
  // 32    dangling_detected      A dangling raw_ptr<> has been detected.
  //
  // 33-63 unprotected_ptr_count  Number of
  //                              raw_ptr<T, DisableDanglingPtrDetection>.
  //                              - Increased in AcquireFromUnprotectedPtr().
  //                              - Decreased in ReleaseFromUnprotectedPtr().
  //
  // The allocation is reclaimed if all of:
  // - |is_allocated|
  // - |ptr_count|
  // - |unprotected_ptr_count|
  // are zero.
  //
  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
  // |dangling_detected| is set and the error is reported via
  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will
  // be called when the last raw_ptr<> is released.
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint64_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0000'0000'0001;
  static constexpr CountType kPtrCountMask = 0x0000'0000'FFFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0xFFFF'FFFE'0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0001'0000'0000;
  static constexpr CountType kPtrInc = 0x0000'0000'0000'0002;
  static constexpr CountType kUnprotectedPtrInc = 0x0000'0002'0000'0000;
#else
  using CountType = uint32_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0001;
  static constexpr CountType kPtrCountMask = 0xFFFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0x0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0000;
  static constexpr CountType kPtrInc = 0x0000'0002;
#endif
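
  // Illustrative sanity checks (an addition, not in the original header): the
  // fields described above must not overlap, and each increment must fall
  // within its own field. These hold in both branches, since the unused masks
  // are zero in the 32-bit configuration.
  static_assert((kMemoryHeldByAllocatorBit & kPtrCountMask) == 0,
                "is_allocated must not overlap ptr_count");
  static_assert((kPtrCountMask & kUnprotectedPtrCountMask) == 0,
                "ptr_count must not overlap unprotected_ptr_count");
  static_assert((kDanglingRawPtrDetectedBit &
                 (kMemoryHeldByAllocatorBit | kPtrCountMask |
                  kUnprotectedPtrCountMask)) == 0,
                "dangling_detected must use its own bit");
  static_assert((kPtrInc & kPtrCountMask) == kPtrInc,
                "kPtrInc must lie within ptr_count");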

  PartitionRefCount();

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required
  // before the memory gets freed, necessitating an acquire/release barrier
  // before freeing the memory.
  //
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  //
  // FYI: this document records the assembly produced by the compiler on every
  // platform, in particular the uint64_t fetch_add on 32-bit CPUs:
  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
  PA_ALWAYS_INLINE void Acquire() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_add(kPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kPtrCountMask) != kPtrCountMask);
  }

  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();
    CountType old_count =
        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
             kUnprotectedPtrCountMask);
#else
    Acquire();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  PA_ALWAYS_INLINE bool Release() {
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_sub(kPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kPtrCountMask);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // If a dangling raw_ptr<> was detected, report it.
    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
                    kDanglingRawPtrDetectedBit)) {
      partition_alloc::internal::DanglingRawPtrReleased(
          reinterpret_cast<uintptr_t>(this));
    }
#endif

    return ReleaseCommon(old_count - kPtrInc);
  }

  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();

    CountType old_count =
        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kUnprotectedPtrCountMask);

    return ReleaseCommon(old_count - kUnprotectedPtrInc);
#else
    return Release();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
    CheckCookieIfSupported();

    // TODO(bartekn): Make the double-free check more effective. Once freed,
    // the ref-count is overwritten by an encoded freelist-next pointer.
    CountType old_count =
        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);

    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit)))
      DoubleFreeOrCorruptionDetected();

    if (PA_LIKELY(old_count == kMemoryHeldByAllocatorBit)) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // The allocation is about to get freed, so clear the cookie.
      ClearCookieIfSupported();
      return true;
    }

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // Check if any raw_ptr<> still exists. It is now dangling.
    if (PA_UNLIKELY(old_count & kPtrCountMask)) {
      count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);
      partition_alloc::internal::DanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

    return false;
  }
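
  // Illustrative sequence (an addition, not part of the original header),
  // assuming ENABLE_DANGLING_RAW_PTR_CHECKS, showing how the two
  // dangling-pointer reports pair up when a raw_ptr<T> outlives Free():
  //
  //   Acquire();               // raw_ptr<T> created: count == 3.
  //   ReleaseFromAllocator();  // Free(): |ptr_count| != 0, so
  //                            // DanglingRawPtrDetected(id) is reported and
  //                            // |dangling_detected| is set; returns false.
  //   Release();               // raw_ptr<T> destroyed: reports
  //                            // DanglingRawPtrReleased(id); returns true,
  //                            // so the slot can now be reclaimed.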
  178. // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
  179. // raw_ptr<T> references. There may be other references from raw pointers or
  180. // unique_ptr, but we have no way of tracking them, so we hope for the best.
  181. // To summarize, the function returns whether we believe the allocation can be
  182. // safely freed.
  183. PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
  184. CheckCookieIfSupported();
  185. return count_.load(std::memory_order_acquire) == kMemoryHeldByAllocatorBit;
  186. }
  187. PA_ALWAYS_INLINE bool IsAlive() {
  188. bool alive =
  189. count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
  190. if (alive)
  191. CheckCookieIfSupported();
  192. return alive;
  193. }
  194. #if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
  195. PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
  196. requested_size_ = static_cast<uint32_t>(size);
  197. }
  198. PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
  199. #endif // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
  200. private:
  201. // The common parts shared by Release() and ReleaseFromUnprotectedPtr().
  202. // Called after updating the ref counts, |count| is the new value of |count_|
  203. // set by fetch_sub. Returns true if memory can be reclaimed.
  204. PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
  205. // Do not release memory, if it is still held by any of:
  206. // - The allocator
  207. // - A raw_ptr<T>
  208. // - A raw_ptr<T, DisableDanglingPtrDetection>
  209. //
  210. // Assuming this raw_ptr is not dangling, the memory must still be held at
  211. // least by the allocator, so this is PA_LIKELY true.
  212. if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
  213. kUnprotectedPtrCountMask)))) {
  214. return false; // Do not release the memory.
  215. }
  216. // In most thread-safe reference count implementations, an acquire
  217. // barrier is required so that all changes made to an object from other
  218. // threads are visible to its destructor. In our case, the destructor
  219. // finishes before the final `Release` call, so it shouldn't be a problem.
  220. // However, we will keep it as a precautionary measure.
  221. std::atomic_thread_fence(std::memory_order_acquire);
  222. // The allocation is about to get freed, so clear the cookie.
  223. ClearCookieIfSupported();
  224. return true;
  225. }
  226. // The cookie helps us ensure that:
  227. // 1) The reference count pointer calculation is correct.
  228. // 2) The returned allocation slot is not freed.
  229. PA_ALWAYS_INLINE void CheckCookieIfSupported() {
  230. #if defined(PA_REF_COUNT_CHECK_COOKIE)
  231. PA_CHECK(brp_cookie_ == CalculateCookie());
  232. #endif
  233. }
  234. PA_ALWAYS_INLINE void ClearCookieIfSupported() {
  235. #if defined(PA_REF_COUNT_CHECK_COOKIE)
  236. brp_cookie_ = 0;
  237. #endif
  238. }
  239. #if defined(PA_REF_COUNT_CHECK_COOKIE)
  240. PA_ALWAYS_INLINE uint32_t CalculateCookie() {
  241. return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
  242. kCookieSalt;
  243. }
  244. #endif // defined(PA_REF_COUNT_CHECK_COOKIE)

  // Note that in free slots, this is overwritten by encoded freelist
  // pointer(s). The way the pointers are encoded on 64-bit little-endian
  // architectures, count_ happens to stay even, which works well with the
  // double-free detection in ReleaseFromAllocator(). Don't change the layout
  // of this class, to preserve this functionality.
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  std::atomic<uint64_t> count_{kMemoryHeldByAllocatorBit};
#else
  std::atomic<uint32_t> count_{kMemoryHeldByAllocatorBit};
#endif

#if defined(PA_REF_COUNT_CHECK_COOKIE)
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif

#if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
  uint32_t requested_size_;
#endif
};

PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount()
#if defined(PA_REF_COUNT_CHECK_COOKIE)
    : brp_cookie_(CalculateCookie())
#endif
{
}

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(kAlignment % alignof(PartitionRefCount) == 0,
              "kAlignment must be a multiple of alignof(PartitionRefCount).");

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if defined(PA_REF_COUNT_CHECK_COOKIE) || \
    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#else   // defined(PA_REF_COUNT_CHECK_COOKIE) ||
        // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#endif  // defined(PA_REF_COUNT_CHECK_COOKIE) ||
        // defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
#else   // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
#if defined(PA_REF_COUNT_CHECK_COOKIE) && \
    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#elif defined(PA_REF_COUNT_CHECK_COOKIE) || \
    defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#else
static constexpr size_t kPartitionRefCountSizeShift = 2;
#endif
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
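
// Illustrative arithmetic (an addition, not in the original header): with
// ENABLE_DANGLING_RAW_PTR_CHECKS, |count_| alone is 8 bytes, so
// sizeof(PartitionRefCount) == 8 == 1 << 3. Appending the 4-byte cookie
// and/or the 4-byte requested size pads the struct (alignment 8) to
// 16 == 1 << 4, matching the shifts selected above.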

// We need one PartitionRefCount for each system page in a super page. They
// take `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())`
// space. They need to fit into a system page of metadata as sparsely as
// possible to minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetPartitionRefCountIndexMultiplierShift() {
  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
}
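
// Worked example (an addition, not in the original header): with 4 KiB system
// pages (SystemPageShift() == 12), 2 MiB super pages (kSuperPageShift == 21),
// and sizeof(PartitionRefCount) == 8 (kPartitionRefCountSizeShift == 3), the
// ref counts occupy x = 8 * 512 = 4096 bytes, the multiplier is
// SystemPageSize() / x = 1, and the shift is 12 * 2 - 21 - 3 = 0, i.e.
// `multiplier == 1 << 0`.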

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
    uintptr_t refcount_address = slot_start - sizeof(PartitionRefCount);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
#endif
    // Have to MTE-tag, because the address is untagged, but lies within a
    // slot area, which is protected by MTE.
    //
    // There could be a race condition though if the previous slot is
    // freed/retagged concurrently, so ideally the ref count should occupy its
    // own MTE granule.
    // TODO(richard.townsend@arm.com): improve this.
    return static_cast<PartitionRefCount*>(TagAddr(refcount_address));
  } else {
    // No need to tag, as the metadata region isn't protected by MTE.
    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
                   << GetPartitionRefCountIndexMultiplierShift();
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
#endif
    return bitmap_base + index;
  }
}

#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;

// This is for adjustment of pointers right past the allocation, which may
// point to the next slot. First subtract 1 to bring them to the intended
// slot, and only then we'll be able to find the ref-count in that slot.
constexpr size_t kPartitionPastAllocationAdjustment = 1;
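
// Illustrative example (an addition, not in the original header): a pointer
// just past an allocation, `slot_start + slot_size`, is numerically the start
// of the next slot. Subtracting kPartitionPastAllocationAdjustment (1 byte)
// moves it back inside the intended slot before the slot start, and thus the
// ref-count, is looked up.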

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
  // Have to MTE-tag, because the address is untagged, but lies within a slot
  // area, which is protected by MTE.
  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
}

#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
              "PartitionRefCount should fit into the in-slot buffer.");

#else  // BUILDFLAG(USE_BACKUP_REF_PTR)

static constexpr size_t kInSlotRefCountBufferSize = 0;
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;

#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_