
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_

#include <atomic>
#include <cstdint>
#include <limits>
#include <memory>

#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket_lookup.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/partition_tls.h"
#include "build/build_config.h"

#if defined(ARCH_CPU_X86_64) && defined(PA_HAS_64_BITS_POINTERS)
#include <algorithm>
#endif

namespace partition_alloc {

class ThreadCache;

namespace tools {

// This is used from ThreadCacheInspector, which runs in a different process.
// It scans the process's memory looking for the two needles, to locate the
// thread cache registry instance.
//
// These two values were chosen randomly, and in particular neither is a valid
// pointer on most 64-bit architectures.
#if defined(PA_HAS_64_BITS_POINTERS)
constexpr uintptr_t kNeedle1 = 0xe69e32f3ad9ea63;
constexpr uintptr_t kNeedle2 = 0x9615ee1c5eb14caf;
#else
constexpr uintptr_t kNeedle1 = 0xe69e32f3;
constexpr uintptr_t kNeedle2 = 0x9615ee1c;
#endif

// This array contains, in order:
// - kNeedle1
// - &ThreadCacheRegistry::Instance()
// - kNeedle2
//
// It is referenced in the thread cache constructor to make sure it is not
// removed by the compiler. It is also not const to make sure it ends up in
// .data.
constexpr size_t kThreadCacheNeedleArraySize = 4;
extern uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize];
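
// Illustrative out-of-process lookup (a sketch only, not the actual
// ThreadCacheInspector code): ReadRemoteWord() is a hypothetical helper that
// reads one word of the scanned process's memory, and the offsets assume the
// first three array elements are laid out in the order listed above.
//
//   uintptr_t FindRegistry(uintptr_t candidate) {
//     if (ReadRemoteWord(candidate) == kNeedle1 &&
//         ReadRemoteWord(candidate + 2 * sizeof(uintptr_t)) == kNeedle2) {
//       // The middle element is &ThreadCacheRegistry::Instance() in the
//       // scanned process.
//       return ReadRemoteWord(candidate + sizeof(uintptr_t));
//     }
//     return 0;
//   }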

class HeapDumper;
class ThreadCacheInspector;

}  // namespace tools

namespace internal {

extern PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;

// On Android, we have to go through emutls, since this is always a shared
// library, so don't bother.
#if defined(PA_THREAD_LOCAL_TLS) && !BUILDFLAG(IS_ANDROID)
#define PA_THREAD_CACHE_FAST_TLS
#endif

#if defined(PA_THREAD_CACHE_FAST_TLS)
extern PA_COMPONENT_EXPORT(
    PARTITION_ALLOC) thread_local ThreadCache* g_thread_cache;
#endif

}  // namespace internal

struct ThreadCacheLimits {
  // When trying to conserve memory, set the thread cache limit to this.
  static constexpr size_t kDefaultSizeThreshold = 512;
  // 32kiB is chosen here because, from local experiments, "zone" allocation in
  // V8 is performance-sensitive, and zones can (and do) grow up to 32kiB for
  // each individual allocation.
  static constexpr size_t kLargeSizeThreshold = 1 << 15;
  static_assert(kLargeSizeThreshold <= std::numeric_limits<uint16_t>::max(),
                "");
};

// Global registry of all ThreadCache instances.
//
// This class cannot allocate in the (Un)registerThreadCache() functions, as
// they are called from the ThreadCache constructor, which runs from within the
// allocator. However, the other members can allocate.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCacheRegistry {
 public:
  static ThreadCacheRegistry& Instance();
  // Do not instantiate.
  //
  // Several things are surprising here:
  // - The constructor is public even though this is intended to be a
  //   singleton: we cannot use a "static local" variable in |Instance()| as
  //   this is reached too early during CRT initialization on Windows, meaning
  //   that static local variables don't work (as they call into the
  //   uninitialized runtime). To sidestep that, we use a regular global
  //   variable in the .cc, which is fine as this object's constructor is
  //   constexpr.
  // - Marked inline so that the chromium style plugin doesn't complain that a
  //   "complex constructor" has an inline body. This warning is disabled when
  //   the constructor is explicitly marked "inline". Note that this is a false
  //   positive of the plugin, since constexpr implies inline.
  inline constexpr ThreadCacheRegistry();

  void RegisterThreadCache(ThreadCache* cache);
  void UnregisterThreadCache(ThreadCache* cache);
  // Prints statistics for all thread caches, or this thread's only.
  void DumpStats(bool my_thread_only, ThreadCacheStats* stats);
  // Purges this thread's cache, and asks the other ones to trigger Purge() at
  // a later point (during a deallocation).
  void PurgeAll();

  // Runs `PurgeAll` and updates the next interval which
  // `GetPeriodicPurgeNextIntervalInMicroseconds` returns.
  //
  // Note that it is the caller's responsibility to invoke this member function
  // periodically with an appropriate interval. This function does not schedule
  // any task or timer.
  void RunPeriodicPurge();
  // Returns the interval after which `RunPeriodicPurge` should be invoked
  // next.
  int64_t GetPeriodicPurgeNextIntervalInMicroseconds() const;
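
  // Illustrative sketch of how an embedder could drive periodic purging with
  // its own timer (this class schedules nothing itself; OnPurgeTimerFired()
  // and ScheduleTimer() are hypothetical embedder helpers):
  //
  //   void OnPurgeTimerFired() {
  //     auto& registry = ::partition_alloc::ThreadCacheRegistry::Instance();
  //     registry.RunPeriodicPurge();
  //     int64_t delay_us =
  //         registry.GetPeriodicPurgeNextIntervalInMicroseconds();
  //     ScheduleTimer(delay_us);  // Re-arm the timer to fire in |delay_us|.
  //   }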

  // Controls the thread cache size, by setting the multiplier to a value above
  // or below |ThreadCache::kDefaultMultiplier|.
  void SetThreadCacheMultiplier(float multiplier);
  void SetLargestActiveBucketIndex(uint8_t largest_active_bucket_index);

  static internal::Lock& GetLock() { return Instance().lock_; }

  // Purges all thread caches *now*. This is completely thread-unsafe, and
  // should only be called in a post-fork() handler.
  void ForcePurgeAllThreadAfterForkUnsafe();

  void ResetForTesting();

  static constexpr internal::base::TimeDelta kMinPurgeInterval =
      internal::base::Seconds(1);
  static constexpr internal::base::TimeDelta kMaxPurgeInterval =
      internal::base::Minutes(1);
  static constexpr internal::base::TimeDelta kDefaultPurgeInterval =
      2 * kMinPurgeInterval;
  static constexpr size_t kMinCachedMemoryForPurging = 500 * 1024;

 private:
  friend class tools::ThreadCacheInspector;
  friend class tools::HeapDumper;

  // Not using base::Lock as the object's constructor must be constexpr.
  internal::Lock lock_;
  ThreadCache* list_head_ PA_GUARDED_BY(GetLock()) = nullptr;
  bool periodic_purge_is_initialized_ = false;
  internal::base::TimeDelta periodic_purge_next_interval_ =
      kDefaultPurgeInterval;

#if BUILDFLAG(IS_NACL)
  // The thread cache is never used with NaCl, but its compiler doesn't
  // understand enough constexpr to handle the code below.
  uint8_t largest_active_bucket_index_ = 1;
#else
  uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
      ThreadCacheLimits::kDefaultSizeThreshold);
#endif
};

constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;

#if defined(PA_THREAD_CACHE_ENABLE_STATISTICS)
#define PA_INCREMENT_COUNTER(counter) ++counter
#else
#define PA_INCREMENT_COUNTER(counter) \
  do {                                \
  } while (0)
#endif  // defined(PA_THREAD_CACHE_ENABLE_STATISTICS)

#if BUILDFLAG(PA_DCHECK_IS_ON)

namespace internal {

class ReentrancyGuard {
 public:
  explicit ReentrancyGuard(bool& flag) : flag_(flag) {
    PA_CHECK(!flag_);
    flag_ = true;
  }

  ~ReentrancyGuard() { flag_ = false; }

 private:
  bool& flag_;
};

}  // namespace internal

#define PA_REENTRANCY_GUARD(x) \
  internal::ReentrancyGuard guard { x }

#else  // BUILDFLAG(PA_DCHECK_IS_ON)

#define PA_REENTRANCY_GUARD(x) \
  do {                         \
  } while (0)

#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

// Per-thread cache. *Not* threadsafe, must only be accessed from a single
// thread.
//
// In practice, this is easily enforced as long as only |instance| is
// manipulated, as it is a thread_local member. As such, any
// |ThreadCache::instance->*()| call will necessarily be done from a single
// thread.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ThreadCache {
 public:
  // Initializes the thread cache for |root|. May allocate, so should be called
  // with the thread cache disabled on the partition side, and without the
  // partition lock held.
  //
  // May only be called by a single PartitionRoot.
  static void Init(PartitionRoot<>* root);

  static void DeleteForTesting(ThreadCache* tcache);

  // Deletes the existing thread cache and creates a new one for |root|.
  static void SwapForTesting(PartitionRoot<>* root);

  // Removes the tombstone marker that would be returned by Get() otherwise.
  static void RemoveTombstoneForTesting();

  // Can be called several times; must be called before any ThreadCache
  // interactions.
  static void EnsureThreadSpecificDataInitialized();

  static ThreadCache* Get() {
#if defined(PA_THREAD_CACHE_FAST_TLS)
    return internal::g_thread_cache;
#else
    // This region isn't MTE-tagged.
    return reinterpret_cast<ThreadCache*>(
        internal::PartitionTlsGet(internal::g_thread_cache_key));
#endif
  }

  static bool IsValid(ThreadCache* tcache) {
    // Do not MTE-untag, as it'd mess up the sentinel value.
    return reinterpret_cast<uintptr_t>(tcache) & kTombstoneMask;
  }

  static bool IsTombstone(ThreadCache* tcache) {
    // Do not MTE-untag, as it'd mess up the sentinel value.
    return reinterpret_cast<uintptr_t>(tcache) == kTombstone;
  }
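
  // Typical caller pattern (an illustrative sketch, not the exact
  // PartitionRoot code): fetch the per-thread instance and validate it before
  // use, so that both nullptr and the tombstone are rejected by one check.
  //
  //   ThreadCache* tcache = ThreadCache::Get();
  //   if (ThreadCache::IsValid(tcache) &&
  //       tcache->MaybePutInCache(slot_start, bucket_index)) {
  //     return;  // The slot is now cached; skip the central allocator.
  //   }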

  // Creates a new ThreadCache associated with |root|.
  // Must be called without the partition lock held, as this may allocate.
  static ThreadCache* Create(PartitionRoot<>* root);

  ~ThreadCache();

  // Force placement new.
  void* operator new(size_t) = delete;
  void* operator new(size_t, void* buffer) { return buffer; }
  void operator delete(void* ptr) = delete;
  ThreadCache(const ThreadCache&) = delete;
  ThreadCache(const ThreadCache&&) = delete;
  ThreadCache& operator=(const ThreadCache&) = delete;

  // Tries to put a slot at |slot_start| into the cache.
  // The slot comes from the bucket at index |bucket_index| from the partition
  // this cache is for.
  //
  // Returns true if the slot was put in the cache, and false otherwise. This
  // can happen either because the cache is full or the allocation was too
  // large.
  PA_ALWAYS_INLINE bool MaybePutInCache(uintptr_t slot_start,
                                        size_t bucket_index);

  // Tries to allocate a memory slot from the cache.
  // Returns 0 on failure.
  //
  // Has the same behavior as RawAlloc(), that is: no cookie nor ref-count
  // handling. Sets |slot_size| to the allocated size upon success.
  PA_ALWAYS_INLINE uintptr_t GetFromCache(size_t bucket_index,
                                          size_t* slot_size);

  // Asks this cache to trigger |Purge()| at a later point. Can be called from
  // any thread.
  void SetShouldPurge();

  // Empties the cache.
  // The Partition lock must *not* be held when calling this.
  // Must be called from the thread this cache is for.
  void Purge();

  // |TryPurge| is the same as |Purge|, except that |TryPurge| will not crash
  // if the thread cache is inconsistent. Normally inconsistency is a sign of
  // a bug somewhere, so |Purge| should be preferred in most cases.
  void TryPurge();

  // Amount of cached memory for this thread's cache, in bytes.
  size_t CachedMemory() const;

  void AccumulateStats(ThreadCacheStats* stats) const;

  // Purges the thread cache of the current thread, if one exists.
  static void PurgeCurrentThread();

  size_t bucket_count_for_testing(size_t index) const {
    return buckets_[index].count;
  }

  internal::base::PlatformThreadId thread_id() const { return thread_id_; }

  // Sets the maximum size of allocations that may be cached by the thread
  // cache. This applies to all threads. However, the maximum size is bounded
  // by |kLargeSizeThreshold|.
  static void SetLargestCachedSize(size_t size);

  // Fill 1 / kBatchFillRatio * bucket.limit slots at a time.
  static constexpr uint16_t kBatchFillRatio = 8;
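  // For example, with kBatchFillRatio == 8 and a bucket limit of 64,
  // FillBucket() requests 8 slots from the central allocator per refill.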

  // Limit for the smallest bucket will be kDefaultMultiplier *
  // kSmallBucketBaseCount by default.
  static constexpr float kDefaultMultiplier = 2.;
  static constexpr uint8_t kSmallBucketBaseCount = 64;

  static constexpr size_t kDefaultSizeThreshold =
      ThreadCacheLimits::kDefaultSizeThreshold;
  static constexpr size_t kLargeSizeThreshold =
      ThreadCacheLimits::kLargeSizeThreshold;

  const ThreadCache* prev_for_testing() const
      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
    return prev_;
  }
  const ThreadCache* next_for_testing() const
      PA_EXCLUSIVE_LOCKS_REQUIRED(ThreadCacheRegistry::GetLock()) {
    return next_;
  }

 private:
  friend class tools::HeapDumper;
  friend class tools::ThreadCacheInspector;

  struct Bucket {
    internal::PartitionFreelistEntry* freelist_head = nullptr;
    // We want to keep sizeof(Bucket) small, so use small types.
    uint8_t count = 0;
    std::atomic<uint8_t> limit{};  // Can be changed from another thread.
    uint16_t slot_size = 0;

    Bucket();
  };
  static_assert(sizeof(Bucket) <= 2 * sizeof(void*), "Keep Bucket small.");
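  // For instance, on a 64-bit target Bucket packs into 8 (freelist_head) +
  // 1 (count) + 1 (limit) + 2 (slot_size) = 12 bytes, padded to 16, which
  // satisfies the 2 * sizeof(void*) bound asserted above.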

  explicit ThreadCache(PartitionRoot<>* root);
  static void Delete(void* thread_cache_ptr);

  void PurgeInternal();
  template <bool crash_on_corruption>
  void PurgeInternalHelper();

  // Fills a bucket from the central allocator.
  void FillBucket(size_t bucket_index);

  // Empties the |bucket| until there are at most |limit| objects in it.
  template <bool crash_on_corruption>
  void ClearBucketHelper(Bucket& bucket, size_t limit);
  void ClearBucket(Bucket& bucket, size_t limit);

  PA_ALWAYS_INLINE void PutInBucket(Bucket& bucket, uintptr_t slot_start);

  void ResetForTesting();

  // Releases the entire freelist starting at |head| to the root.
  template <bool crash_on_corruption>
  void FreeAfter(internal::PartitionFreelistEntry* head, size_t slot_size);

  static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);

#if BUILDFLAG(IS_NACL)
  // The thread cache is never used with NaCl, but its compiler doesn't
  // understand enough constexpr to handle the code below.
  static constexpr uint16_t kBucketCount = 1;
#else
  static constexpr uint16_t kBucketCount =
      internal::BucketIndexLookup::GetIndex(ThreadCache::kLargeSizeThreshold) +
      1;
#endif
  static_assert(
      kBucketCount < internal::kNumBuckets,
      "Cannot have more cached buckets than what the allocator supports");

  // On some architectures, ThreadCache::Get() can be called and return
  // something after the thread cache has been destroyed. In this case, we set
  // it to this value, to signal that the thread is being terminated, and that
  // the thread cache should not be used.
  //
  // This happens in particular on Windows, during program termination.
  //
  // We choose 0x1 as the value since it is an invalid pointer: it is not
  // aligned, and is too low. Also, checking !(ptr & kTombstoneMask) checks for
  // nullptr and kTombstone at the same time.
  static constexpr uintptr_t kTombstone = 0x1;
  static constexpr uintptr_t kTombstoneMask = ~kTombstone;
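  // Worked example of the mask trick: IsValid() computes |ptr & ~0x1|, which
  // is 0 for ptr == nullptr and for ptr == kTombstone (0x1), but non-zero for
  // any real, aligned ThreadCache address, so one test rejects both sentinels.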

  static uint8_t global_limits_[kBucketCount];
  // Index of the largest active bucket. Not all processes/platforms will use
  // all buckets, as using larger buckets increases the memory footprint.
  //
  // TODO(lizeb): Investigate making this per-thread rather than static, to
  // improve locality, and open the door to per-thread settings.
  static uint16_t largest_active_bucket_index_;

  // These are at the beginning as they're accessed for each allocation.
  uint32_t cached_memory_ = 0;
  std::atomic<bool> should_purge_;
  ThreadCacheStats stats_;

  // The buckets array is quite big, even though each Bucket is only 2 pointers
  // wide.
  Bucket buckets_[kBucketCount];

  // Cold data below.
  PartitionRoot<>* const root_;
  const internal::base::PlatformThreadId thread_id_;
#if BUILDFLAG(PA_DCHECK_IS_ON)
  bool is_in_thread_cache_ = false;
#endif

  // Intrusive list since ThreadCacheRegistry::RegisterThreadCache() cannot
  // allocate.
  ThreadCache* next_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());
  ThreadCache* prev_ PA_GUARDED_BY(ThreadCacheRegistry::GetLock());

  friend class ThreadCacheRegistry;
  friend class PartitionAllocThreadCacheTest;
  friend class tools::ThreadCacheInspector;
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              MultipleObjectsCachedPerBucket);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              LargeAllocationsAreNotCached);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              MultipleThreadCaches);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, RecordStats);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              ThreadCacheRegistry);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              MultipleThreadCachesAccounting);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              DynamicCountPerBucket);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              DynamicCountPerBucketClamping);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              DynamicCountPerBucketMultipleThreads);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              DynamicSizeThreshold);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
                              DynamicSizeThresholdPurge);
  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ClearFromTail);
};

PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
                                                   size_t bucket_index) {
  PA_REENTRANCY_GUARD(is_in_thread_cache_);
  PA_INCREMENT_COUNTER(stats_.cache_fill_count);

  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
    PA_INCREMENT_COUNTER(stats_.cache_fill_misses);
    return false;
  }

  auto& bucket = buckets_[bucket_index];

  PA_DCHECK(bucket.count != 0 || bucket.freelist_head == nullptr);

  PutInBucket(bucket, slot_start);
  cached_memory_ += bucket.slot_size;
  PA_INCREMENT_COUNTER(stats_.cache_fill_hits);

  // Relaxed ordering: we don't care about having an up-to-date or consistent
  // value, we just want it not to change while we are using it; hence the
  // relaxed ordering and the load into a local variable. Without the local
  // copy, we would be gambling that the compiler does not issue multiple
  // loads.
  uint8_t limit = bucket.limit.load(std::memory_order_relaxed);

  // Batched deallocation, amortizing lock acquisitions.
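  // For example, with limit == 128, the bucket may grow to 128 entries; once
  // it overflows, ClearBucket() trims it back down to 64, so roughly 64 frees
  // are returned to the root under a single lock acquisition.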
  if (PA_UNLIKELY(bucket.count > limit)) {
    ClearBucket(bucket, limit / 2);
  }

  if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
    PurgeInternal();

  return true;
}

PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
                                                     size_t* slot_size) {
#if defined(PA_THREAD_CACHE_ALLOC_STATS)
  stats_.allocs_per_bucket_[bucket_index]++;
#endif

  PA_REENTRANCY_GUARD(is_in_thread_cache_);
  PA_INCREMENT_COUNTER(stats_.alloc_count);

  // Only handle "small" allocations.
  if (PA_UNLIKELY(bucket_index > largest_active_bucket_index_)) {
    PA_INCREMENT_COUNTER(stats_.alloc_miss_too_large);
    PA_INCREMENT_COUNTER(stats_.alloc_misses);
    return 0;
  }

  auto& bucket = buckets_[bucket_index];
  if (PA_LIKELY(bucket.freelist_head)) {
    PA_INCREMENT_COUNTER(stats_.alloc_hits);
  } else {
    PA_DCHECK(bucket.count == 0);
    PA_INCREMENT_COUNTER(stats_.alloc_miss_empty);
    PA_INCREMENT_COUNTER(stats_.alloc_misses);

    FillBucket(bucket_index);

    // Very unlikely, means that the central allocator is out of memory. Let it
    // deal with it (may return 0, may crash).
    if (PA_UNLIKELY(!bucket.freelist_head))
      return 0;
  }

  PA_DCHECK(bucket.count != 0);
  internal::PartitionFreelistEntry* entry = bucket.freelist_head;

  // Passes the bucket size to |GetNext()|, so that in case of freelist
  // corruption, we know the bucket size that led to the crash, helping to
  // narrow down the search for the culprit. |bucket| was touched just now, so
  // this does not introduce another cache miss.
  internal::PartitionFreelistEntry* next =
      entry->GetNextForThreadCache<true>(bucket.slot_size);
  PA_DCHECK(entry != next);

  bucket.count--;
  PA_DCHECK(bucket.count != 0 || !next);
  bucket.freelist_head = next;
  *slot_size = bucket.slot_size;

  PA_DCHECK(cached_memory_ >= bucket.slot_size);
  cached_memory_ -= bucket.slot_size;

  return internal::SlotStartPtr2Addr(entry);
}

PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
                                               uintptr_t slot_start) {
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) && \
    defined(PA_HAS_64_BITS_POINTERS)
  // We see freelist corruption crashes happening in the wild. These are likely
  // due to out-of-bounds accesses in the previous slot, or to a Use-After-Free
  // somewhere in the code.
  //
  // The issue is that we detect the UaF far away from the place where it
  // happens. As a consequence, we should try to make incorrect code crash as
  // early as possible. Poisoning memory at free() time works for UaF, but it
  // was seen in the past to incur a high performance cost.
  //
  // Here, only poison the current cacheline, which we are touching anyway.
  // TODO(lizeb): Make sure this does not hurt performance.

  // Everything below requires this alignment.
  static_assert(internal::kAlignment == 16, "");

  // The pointer is always 16-byte aligned, so its start address is always
  // == 0 % 16. Its distance to the next cacheline, in bytes, is
  // `64 - ((slot_start & 63) / 16) * 16`.
  static_assert(
      internal::kPartitionCachelineSize == 64,
      "The computation below assumes that cache lines are 64 bytes long.");
  int distance_to_next_cacheline_in_16_bytes = 4 - ((slot_start >> 4) & 3);
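  // Example: if slot_start % 64 == 48, then ((slot_start >> 4) & 3) == 3 and
  // the distance is 1, i.e. only the final 16 bytes before the next cacheline
  // boundary are candidates for poisoning.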

  int slot_size_remaining_in_16_bytes =
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
      // When BRP is on in the "previous slot" mode, this slot may have a BRP
      // ref-count of the next, potentially allocated slot. Make sure we don't
      // overwrite it.
      (bucket.slot_size - sizeof(PartitionRefCount)) / 16;
#else
      bucket.slot_size / 16;
#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

  slot_size_remaining_in_16_bytes = std::min(
      slot_size_remaining_in_16_bytes, distance_to_next_cacheline_in_16_bytes);

  static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
                                              0xbadbad00, 0xbadbad00};
  // Give a hint to the compiler in hope it'll vectorize the loop.
#if PA_HAS_BUILTIN(__builtin_assume_aligned)
  void* slot_start_tagged = __builtin_assume_aligned(
      internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment);
#else
  void* slot_start_tagged = internal::SlotStartAddr2Ptr(slot_start);
#endif

  uint32_t* address_aligned = static_cast<uint32_t*>(slot_start_tagged);
  for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
    // Clang will expand the memcpy to a 16-byte write (movups on x86).
    memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
    address_aligned += 4;
  }
#endif  // defined(PA_HAS_FREELIST_SHADOW_ENTRY) && defined(ARCH_CPU_X86_64) &&
        // defined(PA_HAS_64_BITS_POINTERS)

  auto* entry = internal::PartitionFreelistEntry::EmplaceAndInitForThreadCache(
      slot_start, bucket.freelist_head);
  bucket.freelist_head = entry;
  bucket.count++;
}

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_