thread_cache.cc

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/thread_cache.h"

#include <sys/types.h>

#include <algorithm>
#include <atomic>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "build/build_config.h"

namespace partition_alloc {

namespace {
ThreadCacheRegistry g_instance;
}  // namespace

namespace tools {
uintptr_t kThreadCacheNeedleArray[kThreadCacheNeedleArraySize] = {
    kNeedle1, reinterpret_cast<uintptr_t>(&g_instance),
#if BUILDFLAG(RECORD_ALLOC_INFO)
    reinterpret_cast<uintptr_t>(&internal::g_allocs),
#else
    0,
#endif
    kNeedle2};
}  // namespace tools

namespace internal {
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionTlsKey g_thread_cache_key;
#if defined(PA_THREAD_CACHE_FAST_TLS)
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
thread_local ThreadCache* g_thread_cache;
#endif
}  // namespace internal

namespace {
// Since |g_thread_cache_key| is shared, make sure that no more than one
// PartitionRoot can use it.
static std::atomic<PartitionRoot<>*> g_thread_cache_root;

#if BUILDFLAG(IS_WIN)
void OnDllProcessDetach() {
  // Very late allocations do occur (see crbug.com/1159411#c7 for instance),
  // including during CRT teardown. This is problematic for the thread cache,
  // which relies on the CRT for TLS access, for instance. This cannot be
  // mitigated inside the thread cache (since getting to it requires querying
  // TLS), but the PartitionRoot associated with the thread cache can be made
  // to no longer use the thread cache.
  g_thread_cache_root.load(std::memory_order_relaxed)->flags.with_thread_cache =
      false;
}
#endif

static bool g_thread_cache_key_created = false;
}  // namespace

constexpr internal::base::TimeDelta ThreadCacheRegistry::kMinPurgeInterval;
constexpr internal::base::TimeDelta ThreadCacheRegistry::kMaxPurgeInterval;
constexpr internal::base::TimeDelta ThreadCacheRegistry::kDefaultPurgeInterval;
constexpr size_t ThreadCacheRegistry::kMinCachedMemoryForPurging;
uint8_t ThreadCache::global_limits_[ThreadCache::kBucketCount];

// Start with the normal size, not the maximum one.
uint16_t ThreadCache::largest_active_bucket_index_ =
    internal::BucketIndexLookup::GetIndex(ThreadCache::kDefaultSizeThreshold);

// static
ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
  return g_instance;
}

void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
  internal::ScopedGuard scoped_locker(GetLock());
  cache->next_ = nullptr;
  cache->prev_ = nullptr;

  ThreadCache* previous_head = list_head_;
  list_head_ = cache;
  cache->next_ = previous_head;
  if (previous_head)
    previous_head->prev_ = cache;
}

void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
  internal::ScopedGuard scoped_locker(GetLock());
  if (cache->prev_)
    cache->prev_->next_ = cache->next_;
  if (cache->next_)
    cache->next_->prev_ = cache->prev_;
  if (cache == list_head_)
    list_head_ = cache->next_;
}

void ThreadCacheRegistry::DumpStats(bool my_thread_only,
                                    ThreadCacheStats* stats) {
  ThreadCache::EnsureThreadSpecificDataInitialized();
  memset(reinterpret_cast<void*>(stats), 0, sizeof(ThreadCacheStats));

  internal::ScopedGuard scoped_locker(GetLock());
  if (my_thread_only) {
    auto* tcache = ThreadCache::Get();
    if (!ThreadCache::IsValid(tcache))
      return;
    tcache->AccumulateStats(stats);
  } else {
    ThreadCache* tcache = list_head_;
    while (tcache) {
      // Racy, as other threads are still allocating. This is not an issue,
      // since we are only interested in statistics. However, this means that
      // count is not necessarily equal to hits + misses for the various types
      // of events.
      tcache->AccumulateStats(stats);
      tcache = tcache->next_;
    }
  }
}

void ThreadCacheRegistry::PurgeAll() {
  auto* current_thread_tcache = ThreadCache::Get();

  // May take a while, don't hold the lock while purging.
  //
  // In most cases, the current thread is more important than other ones. For
  // instance in renderers, it is the main thread. It is also the only thread
  // that we can synchronously purge.
  //
  // The reason why we trigger the purge for this one first is that, assuming
  // that all threads are allocating memory, they will start purging
  // concurrently in the loop below. This will then make them all contend with
  // the main thread for the partition lock, since it is acquired/released once
  // per bucket. By purging the main thread first, we avoid these interferences
  // for this thread at least.
  if (ThreadCache::IsValid(current_thread_tcache))
    current_thread_tcache->Purge();

  {
    internal::ScopedGuard scoped_locker(GetLock());
    ThreadCache* tcache = list_head_;
    while (tcache) {
      PA_DCHECK(ThreadCache::IsValid(tcache));
      // Cannot purge directly, need to ask the other thread to purge "at some
      // point".
      // Note that this will not work if the other thread is sleeping forever.
      // TODO(lizeb): Handle sleeping threads.
      if (tcache != current_thread_tcache)
        tcache->SetShouldPurge();
      tcache = tcache->next_;
    }
  }
}
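
// Illustrative sketch, not code from this file: |SetShouldPurge()| only raises
// a flag; the flagged thread is expected to notice it on a later allocation
// and purge itself (see the comment in RunPeriodicPurge() below). Assuming a
// hook on the allocation fast path (presumably in thread_cache.h), the
// consuming side could look like:
//
//   // Hypothetical helper name, for illustration only.
//   void* ThreadCache::MaybePurgeThenAllocate(size_t bucket_index) {
//     if (should_purge_.load(std::memory_order_relaxed))
//       PurgeInternal();
//     // ... proceed with the regular per-thread bucket lookup.
//   }
//
// The relaxed load keeps the common case (flag not set) to a single cheap
// check on the allocation fast path.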
void ThreadCacheRegistry::ForcePurgeAllThreadAfterForkUnsafe() {
  internal::ScopedGuard scoped_locker(GetLock());
  ThreadCache* tcache = list_head_;
  while (tcache) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Before fork(), locks are acquired in the parent process. This means that
    // a concurrent allocation in the parent which must be filled by the
    // central allocator (i.e. the thread cache bucket is empty) will block
    // inside the thread cache waiting for the lock to be released.
    //
    // In the child process, this allocation will never complete since this
    // thread will not be resumed. However, calling |Purge()| triggers the
    // reentrancy guard since the parent process thread was suspended from
    // within the thread cache.
    // Clear the guard to prevent this from crashing.
    tcache->is_in_thread_cache_ = false;
#endif
    // There is a PA_DCHECK() in code called from |Purge()| checking that
    // thread cache memory accounting is correct. Since we are after fork() and
    // the other threads got interrupted mid-flight, this guarantee does not
    // hold, and we get inconsistent results. Rather than giving up on checking
    // this invariant in regular code, reset it here so that the PA_DCHECK()
    // passes. See crbug.com/1216964.
    tcache->cached_memory_ = tcache->CachedMemory();

    // At this point, we should call |TryPurge|. However, due to the thread
    // cache being possibly inconsistent at this point, this may crash. Rather
    // than crash, we'd prefer to simply not purge, even though this may leak
    // memory in some cases.
    //
    // See crbug.com/1289092 for details of the crashes.
    tcache = tcache->next_;
  }
}

void ThreadCacheRegistry::SetLargestActiveBucketIndex(
    uint8_t largest_active_bucket_index) {
  largest_active_bucket_index_ = largest_active_bucket_index;
}

void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
  // Two steps:
  // - Set the global limits, which will affect newly created threads.
  // - Enumerate all thread caches and set the limit to the global one.
  {
    internal::ScopedGuard scoped_locker(GetLock());
    ThreadCache* tcache = list_head_;

    // This may be called before *any* thread cache has serviced *any*
    // allocation, which can happen in tests, and in theory in non-test code as
    // well. In that case there is nothing to update.
    if (!tcache)
      return;

    // Setting the global limit while locked, because we need |tcache->root_|.
    ThreadCache::SetGlobalLimits(tcache->root_, multiplier);

    while (tcache) {
      PA_DCHECK(ThreadCache::IsValid(tcache));
      for (int index = 0; index < ThreadCache::kBucketCount; index++) {
        // This is racy, but we don't care if the limit is enforced later, and
        // we really want to avoid atomic instructions on the fast path.
        tcache->buckets_[index].limit.store(ThreadCache::global_limits_[index],
                                            std::memory_order_relaxed);
      }

      tcache = tcache->next_;
    }
  }
}

void ThreadCacheRegistry::RunPeriodicPurge() {
  if (!periodic_purge_is_initialized_) {
    ThreadCache::EnsureThreadSpecificDataInitialized();
    periodic_purge_is_initialized_ = true;
  }

  // Summing across all threads can be slow, but is necessary. Otherwise we
  // rely on the assumption that the current thread is a good proxy for overall
  // allocation activity. This is not the case for all process types.
  //
  // Since there is no synchronization with other threads, the value is stale,
  // which is fine.
  size_t cached_memory_approx = 0;
  {
    internal::ScopedGuard scoped_locker(GetLock());
    ThreadCache* tcache = list_head_;
    // Can run when there is no thread cache, in which case there is nothing to
    // do, and the task should not be rescheduled. This would typically
    // indicate a case where the thread cache was never enabled, or got
    // disabled.
    if (!tcache)
      return;

    while (tcache) {
      cached_memory_approx += tcache->cached_memory_;
      tcache = tcache->next_;
    }
  }

  // If cached memory is low, this means that either memory footprint is fine,
  // or the process is mostly idle, and not allocating much since the last
  // purge. In this case, back off. On the other hand, if there is a lot of
  // cached memory, make purging more frequent, but always within a set
  // frequency range.
  //
  // There is a potential drawback: a process that was idle for a long time and
  // suddenly becomes very active will take some time to go back to a regularly
  // scheduled purge with a small enough interval. This is the case for
  // instance of a renderer moving to foreground. To mitigate that, if the jump
  // in cached memory is very large, make a greater leap towards faster
  // purging.
  if (cached_memory_approx > 10 * kMinCachedMemoryForPurging) {
    periodic_purge_next_interval_ =
        std::min(kDefaultPurgeInterval, periodic_purge_next_interval_ / 2);
  } else if (cached_memory_approx > 2 * kMinCachedMemoryForPurging) {
    periodic_purge_next_interval_ =
        std::max(kMinPurgeInterval, periodic_purge_next_interval_ / 2);
  } else if (cached_memory_approx < kMinCachedMemoryForPurging) {
    periodic_purge_next_interval_ =
        std::min(kMaxPurgeInterval, periodic_purge_next_interval_ * 2);
  }

  // Make sure that the next interval is in the right bounds. Even though the
  // logic above should eventually converge to a reasonable interval, if a
  // sleeping background thread holds onto a large amount of cached memory,
  // then |PurgeAll()| will not free any memory from it, and the first branch
  // above can be taken repeatedly until the interval gets very small, as the
  // amount of cached memory cannot change between calls (since we do not purge
  // background threads, but only ask them to purge their own cache at the next
  // allocation).
  periodic_purge_next_interval_ = internal::base::clamp(
      periodic_purge_next_interval_, kMinPurgeInterval, kMaxPurgeInterval);

  PurgeAll();
}
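
// Worked example of the interval adaptation above (values are illustrative
// only, not the actual constants): assume kMinCachedMemoryForPurging =
// 500 KiB, kMinPurgeInterval = 1 s, kDefaultPurgeInterval = 4 s,
// kMaxPurgeInterval = 64 s, and a current interval of 16 s.
//   - ~6 MiB cached   (> 10x the threshold): interval -> min(4 s, 8 s)  = 4 s
//   - ~1.5 MiB cached (> 2x the threshold):  interval -> max(1 s, 8 s)  = 8 s
//   - ~100 KiB cached (< the threshold):     interval -> min(64 s, 32 s) = 32 s
// The final clamp() keeps the result within [kMinPurgeInterval,
// kMaxPurgeInterval] regardless of which branch was taken.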
int64_t ThreadCacheRegistry::GetPeriodicPurgeNextIntervalInMicroseconds()
    const {
  return periodic_purge_next_interval_.InMicroseconds();
}

void ThreadCacheRegistry::ResetForTesting() {
  periodic_purge_next_interval_ = kDefaultPurgeInterval;
}

// static
void ThreadCache::EnsureThreadSpecificDataInitialized() {
  // Using the registry lock to protect from concurrent initialization without
  // adding a special-purpose lock.
  internal::ScopedGuard scoped_locker(
      ThreadCacheRegistry::Instance().GetLock());
  if (g_thread_cache_key_created)
    return;

  bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
  PA_CHECK(ok);
  g_thread_cache_key_created = true;
}

// static
void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
  ThreadCache::Delete(tcache);
}

// static
void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
  auto* old_tcache = ThreadCache::Get();
  g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
  if (old_tcache)
    ThreadCache::DeleteForTesting(old_tcache);
  if (root) {
    Init(root);
    Create(root);
  } else {
#if BUILDFLAG(IS_WIN)
    // OnDllProcessDetach accesses g_thread_cache_root, which is nullptr now.
    internal::PartitionTlsSetOnDllProcessDetach(nullptr);
#endif
  }
}

// static
void ThreadCache::RemoveTombstoneForTesting() {
  PA_CHECK(IsTombstone(Get()));
  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
}

// static
void ThreadCache::Init(PartitionRoot<>* root) {
#if BUILDFLAG(IS_NACL)
  PA_IMMEDIATE_CRASH();
#endif

  PA_CHECK(root->buckets[kBucketCount - 1].slot_size ==
           ThreadCache::kLargeSizeThreshold);
  PA_CHECK(root->buckets[largest_active_bucket_index_].slot_size ==
           ThreadCache::kDefaultSizeThreshold);

  EnsureThreadSpecificDataInitialized();

  // Make sure that only one PartitionRoot wants a thread cache.
  PartitionRoot<>* expected = nullptr;
  if (!g_thread_cache_root.compare_exchange_strong(expected, root,
                                                   std::memory_order_seq_cst,
                                                   std::memory_order_seq_cst)) {
    PA_CHECK(false)
        << "Only one PartitionRoot is allowed to have a thread cache";
  }

#if BUILDFLAG(IS_WIN)
  internal::PartitionTlsSetOnDllProcessDetach(OnDllProcessDetach);
#endif

  SetGlobalLimits(root, kDefaultMultiplier);
}
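
// Illustrative usage, not code from this file (the root names are
// hypothetical): the compare_exchange_strong() above makes a second Init()
// with a different root fail the PA_CHECK(), since only one root may own the
// shared TLS key:
//
//   ThreadCache::Init(main_malloc_root);   // OK, claims g_thread_cache_root.
//   ThreadCache::Init(other_root);         // CHECK-fails.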
// static
void ThreadCache::SetGlobalLimits(PartitionRoot<>* root, float multiplier) {
  size_t initial_value =
      static_cast<size_t>(kSmallBucketBaseCount) * multiplier;

  for (int index = 0; index < kBucketCount; index++) {
    const auto& root_bucket = root->buckets[index];
    // Invalid bucket.
    if (!root_bucket.active_slot_spans_head) {
      global_limits_[index] = 0;
      continue;
    }

    // Smaller allocations are more frequent, and more performance-sensitive.
    // Cache more small objects, and fewer larger ones, to save memory.
    size_t slot_size = root_bucket.slot_size;
    size_t value;
    if (slot_size <= 128) {
      value = initial_value;
    } else if (slot_size <= 256) {
      value = initial_value / 2;
    } else if (slot_size <= 512) {
      value = initial_value / 4;
    } else {
      value = initial_value / 8;
    }

    // Bare minimum so that malloc() / free() in a loop will not hit the
    // central allocator each time.
    constexpr size_t kMinLimit = 1;
    // |PutInBucket()| is called on a full bucket, which should not overflow.
    constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;
    global_limits_[index] = static_cast<uint8_t>(
        internal::base::clamp(value, kMinLimit, kMaxLimit));
    PA_DCHECK(global_limits_[index] >= kMinLimit);
    PA_DCHECK(global_limits_[index] <= kMaxLimit);
  }
}
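
// Worked example of the scaling above (assuming, purely for illustration,
// kSmallBucketBaseCount = 64 and multiplier = 2.0, i.e. initial_value = 128):
//   slot_size <= 128  -> limit = 128
//   slot_size <= 256  -> limit =  64
//   slot_size <= 512  -> limit =  32
//   slot_size  > 512  -> limit =  16
// Each result is then clamped to [1, 254] so that it fits the uint8_t
// |global_limits_| entry and leaves room for the transiently-full bucket case
// mentioned above.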
// static
void ThreadCache::SetLargestCachedSize(size_t size) {
  if (size > ThreadCache::kLargeSizeThreshold)
    size = ThreadCache::kLargeSizeThreshold;
  largest_active_bucket_index_ =
      PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(size, false);
  PA_CHECK(largest_active_bucket_index_ < kBucketCount);
  ThreadCacheRegistry::Instance().SetLargestActiveBucketIndex(
      largest_active_bucket_index_);
}

// static
ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
  PA_CHECK(root);
  // See comment in thread_cache.h, this is used to make sure
  // kThreadCacheNeedleArray is kept in the final binary.
  PA_CHECK(tools::kThreadCacheNeedleArray[0] == tools::kNeedle1);

  // Placement new and RawAlloc() are used, as otherwise when this partition is
  // the malloc() implementation, the memory allocated for the new thread cache
  // would make this code reentrant.
  //
  // This also means that deallocation must use RawFreeStatic(), hence the
  // operator delete() implementation below.
  size_t raw_size = root->AdjustSizeForExtrasAdd(sizeof(ThreadCache));
  size_t usable_size;
  bool already_zeroed;

  auto* bucket = root->buckets +
                 PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
                     raw_size, root->flags.with_denser_bucket_distribution);
  uintptr_t buffer = root->RawAlloc(bucket, AllocFlags::kZeroFill, raw_size,
                                    internal::PartitionPageSize(), &usable_size,
                                    &already_zeroed);
  ThreadCache* tcache =
      new (internal::SlotStartAddr2Ptr(buffer)) ThreadCache(root);

  // This may allocate.
  internal::PartitionTlsSet(internal::g_thread_cache_key, tcache);
#if defined(PA_THREAD_CACHE_FAST_TLS)
  // |thread_local| variables with destructors cause issues on some platforms.
  // Since we need a destructor (to empty the thread cache), we cannot use them
  // directly. However, TLS accesses with |thread_local| are typically faster,
  // as they can turn into a fixed-offset load from a register (GS/FS on Linux
  // x86, for instance). On Windows, saving/restoring the last error increases
  // cost as well.
  //
  // To still get good performance, use |thread_local| to store a raw pointer,
  // and rely on the platform TLS to call the destructor.
  internal::g_thread_cache = tcache;
#endif  // defined(PA_THREAD_CACHE_FAST_TLS)

  return tcache;
}
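
// Illustrative sketch of the two-level TLS pattern used above (simplified and
// not code from this file): destructor registration goes through the platform
// TLS key, while lookups go through the cheap |thread_local| raw pointer.
//
//   // Registration, once per thread (slow, may allocate):
//   pthread_key_create(&key, &DestroyCache);  // or FlsAlloc() on Windows.
//   pthread_setspecific(key, cache);
//   thread_local Cache* g_cache = cache;      // No destructor attached.
//
//   // Lookup on the allocation fast path (a fixed-offset TLS load):
//   Cache* cache = g_cache;
//
// The platform key is what guarantees that DestroyCache() runs at thread
// exit; the |thread_local| raw pointer only exists to make the lookup cheap.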
ThreadCache::ThreadCache(PartitionRoot<>* root)
    : should_purge_(false),
      root_(root),
      thread_id_(internal::base::PlatformThread::CurrentId()),
      next_(nullptr),
      prev_(nullptr) {
  ThreadCacheRegistry::Instance().RegisterThreadCache(this);

  memset(&stats_, 0, sizeof(stats_));

  for (int index = 0; index < kBucketCount; index++) {
    const auto& root_bucket = root->buckets[index];
    Bucket* tcache_bucket = &buckets_[index];
    tcache_bucket->freelist_head = nullptr;
    tcache_bucket->count = 0;
    tcache_bucket->limit.store(global_limits_[index],
                               std::memory_order_relaxed);
    tcache_bucket->slot_size = root_bucket.slot_size;
    // Invalid bucket.
    if (!root_bucket.is_valid()) {
      // Explicitly set this, as size computations iterate over all buckets.
      tcache_bucket->limit.store(0, std::memory_order_relaxed);
    }
  }
}

ThreadCache::~ThreadCache() {
  ThreadCacheRegistry::Instance().UnregisterThreadCache(this);
  Purge();
}

// static
void ThreadCache::Delete(void* tcache_ptr) {
  auto* tcache = static_cast<ThreadCache*>(tcache_ptr);

  if (!IsValid(tcache))
    return;

#if defined(PA_THREAD_CACHE_FAST_TLS)
  internal::g_thread_cache = nullptr;
#else
  internal::PartitionTlsSet(internal::g_thread_cache_key, nullptr);
#endif

  auto* root = tcache->root_;
  tcache->~ThreadCache();
  // The ThreadCache was allocated using RawAlloc() and SlotStartAddr2Ptr(), so
  // it is shifted by extras, but is MTE-tagged.
  root->RawFree(internal::SlotStartPtr2Addr(tcache_ptr));

#if BUILDFLAG(IS_WIN)
  // On Windows, allocations do occur during thread/process teardown; make sure
  // they don't resurrect the thread cache.
  //
  // Don't MTE-tag, as it'd mess with the sentinel value.
  //
  // TODO(lizeb): Investigate whether this is needed on POSIX as well.
  internal::PartitionTlsSet(internal::g_thread_cache_key,
                            reinterpret_cast<void*>(kTombstone));
#if defined(PA_THREAD_CACHE_FAST_TLS)
  internal::g_thread_cache = reinterpret_cast<ThreadCache*>(kTombstone);
#endif
#endif  // BUILDFLAG(IS_WIN)
}
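
// Conceptual sketch (illustrative only; |Get()|, |IsValid()| and
// |IsTombstone()| are declared in thread_cache.h): the tombstone lets a later
// allocation on a dying thread distinguish "no cache yet" from "cache already
// destroyed", so it falls back to the central allocator instead of
// re-creating a cache that would never be torn down.
//
//   ThreadCache* tcache = ThreadCache::Get();
//   if (ThreadCache::IsValid(tcache)) {
//     // Fast path: serve the allocation from the per-thread freelists.
//   } else {
//     // nullptr (never created) or kTombstone (already deleted): go to the
//     // central allocator; only create a cache in the former case.
//   }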
ThreadCache::Bucket::Bucket() {
  limit.store(0, std::memory_order_relaxed);
}

void ThreadCache::FillBucket(size_t bucket_index) {
  // Filling multiple elements from the central allocator at a time has several
  // advantages:
  // - Amortize lock acquisition
  // - Increase hit rate
  // - Can improve locality, as consecutive allocations from the central
  //   allocator will likely return close addresses, especially early on.
  //
  // However, do not take too many items, to prevent memory bloat.
  //
  // Cache filling / purging policy:
  // We aim at keeping the buckets neither empty nor full, while minimizing
  // requests to the central allocator.
  //
  // For each bucket, there is a |limit| of how many cached objects there are
  // in the bucket, so |count| < |limit| at all times.
  // - Clearing: limit -> limit / 2
  // - Filling: 0 -> limit / kBatchFillRatio
  //
  // These thresholds are somewhat arbitrary, with these considerations:
  // (1) Batched filling should not completely fill the bucket
  // (2) Batched clearing should not completely clear the bucket
  // (3) Batched filling should not be too eager
  //
  // If (1) and (2) do not hold, we risk oscillations of bucket filling /
  // clearing which would greatly increase calls to the central allocator. (3)
  // tries to keep memory usage low. So clearing half of the bucket, and
  // filling a quarter of it are sensible defaults (see the worked example
  // after this function).
  PA_INCREMENT_COUNTER(stats_.batch_fill_count);

  Bucket& bucket = buckets_[bucket_index];
  // Some buckets may have a limit lower than |kBatchFillRatio|, but we still
  // want to at least allocate a single slot, otherwise we wrongly return
  // nullptr, which ends up deactivating the bucket.
  //
  // In these cases, we do not really batch bucket filling, but this is
  // expected to be used for the largest buckets, where over-allocating is not
  // advised.
  int count = std::max(
      1, bucket.limit.load(std::memory_order_relaxed) / kBatchFillRatio);

  size_t usable_size;
  bool is_already_zeroed;

  PA_DCHECK(!root_->buckets[bucket_index].CanStoreRawSize());
  PA_DCHECK(!root_->buckets[bucket_index].is_direct_mapped());

  size_t allocated_slots = 0;
  // Same as calling RawAlloc() |count| times, but acquires the lock only once.
  internal::ScopedGuard guard(root_->lock_);
  for (int i = 0; i < count; i++) {
    // Thread cache fill should not trigger expensive operations, to not grab
    // the lock for a long time needlessly, but also to not inflate memory
    // usage. Indeed, without AllocFlags::kFastPathOrReturnNull, cache
    // fill may activate a new PartitionPage, or even a new SuperPage, which is
    // clearly not desirable.
    //
    // |raw_size| is set to the slot size, as we don't know it. However, it is
    // only used for direct-mapped allocations and single-slot ones anyway,
    // which are not handled here.
    uintptr_t slot_start = root_->AllocFromBucket(
        &root_->buckets[bucket_index],
        AllocFlags::kFastPathOrReturnNull | AllocFlags::kReturnNull,
        root_->buckets[bucket_index].slot_size /* raw_size */,
        internal::PartitionPageSize(), &usable_size, &is_already_zeroed);

    // Either the previous allocation would require a slow path allocation, or
    // the central allocator is out of memory. If the bucket was filled with
    // some objects, then the allocation will be handled normally. Otherwise,
    // this goes to the central allocator, which will service the allocation,
    // return nullptr or crash.
    if (!slot_start)
      break;

    allocated_slots++;
    PutInBucket(bucket, slot_start);
  }

  cached_memory_ += allocated_slots * bucket.slot_size;
}
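
// Worked example of the fill policy above (kBatchFillRatio is assumed to be 4
// here, matching the "filling a quarter" default described in the comment):
//   - limit = 128: FillBucket() requests max(1, 128 / 4) = 32 slots, i.e. the
//     bucket is refilled to a quarter of its limit, not to the brim.
//   - limit = 2:   FillBucket() requests max(1, 2 / 4) = 1 slot, so large,
//     rarely-used buckets never over-allocate.
// If the central allocator cannot satisfy a request without a slow-path
// operation, the loop stops early and |cached_memory_| only accounts for the
// slots actually obtained.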
void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
  ClearBucketHelper<true>(bucket, limit);
}

template <bool crash_on_corruption>
void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
  // Avoids acquiring the lock needlessly.
  if (!bucket.count || bucket.count <= limit)
    return;

  // This serves two purposes: error checking and avoiding stalls when grabbing
  // the lock:
  // 1. Error checking: this is pretty clear. Since this path is taken
  //    infrequently, and is going to walk the entire freelist anyway, its
  //    incremental cost should be very small. Indeed, we free from the tail of
  //    the list, so all calls here will end up walking the entire freelist,
  //    and incurring the same amount of cache misses.
  // 2. Avoiding stalls: If one of the freelist accesses in |FreeAfter()|
  //    triggers a major page fault, and we are running on a low-priority
  //    thread, we don't want the thread to be blocked while holding the lock,
  //    causing a priority inversion.
  if constexpr (crash_on_corruption) {
    bucket.freelist_head->CheckFreeListForThreadCache(bucket.slot_size);
  }

  uint8_t count_before = bucket.count;
  if (limit == 0) {
    FreeAfter<crash_on_corruption>(bucket.freelist_head, bucket.slot_size);
    bucket.freelist_head = nullptr;
  } else {
    // Free the *end* of the list, not the head, since the head contains the
    // most recently touched memory.
    auto* head = bucket.freelist_head;
    size_t items = 1;  // Cannot free the freelist head.
    while (items < limit) {
      head = head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size);
      items++;
    }
    FreeAfter<crash_on_corruption>(
        head->GetNextForThreadCache<crash_on_corruption>(bucket.slot_size),
        bucket.slot_size);
    head->SetNext(nullptr);
  }
  bucket.count = limit;
  uint8_t count_after = bucket.count;
  size_t freed_memory = (count_before - count_after) * bucket.slot_size;
  PA_DCHECK(cached_memory_ >= freed_memory);
  cached_memory_ -= freed_memory;

  PA_DCHECK(cached_memory_ == CachedMemory());
}
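
// Worked example: with count = 20, limit = 8, and slot_size = 64, the helper
// above keeps the 8 most recently touched entries at the head of the
// freelist, severs the list after the 8th entry, and hands the remaining 12
// entries to FreeAfter(), which returns them to the central allocator under a
// single lock acquisition. |cached_memory_| then drops by 12 * 64 = 768 bytes.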
template <bool crash_on_corruption>
void ThreadCache::FreeAfter(internal::PartitionFreelistEntry* head,
                            size_t slot_size) {
  // Acquire the lock once. Deallocations from the same bucket are likely to be
  // hitting the same cache lines in the central allocator, and lock
  // acquisitions can be expensive.
  internal::ScopedGuard guard(root_->lock_);
  while (head) {
    uintptr_t slot_start = internal::SlotStartPtr2Addr(head);
    head = head->GetNextForThreadCache<crash_on_corruption>(slot_size);
    root_->RawFreeLocked(slot_start);
  }
}

void ThreadCache::ResetForTesting() {
  stats_.alloc_count = 0;
  stats_.alloc_hits = 0;
  stats_.alloc_misses = 0;

  stats_.alloc_miss_empty = 0;
  stats_.alloc_miss_too_large = 0;

  stats_.cache_fill_count = 0;
  stats_.cache_fill_hits = 0;
  stats_.cache_fill_misses = 0;

  stats_.batch_fill_count = 0;

  stats_.bucket_total_memory = 0;
  stats_.metadata_overhead = 0;

  Purge();
  PA_CHECK(cached_memory_ == 0u);
  should_purge_.store(false, std::memory_order_relaxed);
}

size_t ThreadCache::CachedMemory() const {
  size_t total = 0;
  for (const Bucket& bucket : buckets_)
    total += bucket.count * static_cast<size_t>(bucket.slot_size);

  return total;
}

void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
  stats->alloc_count += stats_.alloc_count;
  stats->alloc_hits += stats_.alloc_hits;
  stats->alloc_misses += stats_.alloc_misses;

  stats->alloc_miss_empty += stats_.alloc_miss_empty;
  stats->alloc_miss_too_large += stats_.alloc_miss_too_large;

  stats->cache_fill_count += stats_.cache_fill_count;
  stats->cache_fill_hits += stats_.cache_fill_hits;
  stats->cache_fill_misses += stats_.cache_fill_misses;

  stats->batch_fill_count += stats_.batch_fill_count;

#if defined(PA_THREAD_CACHE_ALLOC_STATS)
  for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
    stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
#endif  // defined(PA_THREAD_CACHE_ALLOC_STATS)

  // cached_memory_ is not necessarily equal to |CachedMemory()| here, since
  // this function can be called racily from another thread, to collect
  // statistics. Hence no DCHECK_EQ(CachedMemory(), cached_memory_).
  stats->bucket_total_memory += cached_memory_;

  stats->metadata_overhead += sizeof(*this);
}

void ThreadCache::SetShouldPurge() {
  should_purge_.store(true, std::memory_order_relaxed);
}

void ThreadCache::Purge() {
  PA_REENTRANCY_GUARD(is_in_thread_cache_);
  PurgeInternal();
}

void ThreadCache::TryPurge() {
  PA_REENTRANCY_GUARD(is_in_thread_cache_);
  PurgeInternalHelper<false>();
}

// static
void ThreadCache::PurgeCurrentThread() {
  auto* tcache = Get();
  if (IsValid(tcache))
    tcache->Purge();
}

void ThreadCache::PurgeInternal() {
  PurgeInternalHelper<true>();
}

template <bool crash_on_corruption>
void ThreadCache::PurgeInternalHelper() {
  should_purge_.store(false, std::memory_order_relaxed);
  // TODO(lizeb): Investigate whether lock acquisition should be less frequent.
  //
  // Note: iterate over all buckets, even the inactive ones. Since
  // |largest_active_bucket_index_| can be lowered at runtime, there may be
  // memory already cached in the inactive buckets. They should still be
  // purged.
  for (auto& bucket : buckets_)
    ClearBucketHelper<crash_on_corruption>(bucket, 0);
}

}  // namespace partition_alloc