thread_cache_unittest.cc

// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/thread_cache.h"

#include <algorithm>
#include <atomic>
#include <vector>

#include "base/allocator/partition_allocator/extended_api.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

// With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
// cannot test the thread cache.
//
// Finally, the thread cache is not supported on all platforms.
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
    defined(PA_THREAD_CACHE_SUPPORTED)

namespace partition_alloc {

namespace {

constexpr size_t kSmallSize = 12;
constexpr size_t kDefaultCountForSmallBucket =
    ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
constexpr size_t kFillCountForSmallBucket =
    kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;

constexpr size_t kMediumSize = 200;
constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
constexpr size_t kFillCountForMediumBucket =
    kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;

static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");

class DeltaCounter {
 public:
  explicit DeltaCounter(uint64_t& value)
      : current_value_(value), initial_value_(value) {}

  void Reset() { initial_value_ = current_value_; }
  uint64_t Delta() const { return current_value_ - initial_value_; }

 private:
  uint64_t& current_value_;
  uint64_t initial_value_;
};
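
// Typical usage of DeltaCounter in the tests below, assuming |tcache| points
// to the thread cache under test:
//
//   DeltaCounter alloc_counter{tcache->stats_.alloc_count};
//   void* ptr = root_->Alloc(kSmallSize, "");
//   EXPECT_EQ(1u, alloc_counter.Delta());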

// Forbid extras, since they make finding out which bucket is used harder.
ThreadSafePartitionRoot* CreatePartitionRoot() {
  ThreadSafePartitionRoot* root = new ThreadSafePartitionRoot({
      PartitionOptions::AlignedAlloc::kAllowed,
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
      PartitionOptions::ThreadCache::kEnabled,
#else
      PartitionOptions::ThreadCache::kDisabled,
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
      PartitionOptions::Quarantine::kAllowed,
      PartitionOptions::Cookie::kDisallowed,
      PartitionOptions::BackupRefPtr::kDisabled,
      PartitionOptions::BackupRefPtrZapping::kDisabled,
      PartitionOptions::UseConfigurablePool::kNo,
  });

  root->UncapEmptySlotSpanMemoryForTesting();

  // We do this here instead of in SetUp()/TearDown() because we need this to
  // run before the task environment (which creates threads and hence is racy
  // with attempting to disable the thread cache).
  internal::SwapOutProcessThreadCacheForTesting(root);

  return root;
}

}  // namespace

class PartitionAllocThreadCacheTest : public ::testing::TestWithParam<bool> {
 public:
  PartitionAllocThreadCacheTest() : root_(CreatePartitionRoot()) {}

  ~PartitionAllocThreadCacheTest() override {
    ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
    internal::SwapInProcessThreadCacheForTesting(root_);
    ThreadSafePartitionRoot::DeleteForTesting(root_);

    // Cleanup the global state so next test can recreate ThreadCache.
    if (ThreadCache::IsTombstone(ThreadCache::Get()))
      ThreadCache::RemoveTombstoneForTesting();
  }

 protected:
  void SetUp() override {
    if (GetParam())
      root_->SwitchToDenserBucketDistribution();
    else
      root_->ResetBucketDistributionForTesting();

#if defined(PA_HAS_64_BITS_POINTERS)
    // Another test can uninitialize the pools, so make sure they are
    // initialized.
    internal::PartitionAddressSpace::Init();
#endif  // defined(PA_HAS_64_BITS_POINTERS)
    ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
        ThreadCache::kDefaultMultiplier);
    ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);

    // Make sure that enough slot spans have been touched, otherwise cache fill
    // becomes unpredictable (because it doesn't take slow paths in the
    // allocator), which is an issue for tests.
    FillThreadCacheAndReturnIndex(kSmallSize, 1000);
    FillThreadCacheAndReturnIndex(kMediumSize, 1000);

    // There are allocations, a thread cache is created.
    auto* tcache = root_->thread_cache_for_testing();
    ASSERT_TRUE(tcache);

    ThreadCacheRegistry::Instance().ResetForTesting();
    tcache->ResetForTesting();
  }

  void TearDown() override {
    auto* tcache = root_->thread_cache_for_testing();
    ASSERT_TRUE(tcache);
    tcache->Purge();

    ASSERT_EQ(root_->get_total_size_of_allocated_bytes(),
              GetBucketSizeForThreadCache());
  }

  // Returns the size of the smallest bucket fitting an allocation of
  // |sizeof(ThreadCache)| bytes.
  size_t GetBucketSizeForThreadCache() {
    size_t tc_bucket_index =
        root_->SizeToBucketIndex(sizeof(ThreadCache), false);
    auto* tc_bucket = &root_->buckets[tc_bucket_index];
    return tc_bucket->slot_size;
  }

  static size_t SizeToIndex(size_t size) {
    return PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(size,
                                                                  GetParam());
  }
  size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
    uint16_t bucket_index = SizeToIndex(size);
    std::vector<void*> allocated_data;

    for (size_t i = 0; i < count; ++i) {
      allocated_data.push_back(root_->Alloc(size, ""));
    }
    for (void* ptr : allocated_data) {
      root_->Free(ptr);
    }

    return bucket_index;
  }

  void FillThreadCacheWithMemory(size_t target_cached_memory) {
    for (int batch : {1, 2, 4, 8, 16}) {
      for (size_t allocation_size = 1;
           allocation_size <= ThreadCache::kLargeSizeThreshold;
           allocation_size++) {
        FillThreadCacheAndReturnIndex(allocation_size, batch);

        if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
          return;
      }
    }

    ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
  }

  ThreadSafePartitionRoot* root_;
};

INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
                         PartitionAllocThreadCacheTest,
                         ::testing::Values(false, true));
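
// The boolean test parameter selects the bucket distribution: false keeps the
// default (sparser) distribution, true switches the root to the denser
// "alternate" distribution in SetUp().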

TEST_P(PartitionAllocThreadCacheTest, Simple) {
  // There is a cache.
  auto* tcache = root_->thread_cache_for_testing();
  EXPECT_TRUE(tcache);
  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};

  void* ptr = root_->Alloc(kSmallSize, "");
  ASSERT_TRUE(ptr);

  uint16_t index = SizeToIndex(kSmallSize);
  EXPECT_EQ(kFillCountForSmallBucket - 1,
            tcache->bucket_count_for_testing(index));

  root_->Free(ptr);
  // Freeing fills the thread cache.
  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));

  void* ptr2 = root_->Alloc(kSmallSize, "");
  // MTE-untag, because Free() changes tag.
  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
  // Allocated from the thread cache.
  EXPECT_EQ(kFillCountForSmallBucket - 1,
            tcache->bucket_count_for_testing(index));

  EXPECT_EQ(1u, batch_fill_counter.Delta());

  root_->Free(ptr2);
}

TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
  void* ptr = root_->Alloc(kSmallSize, "");
  ASSERT_TRUE(ptr);

  // There is a cache.
  auto* tcache = root_->thread_cache_for_testing();
  EXPECT_TRUE(tcache);

  uint16_t index = SizeToIndex(kSmallSize);
  EXPECT_EQ(kFillCountForSmallBucket - 1,
            tcache->bucket_count_for_testing(index));

  root_->Free(ptr);
  // Freeing fills the thread cache.
  EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));

  void* ptr2 = root_->Alloc(kSmallSize + 1, "");
  // MTE-untag, because Free() changes tag.
  EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
  // Allocated from the thread cache.
  EXPECT_EQ(kFillCountForSmallBucket - 1,
            tcache->bucket_count_for_testing(index));
  root_->Free(ptr2);
}

TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
  auto* tcache = root_->thread_cache_for_testing();
  DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
  size_t bucket_index =
      FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
  EXPECT_EQ(2 * kFillCountForMediumBucket,
            tcache->bucket_count_for_testing(bucket_index));
  // 2 batches, since there were more than |kFillCountForMediumBucket|
  // allocations.
  EXPECT_EQ(2u, batch_fill_counter.Delta());
}

TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
  auto* tcache = root_->thread_cache_for_testing();
  EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
}

TEST_P(PartitionAllocThreadCacheTest, Purge) {
  size_t allocations = 10;
  size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
  auto* tcache = root_->thread_cache_for_testing();
  EXPECT_EQ(
      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
      tcache->bucket_count_for_testing(bucket_index));
  tcache->Purge();
  EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
}

TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
  ThreadSafePartitionRoot root({
      PartitionOptions::AlignedAlloc::kAllowed,
      PartitionOptions::ThreadCache::kDisabled,
      PartitionOptions::Quarantine::kAllowed,
      PartitionOptions::Cookie::kDisallowed,
      PartitionOptions::BackupRefPtr::kDisabled,
      PartitionOptions::BackupRefPtrZapping::kDisabled,
      PartitionOptions::UseConfigurablePool::kNo,
  });

  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
  void* ptr = root.Alloc(kSmallSize, "");
  ASSERT_TRUE(ptr);

  auto* tcache = root_->thread_cache_for_testing();
  EXPECT_EQ(kFillCountForSmallBucket,
            tcache->bucket_count_for_testing(bucket_index));

  ThreadSafePartitionRoot::Free(ptr);
  EXPECT_EQ(kFillCountForSmallBucket,
            tcache->bucket_count_for_testing(bucket_index));
}

#if defined(PA_ENABLE_THREAD_CACHE_STATISTICS)  // Required to record hits and
                                                // misses.
TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
  auto* tcache = root_->thread_cache_for_testing();
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
  DeltaCounter alloc_miss_too_large_counter{
      tcache->stats_.alloc_miss_too_large};
  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};

  FillThreadCacheAndReturnIndex(100 * 1024);
  tcache = root_->thread_cache_for_testing();
  EXPECT_EQ(1u, alloc_miss_counter.Delta());
  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
  EXPECT_EQ(1u, cache_fill_counter.Delta());
  EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
}
#endif  // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)

TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
  FillThreadCacheAndReturnIndex(1024 * 1024);
  // The line above would crash due to out of bounds access if this wasn't
  // properly handled.
}

// This tests that Realloc properly handles bookkeeping, specifically the path
// that reallocates in place.
TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
  root_->ResetBookkeepingForTesting();

  size_t expected_allocated_size = root_->get_total_size_of_allocated_bytes();

  EXPECT_EQ(expected_allocated_size,
            root_->get_total_size_of_allocated_bytes());
  EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());

  void* ptr = root_->Alloc(10 * internal::kMaxBucketed, "");

  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
            root_->get_total_size_of_allocated_bytes());

  void* ptr2 = root_->Realloc(ptr, 9 * internal::kMaxBucketed, "");

  ASSERT_EQ(ptr, ptr2);
  EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
            root_->get_total_size_of_allocated_bytes());

  ptr2 = root_->Realloc(ptr, 10 * internal::kMaxBucketed, "");

  ASSERT_EQ(ptr, ptr2);
  EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
            root_->get_total_size_of_allocated_bytes());

  root_->Free(ptr);
}

namespace {
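
// Same as the fixture's FillThreadCacheAndReturnIndex(), but usable from other
// threads: the root and bucket distribution are passed explicitly instead of
// coming from the fixture.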
size_t FillThreadCacheAndReturnIndex(ThreadSafePartitionRoot* root,
                                     size_t size,
                                     bool with_denser_bucket_distribution,
                                     size_t count = 1) {
  uint16_t bucket_index =
      PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
          size, with_denser_bucket_distribution);
  std::vector<void*> allocated_data;

  for (size_t i = 0; i < count; ++i) {
    allocated_data.push_back(root->Alloc(size, ""));
  }
  for (void* ptr : allocated_data) {
    root->Free(ptr);
  }

  return bucket_index;
}

// TODO(1151236): To remove callback from partition allocator's DEPS,
// rewrite the tests without BindLambdaForTesting and RepeatingClosure.
// However, this makes it a little annoying to add more tests using their
// own threads. We need to support an easier way to implement tests using
// PlatformThreadForTesting::Create().
class ThreadDelegateForMultipleThreadCaches
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
                                        ThreadSafePartitionRoot* root,
                                        bool with_denser_bucket_distribution)
      : parent_thread_tcache_(parent_thread_cache),
        root_(root),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override {
    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
    FillThreadCacheAndReturnIndex(root_, kMediumSize,
                                  with_denser_bucket_distribution_);
    auto* tcache = root_->thread_cache_for_testing();
    EXPECT_TRUE(tcache);

    EXPECT_NE(parent_thread_tcache_, tcache);
  }

 private:
  ThreadCache* parent_thread_tcache_ = nullptr;
  ThreadSafePartitionRoot* root_ = nullptr;
  bool with_denser_bucket_distribution_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
  FillThreadCacheAndReturnIndex(kMediumSize);
  auto* parent_thread_tcache = root_->thread_cache_for_testing();
  ASSERT_TRUE(parent_thread_tcache);

  ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root_,
                                                 GetParam());

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);
  internal::base::PlatformThreadForTesting::Join(thread_handle);
}

namespace {

class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForThreadCacheReclaimedWhenThreadExits(
      ThreadSafePartitionRoot* root,
      void*& other_thread_ptr)
      : root_(root), other_thread_ptr_(other_thread_ptr) {}

  void ThreadMain() override {
    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
    other_thread_ptr_ = root_->Alloc(kMediumSize, "");
    root_->Free(other_thread_ptr_);
    // |other_thread_ptr| is now in the thread cache.
  }

 private:
  ThreadSafePartitionRoot* root_ = nullptr;
  void*& other_thread_ptr_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
  // Make sure that there is always at least one object allocated in the test
  // bucket, so that the PartitionPage is not reclaimed.
  //
  // Allocate enough objects to force a cache fill at the next allocation.
  std::vector<void*> tmp;
  for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
    tmp.push_back(root_->Alloc(kMediumSize, ""));
  }

  void* other_thread_ptr = nullptr;
  ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
      root_, other_thread_ptr);

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);
  internal::base::PlatformThreadForTesting::Join(thread_handle);

  void* this_thread_ptr = root_->Alloc(kMediumSize, "");
  // |other_thread_ptr| was returned to the central allocator, and is returned
  // here, as it comes from the freelist.
  EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
  root_->Free(other_thread_ptr);

  for (void* ptr : tmp)
    root_->Free(ptr);
}

namespace {

class ThreadDelegateForThreadCacheRegistry
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
                                       ThreadSafePartitionRoot* root,
                                       bool with_denser_bucket_distribution)
      : parent_thread_tcache_(parent_thread_cache),
        root_(root),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override {
    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
    FillThreadCacheAndReturnIndex(root_, kSmallSize,
                                  with_denser_bucket_distribution_);
    auto* tcache = root_->thread_cache_for_testing();
    EXPECT_TRUE(tcache);

    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
    EXPECT_EQ(tcache->prev_for_testing(), nullptr);
    EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
  }

 private:
  ThreadCache* parent_thread_tcache_ = nullptr;
  ThreadSafePartitionRoot* root_ = nullptr;
  bool with_denser_bucket_distribution_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
  auto* parent_thread_tcache = root_->thread_cache_for_testing();
  ASSERT_TRUE(parent_thread_tcache);

  {
    internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
    EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
    EXPECT_EQ(parent_thread_tcache->next_, nullptr);
  }

  ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root_,
                                                GetParam());

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);
  internal::base::PlatformThreadForTesting::Join(thread_handle);

  internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
  EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
  EXPECT_EQ(parent_thread_tcache->next_, nullptr);
}

#if defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
  auto* tcache = root_->thread_cache_for_testing();
  DeltaCounter alloc_counter{tcache->stats_.alloc_count};
  DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
  DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};

  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};

  // Cache has been purged, first allocation is a miss.
  void* data = root_->Alloc(kMediumSize, "");
  EXPECT_EQ(1u, alloc_counter.Delta());
  EXPECT_EQ(1u, alloc_miss_counter.Delta());
  EXPECT_EQ(0u, alloc_hits_counter.Delta());

  // Cache fill worked.
  root_->Free(data);
  EXPECT_EQ(1u, cache_fill_counter.Delta());
  EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());

  tcache->Purge();
  cache_fill_counter.Reset();

  // Buckets are never full, fill always succeeds.
  size_t allocations = 10;
  size_t bucket_index = FillThreadCacheAndReturnIndex(
      kMediumSize, kDefaultCountForMediumBucket + allocations);

  EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
            cache_fill_counter.Delta());
  EXPECT_EQ(0u, cache_fill_misses_counter.Delta());

  // Memory footprint.
  ThreadCacheStats stats;
  ThreadCacheRegistry::Instance().DumpStats(true, &stats);
  // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
  // above the limit (-1), then refilled by batches (1 + floor(allocations /
  // kFillCountForMediumBucket) times).
  size_t expected_count =
      kDefaultCountForMediumBucket / 2 - 1 +
      (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
  EXPECT_EQ(root_->buckets[bucket_index].slot_size * expected_count,
            stats.bucket_total_memory);
  EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
}

namespace {

class ThreadDelegateForMultipleThreadCachesAccounting
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForMultipleThreadCachesAccounting(
      ThreadSafePartitionRoot* root,
      int alloc_count,
      bool with_denser_bucket_distribution)
      : root_(root),
        alloc_count_(alloc_count),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override {
    EXPECT_FALSE(root_->thread_cache_for_testing());  // No allocations yet.
    size_t bucket_index = FillThreadCacheAndReturnIndex(
        root_, kMediumSize, with_denser_bucket_distribution_);

    ThreadCacheStats stats;
    ThreadCacheRegistry::Instance().DumpStats(false, &stats);
    // 2* for this thread and the parent one.
    EXPECT_EQ(
        2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
        stats.bucket_total_memory);
    EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);

    uint64_t this_thread_alloc_count =
        root_->thread_cache_for_testing()->stats_.alloc_count;
    EXPECT_EQ(alloc_count_ + this_thread_alloc_count, stats.alloc_count);
  }

 private:
  ThreadSafePartitionRoot* root_ = nullptr;
  const int alloc_count_;
  bool with_denser_bucket_distribution_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
  FillThreadCacheAndReturnIndex(kMediumSize);
  uint64_t alloc_count = root_->thread_cache_for_testing()->stats_.alloc_count;

  ThreadDelegateForMultipleThreadCachesAccounting delegate(root_, alloc_count,
                                                           GetParam());

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);
  internal::base::PlatformThreadForTesting::Join(thread_handle);
}

#endif  // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)

// TODO(https://crbug.com/1287799): Flaky on IOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_PurgeAll DISABLED_PurgeAll
#else
#define MAYBE_PurgeAll PurgeAll
#endif

namespace {

class ThreadDelegateForPurgeAll
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForPurgeAll(ThreadSafePartitionRoot* root,
                            ThreadCache*& other_thread_tcache,
                            std::atomic<bool>& other_thread_started,
                            std::atomic<bool>& purge_called,
                            int bucket_index,
                            bool with_denser_bucket_distribution)
      : root_(root),
        other_thread_tcache_(other_thread_tcache),
        other_thread_started_(other_thread_started),
        purge_called_(purge_called),
        bucket_index_(bucket_index),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
    FillThreadCacheAndReturnIndex(root_, kSmallSize,
                                  with_denser_bucket_distribution_);
    other_thread_tcache_ = root_->thread_cache_for_testing();

    other_thread_started_.store(true, std::memory_order_release);
    while (!purge_called_.load(std::memory_order_acquire)) {
    }

    // Purge() was not triggered from the other thread.
    EXPECT_EQ(kFillCountForSmallBucket,
              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
    // Allocations do not trigger Purge().
    void* data = root_->Alloc(kSmallSize, "");
    EXPECT_EQ(kFillCountForSmallBucket - 1,
              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
    // But deallocations do.
    root_->Free(data);
    EXPECT_EQ(0u,
              other_thread_tcache_->bucket_count_for_testing(bucket_index_));
  }

 private:
  ThreadSafePartitionRoot* root_ = nullptr;
  ThreadCache*& other_thread_tcache_;
  std::atomic<bool>& other_thread_started_;
  std::atomic<bool>& purge_called_;
  const int bucket_index_;
  bool with_denser_bucket_distribution_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
PA_NO_THREAD_SAFETY_ANALYSIS {
  std::atomic<bool> other_thread_started{false};
  std::atomic<bool> purge_called{false};

  size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
  ThreadCache* this_thread_tcache = root_->thread_cache_for_testing();
  ThreadCache* other_thread_tcache = nullptr;

  ThreadDelegateForPurgeAll delegate(root_, other_thread_tcache,
                                     other_thread_started, purge_called,
                                     bucket_index, GetParam());
  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);

  while (!other_thread_started.load(std::memory_order_acquire)) {
  }

  EXPECT_EQ(kFillCountForSmallBucket,
            this_thread_tcache->bucket_count_for_testing(bucket_index));
  EXPECT_EQ(kFillCountForSmallBucket,
            other_thread_tcache->bucket_count_for_testing(bucket_index));

  ThreadCacheRegistry::Instance().PurgeAll();
  // This thread is synchronously purged.
  EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
  // Not the other one.
  EXPECT_EQ(kFillCountForSmallBucket,
            other_thread_tcache->bucket_count_for_testing(bucket_index));

  purge_called.store(true, std::memory_order_release);
  internal::base::PlatformThreadForTesting::Join(thread_handle);
}

TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
  auto& registry = ThreadCacheRegistry::Instance();
  auto NextInterval = [&registry]() {
    return internal::base::Microseconds(
        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
  };

  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);

  // Small amount of memory, the period gets longer.
  auto* tcache = ThreadCache::Get();
  ASSERT_LT(tcache->CachedMemory(),
            ThreadCacheRegistry::kMinCachedMemoryForPurging);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);

  // Check that the purge interval is clamped at the maximum value.
  while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
    registry.RunPeriodicPurge();
  }
  registry.RunPeriodicPurge();

  // Not enough memory to decrease the interval.
  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging +
                            1);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);

  FillThreadCacheWithMemory(
      2 * ThreadCacheRegistry::kMinCachedMemoryForPurging + 1);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);

  // Enough memory, interval doesn't change.
  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);

  // No cached memory, increase the interval.
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);

  // Cannot test the very large size with only one thread, this is tested below
  // in the multiple threads test.
}

namespace {

void FillThreadCacheWithMemory(ThreadSafePartitionRoot* root,
                               size_t target_cached_memory,
                               bool with_denser_bucket_distribution) {
  for (int batch : {1, 2, 4, 8, 16}) {
    for (size_t allocation_size = 1;
         allocation_size <= ThreadCache::kLargeSizeThreshold;
         allocation_size++) {
      FillThreadCacheAndReturnIndex(root, allocation_size,
                                    with_denser_bucket_distribution, batch);

      if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
        return;
    }
  }

  ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
}

class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
      ThreadSafePartitionRoot* root,
      std::atomic<int>& allocations_done,
      std::atomic<bool>& can_finish,
      bool with_denser_bucket_distribution)
      : root_(root),
        allocations_done_(allocations_done),
        can_finish_(can_finish),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override {
    FillThreadCacheWithMemory(
        root_, 5 * ThreadCacheRegistry::kMinCachedMemoryForPurging,
        with_denser_bucket_distribution_);
    allocations_done_.fetch_add(1, std::memory_order_release);

    // This thread needs to be alive when the next periodic purge task runs.
    while (!can_finish_.load(std::memory_order_acquire)) {
    }
  }

 private:
  ThreadSafePartitionRoot* root_ = nullptr;
  std::atomic<int>& allocations_done_;
  std::atomic<bool>& can_finish_;
  bool with_denser_bucket_distribution_;
};

}  // namespace

// Disabled due to flakiness: crbug.com/1220371
TEST_P(PartitionAllocThreadCacheTest,
       DISABLED_PeriodicPurgeSumsOverAllThreads) {
  auto& registry = ThreadCacheRegistry::Instance();
  auto NextInterval = [&registry]() {
    return internal::base::Microseconds(
        registry.GetPeriodicPurgeNextIntervalInMicroseconds());
  };
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);

  // Small amount of memory, the period gets longer.
  auto* tcache = ThreadCache::Get();
  ASSERT_LT(tcache->CachedMemory(),
            ThreadCacheRegistry::kMinCachedMemoryForPurging);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);

  // Check that the purge interval is clamped at the maximum value.
  while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
    registry.RunPeriodicPurge();
  }
  registry.RunPeriodicPurge();

  // Not enough memory on this thread to decrease the interval.
  FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging /
                            2);
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);

  std::atomic<int> allocations_done{0};
  std::atomic<bool> can_finish{false};
  ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
      root_, allocations_done, can_finish, GetParam());

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);
  internal::base::PlatformThreadHandle thread_handle_2;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle_2);

  while (allocations_done.load(std::memory_order_acquire) != 2) {
    internal::base::PlatformThreadForTesting::YieldCurrentThread();
  }

  // Many allocations on the other thread.
  registry.RunPeriodicPurge();
  EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);

  can_finish.store(true, std::memory_order_release);
  internal::base::PlatformThreadForTesting::Join(thread_handle);
  internal::base::PlatformThreadForTesting::Join(thread_handle_2);
}

// TODO(https://crbug.com/1287799): Flaky on IOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
#else
#define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
#endif

TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
  auto* tcache = root_->thread_cache_for_testing();
  size_t bucket_index =
      FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);

  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);

  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
      ThreadCache::kDefaultMultiplier / 2);
  // No immediate batch deallocation.
  EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
  void* data = root_->Alloc(kMediumSize, "");
  // Not triggered by allocations.
  EXPECT_EQ(kDefaultCountForMediumBucket - 1,
            tcache->buckets_[bucket_index].count);

  // Free() triggers the purge within limits.
  root_->Free(data);
  EXPECT_LE(tcache->buckets_[bucket_index].count,
            kDefaultCountForMediumBucket / 2);

  // Won't go above anymore.
  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
  EXPECT_LE(tcache->buckets_[bucket_index].count,
            kDefaultCountForMediumBucket / 2);

  // Limit can be raised.
  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
      ThreadCache::kDefaultMultiplier * 2);
  FillThreadCacheAndReturnIndex(kMediumSize, 1000);
  EXPECT_GT(tcache->buckets_[bucket_index].count,
            kDefaultCountForMediumBucket / 2);
}

TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
  auto* tcache = root_->thread_cache_for_testing();

  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
      ThreadCache::kDefaultMultiplier / 1000.);
  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
    // Invalid bucket.
    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
      EXPECT_EQ(root_->buckets[i].active_slot_spans_head, nullptr);
      continue;
    }
    EXPECT_GE(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 1u);
  }

  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
      ThreadCache::kDefaultMultiplier * 1000.);
  for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
    // Invalid bucket.
    if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
      EXPECT_EQ(root_->buckets[i].active_slot_spans_head, nullptr);
      continue;
    }
    EXPECT_LT(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 0xff);
  }
}

// TODO(https://crbug.com/1287799): Flaky on IOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_DynamicCountPerBucketMultipleThreads \
  DISABLED_DynamicCountPerBucketMultipleThreads
#else
#define MAYBE_DynamicCountPerBucketMultipleThreads \
  DynamicCountPerBucketMultipleThreads
#endif

namespace {

class ThreadDelegateForDynamicCountPerBucketMultipleThreads
    : public internal::base::PlatformThreadForTesting::Delegate {
 public:
  ThreadDelegateForDynamicCountPerBucketMultipleThreads(
      ThreadSafePartitionRoot* root,
      std::atomic<bool>& other_thread_started,
      std::atomic<bool>& threshold_changed,
      int bucket_index,
      bool with_denser_bucket_distribution)
      : root_(root),
        other_thread_started_(other_thread_started),
        threshold_changed_(threshold_changed),
        bucket_index_(bucket_index),
        with_denser_bucket_distribution_(with_denser_bucket_distribution) {}

  void ThreadMain() override {
    FillThreadCacheAndReturnIndex(root_, kSmallSize,
                                  with_denser_bucket_distribution_,
                                  kDefaultCountForSmallBucket + 10);
    auto* this_thread_tcache = root_->thread_cache_for_testing();
    // More than the default since the multiplier has changed.
    EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
              kDefaultCountForSmallBucket + 10);

    other_thread_started_.store(true, std::memory_order_release);
    while (!threshold_changed_.load(std::memory_order_acquire)) {
    }

    void* data = root_->Alloc(kSmallSize, "");
    // Deallocations trigger limit enforcement.
    root_->Free(data);
    // Since the bucket is too full, it gets halved by batched deallocation.
    EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
              this_thread_tcache->bucket_count_for_testing(bucket_index_));
  }

 private:
  ThreadSafePartitionRoot* root_ = nullptr;
  std::atomic<bool>& other_thread_started_;
  std::atomic<bool>& threshold_changed_;
  const int bucket_index_;
  bool with_denser_bucket_distribution_;
};

}  // namespace

TEST_P(PartitionAllocThreadCacheTest,
       MAYBE_DynamicCountPerBucketMultipleThreads) {
  std::atomic<bool> other_thread_started{false};
  std::atomic<bool> threshold_changed{false};

  auto* tcache = root_->thread_cache_for_testing();
  size_t bucket_index =
      FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
  EXPECT_EQ(kDefaultCountForSmallBucket, tcache->buckets_[bucket_index].count);

  // Change the ratio before starting the threads, checking that it will be
  // applied to newly-created threads.
  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
      ThreadCache::kDefaultMultiplier + 1);

  ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
      root_, other_thread_started, threshold_changed, bucket_index, GetParam());

  internal::base::PlatformThreadHandle thread_handle;
  internal::base::PlatformThreadForTesting::Create(0, &delegate,
                                                   &thread_handle);

  while (!other_thread_started.load(std::memory_order_acquire)) {
  }

  ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
  threshold_changed.store(true, std::memory_order_release);

  internal::base::PlatformThreadForTesting::Join(thread_handle);
}

TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
  auto* tcache = root_->thread_cache_for_testing();
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
  DeltaCounter alloc_miss_too_large_counter{
      tcache->stats_.alloc_miss_too_large};
  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};

  // Default threshold at first.
  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
  EXPECT_EQ(1u, cache_fill_counter.Delta());

  // Too large to be cached.
  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());

  // Increase.
  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
  // No new miss.
  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());

  // Lower.
  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
  FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
  EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());

  // Value is clamped.
  size_t too_large = 1024 * 1024;
  ThreadCache::SetLargestCachedSize(too_large);
  FillThreadCacheAndReturnIndex(too_large);
  EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
}

// Disabled due to flakiness: crbug.com/1287811
TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
  auto* tcache = root_->thread_cache_for_testing();
  DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
  DeltaCounter alloc_miss_too_large_counter{
      tcache->stats_.alloc_miss_too_large};
  DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
  DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};

  // Cache large allocations.
  size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
  ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
  size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
  EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());

  // Lower.
  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
  FillThreadCacheAndReturnIndex(large_allocation_size);
  EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());

  // There is memory trapped in the cache bucket.
  EXPECT_GT(tcache->buckets_[index].count, 0u);

  // Which is reclaimed by Purge().
  tcache->Purge();
  EXPECT_EQ(0u, tcache->buckets_[index].count);
}
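
// ClearBucket() is expected to free entries from the tail of the bucket's
// freelist: the head stays in place while the cached count drops to |limit|.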
TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
  auto count_items = [](ThreadCache* tcache, size_t index) {
    uint8_t count = 0;
    auto* head = tcache->buckets_[index].freelist_head;
    while (head) {
      head = head->GetNext(tcache->buckets_[index].slot_size);
      count++;
    }
    return count;
  };

  auto* tcache = root_->thread_cache_for_testing();
  size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
  ASSERT_GE(count_items(tcache, index), 10);
  void* head = tcache->buckets_[index].freelist_head;

  for (size_t limit : {8, 3, 1}) {
    tcache->ClearBucket(tcache->buckets_[index], limit);
    EXPECT_EQ(head, static_cast<void*>(tcache->buckets_[index].freelist_head));
    EXPECT_EQ(count_items(tcache, index), limit);
  }
  tcache->ClearBucket(tcache->buckets_[index], 0);
  EXPECT_EQ(nullptr, static_cast<void*>(tcache->buckets_[index].freelist_head));
}

// TODO(https://crbug.com/1287799): Flaky on IOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_Bookkeeping DISABLED_Bookkeeping
#else
#define MAYBE_Bookkeeping Bookkeeping
#endif

TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
  void* arr[kFillCountForMediumBucket] = {};
  auto* tcache = root_->thread_cache_for_testing();

  root_->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
                     PurgeFlags::kDiscardUnusedSystemPages);
  root_->ResetBookkeepingForTesting();

  // The ThreadCache is allocated before we change buckets, so its size is
  // always based on the sparser distribution.
  size_t tc_bucket_index = root_->SizeToBucketIndex(sizeof(ThreadCache), false);
  auto* tc_bucket = &root_->buckets[tc_bucket_index];
  size_t expected_allocated_size =
      tc_bucket->slot_size;  // For the ThreadCache itself.
  size_t expected_committed_size = kUseLazyCommit
                                       ? internal::SystemPageSize()
                                       : tc_bucket->get_bytes_per_span();

  EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
  EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
  EXPECT_EQ(expected_allocated_size,
            root_->get_total_size_of_allocated_bytes());
  EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());

  void* ptr = root_->Alloc(kMediumSize, "");

  auto* medium_bucket = root_->buckets + SizeToIndex(kMediumSize);
  size_t medium_alloc_size = medium_bucket->slot_size;
  expected_allocated_size += medium_alloc_size;
  expected_committed_size += kUseLazyCommit
                                 ? internal::SystemPageSize()
                                 : medium_bucket->get_bytes_per_span();

  EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
  EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
  EXPECT_EQ(expected_allocated_size,
            root_->get_total_size_of_allocated_bytes());
  EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());

  expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;

  // These allocations all come from the thread-cache.
  for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
    arr[i] = root_->Alloc(kMediumSize, "");
    EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
    EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
    EXPECT_EQ(expected_allocated_size,
              root_->get_total_size_of_allocated_bytes());
    EXPECT_EQ(expected_allocated_size,
              root_->get_max_size_of_allocated_bytes());
    EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
              tcache->CachedMemory());
  }

  EXPECT_EQ(0U, tcache->CachedMemory());

  root_->Free(ptr);

  for (auto*& el : arr) {
    root_->Free(el);
  }
  EXPECT_EQ(root_->get_total_size_of_allocated_bytes(),
            expected_allocated_size);
  tcache->Purge();
  EXPECT_EQ(root_->get_total_size_of_allocated_bytes(),
            GetBucketSizeForThreadCache());
}

TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
  auto* tcache = root_->thread_cache_for_testing();
  tcache->TryPurge();
}
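
// Checks that TryPurge() copes with a corrupted freelist entry rather than
// crashing; the corrupted next pointer is restored before freeing.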
TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
  auto* tcache = root_->thread_cache_for_testing();

  void* ptr = root_->Alloc(kMediumSize, "");

  auto* medium_bucket = root_->buckets + SizeToIndex(kMediumSize);

  auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
  curr = curr->GetNextForThreadCache<true>(kMediumSize);
  curr->CorruptNextForTesting(0x12345678);
  tcache->TryPurge();
  curr->SetNext(nullptr);
  root_->Free(ptr);
}

TEST(AlternateBucketDistributionTest, SizeToIndex) {
  using internal::BucketIndexLookup;

  // The first 12 buckets are the same as the default bucket index.
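  // Each power-of-two order i is probed at its four bucket sizes,
  // n = i * (4 + offset) / 4, i.e. roughly i, 1.25 * i, 1.5 * i and 1.75 * i
  // (integer division truncates for very small i).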
  for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDenserBuckets(n));
    }
  }

  // The alternate bucket distribution is different in the middle values.
  //
  // For each order, the top two buckets are removed compared with the default
  // distribution. Values that would be allocated in those two buckets are
  // instead allocated in the next power of two bucket.
  //
  // The first two buckets (each power of two and the next bucket up) remain
  // the same between the two bucket distributions.
  size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
  for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
       i <<= 1) {
    // The first two buckets in the order should match up to the normal bucket
    // distribution.
    for (size_t offset = 0; offset < 2; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDenserBuckets(n));
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index++);
    }
    // The last two buckets in the order are "rounded up" to the same bucket
    // as the next power of two.
    expected_index += 2;
    for (size_t offset = 2; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      // These two are rounded up in the alternate distribution, so we expect
      // the bucket index to be larger than the bucket index for the same
      // allocation under the default distribution.
      EXPECT_GT(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDenserBuckets(n));
      // We expect both allocations in this loop to be rounded up to the next
      // power of two bucket.
      EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
    }
  }

  // The rest of the buckets all match up exactly with the existing
  // bucket distribution.
  for (size_t i = internal::kHighThresholdForAlternateDistribution;
       i < internal::kMaxBucketed; i <<= 1) {
    for (size_t offset = 0; offset < 4; offset++) {
      size_t n = i * (4 + offset) / 4;
      EXPECT_EQ(BucketIndexLookup::GetIndex(n),
                BucketIndexLookup::GetIndexForDenserBuckets(n));
    }
  }
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once we have enabled features.
TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
  auto* root = CreatePartitionRoot();
  root->SwitchToDenserBucketDistribution();
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForDenserBuckets(n));

  void* ptr = root->Alloc(n, "");

  root->ResetBucketDistributionForTesting();

  root->Free(ptr);

  // Clean up.
  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
  internal::SwapInProcessThreadCacheForTesting(root);
  ThreadSafePartitionRoot::DeleteForTesting(root);

  // Cleanup the global state so next test can recreate ThreadCache.
  if (ThreadCache::IsTombstone(ThreadCache::Get()))
    ThreadCache::RemoveTombstoneForTesting();
}

// This test makes sure it's safe to switch to the alternate bucket
// distribution at runtime. This is intended to happen once, near the start of
// Chrome, once we have enabled features.
TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
  auto* root = CreatePartitionRoot();
  constexpr size_t n = (1 << 12) * 3 / 2;
  EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
            internal::BucketIndexLookup::GetIndexForDenserBuckets(n));

  void* ptr = root->Alloc(n, "");

  root->SwitchToDenserBucketDistribution();

  void* ptr2 = root->Alloc(n, "");

  root->Free(ptr2);
  root->Free(ptr);

  // Clean up.
  ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
  internal::SwapInProcessThreadCacheForTesting(root);
  ThreadSafePartitionRoot::DeleteForTesting(root);

  // Cleanup the global state so next test can recreate ThreadCache.
  if (ThreadCache::IsTombstone(ThreadCache::Get()))
    ThreadCache::RemoveTombstoneForTesting();
}

}  // namespace partition_alloc

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
        // defined(PA_THREAD_CACHE_SUPPORTED)