- // Copyright 2020 The Chromium Authors. All rights reserved.
- // Use of this source code is governed by a BSD-style license that can be
- // found in the LICENSE file.
- #include "base/allocator/partition_allocator/thread_cache.h"
- #include <algorithm>
- #include <atomic>
- #include <vector>
- #include "base/allocator/partition_allocator/extended_api.h"
- #include "base/allocator/partition_allocator/partition_address_space.h"
- #include "base/allocator/partition_allocator/partition_alloc.h"
- #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
- #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_for_testing.h"
- #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
- #include "base/allocator/partition_allocator/partition_alloc_config.h"
- #include "base/allocator/partition_allocator/partition_lock.h"
- #include "base/allocator/partition_allocator/tagging.h"
- #include "build/build_config.h"
- #include "testing/gtest/include/gtest/gtest.h"
- // With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
- // cannot test the thread cache.
- //
- // Finally, the thread cache is not supported on all platforms.
- #if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && \
- defined(PA_THREAD_CACHE_SUPPORTED)
- namespace partition_alloc {
- namespace {
- constexpr size_t kSmallSize = 12;
- constexpr size_t kDefaultCountForSmallBucket =
- ThreadCache::kSmallBucketBaseCount * ThreadCache::kDefaultMultiplier;
- constexpr size_t kFillCountForSmallBucket =
- kDefaultCountForSmallBucket / ThreadCache::kBatchFillRatio;
- constexpr size_t kMediumSize = 200;
- constexpr size_t kDefaultCountForMediumBucket = kDefaultCountForSmallBucket / 2;
- constexpr size_t kFillCountForMediumBucket =
- kDefaultCountForMediumBucket / ThreadCache::kBatchFillRatio;
- static_assert(kMediumSize <= ThreadCache::kDefaultSizeThreshold, "");
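- // Tracks how much a statistics counter has changed since this object was
- // constructed (or since the last Reset()). Used by the tests below to check
- // the allocation, free and refill counters that the thread cache records.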
- class DeltaCounter {
- public:
- explicit DeltaCounter(uint64_t& value)
- : current_value_(value), initial_value_(value) {}
- void Reset() { initial_value_ = current_value_; }
- uint64_t Delta() const { return current_value_ - initial_value_; }
- private:
- uint64_t& current_value_;
- uint64_t initial_value_;
- };
- // Forbid extras, since they make finding out which bucket is used harder.
- ThreadSafePartitionRoot* CreatePartitionRoot() {
- ThreadSafePartitionRoot* root = new ThreadSafePartitionRoot({
- PartitionOptions::AlignedAlloc::kAllowed,
- #if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
- PartitionOptions::ThreadCache::kEnabled,
- #else
- PartitionOptions::ThreadCache::kDisabled,
- #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
- PartitionOptions::Quarantine::kAllowed,
- PartitionOptions::Cookie::kDisallowed,
- PartitionOptions::BackupRefPtr::kDisabled,
- PartitionOptions::BackupRefPtrZapping::kDisabled,
- PartitionOptions::UseConfigurablePool::kNo,
- });
- root->UncapEmptySlotSpanMemoryForTesting();
- // We do this here instead of in SetUp()/TearDown() because we need this to
- // run before the task environment (which creates threads and hence is racy
- // with attempting to disable the thread cache).
- internal::SwapOutProcessThreadCacheForTesting(root);
- return root;
- }
- } // namespace
- class PartitionAllocThreadCacheTest : public ::testing::TestWithParam<bool> {
- public:
- PartitionAllocThreadCacheTest() : root_(CreatePartitionRoot()) {}
- ~PartitionAllocThreadCacheTest() override {
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- internal::SwapInProcessThreadCacheForTesting(root_);
- ThreadSafePartitionRoot::DeleteForTesting(root_);
- // Cleanup the global state so next test can recreate ThreadCache.
- if (ThreadCache::IsTombstone(ThreadCache::Get()))
- ThreadCache::RemoveTombstoneForTesting();
- }
- protected:
- void SetUp() override {
- if (GetParam())
- root_->SwitchToDenserBucketDistribution();
- else
- root_->ResetBucketDistributionForTesting();
- #if defined(PA_HAS_64_BITS_POINTERS)
- // Another test can uninitialize the pools, so make sure they are
- // initialized.
- internal::PartitionAddressSpace::Init();
- #endif // defined(PA_HAS_64_BITS_POINTERS)
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier);
- ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
- // Make sure that enough slot spans have been touched, otherwise cache fill
- // becomes unpredictable (because it doesn't take slow paths in the
- // allocator), which is an issue for tests.
- FillThreadCacheAndReturnIndex(kSmallSize, 1000);
- FillThreadCacheAndReturnIndex(kMediumSize, 1000);
- // Now that there are allocations, a thread cache has been created.
- auto* tcache = root_->thread_cache_for_testing();
- ASSERT_TRUE(tcache);
- ThreadCacheRegistry::Instance().ResetForTesting();
- tcache->ResetForTesting();
- }
- void TearDown() override {
- auto* tcache = root_->thread_cache_for_testing();
- ASSERT_TRUE(tcache);
- tcache->Purge();
- ASSERT_EQ(root_->get_total_size_of_allocated_bytes(),
- GetBucketSizeForThreadCache());
- }
- // Returns the size of the smallest bucket fitting an allocation of
- // |sizeof(ThreadCache)| bytes.
- size_t GetBucketSizeForThreadCache() {
- size_t tc_bucket_index =
- root_->SizeToBucketIndex(sizeof(ThreadCache), false);
- auto* tc_bucket = &root_->buckets[tc_bucket_index];
- return tc_bucket->slot_size;
- }
- static size_t SizeToIndex(size_t size) {
- return PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(size,
- GetParam());
- }
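- // Allocates |count| objects of |size| and then frees them all, so that the
- // freed objects end up in this thread's cache. Returns the index of the
- // bucket that |size| maps to.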
- size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
- uint16_t bucket_index = SizeToIndex(size);
- std::vector<void*> allocated_data;
- for (size_t i = 0; i < count; ++i) {
- allocated_data.push_back(root_->Alloc(size, ""));
- }
- for (void* ptr : allocated_data) {
- root_->Free(ptr);
- }
- return bucket_index;
- }
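- // Fills this thread's cache with progressively larger allocations (and
- // growing batch sizes) until it holds at least |target_cached_memory| bytes.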
- void FillThreadCacheWithMemory(size_t target_cached_memory) {
- for (int batch : {1, 2, 4, 8, 16}) {
- for (size_t allocation_size = 1;
- allocation_size <= ThreadCache::kLargeSizeThreshold;
- allocation_size++) {
- FillThreadCacheAndReturnIndex(allocation_size, batch);
- if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
- return;
- }
- }
- ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
- }
- ThreadSafePartitionRoot* root_;
- };
- INSTANTIATE_TEST_SUITE_P(AlternateBucketDistribution,
- PartitionAllocThreadCacheTest,
- ::testing::Values(false, true));
- TEST_P(PartitionAllocThreadCacheTest, Simple) {
- // There is a cache.
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_TRUE(tcache);
- DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
- void* ptr = root_->Alloc(kSmallSize, "");
- ASSERT_TRUE(ptr);
- uint16_t index = SizeToIndex(kSmallSize);
- EXPECT_EQ(kFillCountForSmallBucket - 1,
- tcache->bucket_count_for_testing(index));
- root_->Free(ptr);
- // Freeing fills the thread cache.
- EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
- void* ptr2 = root_->Alloc(kSmallSize, "");
- // MTE-untag, because Free() changes tag.
- EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
- // Allocated from the thread cache.
- EXPECT_EQ(kFillCountForSmallBucket - 1,
- tcache->bucket_count_for_testing(index));
- EXPECT_EQ(1u, batch_fill_counter.Delta());
- root_->Free(ptr2);
- }
- TEST_P(PartitionAllocThreadCacheTest, InexactSizeMatch) {
- void* ptr = root_->Alloc(kSmallSize, "");
- ASSERT_TRUE(ptr);
- // There is a cache.
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_TRUE(tcache);
- uint16_t index = SizeToIndex(kSmallSize);
- EXPECT_EQ(kFillCountForSmallBucket - 1,
- tcache->bucket_count_for_testing(index));
- root_->Free(ptr);
- // Freeing fills the thread cache.
- EXPECT_EQ(kFillCountForSmallBucket, tcache->bucket_count_for_testing(index));
- void* ptr2 = root_->Alloc(kSmallSize + 1, "");
- // MTE-untag, because Free() changes tag.
- EXPECT_EQ(UntagPtr(ptr), UntagPtr(ptr2));
- // Allocated from the thread cache.
- EXPECT_EQ(kFillCountForSmallBucket - 1,
- tcache->bucket_count_for_testing(index));
- root_->Free(ptr2);
- }
- TEST_P(PartitionAllocThreadCacheTest, MultipleObjectsCachedPerBucket) {
- auto* tcache = root_->thread_cache_for_testing();
- DeltaCounter batch_fill_counter{tcache->stats_.batch_fill_count};
- size_t bucket_index =
- FillThreadCacheAndReturnIndex(kMediumSize, kFillCountForMediumBucket + 2);
- EXPECT_EQ(2 * kFillCountForMediumBucket,
- tcache->bucket_count_for_testing(bucket_index));
- // 2 batches, since there were more than |kFillCountForMediumBucket|
- // allocations.
- EXPECT_EQ(2u, batch_fill_counter.Delta());
- }
- TEST_P(PartitionAllocThreadCacheTest, ObjectsCachedCountIsLimited) {
- size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, 1000);
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
- }
- TEST_P(PartitionAllocThreadCacheTest, Purge) {
- size_t allocations = 10;
- size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize, allocations);
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_EQ(
- (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket,
- tcache->bucket_count_for_testing(bucket_index));
- tcache->Purge();
- EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
- }
- TEST_P(PartitionAllocThreadCacheTest, NoCrossPartitionCache) {
- ThreadSafePartitionRoot root({
- PartitionOptions::AlignedAlloc::kAllowed,
- PartitionOptions::ThreadCache::kDisabled,
- PartitionOptions::Quarantine::kAllowed,
- PartitionOptions::Cookie::kDisallowed,
- PartitionOptions::BackupRefPtr::kDisabled,
- PartitionOptions::BackupRefPtrZapping::kDisabled,
- PartitionOptions::UseConfigurablePool::kNo,
- });
- size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
- void* ptr = root.Alloc(kSmallSize, "");
- ASSERT_TRUE(ptr);
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_EQ(kFillCountForSmallBucket,
- tcache->bucket_count_for_testing(bucket_index));
- ThreadSafePartitionRoot::Free(ptr);
- EXPECT_EQ(kFillCountForSmallBucket,
- tcache->bucket_count_for_testing(bucket_index));
- }
- #if defined(PA_ENABLE_THREAD_CACHE_STATISTICS) // Required to record hits and
- // misses.
- TEST_P(PartitionAllocThreadCacheTest, LargeAllocationsAreNotCached) {
- auto* tcache = root_->thread_cache_for_testing();
- DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
- DeltaCounter alloc_miss_too_large_counter{
- tcache->stats_.alloc_miss_too_large};
- DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
- DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
- FillThreadCacheAndReturnIndex(100 * 1024);
- tcache = root_->thread_cache_for_testing();
- EXPECT_EQ(1u, alloc_miss_counter.Delta());
- EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
- EXPECT_EQ(1u, cache_fill_counter.Delta());
- EXPECT_EQ(1u, cache_fill_misses_counter.Delta());
- }
- #endif // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
- TEST_P(PartitionAllocThreadCacheTest, DirectMappedAllocationsAreNotCached) {
- FillThreadCacheAndReturnIndex(1024 * 1024);
- // The line above would crash due to out of bounds access if this wasn't
- // properly handled.
- }
- // This tests that Realloc properly handles bookkeeping, specifically the path
- // that reallocates in place.
- TEST_P(PartitionAllocThreadCacheTest, DirectMappedReallocMetrics) {
- root_->ResetBookkeepingForTesting();
- size_t expected_allocated_size = root_->get_total_size_of_allocated_bytes();
- EXPECT_EQ(expected_allocated_size,
- root_->get_total_size_of_allocated_bytes());
- EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());
- void* ptr = root_->Alloc(10 * internal::kMaxBucketed, "");
- EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
- root_->get_total_size_of_allocated_bytes());
- void* ptr2 = root_->Realloc(ptr, 9 * internal::kMaxBucketed, "");
- ASSERT_EQ(ptr, ptr2);
- EXPECT_EQ(expected_allocated_size + 9 * internal::kMaxBucketed,
- root_->get_total_size_of_allocated_bytes());
- ptr2 = root_->Realloc(ptr, 10 * internal::kMaxBucketed, "");
- ASSERT_EQ(ptr, ptr2);
- EXPECT_EQ(expected_allocated_size + 10 * internal::kMaxBucketed,
- root_->get_total_size_of_allocated_bytes());
- root_->Free(ptr);
- }
- namespace {
- size_t FillThreadCacheAndReturnIndex(ThreadSafePartitionRoot* root,
- size_t size,
- bool with_denser_bucket_distribution,
- size_t count = 1) {
- uint16_t bucket_index =
- PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
- size, with_denser_bucket_distribution);
- std::vector<void*> allocated_data;
- for (size_t i = 0; i < count; ++i) {
- allocated_data.push_back(root->Alloc(size, ""));
- }
- for (void* ptr : allocated_data) {
- root->Free(ptr);
- }
- return bucket_index;
- }
- // TODO(1151236): To remove callback from partition allocator's DEPS,
- // rewrite the tests without BindLambdaForTesting and RepeatingClosure.
- // However, this makes it a little annoying to add more tests using their
- // own threads. Need to support an easier way to implement tests using
- // PlatformThreadForTesting::Create().
- class ThreadDelegateForMultipleThreadCaches
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForMultipleThreadCaches(ThreadCache* parent_thread_cache,
- ThreadSafePartitionRoot* root,
- bool with_denser_bucket_distribution)
- : parent_thread_tcache_(parent_thread_cache),
- root_(root),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override {
- EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
- FillThreadCacheAndReturnIndex(root_, kMediumSize,
- with_denser_bucket_distribution_);
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_TRUE(tcache);
- EXPECT_NE(parent_thread_tcache_, tcache);
- }
- private:
- ThreadCache* parent_thread_tcache_ = nullptr;
- ThreadSafePartitionRoot* root_ = nullptr;
- bool with_denser_bucket_distribution_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCaches) {
- FillThreadCacheAndReturnIndex(kMediumSize);
- auto* parent_thread_tcache = root_->thread_cache_for_testing();
- ASSERT_TRUE(parent_thread_tcache);
- ThreadDelegateForMultipleThreadCaches delegate(parent_thread_tcache, root_,
- GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- }
- namespace {
- class ThreadDelegateForThreadCacheReclaimedWhenThreadExits
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForThreadCacheReclaimedWhenThreadExits(
- ThreadSafePartitionRoot* root,
- void*& other_thread_ptr)
- : root_(root), other_thread_ptr_(other_thread_ptr) {}
- void ThreadMain() override {
- EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
- other_thread_ptr_ = root_->Alloc(kMediumSize, "");
- root_->Free(other_thread_ptr_);
- // |other_thread_ptr| is now in the thread cache.
- }
- private:
- ThreadSafePartitionRoot* root_ = nullptr;
- void*& other_thread_ptr_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
- // Make sure that there is always at least one object allocated in the test
- // bucket, so that the PartitionPage is not reclaimed.
- //
- // Allocate enough objects to force a cache fill at the next allocation.
- std::vector<void*> tmp;
- for (size_t i = 0; i < kDefaultCountForMediumBucket / 4; i++) {
- tmp.push_back(root_->Alloc(kMediumSize, ""));
- }
- void* other_thread_ptr = nullptr;
- ThreadDelegateForThreadCacheReclaimedWhenThreadExits delegate(
- root_, other_thread_ptr);
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- void* this_thread_ptr = root_->Alloc(kMediumSize, "");
- // |other_thread_ptr| was returned to the central allocator, and is returned
- // here, as it comes from the freelist.
- EXPECT_EQ(UntagPtr(this_thread_ptr), UntagPtr(other_thread_ptr));
- root_->Free(other_thread_ptr);
- for (void* ptr : tmp)
- root_->Free(ptr);
- }
- namespace {
- class ThreadDelegateForThreadCacheRegistry
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForThreadCacheRegistry(ThreadCache* parent_thread_cache,
- ThreadSafePartitionRoot* root,
- bool with_denser_bucket_distribution)
- : parent_thread_tcache_(parent_thread_cache),
- root_(root),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override {
- EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
- FillThreadCacheAndReturnIndex(root_, kSmallSize,
- with_denser_bucket_distribution_);
- auto* tcache = root_->thread_cache_for_testing();
- EXPECT_TRUE(tcache);
- internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
- EXPECT_EQ(tcache->prev_for_testing(), nullptr);
- EXPECT_EQ(tcache->next_for_testing(), parent_thread_tcache_);
- }
- private:
- ThreadCache* parent_thread_tcache_ = nullptr;
- ThreadSafePartitionRoot* root_ = nullptr;
- bool with_denser_bucket_distribution_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest, ThreadCacheRegistry) {
- auto* parent_thread_tcache = root_->thread_cache_for_testing();
- ASSERT_TRUE(parent_thread_tcache);
- {
- internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
- EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
- EXPECT_EQ(parent_thread_tcache->next_, nullptr);
- }
- ThreadDelegateForThreadCacheRegistry delegate(parent_thread_tcache, root_,
- GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- internal::ScopedGuard lock(ThreadCacheRegistry::GetLock());
- EXPECT_EQ(parent_thread_tcache->prev_, nullptr);
- EXPECT_EQ(parent_thread_tcache->next_, nullptr);
- }
- #if defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
- TEST_P(PartitionAllocThreadCacheTest, RecordStats) {
- auto* tcache = root_->thread_cache_for_testing();
- DeltaCounter alloc_counter{tcache->stats_.alloc_count};
- DeltaCounter alloc_hits_counter{tcache->stats_.alloc_hits};
- DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
- DeltaCounter alloc_miss_empty_counter{tcache->stats_.alloc_miss_empty};
- DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
- DeltaCounter cache_fill_hits_counter{tcache->stats_.cache_fill_hits};
- DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
- // Cache has been purged, first allocation is a miss.
- void* data = root_->Alloc(kMediumSize, "");
- EXPECT_EQ(1u, alloc_counter.Delta());
- EXPECT_EQ(1u, alloc_miss_counter.Delta());
- EXPECT_EQ(0u, alloc_hits_counter.Delta());
- // Cache fill worked.
- root_->Free(data);
- EXPECT_EQ(1u, cache_fill_counter.Delta());
- EXPECT_EQ(1u, cache_fill_hits_counter.Delta());
- EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
- tcache->Purge();
- cache_fill_counter.Reset();
- // Buckets are never full, fill always succeeds.
- size_t allocations = 10;
- size_t bucket_index = FillThreadCacheAndReturnIndex(
- kMediumSize, kDefaultCountForMediumBucket + allocations);
- EXPECT_EQ(kDefaultCountForMediumBucket + allocations,
- cache_fill_counter.Delta());
- EXPECT_EQ(0u, cache_fill_misses_counter.Delta());
- // Memory footprint.
- ThreadCacheStats stats;
- ThreadCacheRegistry::Instance().DumpStats(true, &stats);
- // Bucket was cleared (set to kDefaultCountForMediumBucket / 2) after going
- // above the limit (-1), then refilled by batches (1 + floor(allocations /
- // kFillCountForMediumBucket) times).
- size_t expected_count =
- kDefaultCountForMediumBucket / 2 - 1 +
- (1 + allocations / kFillCountForMediumBucket) * kFillCountForMediumBucket;
- EXPECT_EQ(root_->buckets[bucket_index].slot_size * expected_count,
- stats.bucket_total_memory);
- EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
- }
- namespace {
- class ThreadDelegateForMultipleThreadCachesAccounting
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForMultipleThreadCachesAccounting(
- ThreadSafePartitionRoot* root,
- int alloc_count,
- bool with_denser_bucket_distribution)
- : root_(root),
- alloc_count_(alloc_count),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override {
- EXPECT_FALSE(root_->thread_cache_for_testing()); // No allocations yet.
- size_t bucket_index = FillThreadCacheAndReturnIndex(
- root_, kMediumSize, with_denser_bucket_distribution_);
- ThreadCacheStats stats;
- ThreadCacheRegistry::Instance().DumpStats(false, &stats);
- // 2* for this thread and the parent one.
- EXPECT_EQ(
- 2 * root_->buckets[bucket_index].slot_size * kFillCountForMediumBucket,
- stats.bucket_total_memory);
- EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);
- uint64_t this_thread_alloc_count =
- root_->thread_cache_for_testing()->stats_.alloc_count;
- EXPECT_EQ(alloc_count_ + this_thread_alloc_count, stats.alloc_count);
- }
- private:
- ThreadSafePartitionRoot* root_ = nullptr;
- bool with_denser_bucket_distribution_;
- const int alloc_count_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest, MultipleThreadCachesAccounting) {
- FillThreadCacheAndReturnIndex(kMediumSize);
- uint64_t alloc_count = root_->thread_cache_for_testing()->stats_.alloc_count;
- ThreadDelegateForMultipleThreadCachesAccounting delegate(root_, alloc_count,
- GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- }
- #endif // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
- // TODO(https://crbug.com/1287799): Flaky on IOS.
- #if BUILDFLAG(IS_IOS)
- #define MAYBE_PurgeAll DISABLED_PurgeAll
- #else
- #define MAYBE_PurgeAll PurgeAll
- #endif
- namespace {
- class ThreadDelegateForPurgeAll
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForPurgeAll(ThreadSafePartitionRoot* root,
- ThreadCache*& other_thread_tcache,
- std::atomic<bool>& other_thread_started,
- std::atomic<bool>& purge_called,
- int bucket_index,
- bool with_denser_bucket_distribution)
- : root_(root),
- other_thread_tcache_(other_thread_tcache),
- other_thread_started_(other_thread_started),
- purge_called_(purge_called),
- bucket_index_(bucket_index),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override PA_NO_THREAD_SAFETY_ANALYSIS {
- FillThreadCacheAndReturnIndex(root_, kSmallSize,
- with_denser_bucket_distribution_);
- other_thread_tcache_ = root_->thread_cache_for_testing();
- other_thread_started_.store(true, std::memory_order_release);
- while (!purge_called_.load(std::memory_order_acquire)) {
- }
- // Purge() was not triggered from the other thread.
- EXPECT_EQ(kFillCountForSmallBucket,
- other_thread_tcache_->bucket_count_for_testing(bucket_index_));
- // Allocations do not trigger Purge().
- void* data = root_->Alloc(kSmallSize, "");
- EXPECT_EQ(kFillCountForSmallBucket - 1,
- other_thread_tcache_->bucket_count_for_testing(bucket_index_));
- // But deallocations do.
- root_->Free(data);
- EXPECT_EQ(0u,
- other_thread_tcache_->bucket_count_for_testing(bucket_index_));
- }
- private:
- ThreadSafePartitionRoot* root_ = nullptr;
- ThreadCache*& other_thread_tcache_;
- std::atomic<bool>& other_thread_started_;
- std::atomic<bool>& purge_called_;
- const int bucket_index_;
- bool with_denser_bucket_distribution_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest, MAYBE_PurgeAll)
- PA_NO_THREAD_SAFETY_ANALYSIS {
- std::atomic<bool> other_thread_started{false};
- std::atomic<bool> purge_called{false};
- size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
- ThreadCache* this_thread_tcache = root_->thread_cache_for_testing();
- ThreadCache* other_thread_tcache = nullptr;
- ThreadDelegateForPurgeAll delegate(root_, other_thread_tcache,
- other_thread_started, purge_called,
- bucket_index, GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- while (!other_thread_started.load(std::memory_order_acquire)) {
- }
- EXPECT_EQ(kFillCountForSmallBucket,
- this_thread_tcache->bucket_count_for_testing(bucket_index));
- EXPECT_EQ(kFillCountForSmallBucket,
- other_thread_tcache->bucket_count_for_testing(bucket_index));
- ThreadCacheRegistry::Instance().PurgeAll();
- // This thread is synchronously purged.
- EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
- // Not the other one.
- EXPECT_EQ(kFillCountForSmallBucket,
- other_thread_tcache->bucket_count_for_testing(bucket_index));
- purge_called.store(true, std::memory_order_release);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- }
- TEST_P(PartitionAllocThreadCacheTest, PeriodicPurge) {
- auto& registry = ThreadCacheRegistry::Instance();
- auto NextInterval = [&registry]() {
- return internal::base::Microseconds(
- registry.GetPeriodicPurgeNextIntervalInMicroseconds());
- };
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
- // Small amount of memory, the period gets longer.
- auto* tcache = ThreadCache::Get();
- ASSERT_LT(tcache->CachedMemory(),
- ThreadCacheRegistry::kMinCachedMemoryForPurging);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
- // Check that the purge interval is clamped at the maximum value.
- while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
- registry.RunPeriodicPurge();
- }
- registry.RunPeriodicPurge();
- // Not enough memory to decrease the interval.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging +
- 1);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
- FillThreadCacheWithMemory(
- 2 * ThreadCacheRegistry::kMinCachedMemoryForPurging + 1);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
- // Enough memory, interval doesn't change.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
- // No cached memory, increase the interval.
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
- // Cannot test the very large size with only one thread; this is tested below
- // in the multiple threads test.
- }
- namespace {
- void FillThreadCacheWithMemory(ThreadSafePartitionRoot* root,
- size_t target_cached_memory,
- bool with_denser_bucket_distribution) {
- for (int batch : {1, 2, 4, 8, 16}) {
- for (size_t allocation_size = 1;
- allocation_size <= ThreadCache::kLargeSizeThreshold;
- allocation_size++) {
- FillThreadCacheAndReturnIndex(root, allocation_size,
- with_denser_bucket_distribution, batch);
- if (ThreadCache::Get()->CachedMemory() >= target_cached_memory)
- return;
- }
- }
- ASSERT_GE(ThreadCache::Get()->CachedMemory(), target_cached_memory);
- }
- class ThreadDelegateForPeriodicPurgeSumsOverAllThreads
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForPeriodicPurgeSumsOverAllThreads(
- ThreadSafePartitionRoot* root,
- std::atomic<int>& allocations_done,
- std::atomic<bool>& can_finish,
- bool with_denser_bucket_distribution)
- : root_(root),
- allocations_done_(allocations_done),
- can_finish_(can_finish),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override {
- FillThreadCacheWithMemory(
- root_, 5 * ThreadCacheRegistry::kMinCachedMemoryForPurging,
- with_denser_bucket_distribution_);
- allocations_done_.fetch_add(1, std::memory_order_release);
- // This thread needs to be alive when the next periodic purge task runs.
- while (!can_finish_.load(std::memory_order_acquire)) {
- }
- }
- private:
- ThreadSafePartitionRoot* root_ = nullptr;
- std::atomic<int>& allocations_done_;
- std::atomic<bool>& can_finish_;
- bool with_denser_bucket_distribution_;
- };
- } // namespace
- // Disabled due to flakiness: crbug.com/1220371
- TEST_P(PartitionAllocThreadCacheTest,
- DISABLED_PeriodicPurgeSumsOverAllThreads) {
- auto& registry = ThreadCacheRegistry::Instance();
- auto NextInterval = [&registry]() {
- return internal::base::Microseconds(
- registry.GetPeriodicPurgeNextIntervalInMicroseconds());
- };
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
- // Small amount of memory, the period gets longer.
- auto* tcache = ThreadCache::Get();
- ASSERT_LT(tcache->CachedMemory(),
- ThreadCacheRegistry::kMinCachedMemoryForPurging);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
- // Check that the purge interval is clamped at the maximum value.
- while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
- registry.RunPeriodicPurge();
- }
- registry.RunPeriodicPurge();
- // Not enough memory on this thread to decrease the interval.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging /
- 2);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
- std::atomic<int> allocations_done{0};
- std::atomic<bool> can_finish{false};
- ThreadDelegateForPeriodicPurgeSumsOverAllThreads delegate(
- root_, allocations_done, can_finish, GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- internal::base::PlatformThreadHandle thread_handle_2;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle_2);
- while (allocations_done.load(std::memory_order_acquire) != 2) {
- internal::base::PlatformThreadForTesting::YieldCurrentThread();
- }
- // Many allocations on the other threads.
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
- can_finish.store(true, std::memory_order_release);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- internal::base::PlatformThreadForTesting::Join(thread_handle_2);
- }
- // TODO(https://crbug.com/1287799): Flaky on IOS.
- #if BUILDFLAG(IS_IOS)
- #define MAYBE_DynamicCountPerBucket DISABLED_DynamicCountPerBucket
- #else
- #define MAYBE_DynamicCountPerBucket DynamicCountPerBucket
- #endif
- TEST_P(PartitionAllocThreadCacheTest, MAYBE_DynamicCountPerBucket) {
- auto* tcache = root_->thread_cache_for_testing();
- size_t bucket_index =
- FillThreadCacheAndReturnIndex(kMediumSize, kDefaultCountForMediumBucket);
- EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier / 2);
- // No immediate batch deallocation.
- EXPECT_EQ(kDefaultCountForMediumBucket, tcache->buckets_[bucket_index].count);
- void* data = root_->Alloc(kMediumSize, "");
- // Not triggered by allocations.
- EXPECT_EQ(kDefaultCountForMediumBucket - 1,
- tcache->buckets_[bucket_index].count);
- // Free() triggers the purge within limits.
- root_->Free(data);
- EXPECT_LE(tcache->buckets_[bucket_index].count,
- kDefaultCountForMediumBucket / 2);
- // Won't go above anymore.
- FillThreadCacheAndReturnIndex(kMediumSize, 1000);
- EXPECT_LE(tcache->buckets_[bucket_index].count,
- kDefaultCountForMediumBucket / 2);
- // Limit can be raised.
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier * 2);
- FillThreadCacheAndReturnIndex(kMediumSize, 1000);
- EXPECT_GT(tcache->buckets_[bucket_index].count,
- kDefaultCountForMediumBucket / 2);
- }
- TEST_P(PartitionAllocThreadCacheTest, DynamicCountPerBucketClamping) {
- auto* tcache = root_->thread_cache_for_testing();
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier / 1000.);
- for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
- // Invalid bucket.
- if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
- EXPECT_EQ(root_->buckets[i].active_slot_spans_head, nullptr);
- continue;
- }
- EXPECT_GE(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 1u);
- }
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier * 1000.);
- for (size_t i = 0; i < ThreadCache::kBucketCount; i++) {
- // Invalid bucket.
- if (!tcache->buckets_[i].limit.load(std::memory_order_relaxed)) {
- EXPECT_EQ(root_->buckets[i].active_slot_spans_head, nullptr);
- continue;
- }
- EXPECT_LT(tcache->buckets_[i].limit.load(std::memory_order_relaxed), 0xff);
- }
- }
- // TODO(https://crbug.com/1287799): Flaky on IOS.
- #if BUILDFLAG(IS_IOS)
- #define MAYBE_DynamicCountPerBucketMultipleThreads \
- DISABLED_DynamicCountPerBucketMultipleThreads
- #else
- #define MAYBE_DynamicCountPerBucketMultipleThreads \
- DynamicCountPerBucketMultipleThreads
- #endif
- namespace {
- class ThreadDelegateForDynamicCountPerBucketMultipleThreads
- : public internal::base::PlatformThreadForTesting::Delegate {
- public:
- ThreadDelegateForDynamicCountPerBucketMultipleThreads(
- ThreadSafePartitionRoot* root,
- std::atomic<bool>& other_thread_started,
- std::atomic<bool>& threshold_changed,
- int bucket_index,
- bool with_denser_bucket_distribution)
- : root_(root),
- other_thread_started_(other_thread_started),
- threshold_changed_(threshold_changed),
- bucket_index_(bucket_index),
- with_denser_bucket_distribution_(with_denser_bucket_distribution) {}
- void ThreadMain() override {
- FillThreadCacheAndReturnIndex(root_, kSmallSize,
- with_denser_bucket_distribution_,
- kDefaultCountForSmallBucket + 10);
- auto* this_thread_tcache = root_->thread_cache_for_testing();
- // More than the default since the multiplier has changed.
- EXPECT_GT(this_thread_tcache->bucket_count_for_testing(bucket_index_),
- kDefaultCountForSmallBucket + 10);
- other_thread_started_.store(true, std::memory_order_release);
- while (!threshold_changed_.load(std::memory_order_acquire)) {
- }
- void* data = root_->Alloc(kSmallSize, "");
- // Deallocations trigger limit enforcement.
- root_->Free(data);
- // Since the bucket is too full, it gets halved by batched deallocation.
- EXPECT_EQ(static_cast<uint8_t>(ThreadCache::kSmallBucketBaseCount / 2),
- this_thread_tcache->bucket_count_for_testing(bucket_index_));
- }
- private:
- ThreadSafePartitionRoot* root_ = nullptr;
- std::atomic<bool>& other_thread_started_;
- std::atomic<bool>& threshold_changed_;
- const int bucket_index_;
- bool with_denser_bucket_distribution_;
- };
- } // namespace
- TEST_P(PartitionAllocThreadCacheTest,
- MAYBE_DynamicCountPerBucketMultipleThreads) {
- std::atomic<bool> other_thread_started{false};
- std::atomic<bool> threshold_changed{false};
- auto* tcache = root_->thread_cache_for_testing();
- size_t bucket_index =
- FillThreadCacheAndReturnIndex(kSmallSize, kDefaultCountForSmallBucket);
- EXPECT_EQ(kDefaultCountForSmallBucket, tcache->buckets_[bucket_index].count);
- // Change the ratio before starting the threads, checking that it will be applied
- // to newly-created threads.
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
- ThreadCache::kDefaultMultiplier + 1);
- ThreadDelegateForDynamicCountPerBucketMultipleThreads delegate(
- root_, other_thread_started, threshold_changed, bucket_index, GetParam());
- internal::base::PlatformThreadHandle thread_handle;
- internal::base::PlatformThreadForTesting::Create(0, &delegate,
- &thread_handle);
- while (!other_thread_started.load(std::memory_order_acquire)) {
- }
- ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(1.);
- threshold_changed.store(true, std::memory_order_release);
- internal::base::PlatformThreadForTesting::Join(thread_handle);
- }
- TEST_P(PartitionAllocThreadCacheTest, DynamicSizeThreshold) {
- auto* tcache = root_->thread_cache_for_testing();
- DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
- DeltaCounter alloc_miss_too_large_counter{
- tcache->stats_.alloc_miss_too_large};
- DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
- DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
- // Default threshold at first.
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold);
- EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
- EXPECT_EQ(1u, cache_fill_counter.Delta());
- // Too large to be cached.
- FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
- EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
- // Increase.
- ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
- FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
- // No new miss.
- EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
- // Lower.
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- FillThreadCacheAndReturnIndex(ThreadCache::kDefaultSizeThreshold + 1);
- EXPECT_EQ(2u, alloc_miss_too_large_counter.Delta());
- // Value is clamped.
- size_t too_large = 1024 * 1024;
- ThreadCache::SetLargestCachedSize(too_large);
- FillThreadCacheAndReturnIndex(too_large);
- EXPECT_EQ(3u, alloc_miss_too_large_counter.Delta());
- }
- // Disabled due to flakiness: crbug.com/1287811
- TEST_P(PartitionAllocThreadCacheTest, DISABLED_DynamicSizeThresholdPurge) {
- auto* tcache = root_->thread_cache_for_testing();
- DeltaCounter alloc_miss_counter{tcache->stats_.alloc_misses};
- DeltaCounter alloc_miss_too_large_counter{
- tcache->stats_.alloc_miss_too_large};
- DeltaCounter cache_fill_counter{tcache->stats_.cache_fill_count};
- DeltaCounter cache_fill_misses_counter{tcache->stats_.cache_fill_misses};
- // Cache large allocations.
- size_t large_allocation_size = ThreadCache::kLargeSizeThreshold;
- ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
- size_t index = FillThreadCacheAndReturnIndex(large_allocation_size);
- EXPECT_EQ(0u, alloc_miss_too_large_counter.Delta());
- // Lower.
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- FillThreadCacheAndReturnIndex(large_allocation_size);
- EXPECT_EQ(1u, alloc_miss_too_large_counter.Delta());
- // There is memory trapped in the cache bucket.
- EXPECT_GT(tcache->buckets_[index].count, 0u);
- // Which is reclaimed by Purge().
- tcache->Purge();
- EXPECT_EQ(0u, tcache->buckets_[index].count);
- }
- TEST_P(PartitionAllocThreadCacheTest, ClearFromTail) {
- auto count_items = [](ThreadCache* tcache, size_t index) {
- uint8_t count = 0;
- auto* head = tcache->buckets_[index].freelist_head;
- while (head) {
- head = head->GetNext(tcache->buckets_[index].slot_size);
- count++;
- }
- return count;
- };
- auto* tcache = root_->thread_cache_for_testing();
- size_t index = FillThreadCacheAndReturnIndex(kSmallSize, 10);
- ASSERT_GE(count_items(tcache, index), 10);
- void* head = tcache->buckets_[index].freelist_head;
- for (size_t limit : {8, 3, 1}) {
- tcache->ClearBucket(tcache->buckets_[index], limit);
- EXPECT_EQ(head, static_cast<void*>(tcache->buckets_[index].freelist_head));
- EXPECT_EQ(count_items(tcache, index), limit);
- }
- tcache->ClearBucket(tcache->buckets_[index], 0);
- EXPECT_EQ(nullptr, static_cast<void*>(tcache->buckets_[index].freelist_head));
- }
- // TODO(https://crbug.com/1287799): Flaky on IOS.
- #if BUILDFLAG(IS_IOS)
- #define MAYBE_Bookkeeping DISABLED_Bookkeeping
- #else
- #define MAYBE_Bookkeeping Bookkeeping
- #endif
- TEST_P(PartitionAllocThreadCacheTest, MAYBE_Bookkeeping) {
- void* arr[kFillCountForMediumBucket] = {};
- auto* tcache = root_->thread_cache_for_testing();
- root_->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
- PurgeFlags::kDiscardUnusedSystemPages);
- root_->ResetBookkeepingForTesting();
- // The ThreadCache is allocated before we change buckets, so its size is
- // always based on the sparser distribution.
- size_t tc_bucket_index = root_->SizeToBucketIndex(sizeof(ThreadCache), false);
- auto* tc_bucket = &root_->buckets[tc_bucket_index];
- size_t expected_allocated_size =
- tc_bucket->slot_size; // For the ThreadCache itself.
- size_t expected_committed_size = kUseLazyCommit
- ? internal::SystemPageSize()
- : tc_bucket->get_bytes_per_span();
- EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
- EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
- EXPECT_EQ(expected_allocated_size,
- root_->get_total_size_of_allocated_bytes());
- EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());
- void* ptr = root_->Alloc(kMediumSize, "");
- auto* medium_bucket = root_->buckets + SizeToIndex(kMediumSize);
- size_t medium_alloc_size = medium_bucket->slot_size;
- expected_allocated_size += medium_alloc_size;
- expected_committed_size += kUseLazyCommit
- ? internal::SystemPageSize()
- : medium_bucket->get_bytes_per_span();
- EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
- EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
- EXPECT_EQ(expected_allocated_size,
- root_->get_total_size_of_allocated_bytes());
- EXPECT_EQ(expected_allocated_size, root_->get_max_size_of_allocated_bytes());
- expected_allocated_size += kFillCountForMediumBucket * medium_alloc_size;
- // These allocations all come from the thread-cache.
- for (size_t i = 0; i < kFillCountForMediumBucket; i++) {
- arr[i] = root_->Alloc(kMediumSize, "");
- EXPECT_EQ(expected_committed_size, root_->total_size_of_committed_pages);
- EXPECT_EQ(expected_committed_size, root_->max_size_of_committed_pages);
- EXPECT_EQ(expected_allocated_size,
- root_->get_total_size_of_allocated_bytes());
- EXPECT_EQ(expected_allocated_size,
- root_->get_max_size_of_allocated_bytes());
- EXPECT_EQ((kFillCountForMediumBucket - 1 - i) * medium_alloc_size,
- tcache->CachedMemory());
- }
- EXPECT_EQ(0U, tcache->CachedMemory());
- root_->Free(ptr);
- for (auto*& el : arr) {
- root_->Free(el);
- }
- EXPECT_EQ(root_->get_total_size_of_allocated_bytes(),
- expected_allocated_size);
- tcache->Purge();
- EXPECT_EQ(root_->get_total_size_of_allocated_bytes(),
- GetBucketSizeForThreadCache());
- }
- TEST_P(PartitionAllocThreadCacheTest, TryPurgeNoAllocs) {
- auto* tcache = root_->thread_cache_for_testing();
- tcache->TryPurge();
- }
- TEST_P(PartitionAllocThreadCacheTest, TryPurgeMultipleCorrupted) {
- auto* tcache = root_->thread_cache_for_testing();
- void* ptr = root_->Alloc(kMediumSize, "");
- auto* medium_bucket = root_->buckets + SizeToIndex(kMediumSize);
- auto* curr = medium_bucket->active_slot_spans_head->get_freelist_head();
- curr = curr->GetNextForThreadCache<true>(kMediumSize);
- curr->CorruptNextForTesting(0x12345678);
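- // TryPurge() must tolerate the corrupted freelist entry without crashing.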
- tcache->TryPurge();
- curr->SetNext(nullptr);
- root_->Free(ptr);
- }
- TEST(AlternateBucketDistributionTest, SizeToIndex) {
- using internal::BucketIndexLookup;
- // The first 12 buckets are the same as the default bucket index.
- for (size_t i = 1 << 0; i < 1 << 8; i <<= 1) {
- for (size_t offset = 0; offset < 4; offset++) {
- size_t n = i * (4 + offset) / 4;
- EXPECT_EQ(BucketIndexLookup::GetIndex(n),
- BucketIndexLookup::GetIndexForDenserBuckets(n));
- }
- }
- // The alternate bucket distribution is different in the middle values.
- //
- // For each order, the top two buckets are removed compared with the default
- // distribution. Values that would be allocated in those two buckets are
- // instead allocated in the next power of two bucket.
- //
- // The first two buckets (each power of two and the next bucket up) remain
- // the same between the two bucket distributions.
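- // For example, in the i = 256 order the sizes checked are 256, 320, 384 and
- // 448: GetIndex() maps 384 and 448 to the same bucket as 512 (the first
- // bucket of the next order), whereas GetIndexForDenserBuckets() keeps a
- // dedicated bucket for each of them.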
- size_t expected_index = BucketIndexLookup::GetIndex(1 << 8);
- for (size_t i = 1 << 8; i < internal::kHighThresholdForAlternateDistribution;
- i <<= 1) {
- // The first two buckets in the order should match up to the normal bucket
- // distribution.
- for (size_t offset = 0; offset < 2; offset++) {
- size_t n = i * (4 + offset) / 4;
- EXPECT_EQ(BucketIndexLookup::GetIndex(n),
- BucketIndexLookup::GetIndexForDenserBuckets(n));
- EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index++);
- }
- // The last two buckets in the order are "rounded up" to the same bucket
- // as the next power of two.
- expected_index += 2;
- for (size_t offset = 2; offset < 4; offset++) {
- size_t n = i * (4 + offset) / 4;
- // These two are rounded up in the alternate distribution, so we expect
- // the bucket index to be larger than the bucket index for the same
- // allocation under the default distribution.
- EXPECT_GT(BucketIndexLookup::GetIndex(n),
- BucketIndexLookup::GetIndexForDenserBuckets(n));
- // We expect both allocations in this loop to be rounded up to the next
- // power of two bucket.
- EXPECT_EQ(BucketIndexLookup::GetIndex(n), expected_index);
- }
- }
- // The rest of the buckets all match up exactly with the existing
- // bucket distribution.
- for (size_t i = internal::kHighThresholdForAlternateDistribution;
- i < internal::kMaxBucketed; i <<= 1) {
- for (size_t offset = 0; offset < 4; offset++) {
- size_t n = i * (4 + offset) / 4;
- EXPECT_EQ(BucketIndexLookup::GetIndex(n),
- BucketIndexLookup::GetIndexForDenserBuckets(n));
- }
- }
- }
- // This test makes sure it's safe to switch to the alternate bucket distribution
- // at runtime. This is intended to happen once, near the start of Chrome,
- // once we have enabled features.
- TEST(AlternateBucketDistributionTest, SwitchBeforeAlloc) {
- auto* root = CreatePartitionRoot();
- root->SwitchToDenserBucketDistribution();
- constexpr size_t n = (1 << 12) * 3 / 2;
- EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
- internal::BucketIndexLookup::GetIndexForDenserBuckets(n));
- void* ptr = root->Alloc(n, "");
- root->ResetBucketDistributionForTesting();
- root->Free(ptr);
- // Clean up
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- internal::SwapInProcessThreadCacheForTesting(root);
- ThreadSafePartitionRoot::DeleteForTesting(root);
- // Cleanup the global state so next test can recreate ThreadCache.
- if (ThreadCache::IsTombstone(ThreadCache::Get()))
- ThreadCache::RemoveTombstoneForTesting();
- }
- // This test makes sure it's safe to switch to the alternate bucket distribution
- // at runtime. This is intended to happen once, near the start of Chrome,
- // once we have enabled features.
- TEST(AlternateBucketDistributionTest, SwitchAfterAlloc) {
- auto* root = CreatePartitionRoot();
- constexpr size_t n = (1 << 12) * 3 / 2;
- EXPECT_NE(internal::BucketIndexLookup::GetIndex(n),
- internal::BucketIndexLookup::GetIndexForDenserBuckets(n));
- void* ptr = root->Alloc(n, "");
- root->SwitchToDenserBucketDistribution();
- void* ptr2 = root->Alloc(n, "");
- root->Free(ptr2);
- root->Free(ptr);
- // Clean up
- ThreadCache::SetLargestCachedSize(ThreadCache::kDefaultSizeThreshold);
- internal::SwapInProcessThreadCacheForTesting(root);
- ThreadSafePartitionRoot::DeleteForTesting(root);
- // Cleanup the global state so next test can recreate ThreadCache.
- if (ThreadCache::IsTombstone(ThreadCache::Get()))
- ThreadCache::RemoveTombstoneForTesting();
- }
- } // namespace partition_alloc
- #endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) &&
- // defined(PA_THREAD_CACHE_SUPPORTED)