partition_root.cc
  1. // Copyright (c) 2020 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #include "base/allocator/partition_allocator/partition_root.h"
  5. #include <cstdint>
  6. #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
  7. #include "base/allocator/partition_allocator/oom.h"
  8. #include "base/allocator/partition_allocator/page_allocator.h"
  9. #include "base/allocator/partition_allocator/partition_address_space.h"
  10. #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
  11. #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
  12. #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
  13. #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
  14. #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
  15. #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
  16. #include "base/allocator/partition_allocator/partition_alloc_check.h"
  17. #include "base/allocator/partition_allocator/partition_alloc_config.h"
  18. #include "base/allocator/partition_allocator/partition_alloc_constants.h"
  19. #include "base/allocator/partition_allocator/partition_bucket.h"
  20. #include "base/allocator/partition_allocator/partition_cookie.h"
  21. #include "base/allocator/partition_allocator/partition_oom.h"
  22. #include "base/allocator/partition_allocator/partition_page.h"
  23. #include "base/allocator/partition_allocator/reservation_offset_table.h"
  24. #include "base/allocator/partition_allocator/starscan/pcscan.h"
  25. #include "base/allocator/partition_allocator/tagging.h"
  26. #include "build/build_config.h"
  27. #if BUILDFLAG(IS_WIN)
  28. #include <windows.h>
  29. #include "wow64apiset.h"
  30. #endif
  31. #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  32. #include <pthread.h>
  33. #endif
  34. #if BUILDFLAG(RECORD_ALLOC_INFO)
  35. namespace partition_alloc::internal {
  36. // Even if this were not hidden behind a BUILDFLAG, it would not use any memory
  37. // when recording is disabled, since it ends up in the .bss section.
  38. AllocInfo g_allocs = {};
  39. void RecordAllocOrFree(uintptr_t addr, size_t size) {
  40. g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
  41. kAllocInfoSize] = {addr, size};
  42. }
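  // Note: the relaxed fetch_add above makes g_allocs a fixed-size ring buffer:
  // slots are claimed modulo kAllocInfoSize, so only the most recent
  // kAllocInfoSize records are retained and older ones get overwritten.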
  43. } // namespace partition_alloc::internal
  44. #endif // BUILDFLAG(RECORD_ALLOC_INFO)
  45. namespace partition_alloc {
  46. #if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  47. namespace {
  48. internal::Lock g_root_enumerator_lock;
  49. }
  50. template <bool thread_safe>
  51. internal::Lock& PartitionRoot<thread_safe>::GetEnumeratorLock() {
  52. return g_root_enumerator_lock;
  53. }
  54. namespace internal {
  55. class PartitionRootEnumerator {
  56. public:
  57. using EnumerateCallback = void (*)(ThreadSafePartitionRoot* root,
  58. bool in_child);
  59. enum EnumerateOrder {
  60. kNormal,
  61. kReverse,
  62. };
  63. static PartitionRootEnumerator& Instance() {
  64. static PartitionRootEnumerator instance;
  65. return instance;
  66. }
  67. void Enumerate(EnumerateCallback callback,
  68. bool in_child,
  69. EnumerateOrder order) PA_NO_THREAD_SAFETY_ANALYSIS {
  70. if (order == kNormal) {
  71. ThreadSafePartitionRoot* root;
  72. for (root = Head(partition_roots_); root != nullptr;
  73. root = root->next_root)
  74. callback(root, in_child);
  75. } else {
  76. PA_DCHECK(order == kReverse);
  77. ThreadSafePartitionRoot* root;
  78. for (root = Tail(partition_roots_); root != nullptr;
  79. root = root->prev_root)
  80. callback(root, in_child);
  81. }
  82. }
  83. void Register(ThreadSafePartitionRoot* root) {
  84. internal::ScopedGuard guard(ThreadSafePartitionRoot::GetEnumeratorLock());
  85. root->next_root = partition_roots_;
  86. root->prev_root = nullptr;
  87. if (partition_roots_)
  88. partition_roots_->prev_root = root;
  89. partition_roots_ = root;
  90. }
  91. void Unregister(ThreadSafePartitionRoot* root) {
  92. internal::ScopedGuard guard(ThreadSafePartitionRoot::GetEnumeratorLock());
  93. ThreadSafePartitionRoot* prev = root->prev_root;
  94. ThreadSafePartitionRoot* next = root->next_root;
  95. if (prev) {
  96. PA_DCHECK(prev->next_root == root);
  97. prev->next_root = next;
  98. } else {
  99. PA_DCHECK(partition_roots_ == root);
  100. partition_roots_ = next;
  101. }
  102. if (next) {
  103. PA_DCHECK(next->prev_root == root);
  104. next->prev_root = prev;
  105. }
  106. root->next_root = nullptr;
  107. root->prev_root = nullptr;
  108. }
  109. private:
  110. constexpr PartitionRootEnumerator() = default;
  111. ThreadSafePartitionRoot* Head(ThreadSafePartitionRoot* roots) {
  112. return roots;
  113. }
  114. ThreadSafePartitionRoot* Tail(ThreadSafePartitionRoot* roots)
  115. PA_NO_THREAD_SAFETY_ANALYSIS {
  116. if (!roots)
  117. return nullptr;
  118. ThreadSafePartitionRoot* node = roots;
  119. for (; node->next_root != nullptr; node = node->next_root)
  120. ;
  121. return node;
  122. }
  123. ThreadSafePartitionRoot* partition_roots_
  124. PA_GUARDED_BY(ThreadSafePartitionRoot::GetEnumeratorLock()) = nullptr;
  125. };
  126. } // namespace internal
  127. #endif // PA_USE_PARTITION_ROOT_ENUMERATOR
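  // Init() registers each root with the enumerator above and ~PartitionRoot()
  // unregisters it, so the fork() handlers below (compiled in when
  // PartitionAlloc is used as malloc) can lock and unlock every live root in a
  // consistent order.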
  128. #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  129. namespace {
  130. #if defined(PA_HAS_ATFORK_HANDLER)
  131. void LockRoot(PartitionRoot<internal::ThreadSafe>* root,
  132. bool) PA_NO_THREAD_SAFETY_ANALYSIS {
  133. PA_DCHECK(root);
  134. root->lock_.Acquire();
  135. }
  136. // PA_NO_THREAD_SAFETY_ANALYSIS: acquires the lock and doesn't release it, by
  137. // design.
  138. void BeforeForkInParent() PA_NO_THREAD_SAFETY_ANALYSIS {
  139. // ThreadSafePartitionRoot::GetLock() is private. So use
  140. // g_root_enumerator_lock here.
  141. g_root_enumerator_lock.Acquire();
  142. internal::PartitionRootEnumerator::Instance().Enumerate(
  143. LockRoot, false,
  144. internal::PartitionRootEnumerator::EnumerateOrder::kNormal);
  145. ThreadCacheRegistry::GetLock().Acquire();
  146. }
  147. template <typename T>
  148. void UnlockOrReinit(T& lock, bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
  149. // Only re-init the locks in the child process; in the parent we can unlock
  150. // them normally.
  151. if (in_child)
  152. lock.Reinit();
  153. else
  154. lock.Release();
  155. }
  156. void UnlockOrReinitRoot(PartitionRoot<internal::ThreadSafe>* root,
  157. bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
  158. UnlockOrReinit(root->lock_, in_child);
  159. }
  160. void ReleaseLocks(bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {
  161. // In reverse order, even though there are no lock ordering dependencies.
  162. UnlockOrReinit(ThreadCacheRegistry::GetLock(), in_child);
  163. internal::PartitionRootEnumerator::Instance().Enumerate(
  164. UnlockOrReinitRoot, in_child,
  165. internal::PartitionRootEnumerator::EnumerateOrder::kReverse);
  166. // ThreadSafePartitionRoot::GetLock() is private. So use
  167. // g_root_enumerator_lock here.
  168. UnlockOrReinit(g_root_enumerator_lock, in_child);
  169. }
  170. void AfterForkInParent() {
  171. ReleaseLocks(/* in_child = */ false);
  172. }
  173. void AfterForkInChild() {
  174. ReleaseLocks(/* in_child = */ true);
  175. // Unsafe, as noted in the name. This is fine here however, since at this
  176. // point there is only one thread, this one (unless another post-fork()
  177. // handler created a thread, but it would have needed to allocate, which would
  178. // have deadlocked the process already).
  179. //
  180. // If we don't reclaim this memory, it is lost forever. Note that this is only
  181. // really an issue if we fork() a multi-threaded process without calling
  182. // exec() right away, which is discouraged.
  183. ThreadCacheRegistry::Instance().ForcePurgeAllThreadAfterForkUnsafe();
  184. }
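  // Lock ordering around fork(), as implemented above: BeforeForkInParent()
  // takes g_root_enumerator_lock, then every registered root's lock_, then the
  // ThreadCacheRegistry lock; ReleaseLocks() walks the same locks in reverse,
  // releasing them in the parent and re-initializing them in the child.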
  185. #endif // defined(PA_HAS_ATFORK_HANDLER)
  186. std::atomic<bool> g_global_init_called;
  187. void PartitionAllocMallocInitOnce() {
  188. bool expected = false;
  189. // No need to block execution for potential concurrent initialization; we
  190. // merely want to make sure this is only called once.
  191. if (!g_global_init_called.compare_exchange_strong(expected, true))
  192. return;
  193. #if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  194. // When fork() is called, only the current thread continues to execute in the
  195. // child process. If the lock is held, but *not* by this thread when fork() is
  196. // called, we have a deadlock.
  197. //
  198. // The "solution" here is to acquire the lock on the forking thread before
  199. // fork(), and keep it held until fork() is done, in the parent and the
  200. // child. To clean up memory, we also must empty the thread caches in the
  201. // child, which is easier, since no threads except for the current one are
  202. // running right after the fork().
  203. //
  204. // This is not perfect though, since:
  205. // - Multiple pre/post-fork() handlers can be registered; they are then run in
  206. // LIFO order for the pre-fork handlers, and FIFO order for the post-fork
  207. // ones. So unless we are the first to register a handler, if another handler
  208. // allocates, then we deterministically deadlock.
  209. // - pthread handlers are *not* called when the application calls clone()
  210. // directly, which is what Chrome does to launch processes.
  211. //
  212. // Still, no perfect solution exists to make threads + fork() cooperate;
  213. // deadlocks are real (and fork() is used in DEATH_TEST()s), and other
  214. // malloc() implementations use the same techniques.
  215. int err =
  216. pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
  217. PA_CHECK(err == 0);
  218. #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  219. }
  220. } // namespace
  221. #if BUILDFLAG(IS_APPLE)
  222. void PartitionAllocMallocHookOnBeforeForkInParent() {
  223. BeforeForkInParent();
  224. }
  225. void PartitionAllocMallocHookOnAfterForkInParent() {
  226. AfterForkInParent();
  227. }
  228. void PartitionAllocMallocHookOnAfterForkInChild() {
  229. AfterForkInChild();
  230. }
  231. #endif // BUILDFLAG(IS_APPLE)
  232. #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  233. namespace internal {
  234. namespace {
  235. constexpr size_t kMaxPurgeableSlotsPerSystemPage = 2;
  236. PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
  237. MaxPurgeableSlotSize() {
  238. return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
  239. }
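  // For example, with a 4 KiB system page, MaxPurgeableSlotSize() is 2 KiB, so
  // PartitionPurgeSlotSpan() below ignores slot spans whose slot_size is
  // smaller than 2 KiB (see its early return).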
  240. } // namespace
  241. template <bool thread_safe>
  242. static size_t PartitionPurgeSlotSpan(
  243. internal::SlotSpanMetadata<thread_safe>* slot_span,
  244. bool discard) {
  245. auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  246. const internal::PartitionBucket<thread_safe>* bucket = slot_span->bucket;
  247. size_t slot_size = bucket->slot_size;
  248. // We will do nothing if slot_size is smaller than SystemPageSize() / 2
  249. // because |kMaxSlotCount| will be too large in that case, which leads to
  250. // |slot_usage| using up too much memory.
  251. if (slot_size < MaxPurgeableSlotSize() || !slot_span->num_allocated_slots)
  252. return 0;
  253. size_t bucket_num_slots = bucket->get_slots_per_span();
  254. size_t discardable_bytes = 0;
  255. if (slot_span->CanStoreRawSize()) {
  256. uint32_t utilized_slot_size = static_cast<uint32_t>(
  257. RoundUpToSystemPage(slot_span->GetUtilizedSlotSize()));
  258. discardable_bytes = bucket->slot_size - utilized_slot_size;
  259. if (discardable_bytes && discard) {
  260. uintptr_t slot_span_start =
  261. internal::SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
  262. uintptr_t committed_data_end = slot_span_start + utilized_slot_size;
  263. ScopedSyscallTimer timer{root};
  264. DiscardSystemPages(committed_data_end, discardable_bytes);
  265. }
  266. return discardable_bytes;
  267. }
  268. #if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
  269. constexpr size_t kMaxSlotCount =
  270. (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
  271. MaxPurgeableSlotSize();
  272. #elif BUILDFLAG(IS_APPLE) || (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
  273. // It's better for slot_usage to be stack-allocated and fixed-size, which
  274. // demands that its size be constexpr. On IS_APPLE and Linux on arm64,
  275. // PartitionPageSize() is always SystemPageSize() << 2, so regardless of
  276. // what the run time page size is, kMaxSlotCount can always be simplified
  277. // to this expression.
  278. constexpr size_t kMaxSlotCount =
  279. 4 * kMaxPurgeableSlotsPerSystemPage *
  280. internal::kMaxPartitionPagesPerRegularSlotSpan;
  281. PA_CHECK(kMaxSlotCount == (PartitionPageSize() *
  282. internal::kMaxPartitionPagesPerRegularSlotSpan) /
  283. MaxPurgeableSlotSize());
  284. #endif
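  // A sketch of why the two definitions above agree, using the premise stated
  // in the comment that PartitionPageSize() == SystemPageSize() << 2 on those
  // platforms:
  //   (PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
  //       MaxPurgeableSlotSize()
  //   == (4 * SystemPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
  //       (SystemPageSize() / kMaxPurgeableSlotsPerSystemPage)
  //   == 4 * kMaxPurgeableSlotsPerSystemPage *
  //       kMaxPartitionPagesPerRegularSlotSpan,
  //   which is exactly the simplified expression used in the #elif branch.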
  285. PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
  286. PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
  287. size_t num_slots = bucket_num_slots - slot_span->num_unprovisioned_slots;
  288. char slot_usage[kMaxSlotCount];
  289. #if !BUILDFLAG(IS_WIN)
  290. // The last freelist entry should not be discarded on Windows, because
  291. // DiscardVirtualMemory makes the contents of discarded memory undefined.
  292. size_t last_slot = static_cast<size_t>(-1);
  293. #endif
  294. memset(slot_usage, 1, num_slots);
  295. uintptr_t slot_span_start =
  296. SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
  297. // First, walk the freelist for this slot span and make a bitmap of which
  298. // slots are not in use.
  299. for (PartitionFreelistEntry* entry = slot_span->get_freelist_head(); entry;
  300. /**/) {
  301. size_t slot_number =
  302. bucket->GetSlotNumber(SlotStartPtr2Addr(entry) - slot_span_start);
  303. PA_DCHECK(slot_number < num_slots);
  304. slot_usage[slot_number] = 0;
  305. #if !BUILDFLAG(IS_WIN)
  306. // If we have a slot where the encoded next pointer is 0, we can actually
  307. // discard that entry because touching a discarded page is guaranteed to
  308. // return the original content or 0. (Note that this optimization won't be
  309. // effective on big-endian machines because the masking function is
  310. // negation.)
  311. if (entry->IsEncodedNextPtrZero())
  312. last_slot = slot_number;
  313. #endif
  314. entry = entry->GetNext(slot_size);
  315. }
  316. // If the slot(s) at the end of the slot span are not in use, we can truncate
  317. // them entirely and rewrite the freelist.
  318. size_t truncated_slots = 0;
  319. while (!slot_usage[num_slots - 1]) {
  320. truncated_slots++;
  321. num_slots--;
  322. PA_DCHECK(num_slots);
  323. }
  324. // First, do the work of calculating the discardable bytes. Don't actually
  325. // discard anything unless the discard flag was passed in.
  326. if (truncated_slots) {
  327. size_t unprovisioned_bytes = 0;
  328. uintptr_t begin_addr = slot_span_start + (num_slots * slot_size);
  329. uintptr_t end_addr = begin_addr + (slot_size * truncated_slots);
  330. // The slots that do not contain discarded pages should not be included in
  331. // |truncated_slots|. Detect those slots and fix |truncated_slots| and
  332. // |num_slots| accordingly.
  333. uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
  334. for (size_t i = 0; i < kMaxPurgeableSlotsPerSystemPage; ++i) {
  335. begin_addr += slot_size;
  336. if (RoundUpToSystemPage(begin_addr) != rounded_up_begin_addr)
  337. break;
  338. --truncated_slots;
  339. ++num_slots;
  340. }
  341. begin_addr = rounded_up_begin_addr;
  342. // We round the end address here up and not down because we're at the end of
  343. // a slot span, so we "own" all the way up to the page boundary.
  344. end_addr = RoundUpToSystemPage(end_addr);
  345. PA_DCHECK(end_addr <= slot_span_start + bucket->get_bytes_per_span());
  346. if (begin_addr < end_addr) {
  347. unprovisioned_bytes = end_addr - begin_addr;
  348. discardable_bytes += unprovisioned_bytes;
  349. }
  350. if (unprovisioned_bytes && discard) {
  351. PA_DCHECK(truncated_slots > 0);
  352. size_t new_unprovisioned_slots =
  353. truncated_slots + slot_span->num_unprovisioned_slots;
  354. PA_DCHECK(new_unprovisioned_slots <= bucket->get_slots_per_span());
  355. slot_span->num_unprovisioned_slots = new_unprovisioned_slots;
  356. // Rewrite the freelist.
  357. internal::PartitionFreelistEntry* head = nullptr;
  358. internal::PartitionFreelistEntry* back = head;
  359. size_t num_new_entries = 0;
  360. for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
  361. if (slot_usage[slot_index])
  362. continue;
  363. auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(
  364. slot_span_start + (slot_size * slot_index));
  365. if (!head) {
  366. head = entry;
  367. back = entry;
  368. } else {
  369. back->SetNext(entry);
  370. back = entry;
  371. }
  372. num_new_entries++;
  373. #if !BUILDFLAG(IS_WIN)
  374. last_slot = slot_index;
  375. #endif
  376. }
  377. slot_span->SetFreelistHead(head);
  378. PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
  379. // Discard the memory.
  380. ScopedSyscallTimer timer{root};
  381. DiscardSystemPages(begin_addr, unprovisioned_bytes);
  382. }
  383. }
  384. if (slot_size < SystemPageSize()) {
  385. return discardable_bytes;
  386. }
  387. // Next, walk the slots and for any not in use, consider which system pages
  388. // are no longer needed. We can release any system pages back to the system as
  389. // long as we don't interfere with a freelist pointer or an adjacent used
  390. // slot.
  391. for (size_t i = 0; i < num_slots; ++i) {
  392. if (slot_usage[i]) {
  393. continue;
  394. }
  395. // The first address we can safely discard is just after the freelist
  396. // pointer. There's one quirk: if the freelist pointer is actually nullptr,
  397. // we can discard that pointer value too.
  398. uintptr_t begin_addr = slot_span_start + (i * slot_size);
  399. uintptr_t end_addr = begin_addr + slot_size;
  400. bool can_discard_free_list_pointer = false;
  401. #if !BUILDFLAG(IS_WIN)
  402. if (i != last_slot) {
  403. begin_addr += sizeof(internal::PartitionFreelistEntry);
  404. } else {
  405. can_discard_free_list_pointer = true;
  406. }
  407. #else
  408. begin_addr += sizeof(internal::PartitionFreelistEntry);
  409. #endif
  410. uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
  411. uintptr_t rounded_down_begin_addr = RoundDownToSystemPage(begin_addr);
  412. end_addr = RoundDownToSystemPage(end_addr);
  413. // |rounded_up_begin_addr| could be greater than |end_addr| only if the slot
  414. // size was less than the system page size, or if the free list pointer
  415. // crossed the page boundary. Neither is possible here.
  416. PA_DCHECK(rounded_up_begin_addr <= end_addr);
  417. if (rounded_down_begin_addr < rounded_up_begin_addr && i != 0 &&
  418. !slot_usage[i - 1] && can_discard_free_list_pointer) {
  419. // This slot contains a partial page at its beginning. The rest of that
  420. // page is contained in slot[i-1], which is also discardable. Therefore we
  421. // can discard this page.
  422. begin_addr = rounded_down_begin_addr;
  423. } else {
  424. begin_addr = rounded_up_begin_addr;
  425. }
  426. if (begin_addr < end_addr) {
  427. size_t partial_slot_bytes = end_addr - begin_addr;
  428. discardable_bytes += partial_slot_bytes;
  429. if (discard) {
  430. ScopedSyscallTimer timer{root};
  431. DiscardSystemPages(begin_addr, partial_slot_bytes);
  432. }
  433. }
  434. }
  435. return discardable_bytes;
  436. }
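  // Within this file, PartitionPurgeSlotSpan() is used in two modes:
  // PartitionPurgeBucket() calls it with discard == true to actually release
  // pages, while PartitionDumpSlotSpanStats() calls it with discard == false to
  // only report how many bytes could be discarded.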
  437. template <bool thread_safe>
  438. static void PartitionPurgeBucket(
  439. internal::PartitionBucket<thread_safe>* bucket) {
  440. if (bucket->active_slot_spans_head !=
  441. internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
  442. for (internal::SlotSpanMetadata<thread_safe>* slot_span =
  443. bucket->active_slot_spans_head;
  444. slot_span; slot_span = slot_span->next_slot_span) {
  445. PA_DCHECK(
  446. slot_span !=
  447. internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
  448. PartitionPurgeSlotSpan(slot_span, true);
  449. }
  450. }
  451. }
  452. template <bool thread_safe>
  453. static void PartitionDumpSlotSpanStats(
  454. PartitionBucketMemoryStats* stats_out,
  455. internal::SlotSpanMetadata<thread_safe>* slot_span) {
  456. uint16_t bucket_num_slots = slot_span->bucket->get_slots_per_span();
  457. if (slot_span->is_decommitted()) {
  458. ++stats_out->num_decommitted_slot_spans;
  459. return;
  460. }
  461. stats_out->discardable_bytes += PartitionPurgeSlotSpan(slot_span, false);
  462. if (slot_span->CanStoreRawSize()) {
  463. stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
  464. } else {
  465. stats_out->active_bytes +=
  466. (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
  467. }
  468. stats_out->active_count += slot_span->num_allocated_slots;
  469. size_t slot_span_bytes_resident = RoundUpToSystemPage(
  470. (bucket_num_slots - slot_span->num_unprovisioned_slots) *
  471. stats_out->bucket_slot_size);
  472. stats_out->resident_bytes += slot_span_bytes_resident;
  473. if (slot_span->is_empty()) {
  474. stats_out->decommittable_bytes += slot_span_bytes_resident;
  475. ++stats_out->num_empty_slot_spans;
  476. } else if (slot_span->is_full()) {
  477. ++stats_out->num_full_slot_spans;
  478. } else {
  479. PA_DCHECK(slot_span->is_active());
  480. ++stats_out->num_active_slot_spans;
  481. }
  482. }
  483. template <bool thread_safe>
  484. static void PartitionDumpBucketStats(
  485. PartitionBucketMemoryStats* stats_out,
  486. const internal::PartitionBucket<thread_safe>* bucket) {
  487. PA_DCHECK(!bucket->is_direct_mapped());
  488. stats_out->is_valid = false;
  489. // If the active slot span list is empty (==
  490. // internal::SlotSpanMetadata::get_sentinel_slot_span()), the bucket might
  491. // still need to be reported if it has a list of empty, decommitted or full
  492. // slot spans.
  493. if (bucket->active_slot_spans_head ==
  494. internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() &&
  495. !bucket->empty_slot_spans_head && !bucket->decommitted_slot_spans_head &&
  496. !bucket->num_full_slot_spans)
  497. return;
  498. memset(stats_out, '\0', sizeof(*stats_out));
  499. stats_out->is_valid = true;
  500. stats_out->is_direct_map = false;
  501. stats_out->num_full_slot_spans =
  502. static_cast<size_t>(bucket->num_full_slot_spans);
  503. stats_out->bucket_slot_size = bucket->slot_size;
  504. uint16_t bucket_num_slots = bucket->get_slots_per_span();
  505. size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
  506. stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
  507. stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
  508. stats_out->active_count = bucket->num_full_slot_spans * bucket_num_slots;
  509. stats_out->resident_bytes =
  510. bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
  511. for (internal::SlotSpanMetadata<thread_safe>* slot_span =
  512. bucket->empty_slot_spans_head;
  513. slot_span; slot_span = slot_span->next_slot_span) {
  514. PA_DCHECK(slot_span->is_empty() || slot_span->is_decommitted());
  515. PartitionDumpSlotSpanStats(stats_out, slot_span);
  516. }
  517. for (internal::SlotSpanMetadata<thread_safe>* slot_span =
  518. bucket->decommitted_slot_spans_head;
  519. slot_span; slot_span = slot_span->next_slot_span) {
  520. PA_DCHECK(slot_span->is_decommitted());
  521. PartitionDumpSlotSpanStats(stats_out, slot_span);
  522. }
  523. if (bucket->active_slot_spans_head !=
  524. internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span()) {
  525. for (internal::SlotSpanMetadata<thread_safe>* slot_span =
  526. bucket->active_slot_spans_head;
  527. slot_span; slot_span = slot_span->next_slot_span) {
  528. PA_DCHECK(
  529. slot_span !=
  530. internal::SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
  531. PartitionDumpSlotSpanStats(stats_out, slot_span);
  532. }
  533. }
  534. }
  535. #if BUILDFLAG(PA_DCHECK_IS_ON)
  536. void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address) {
  537. PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
  538. }
  539. #endif
  540. } // namespace internal
  541. template <bool thread_safe>
  542. [[noreturn]] PA_NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(
  543. size_t size) {
  544. const size_t virtual_address_space_size =
  545. total_size_of_super_pages.load(std::memory_order_relaxed) +
  546. total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
  547. #if !defined(ARCH_CPU_64_BITS)
  548. const size_t uncommitted_size =
  549. virtual_address_space_size -
  550. total_size_of_committed_pages.load(std::memory_order_relaxed);
  551. // Check whether this OOM is due to a lot of super pages that are allocated
  552. // but not committed, probably due to http://crbug.com/421387.
  553. if (uncommitted_size > internal::kReasonableSizeOfUnusedPages) {
  554. internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
  555. }
  556. #if BUILDFLAG(IS_WIN)
  557. // If true then we are running on 64-bit Windows.
  558. BOOL is_wow_64 = FALSE;
  559. // Intentionally ignoring failures.
  560. IsWow64Process(GetCurrentProcess(), &is_wow_64);
  561. // 32-bit address space on Windows is typically either 2 GiB (on 32-bit
  562. // Windows) or 4 GiB (on 64-bit Windows). 2.8 and 1.0 GiB are just rough
  563. // guesses as to how much address space PA can consume (note that code,
  564. // stacks, and other allocators will also consume address space).
  565. const size_t kReasonableVirtualSize = (is_wow_64 ? 2800 : 1024) * 1024 * 1024;
  566. // Make it obvious whether we are running on 64-bit Windows.
  567. PA_DEBUG_DATA_ON_STACK("is_wow_64", static_cast<size_t>(is_wow_64));
  568. #else
  569. constexpr size_t kReasonableVirtualSize =
  570. // 1.5GiB elsewhere, since address space is typically 3GiB.
  571. (1024 + 512) * 1024 * 1024;
  572. #endif
  573. if (virtual_address_space_size > kReasonableVirtualSize) {
  574. internal::PartitionOutOfMemoryWithLargeVirtualSize(
  575. virtual_address_space_size);
  576. }
  577. #endif // #if !defined(ARCH_CPU_64_BITS)
  578. // Out of memory can be due to multiple causes, such as:
  579. // - Out of GigaCage virtual address space
  580. // - Out of commit due to either our process, or another one
  581. // - Excessive allocations in the current process
  582. //
  583. // Saving these values makes it easier to distinguish between these. See the
  584. // documentation in PA_DEBUG_DATA_ON_STACK() on how to get these from
  585. // minidumps.
  586. PA_DEBUG_DATA_ON_STACK("va_size", virtual_address_space_size);
  587. PA_DEBUG_DATA_ON_STACK("alloc", get_total_size_of_allocated_bytes());
  588. PA_DEBUG_DATA_ON_STACK("commit", get_total_size_of_committed_pages());
  589. PA_DEBUG_DATA_ON_STACK("size", size);
  590. if (internal::g_oom_handling_function)
  591. (*internal::g_oom_handling_function)(size);
  592. OOM_CRASH(size);
  593. }
  594. template <bool thread_safe>
  595. void PartitionRoot<thread_safe>::DecommitEmptySlotSpans() {
  596. ShrinkEmptySlotSpansRing(0);
  597. // We just decommitted everything and hold the lock, so this should be exactly 0.
  598. PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
  599. }
  600. template <bool thread_safe>
  601. void PartitionRoot<thread_safe>::DestructForTesting() {
  602. // We need to destruct the thread cache before we unreserve any of the super
  603. // pages below, but we currently do not do that. So, this function must only be
  604. // called on PartitionRoots without a thread cache.
  605. PA_CHECK(!flags.with_thread_cache);
  606. auto pool_handle = ChoosePool();
  607. auto* curr = first_extent;
  608. while (curr != nullptr) {
  609. auto* next = curr->next;
  610. internal::AddressPoolManager::GetInstance().UnreserveAndDecommit(
  611. pool_handle, SuperPagesBeginFromExtent(curr),
  612. internal::kSuperPageSize * curr->number_of_consecutive_super_pages);
  613. curr = next;
  614. }
  615. }
  616. template <bool thread_safe>
  617. void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
  618. {
  619. #if BUILDFLAG(IS_APPLE)
  620. // Needed to statically bound the page size, which is a runtime constant on
  621. // Apple OSes.
  622. PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
  623. (internal::SystemPageSize() == (size_t{1} << 14)));
  624. #elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  625. // Check runtime pagesize. Though the code is currently the same, it is
  626. // not merged with the IS_APPLE case above as a 1 << 16 case needs to be
  627. // added here in the future, to allow 64 kiB pagesize. That is only
  628. // supported on Linux on arm64, not on IS_APPLE, but not yet present here
  629. // as the rest of the partition allocator does not currently support it.
  630. PA_CHECK((internal::SystemPageSize() == (size_t{1} << 12)) ||
  631. (internal::SystemPageSize() == (size_t{1} << 14)));
  632. #endif
  633. ::partition_alloc::internal::ScopedGuard guard{lock_};
  634. if (initialized)
  635. return;
  636. // Swaps out the active no-op tagging intrinsics with MTE-capable ones, if
  637. // running on the right hardware.
  638. ::partition_alloc::internal::InitializeMTESupportIfNeeded();
  639. #if defined(PA_HAS_64_BITS_POINTERS)
  640. // Reserve address space for partition alloc.
  641. internal::PartitionAddressSpace::Init();
  642. #endif
  643. flags.allow_aligned_alloc =
  644. opts.aligned_alloc == PartitionOptions::AlignedAlloc::kAllowed;
  645. flags.allow_cookie = opts.cookie == PartitionOptions::Cookie::kAllowed;
  646. #if BUILDFLAG(USE_BACKUP_REF_PTR)
  647. flags.brp_enabled_ =
  648. opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kEnabled;
  649. flags.brp_zapping_enabled_ =
  650. opts.backup_ref_ptr_zapping ==
  651. PartitionOptions::BackupRefPtrZapping::kEnabled;
  652. PA_CHECK(!flags.brp_zapping_enabled_ || flags.brp_enabled_);
  653. #else
  654. PA_CHECK(opts.backup_ref_ptr == PartitionOptions::BackupRefPtr::kDisabled);
  655. #endif
  656. flags.use_configurable_pool =
  657. (opts.use_configurable_pool ==
  658. PartitionOptions::UseConfigurablePool::kIfAvailable) &&
  659. IsConfigurablePoolAvailable();
  660. PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
  661. // brp_enabled() is not supported in the configurable pool because
  662. // BRP requires objects to be in a different Pool.
  663. PA_CHECK(!(flags.use_configurable_pool && brp_enabled()));
  664. // The ref-count messes up the alignment needed for AlignedAlloc, making the
  665. // two options incompatible, except in the PUT_REF_COUNT_IN_PREVIOUS_SLOT
  666. // case.
  667. #if BUILDFLAG(USE_BACKUP_REF_PTR) && !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  668. PA_CHECK(!flags.allow_aligned_alloc || !flags.brp_enabled_);
  669. #endif
  670. #if defined(PA_EXTRAS_REQUIRED)
  671. flags.extras_size = 0;
  672. flags.extras_offset = 0;
  673. if (flags.allow_cookie) {
  674. flags.extras_size += internal::kPartitionCookieSizeAdjustment;
  675. }
  676. if (brp_enabled()) {
  677. // TODO(tasak): In the PUT_REF_COUNT_IN_PREVIOUS_SLOT case, ref-count is
  678. // stored out-of-line for single-slot slot spans, so no need to
  679. // add/subtract its size in this case.
  680. flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
  681. flags.extras_offset += internal::kPartitionRefCountOffsetAdjustment;
  682. }
  683. #endif // defined(PA_EXTRAS_REQUIRED)
  684. // Re-confirm the above PA_CHECKs, by making sure there are no
  685. // pre-allocation extras when AlignedAlloc is allowed. Post-allocation
  686. // extras are ok.
  687. PA_CHECK(!flags.allow_aligned_alloc || !flags.extras_offset);
  688. flags.quarantine_mode =
  689. #if defined(PA_ALLOW_PCSCAN)
  690. (opts.quarantine == PartitionOptions::Quarantine::kDisallowed
  691. ? QuarantineMode::kAlwaysDisabled
  692. : QuarantineMode::kDisabledByDefault);
  693. #else
  694. QuarantineMode::kAlwaysDisabled;
  695. #endif // defined(PA_ALLOW_PCSCAN)
  696. // We mark the sentinel slot span as free to make sure it is skipped by our
  697. // logic to find a new active slot span.
  698. memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
  699. sentinel_bucket.active_slot_spans_head = SlotSpan::get_sentinel_slot_span();
  700. // This is a "magic" value so we can test if a root pointer is valid.
  701. inverted_self = ~reinterpret_cast<uintptr_t>(this);
  702. // Set up the actual usable buckets first.
  703. constexpr internal::BucketIndexLookup lookup{};
  704. size_t bucket_index = 0;
  705. while (lookup.bucket_sizes()[bucket_index] !=
  706. internal::kInvalidBucketSize) {
  707. buckets[bucket_index].Init(lookup.bucket_sizes()[bucket_index]);
  708. bucket_index++;
  709. }
  710. PA_DCHECK(bucket_index < internal::kNumBuckets);
  711. // Remaining buckets are not usable, and not real.
  712. for (size_t index = bucket_index; index < internal::kNumBuckets; index++) {
  713. // Cannot init with size 0 since it computes 1 / size, but make sure the
  714. // bucket is invalid.
  715. buckets[index].Init(internal::kInvalidBucketSize);
  716. buckets[index].active_slot_spans_head = nullptr;
  717. PA_DCHECK(!buckets[index].is_valid());
  718. }
  719. #if !defined(PA_THREAD_CACHE_SUPPORTED)
  720. // TLS in ThreadCache is not supported on this platform.
  721. flags.with_thread_cache = false;
  722. #else
  723. ThreadCache::EnsureThreadSpecificDataInitialized();
  724. flags.with_thread_cache =
  725. (opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
  726. if (flags.with_thread_cache)
  727. ThreadCache::Init(this);
  728. #endif // !defined(PA_THREAD_CACHE_SUPPORTED)
  729. #if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  730. internal::PartitionRootEnumerator::Instance().Register(this);
  731. #endif
  732. initialized = true;
  733. }
  734. // Called without the lock, might allocate.
  735. #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  736. PartitionAllocMallocInitOnce();
  737. #endif
  738. }
  739. template <bool thread_safe>
  740. PartitionRoot<thread_safe>::~PartitionRoot() {
  741. #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  742. PA_CHECK(!flags.with_thread_cache)
  743. << "Must not destroy a partition with a thread cache";
  744. #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  745. #if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  746. if (initialized)
  747. internal::PartitionRootEnumerator::Instance().Unregister(this);
  748. #endif // defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  749. }
  750. template <bool thread_safe>
  751. void PartitionRoot<thread_safe>::EnableThreadCacheIfSupported() {
  752. #if defined(PA_THREAD_CACHE_SUPPORTED)
  753. ::partition_alloc::internal::ScopedGuard guard{lock_};
  754. PA_CHECK(!flags.with_thread_cache);
  755. // By the time we get here, there may be multiple threads created in the
  756. // process. Since `with_thread_cache` is accessed without a lock, it can
  757. // become visible to another thread before the effects of
  758. // `internal::ThreadCacheInit()` are visible. To prevent that, we fake thread
  759. // cache creation being in-progress while this is running.
  760. //
  761. // This synchronizes with the acquire load in `MaybeInitThreadCacheAndAlloc()`
  762. // to ensure that we don't create (and thus use) a ThreadCache before
  763. // ThreadCache::Init()'s effects are visible.
  764. int before =
  765. thread_caches_being_constructed_.fetch_add(1, std::memory_order_acquire);
  766. PA_CHECK(before == 0);
  767. ThreadCache::Init(this);
  768. thread_caches_being_constructed_.fetch_sub(1, std::memory_order_release);
  769. flags.with_thread_cache = true;
  770. #endif // defined(PA_THREAD_CACHE_SUPPORTED)
  771. }
  772. template <bool thread_safe>
  773. bool PartitionRoot<thread_safe>::TryReallocInPlaceForDirectMap(
  774. internal::SlotSpanMetadata<thread_safe>* slot_span,
  775. size_t requested_size) {
  776. PA_DCHECK(slot_span->bucket->is_direct_mapped());
  777. // Slot-span metadata isn't MTE-tagged.
  778. PA_DCHECK(
  779. internal::IsManagedByDirectMap(reinterpret_cast<uintptr_t>(slot_span)));
  780. size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
  781. auto* extent = DirectMapExtent::FromSlotSpan(slot_span);
  782. size_t current_reservation_size = extent->reservation_size;
  783. // Calculate the new reservation size the way PartitionDirectMap() would, but
  784. // skip the alignment, because this call isn't requesting it.
  785. size_t new_reservation_size = GetDirectMapReservationSize(raw_size);
  786. // If new reservation would be larger, there is nothing we can do to
  787. // reallocate in-place.
  788. if (new_reservation_size > current_reservation_size)
  789. return false;
  790. // Don't reallocate in-place if the new reservation size would be less than 80%
  791. // of the current one, to avoid holding on to too much unused address space.
  792. // Make this check before comparing slot sizes, as even with equal or similar
  793. // slot sizes we can save a lot if the original allocation was heavily padded
  794. // for alignment.
  795. if ((new_reservation_size >> internal::SystemPageShift()) * 5 <
  796. (current_reservation_size >> internal::SystemPageShift()) * 4)
  797. return false;
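  // Illustrative numbers for the check above: shrinking a 10 MiB reservation to
  // 7 MiB is rejected (7 * 5 < 10 * 4) and falls through to a relocating
  // realloc, while shrinking it to 8 MiB (8 * 5 == 10 * 4) may proceed in
  // place.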
  798. // Note that the new size isn't a bucketed size; this function is called
  799. // whenever we're reallocating a direct mapped allocation, so calculate it
  800. // the way PartitionDirectMap() would.
  801. size_t new_slot_size = GetDirectMapSlotSize(raw_size);
  802. if (new_slot_size < internal::kMinDirectMappedDownsize)
  803. return false;
  804. // Past this point, we decided we'll attempt to reallocate without relocating,
  805. // so we have to honor the padding for alignment in front of the original
  806. // allocation, even though this function isn't requesting any alignment.
  807. // bucket->slot_size is the currently committed size of the allocation.
  808. size_t current_slot_size = slot_span->bucket->slot_size;
  809. uintptr_t slot_start = SlotSpan::ToSlotSpanStart(slot_span);
  810. // This is the available part of the reservation up to which the new
  811. // allocation can grow.
  812. size_t available_reservation_size =
  813. current_reservation_size - extent->padding_for_alignment -
  814. PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
  815. #if BUILDFLAG(PA_DCHECK_IS_ON)
  816. uintptr_t reservation_start = slot_start & internal::kSuperPageBaseMask;
  817. PA_DCHECK(internal::IsReservationStart(reservation_start));
  818. PA_DCHECK(slot_start + available_reservation_size ==
  819. reservation_start + current_reservation_size -
  820. GetDirectMapMetadataAndGuardPagesSize() +
  821. internal::PartitionPageSize());
  822. #endif
  823. if (new_slot_size == current_slot_size) {
  824. // No need to move any memory around, but update size and cookie below.
  825. // That's because raw_size may have changed.
  826. } else if (new_slot_size < current_slot_size) {
  827. // Shrink by decommitting unneeded pages and making them inaccessible.
  828. size_t decommit_size = current_slot_size - new_slot_size;
  829. DecommitSystemPagesForData(slot_start + new_slot_size, decommit_size,
  830. PageAccessibilityDisposition::kRequireUpdate);
  831. // Since the decommitted system pages are still reserved, we don't need to
  832. // change the entries for decommitted pages in the reservation offset table.
  833. } else if (new_slot_size <= available_reservation_size) {
  834. // Grow within the actually reserved address space. Just need to make the
  835. // pages accessible again.
  836. size_t recommit_slot_size_growth = new_slot_size - current_slot_size;
  837. RecommitSystemPagesForData(slot_start + current_slot_size,
  838. recommit_slot_size_growth,
  839. PageAccessibilityDisposition::kRequireUpdate);
  840. // The recommitted system pages were already reserved, and all the entries in
  841. // the reservation offset table (for the entire reservation_size region) have
  842. // already been initialized.
  843. #if BUILDFLAG(PA_DCHECK_IS_ON)
  844. memset(reinterpret_cast<void*>(slot_start + current_slot_size),
  845. internal::kUninitializedByte, recommit_slot_size_growth);
  846. #endif
  847. } else {
  848. // We can't perform the realloc in-place.
  849. // TODO: support this too when possible.
  850. return false;
  851. }
  852. DecreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
  853. slot_span->bucket->slot_size);
  854. slot_span->SetRawSize(raw_size);
  855. slot_span->bucket->slot_size = new_slot_size;
  856. IncreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
  857. slot_span->bucket->slot_size, raw_size);
  858. #if BUILDFLAG(PA_DCHECK_IS_ON)
  859. // Write a new trailing cookie.
  860. if (flags.allow_cookie) {
  861. auto* object = static_cast<unsigned char*>(SlotStartToObject(slot_start));
  862. internal::PartitionCookieWriteValue(object +
  863. slot_span->GetUsableSize(this));
  864. }
  865. #endif
  866. return true;
  867. }
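  // Summary of the in-place direct-map realloc above: an unchanged slot size
  // only refreshes the raw size and cookie; a smaller one decommits the
  // now-unneeded tail pages; a larger one that still fits within the
  // reservation recommits pages; anything beyond the reservation returns false
  // and falls back to a relocating realloc in ReallocWithFlags().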
  868. template <bool thread_safe>
  869. bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
  870. void* object,
  871. SlotSpan* slot_span,
  872. size_t new_size) {
  873. uintptr_t slot_start = ObjectToSlotStart(object);
  874. PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
  875. // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
  876. // new size is a significant percentage smaller. We could do the same if we
  877. // determine it is a win.
  878. if (AllocationCapacityFromRequestedSize(new_size) !=
  879. AllocationCapacityFromSlotStart(slot_start))
  880. return false;
  881. // Trying to allocate |new_size| would use the same amount of underlying
  882. // memory as we're already using, so re-use the allocation after updating
  883. // statistics (and cookie, if present).
  884. if (slot_span->CanStoreRawSize()) {
  885. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
  886. internal::PartitionRefCount* old_ref_count;
  887. if (brp_enabled()) {
  888. old_ref_count = internal::PartitionRefCountPointer(slot_start);
  889. }
  890. #endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
  891. // BUILDFLAG(PA_DCHECK_IS_ON)
  892. size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
  893. slot_span->SetRawSize(new_raw_size);
  894. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) && BUILDFLAG(PA_DCHECK_IS_ON)
  895. if (brp_enabled()) {
  896. internal::PartitionRefCount* new_ref_count =
  897. internal::PartitionRefCountPointer(slot_start);
  898. PA_DCHECK(new_ref_count == old_ref_count);
  899. }
  900. #endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) &&
  901. // BUILDFLAG(PA_DCHECK_IS_ON)
  902. #if BUILDFLAG(PA_DCHECK_IS_ON)
  903. // Write a new trailing cookie only when it is possible to keep track of the
  904. // raw size (otherwise we wouldn't know where to look for it later).
  905. if (flags.allow_cookie) {
  906. internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
  907. slot_span->GetUsableSize(this));
  908. }
  909. #endif // BUILDFLAG(PA_DCHECK_IS_ON)
  910. }
  911. return true;
  912. }
  913. template <bool thread_safe>
  914. void* PartitionRoot<thread_safe>::ReallocWithFlags(unsigned int flags,
  915. void* ptr,
  916. size_t new_size,
  917. const char* type_name) {
  918. #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  919. CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
  920. void* result = realloc(ptr, new_size);
  921. PA_CHECK(result || flags & AllocFlags::kReturnNull);
  922. return result;
  923. #else
  924. bool no_hooks = flags & AllocFlags::kNoHooks;
  925. if (PA_UNLIKELY(!ptr)) {
  926. return no_hooks
  927. ? AllocWithFlagsNoHooks(flags, new_size,
  928. internal::PartitionPageSize())
  929. : AllocWithFlagsInternal(
  930. flags, new_size, internal::PartitionPageSize(), type_name);
  931. }
  932. if (PA_UNLIKELY(!new_size)) {
  933. Free(ptr);
  934. return nullptr;
  935. }
  936. if (new_size > internal::MaxDirectMapped()) {
  937. if (flags & AllocFlags::kReturnNull)
  938. return nullptr;
  939. internal::PartitionExcessiveAllocationSize(new_size);
  940. }
  941. const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  942. bool overridden = false;
  943. size_t old_usable_size;
  944. if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
  945. overridden = PartitionAllocHooks::ReallocOverrideHookIfEnabled(
  946. &old_usable_size, ptr);
  947. }
  948. if (PA_LIKELY(!overridden)) {
  949. // |ptr| may have been allocated in another root.
  950. SlotSpan* slot_span = SlotSpan::FromObject(ptr);
  951. auto* old_root = PartitionRoot::FromSlotSpan(slot_span);
  952. bool success = false;
  953. bool tried_in_place_for_direct_map = false;
  954. {
  955. ::partition_alloc::internal::ScopedGuard guard{old_root->lock_};
  956. // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
  957. PA_DCHECK(IsValidSlotSpan(slot_span));
  958. old_usable_size = slot_span->GetUsableSize(old_root);
  959. if (PA_UNLIKELY(slot_span->bucket->is_direct_mapped())) {
  960. tried_in_place_for_direct_map = true;
  961. // We may be able to perform the realloc in place by changing the
  962. // accessibility of memory pages and, if reducing the size, decommitting
  963. // them.
  964. success = old_root->TryReallocInPlaceForDirectMap(slot_span, new_size);
  965. }
  966. }
  967. if (success) {
  968. if (PA_UNLIKELY(!no_hooks && hooks_enabled)) {
  969. PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
  970. type_name);
  971. }
  972. return ptr;
  973. }
  974. if (PA_LIKELY(!tried_in_place_for_direct_map)) {
  975. if (old_root->TryReallocInPlaceForNormalBuckets(ptr, slot_span, new_size))
  976. return ptr;
  977. }
  978. }
  979. // This realloc cannot be resized in-place. Sadness.
  980. void* ret =
  981. no_hooks ? AllocWithFlagsNoHooks(flags, new_size,
  982. internal::PartitionPageSize())
  983. : AllocWithFlagsInternal(
  984. flags, new_size, internal::PartitionPageSize(), type_name);
  985. if (!ret) {
  986. if (flags & AllocFlags::kReturnNull)
  987. return nullptr;
  988. internal::PartitionExcessiveAllocationSize(new_size);
  989. }
  990. memcpy(ret, ptr, std::min(old_usable_size, new_size));
  991. Free(ptr); // Implicitly protects the old ptr on MTE systems.
  992. return ret;
  993. #endif
  994. }
  995. template <bool thread_safe>
  996. void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
  997. {
  998. ::partition_alloc::internal::ScopedGuard guard{lock_};
  999. // Avoid purging if there is a PCScan task currently scheduled. Since PCScan
  1000. // takes a snapshot of all allocated pages, decommitting pages here (even
  1001. // under the lock) is racy.
  1002. // TODO(bikineev): Consider rescheduling the purging after PCScan.
  1003. if (PCScan::IsInProgress())
  1004. return;
  1005. if (flags & PurgeFlags::kDecommitEmptySlotSpans)
  1006. DecommitEmptySlotSpans();
  1007. if (flags & PurgeFlags::kDiscardUnusedSystemPages) {
  1008. for (Bucket& bucket : buckets) {
  1009. if (bucket.slot_size == internal::kInvalidBucketSize)
  1010. continue;
  1011. if (bucket.slot_size >= internal::MaxPurgeableSlotSize())
  1012. internal::PartitionPurgeBucket(&bucket);
  1013. else
  1014. bucket.SortSlotSpanFreelists();
  1015. // Do it at the end, as the actions above change the status of slot
  1016. // spans (e.g. empty -> decommitted).
  1017. bucket.MaintainActiveList();
  1018. if (sort_active_slot_spans_)
  1019. bucket.SortActiveSlotSpans();
  1020. }
  1021. }
  1022. }
  1023. }
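  // Illustrative call (a sketch, not code from this file): a caller wanting a
  // full purge would combine both flags, e.g.
  //   root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
  //                     PurgeFlags::kDiscardUnusedSystemPages);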
  1024. template <bool thread_safe>
  1025. void PartitionRoot<thread_safe>::ShrinkEmptySlotSpansRing(size_t limit) {
  1026. int16_t index = global_empty_slot_span_ring_index;
  1027. int16_t starting_index = index;
  1028. while (empty_slot_spans_dirty_bytes > limit) {
  1029. SlotSpan* slot_span = global_empty_slot_span_ring[index];
  1030. // The ring is not always full; the entry may be nullptr.
  1031. if (slot_span) {
  1032. slot_span->DecommitIfPossible(this);
  1033. global_empty_slot_span_ring[index] = nullptr;
  1034. }
  1035. index += 1;
  1036. // Walk through all possible slots, even though the last ones are unused when
  1037. // global_empty_slot_span_ring_size is smaller than kMaxFreeableSpans. It's
  1038. // simpler, and does not cost anything, since all the extra pointers are going
  1039. // to be nullptr.
  1040. if (index == internal::kMaxFreeableSpans)
  1041. index = 0;
  1042. // We went around the whole ring; since this is locked,
  1043. // empty_slot_spans_dirty_bytes should be exactly 0.
  1044. if (index == starting_index) {
  1045. PA_DCHECK(empty_slot_spans_dirty_bytes == 0);
  1046. // Metrics issue, don't crash, return.
  1047. break;
  1048. }
  1049. }
  1050. }
  1051. template <bool thread_safe>
  1052. void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
  1053. bool is_light_dump,
  1054. PartitionStatsDumper* dumper) {
  1055. static const size_t kMaxReportableDirectMaps = 4096;
  1056. // Allocate on the heap rather than on the stack to avoid stack overflow
  1057. // skirmishes (on Windows, in particular). Allocate before locking below,
  1058. // otherwise when PartitionAlloc is malloc() we get reentrancy issues. This
  1059. // inflates reported values a bit for detailed dumps though, by 16 KiB.
  1060. std::unique_ptr<uint32_t[]> direct_map_lengths;
  1061. if (!is_light_dump) {
  1062. direct_map_lengths =
  1063. std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
  1064. }
  1065. PartitionBucketMemoryStats bucket_stats[internal::kNumBuckets];
  1066. size_t num_direct_mapped_allocations = 0;
  1067. PartitionMemoryStats stats = {0};
  1068. stats.syscall_count = syscall_count.load(std::memory_order_relaxed);
  1069. stats.syscall_total_time_ns =
  1070. syscall_total_time_ns.load(std::memory_order_relaxed);
  1071. // Collect data with the lock held, cannot allocate or call third-party code
  1072. // below.
  1073. {
  1074. ::partition_alloc::internal::ScopedGuard guard{lock_};
  1075. PA_DCHECK(total_size_of_allocated_bytes <= max_size_of_allocated_bytes);
  1076. stats.total_mmapped_bytes =
  1077. total_size_of_super_pages.load(std::memory_order_relaxed) +
  1078. total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
  1079. stats.total_committed_bytes =
  1080. total_size_of_committed_pages.load(std::memory_order_relaxed);
  1081. stats.max_committed_bytes =
  1082. max_size_of_committed_pages.load(std::memory_order_relaxed);
  1083. stats.total_allocated_bytes = total_size_of_allocated_bytes;
  1084. stats.max_allocated_bytes = max_size_of_allocated_bytes;
  1085. #if BUILDFLAG(USE_BACKUP_REF_PTR)
  1086. stats.total_brp_quarantined_bytes =
  1087. total_size_of_brp_quarantined_bytes.load(std::memory_order_relaxed);
  1088. stats.total_brp_quarantined_count =
  1089. total_count_of_brp_quarantined_slots.load(std::memory_order_relaxed);
  1090. stats.cumulative_brp_quarantined_bytes =
  1091. cumulative_size_of_brp_quarantined_bytes.load(
  1092. std::memory_order_relaxed);
  1093. stats.cumulative_brp_quarantined_count =
  1094. cumulative_count_of_brp_quarantined_slots.load(
  1095. std::memory_order_relaxed);
  1096. #endif
  1097. size_t direct_mapped_allocations_total_size = 0;
  1098. for (size_t i = 0; i < internal::kNumBuckets; ++i) {
  1099. const Bucket* bucket = &bucket_at(i);
  1100. // Don't report the pseudo buckets that the generic allocator sets up in
  1101. // order to preserve a fast size->bucket map (see
  1102. // PartitionRoot::Init() for details).
  1103. if (!bucket->is_valid())
  1104. bucket_stats[i].is_valid = false;
  1105. else
  1106. internal::PartitionDumpBucketStats(&bucket_stats[i], bucket);
  1107. if (bucket_stats[i].is_valid) {
  1108. stats.total_resident_bytes += bucket_stats[i].resident_bytes;
  1109. stats.total_active_bytes += bucket_stats[i].active_bytes;
  1110. stats.total_active_count += bucket_stats[i].active_count;
  1111. stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
  1112. stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
  1113. }
  1114. }
  1115. for (DirectMapExtent* extent = direct_map_list;
  1116. extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
  1117. extent = extent->next_extent, ++num_direct_mapped_allocations) {
  1118. PA_DCHECK(!extent->next_extent ||
  1119. extent->next_extent->prev_extent == extent);
  1120. size_t slot_size = extent->bucket->slot_size;
  1121. direct_mapped_allocations_total_size += slot_size;
  1122. if (is_light_dump)
  1123. continue;
  1124. direct_map_lengths[num_direct_mapped_allocations] = slot_size;
  1125. }
  1126. stats.total_resident_bytes += direct_mapped_allocations_total_size;
  1127. stats.total_active_bytes += direct_mapped_allocations_total_size;
  1128. stats.total_active_count += num_direct_mapped_allocations;
  1129. stats.has_thread_cache = flags.with_thread_cache;
  1130. if (stats.has_thread_cache) {
  1131. ThreadCacheRegistry::Instance().DumpStats(
  1132. true, &stats.current_thread_cache_stats);
  1133. ThreadCacheRegistry::Instance().DumpStats(false,
  1134. &stats.all_thread_caches_stats);
  1135. }
  1136. }
  1137. // Do not hold the lock when calling |dumper|, as it may allocate.
  1138. if (!is_light_dump) {
  1139. for (auto& stat : bucket_stats) {
  1140. if (stat.is_valid)
  1141. dumper->PartitionsDumpBucketStats(partition_name, &stat);
  1142. }
  1143. for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
  1144. uint32_t size = direct_map_lengths[i];
  1145. PartitionBucketMemoryStats mapped_stats = {};
  1146. mapped_stats.is_valid = true;
  1147. mapped_stats.is_direct_map = true;
  1148. mapped_stats.num_full_slot_spans = 1;
  1149. mapped_stats.allocated_slot_span_size = size;
  1150. mapped_stats.bucket_slot_size = size;
  1151. mapped_stats.active_bytes = size;
  1152. mapped_stats.active_count = 1;
  1153. mapped_stats.resident_bytes = size;
  1154. dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
  1155. }
  1156. }
  1157. dumper->PartitionDumpTotals(partition_name, &stats);
  1158. }
  1159. // static
  1160. template <bool thread_safe>
  1161. void PartitionRoot<thread_safe>::DeleteForTesting(
  1162. PartitionRoot* partition_root) {
  1163. if (partition_root->flags.with_thread_cache) {
  1164. ThreadCache::SwapForTesting(nullptr);
  1165. partition_root->flags.with_thread_cache = false;
  1166. }
  1167. partition_root->DestructForTesting(); // IN-TEST
  1168. delete partition_root;
  1169. }
  1170. template <bool thread_safe>
  1171. void PartitionRoot<thread_safe>::ResetBookkeepingForTesting() {
  1172. ::partition_alloc::internal::ScopedGuard guard{lock_};
  1173. max_size_of_allocated_bytes = total_size_of_allocated_bytes;
  1174. max_size_of_committed_pages.store(total_size_of_committed_pages);
  1175. }
  1176. template <>
  1177. uintptr_t PartitionRoot<internal::ThreadSafe>::MaybeInitThreadCacheAndAlloc(
  1178. uint16_t bucket_index,
  1179. size_t* slot_size) {
  1180. auto* tcache = ThreadCache::Get();
  1181. // See the comment in `EnableThreadCacheIfSupported()` for why this is an
  1182. // acquire load.
  1183. if (ThreadCache::IsTombstone(tcache) ||
  1184. thread_caches_being_constructed_.load(std::memory_order_acquire)) {
  1185. // Two cases:
  1186. // 1. Thread is being terminated, don't try to use the thread cache, and
  1187. // don't try to resurrect it.
  1188. // 2. Someone, somewhere is currently allocating a thread cache. This may
  1189. // be us, in which case we are re-entering and should not create a thread
  1190. // cache. If it is not us, then this merely delays thread cache
  1191. // construction a bit, which is not an issue.
  1192. return 0;
  1193. }
  1194. // There is no per-thread ThreadCache allocated here yet, and this partition
  1195. // has a thread cache; allocate a new one.
  1196. //
  1197. // The thread cache allocation itself will not reenter here, as it sidesteps
  1198. // the thread cache by using placement new and |RawAlloc()|. However,
  1199. // internally to libc, allocations may happen to create a new TLS
  1200. // variable. This would end up here again, which is not what we want (and
  1201. // likely is not supported by libc).
  1202. //
  1203. // To avoid this sort of reentrancy, increase the count of thread caches that
  1204. // are currently allocating a thread cache.
  1205. //
  1206. // Note that there is no deadlock or data inconsistency concern, since we do
  1207. // not hold the lock, and as such haven't touched any internal data.
  1208. int before =
  1209. thread_caches_being_constructed_.fetch_add(1, std::memory_order_relaxed);
  1210. PA_CHECK(before < std::numeric_limits<int>::max());
  1211. tcache = ThreadCache::Create(this);
  1212. thread_caches_being_constructed_.fetch_sub(1, std::memory_order_relaxed);
  1213. // The cache is created empty, but at least this will trigger a batch fill,
  1214. // which may be useful, and we are already on a slow path anyway (the first
  1215. // small allocation of this thread).
  1216. return tcache->GetFromCache(bucket_index, slot_size);
  1217. }
  1218. template <>
  1219. void PartitionRoot<internal::ThreadSafe>::EnableSortActiveSlotSpans() {
  1220. sort_active_slot_spans_ = true;
  1221. }
  1222. template struct PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  1223. PartitionRoot<internal::ThreadSafe>;
  1224. static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
  1225. offsetof(PartitionRoot<internal::ThreadSafe>, buckets) +
  1226. internal::kNumBuckets *
  1227. sizeof(PartitionRoot<internal::ThreadSafe>::Bucket),
  1228. "sentinel_bucket must be just after the regular buckets.");
  1229. static_assert(
  1230. offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64,
  1231. "The lock should not be on the same cacheline as the read-mostly flags");
  1232. } // namespace partition_alloc