partition_bucket.cc

  1. // Copyright (c) 2018 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #include "base/allocator/partition_allocator/partition_bucket.h"
  5. #include <algorithm>
  6. #include <cstdint>
  7. #include <tuple>
  8. #include "base/allocator/partition_allocator/address_pool_manager.h"
  9. #include "base/allocator/partition_allocator/oom.h"
  10. #include "base/allocator/partition_allocator/page_allocator.h"
  11. #include "base/allocator/partition_allocator/page_allocator_constants.h"
  12. #include "base/allocator/partition_allocator/partition_address_space.h"
  13. #include "base/allocator/partition_allocator/partition_alloc.h"
  14. #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
  15. #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
  16. #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
  17. #include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
  18. #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
  19. #include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
  20. #include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
  21. #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
  22. #include "base/allocator/partition_allocator/partition_alloc_check.h"
  23. #include "base/allocator/partition_allocator/partition_alloc_config.h"
  24. #include "base/allocator/partition_allocator/partition_alloc_constants.h"
  25. #include "base/allocator/partition_allocator/partition_alloc_forward.h"
  26. #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
  27. #include "base/allocator/partition_allocator/partition_oom.h"
  28. #include "base/allocator/partition_allocator/partition_page.h"
  29. #include "base/allocator/partition_allocator/partition_tag.h"
  30. #include "base/allocator/partition_allocator/partition_tag_bitmap.h"
  31. #include "base/allocator/partition_allocator/reservation_offset_table.h"
  32. #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
  33. #include "base/allocator/partition_allocator/tagging.h"
  34. #include "build/build_config.h"
  35. namespace partition_alloc::internal {
  36. namespace {
  37. template <bool thread_safe>
  38. [[noreturn]] PA_NOINLINE void PartitionOutOfMemoryMappingFailure(
  39. PartitionRoot<thread_safe>* root,
  40. size_t size) PA_LOCKS_EXCLUDED(root->lock_) {
  41. PA_NO_CODE_FOLDING();
  42. root->OutOfMemory(size);
  43. PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
  44. }
  45. template <bool thread_safe>
  46. [[noreturn]] PA_NOINLINE void PartitionOutOfMemoryCommitFailure(
  47. PartitionRoot<thread_safe>* root,
  48. size_t size) PA_LOCKS_EXCLUDED(root->lock_) {
  49. PA_NO_CODE_FOLDING();
  50. root->OutOfMemory(size);
  51. PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
  52. }
  53. #if !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
  54. // |start| has to be aligned to kSuperPageSize, but |end| doesn't. This means
  55. // that a partial super page is allowed at the end. Since the block list uses
  56. // kSuperPageSize granularity, a partial super page is considered blocked if
  57. // there is a raw_ptr<T> pointing anywhere in that super page, even if it doesn't
  58. // point to that partially allocated region.
  59. bool AreAllowedSuperPagesForBRPPool(uintptr_t start, uintptr_t end) {
  60. PA_DCHECK(!(start % kSuperPageSize));
  61. for (uintptr_t super_page = start; super_page < end;
  62. super_page += kSuperPageSize) {
  63. // If any blocked super page is found inside the given memory region,
  64. // the memory region is blocked.
  65. if (!AddressPoolManagerBitmap::IsAllowedSuperPageForBRPPool(super_page)) {
  66. AddressPoolManagerBitmap::IncrementBlocklistHitCount();
  67. return false;
  68. }
  69. }
  70. return true;
  71. }
  72. #endif // !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
  73. // Reserves |requested_size| worth of super pages from the specified pool of the
  74. // GigaCage. If BRP pool is requested this function will honor BRP block list.
  75. //
  76. // The returned address will be aligned to kSuperPageSize, and so
  77. // |requested_address| should be. |requested_size| doesn't have to be, however.
  78. //
  79. // |requested_address| is merely a hint, which will be attempted, but easily
  80. // given up on if it doesn't work the first time.
  81. //
  82. // The function doesn't need to hold root->lock_ or any other locks, because:
  83. // - It (1) reserves memory, (2) then consults AreAllowedSuperPagesForBRPPool
  84. // for that memory, and (3) returns the memory if
  85. // allowed, or unreserves and decommits if not allowed. So no other
  86. // overlapping region can be allocated while executing
  87. // AreAllowedSuperPagesForBRPPool.
  88. // - IsAllowedSuperPageForBRPPool (used by AreAllowedSuperPagesForBRPPool) is
  89. // designed to not need locking.
  90. uintptr_t ReserveMemoryFromGigaCage(pool_handle pool,
  91. uintptr_t requested_address,
  92. size_t requested_size) {
  93. PA_DCHECK(!(requested_address % kSuperPageSize));
  94. uintptr_t reserved_address = AddressPoolManager::GetInstance().Reserve(
  95. pool, requested_address, requested_size);
  96. // In 32-bit mode, when allocating from BRP pool, verify that the requested
  97. // allocation honors the block list. Find a better address otherwise.
  98. #if !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
  99. if (pool == GetBRPPool()) {
  100. constexpr int kMaxRandomAddressTries = 10;
  101. for (int i = 0; i < kMaxRandomAddressTries; ++i) {
  102. if (!reserved_address ||
  103. AreAllowedSuperPagesForBRPPool(reserved_address,
  104. reserved_address + requested_size))
  105. break;
  106. AddressPoolManager::GetInstance().UnreserveAndDecommit(
  107. pool, reserved_address, requested_size);
  108. // No longer try to honor |requested_address|, because it didn't work for
  109. // us last time.
  110. reserved_address =
  111. AddressPoolManager::GetInstance().Reserve(pool, 0, requested_size);
  112. }
  113. // If the allocation attempt succeeds, we will break out of the following
  114. // loop immediately.
  115. //
  116. // Last resort: sequentially scan the whole 32-bit address space. The number
  117. // of blocked super-pages should be very small, so we expect to practically
  118. // never need to run the following code. Note that it may fail to find an
  119. // available super page, e.g., when it becomes available after the scan
  120. // passes through it, but we accept the risk.
  121. for (uintptr_t address_to_try = kSuperPageSize; address_to_try != 0;
  122. address_to_try += kSuperPageSize) {
  123. if (!reserved_address ||
  124. AreAllowedSuperPagesForBRPPool(reserved_address,
  125. reserved_address + requested_size))
  126. break;
  127. AddressPoolManager::GetInstance().UnreserveAndDecommit(
  128. pool, reserved_address, requested_size);
  129. // Reserve() can return a different pointer than attempted.
  130. reserved_address = AddressPoolManager::GetInstance().Reserve(
  131. pool, address_to_try, requested_size);
  132. }
  133. // If the loop ends naturally, the last allocated region hasn't been
  134. // verified. Do it now.
  135. if (reserved_address &&
  136. !AreAllowedSuperPagesForBRPPool(reserved_address,
  137. reserved_address + requested_size)) {
  138. AddressPoolManager::GetInstance().UnreserveAndDecommit(
  139. pool, reserved_address, requested_size);
  140. reserved_address = 0;
  141. }
  142. }
  143. #endif // !defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(USE_BACKUP_REF_PTR)
  144. #if !defined(PA_HAS_64_BITS_POINTERS)
  145. // Only mark the region as belonging to the pool after it has passed the
  146. // blocklist check in order to avoid a potential race with destructing a
  147. // raw_ptr<T> object that points to non-PA memory in another thread.
  148. // If `MarkUsed` was called earlier, the other thread could incorrectly
  149. // determine that the allocation had come from PartitionAlloc.
  150. if (reserved_address)
  151. AddressPoolManager::GetInstance().MarkUsed(pool, reserved_address,
  152. requested_size);
  153. #endif
  154. PA_DCHECK(!(reserved_address % kSuperPageSize));
  155. return reserved_address;
  156. }
  157. template <bool thread_safe>
  158. SlotSpanMetadata<thread_safe>* PartitionDirectMap(
  159. PartitionRoot<thread_safe>* root,
  160. unsigned int flags,
  161. size_t raw_size,
  162. size_t slot_span_alignment) {
  163. PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
  164. base::bits::IsPowerOfTwo(slot_span_alignment));
  165. // No static EXCLUSIVE_LOCKS_REQUIRED(), as the checker doesn't understand
  166. // scoped unlocking.
  167. root->lock_.AssertAcquired();
  168. const bool return_null = flags & AllocFlags::kReturnNull;
  169. if (PA_UNLIKELY(raw_size > MaxDirectMapped())) {
  170. if (return_null)
  171. return nullptr;
  172. // The lock is here to protect PA from:
  173. // 1. Concurrent calls
  174. // 2. Reentrant calls
  175. //
  176. // This is fine here however, as:
  177. // 1. Concurrency: |PartitionRoot::OutOfMemory()| never returns, so the lock
  178. // will not be re-acquired, which would lead to acting on inconsistent
  179. // data that could have been modified in-between releasing and acquiring
  180. // it.
  181. // 2. Reentrancy: This is why we release the lock. On some platforms,
  182. // terminating the process may free() memory, or even possibly try to
  183. // allocate some. Calling free() is fine, but will deadlock since
  184. // |PartitionRoot::lock_| is not recursive.
  185. //
  186. // Supporting reentrant calls properly is hard, and not a requirement for
  187. // PA. However up to that point, we've only *read* data, not *written* to
  188. // any state. Reentrant calls are then fine, especially as we don't continue
  189. // on this path. The only downside is possibly endless recursion if the OOM
  190. // handler allocates and fails to use UncheckedMalloc() or equivalent, but
  191. // that's violating the contract of base::TerminateBecauseOutOfMemory().
  192. ScopedUnlockGuard unlock{root->lock_};
  193. PartitionExcessiveAllocationSize(raw_size);
  194. }
  195. PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
  196. PartitionPage<thread_safe>* page = nullptr;
  197. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  198. const PartitionTag tag = root->GetNewPartitionTag();
  199. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  200. {
  201. // Getting memory for direct-mapped allocations doesn't interact with the
  202. // rest of the allocator, but takes a long time, as it involves several
  203. // system calls. With GigaCage, no mmap() (or equivalent) call is made on 64
  204. // bit systems, but page permissions are changed with mprotect(), which is a
  205. // syscall.
  206. //
  207. // These calls are almost always slow (at least a couple us per syscall on a
  208. // desktop Linux machine), and they also have a very long latency tail,
  209. // possibly from getting descheduled. As a consequence, we should not hold
  210. // the lock when performing a syscall. This is not the only problematic
  211. // location, but since this one doesn't interact with the rest of the
  212. // allocator, we can safely drop and then re-acquire the lock.
  213. //
  214. // Note that this only affects allocations that are not served out of the
  215. // thread cache, but as a simple example the buffer partition in blink is
  216. // frequently used for large allocations (e.g. ArrayBuffer), and frequent,
  217. // small ones (e.g. WTF::String), and does not have a thread cache.
  218. ScopedUnlockGuard scoped_unlock{root->lock_};
  219. const size_t slot_size =
  220. PartitionRoot<thread_safe>::GetDirectMapSlotSize(raw_size);
  221. // The super page starts with a partition page worth of metadata and guard
  222. // pages, hence alignment requests ==PartitionPageSize() will be
  223. // automatically satisfied. Padding is needed for higher-order alignment
  224. // requests. Note, |slot_span_alignment| is at least 1 partition page.
  225. const size_t padding_for_alignment =
  226. slot_span_alignment - PartitionPageSize();
  227. const size_t reservation_size =
  228. PartitionRoot<thread_safe>::GetDirectMapReservationSize(
  229. raw_size + padding_for_alignment);
  230. #if BUILDFLAG(PA_DCHECK_IS_ON)
  231. const size_t available_reservation_size =
  232. reservation_size - padding_for_alignment -
  233. PartitionRoot<thread_safe>::GetDirectMapMetadataAndGuardPagesSize();
  234. PA_DCHECK(slot_size <= available_reservation_size);
  235. #endif
  236. // Allocate from GigaCage. Route to the appropriate GigaCage pool based on
  237. // BackupRefPtr support.
  238. pool_handle pool = root->ChoosePool();
  239. uintptr_t reservation_start;
  240. {
  241. // Reserving memory from the GigaCage is actually not a syscall on 64 bit
  242. // platforms.
  243. #if !defined(PA_HAS_64_BITS_POINTERS)
  244. ScopedSyscallTimer timer{root};
  245. #endif
  246. reservation_start = ReserveMemoryFromGigaCage(pool, 0, reservation_size);
  247. }
  248. if (PA_UNLIKELY(!reservation_start)) {
  249. if (return_null)
  250. return nullptr;
  251. PartitionOutOfMemoryMappingFailure(root, reservation_size);
  252. }
  253. root->total_size_of_direct_mapped_pages.fetch_add(
  254. reservation_size, std::memory_order_relaxed);
  255. // Shift by 1 partition page (metadata + guard pages) and alignment padding.
  256. const uintptr_t slot_start =
  257. reservation_start + PartitionPageSize() + padding_for_alignment;
  258. {
  259. ScopedSyscallTimer timer{root};
  260. RecommitSystemPages(
  261. reservation_start + SystemPageSize(),
  262. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  263. // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is
  264. // used, allocate 2 SystemPages, one for SuperPage metadata and the
  265. // other for RefCount "bitmap" (only one of its elements will be
  266. // used).
  267. (pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
  268. #else
  269. SystemPageSize(),
  270. #endif
  271. PageAccessibilityConfiguration::kReadWrite,
  272. PageAccessibilityDisposition::kRequireUpdate);
  273. }
  274. // No need to hold root->lock_. Now that memory is reserved, no other
  275. // overlapping region can be allocated (because of how GigaCage works),
  276. // so no other thread can update the same offset table entries at the
  277. // same time. Furthermore, nobody will be reading these offsets until this
  278. // function returns.
  279. uintptr_t address_start = reservation_start;
  280. uintptr_t address_end = address_start + reservation_size;
  281. auto* offset_ptr = ReservationOffsetPointer(address_start);
  282. uint16_t offset = 0;
  283. while (address_start < address_end) {
  284. PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address_start));
  285. PA_DCHECK(offset < kOffsetTagNormalBuckets);
  286. *offset_ptr++ = offset++;
  287. address_start += kSuperPageSize;
  288. }
  289. auto* super_page_extent =
  290. PartitionSuperPageToExtent<thread_safe>(reservation_start);
  291. super_page_extent->root = root;
  292. // The new structures are all located inside a fresh system page so they
  293. // will all be zeroed out. These DCHECKs are for documentation and to assert
  294. // our expectations of the kernel.
  295. PA_DCHECK(!super_page_extent->number_of_consecutive_super_pages);
  296. PA_DCHECK(!super_page_extent->next);
  297. PartitionPage<thread_safe>* first_page =
  298. reinterpret_cast<PartitionPage<thread_safe>*>(super_page_extent) + 1;
  299. page = PartitionPage<thread_safe>::FromAddr(slot_start);
  300. // |first_page| and |page| may be equal, if there is no alignment padding.
  301. if (page != first_page) {
  302. PA_DCHECK(page > first_page);
  303. PA_DCHECK(page - first_page <=
  304. PartitionPage<thread_safe>::kMaxSlotSpanMetadataOffset);
  305. PA_CHECK(!first_page->is_valid);
  306. first_page->has_valid_span_after_this = true;
  307. first_page->slot_span_metadata_offset = page - first_page;
  308. }
  309. auto* metadata =
  310. reinterpret_cast<PartitionDirectMapMetadata<thread_safe>*>(page);
  311. // Since direct map metadata is larger than PartitionPage, make sure the
  312. // first and the last bytes are on the same system page, i.e. within the
  313. // super page metadata region.
  314. PA_DCHECK(base::bits::AlignDown(reinterpret_cast<uintptr_t>(metadata),
  315. SystemPageSize()) ==
  316. base::bits::AlignDown(
  317. reinterpret_cast<uintptr_t>(metadata) +
  318. sizeof(PartitionDirectMapMetadata<thread_safe>) - 1,
  319. SystemPageSize()));
  320. PA_DCHECK(page == &metadata->page);
  321. page->is_valid = true;
  322. PA_DCHECK(!page->has_valid_span_after_this);
  323. PA_DCHECK(!page->slot_span_metadata_offset);
  324. PA_DCHECK(!page->slot_span_metadata.next_slot_span);
  325. PA_DCHECK(!page->slot_span_metadata.marked_full);
  326. PA_DCHECK(!page->slot_span_metadata.num_allocated_slots);
  327. PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
  328. PA_DCHECK(!page->slot_span_metadata.in_empty_cache());
  329. PA_DCHECK(!metadata->subsequent_page.subsequent_page_metadata.raw_size);
  330. // Raw size is set later, by the caller.
  331. metadata->subsequent_page.slot_span_metadata_offset = 1;
  332. PA_DCHECK(!metadata->bucket.active_slot_spans_head);
  333. PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
  334. PA_DCHECK(!metadata->bucket.decommitted_slot_spans_head);
  335. PA_DCHECK(!metadata->bucket.num_system_pages_per_slot_span);
  336. PA_DCHECK(!metadata->bucket.num_full_slot_spans);
  337. metadata->bucket.slot_size = slot_size;
  338. new (&page->slot_span_metadata)
  339. SlotSpanMetadata<thread_safe>(&metadata->bucket);
  340. // It is typically possible to map a large range of inaccessible pages, and
  341. // this is leveraged in multiple places, including the GigaCage. However,
  342. // this doesn't mean that we can commit all this memory. For the vast
  343. // majority of allocations, this just means that we crash in a slightly
  344. // different place, but for callers ready to handle failures, we have to
  345. // return nullptr. See crbug.com/1187404.
  346. //
  347. // Note that we didn't check above, because if we cannot even commit a
  348. // single page, then this is likely hopeless anyway, and we will crash very
  349. // soon.
  350. const bool ok = root->TryRecommitSystemPagesForData(
  351. slot_start, slot_size, PageAccessibilityDisposition::kRequireUpdate);
  352. if (!ok) {
  353. if (!return_null) {
  354. PartitionOutOfMemoryCommitFailure(root, slot_size);
  355. }
  356. {
  357. ScopedSyscallTimer timer{root};
  358. #if !defined(PA_HAS_64_BITS_POINTERS)
  359. AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
  360. reservation_size);
  361. #endif
  362. AddressPoolManager::GetInstance().UnreserveAndDecommit(
  363. pool, reservation_start, reservation_size);
  364. }
  365. root->total_size_of_direct_mapped_pages.fetch_sub(
  366. reservation_size, std::memory_order_relaxed);
  367. return nullptr;
  368. }
  369. auto* next_entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
  370. page->slot_span_metadata.SetFreelistHead(next_entry);
  371. map_extent = &metadata->direct_map_extent;
  372. map_extent->reservation_size = reservation_size;
  373. map_extent->padding_for_alignment = padding_for_alignment;
  374. map_extent->bucket = &metadata->bucket;
  375. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  376. DirectMapPartitionTagSetValue(slot_start, tag);
  377. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  378. }
  379. root->lock_.AssertAcquired();
  380. // Maintain the doubly-linked list of all direct mappings.
  381. map_extent->next_extent = root->direct_map_list;
  382. if (map_extent->next_extent)
  383. map_extent->next_extent->prev_extent = map_extent;
  384. map_extent->prev_extent = nullptr;
  385. root->direct_map_list = map_extent;
  386. return &page->slot_span_metadata;
  387. }
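// A sketch of the direct-map reservation built above (derived from the
// offsets computed in PartitionDirectMap(); page sizes are platform
// dependent, 4 KiB system pages / 16 KiB partition pages being typical):
//
//   reservation_start
//     +- one partition page: guard system page(s) + committed metadata page(s)
//     +- padding_for_alignment (non-zero only for alignment > PartitionPageSize())
//     +- slot_start: the single direct-mapped slot, committed on success
//     +- remainder of the reservation, left uncommitted
//
// i.e. slot_start == reservation_start + PartitionPageSize() +
// padding_for_alignment, exactly as computed in the function body.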
  388. uint8_t ComputeSystemPagesPerSlotSpanPreferSmall(size_t slot_size) {
  389. if (slot_size > MaxRegularSlotSpanSize()) {
  390. // This is technically not needed, as for now all the larger slot sizes are
  391. // multiples of the system page size.
  392. return base::bits::AlignUp(slot_size, SystemPageSize()) / SystemPageSize();
  393. }
  394. // Smaller slot spans waste less address space, and can also lower
  395. // fragmentation:
  396. // - Address space: This comes from fuller SuperPages (since the tail end of a
  397. // SuperPage is more likely to be used when the slot span is smaller). Also,
  398. // if a slot span is partially used, a smaller slot span will use less
  399. // address space.
  400. // - In-slot fragmentation: Slot span management code will prioritize
  401. // almost-full slot spans, as well as trying to keep empty slot spans
  402. // empty. The more granular this logic can work, the better.
  403. //
  404. // Since metadata space overhead is constant per-PartitionPage, keeping
  405. // smaller slot spans makes sense.
  406. //
  407. // Underlying memory allocation is done per-PartitionPage, but memory commit
  408. // is done per system page. This means that we prefer to fill the entirety of
  409. // a PartitionPage with a slot span, but we can tolerate some system pages
  410. // being empty at the end, as these will not cost committed or dirty memory.
  411. //
  412. // The choice below is, for multi-slot slot spans:
  413. // - If a full PartitionPage slot span is possible with less than 2% of a
  414. // *single* system page wasted, use it. The smallest possible size wins.
  415. // - Otherwise, select the size with the smallest virtual address space
  416. // loss. Allow a SlotSpan to leave some slack in its PartitionPage, up to
  417. // 1/4 of the total.
  418. for (size_t partition_page_count = 1;
  419. partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
  420. partition_page_count++) {
  421. size_t candidate_size = partition_page_count * PartitionPageSize();
  422. size_t waste = candidate_size % slot_size;
  423. if (waste <= .02 * SystemPageSize())
  424. return partition_page_count * NumSystemPagesPerPartitionPage();
  425. }
  426. size_t best_count = 0;
  427. size_t best_waste = std::numeric_limits<size_t>::max();
  428. for (size_t partition_page_count = 1;
  429. partition_page_count <= kMaxPartitionPagesPerRegularSlotSpan;
  430. partition_page_count++) {
  431. // Prefer no slack.
  432. for (size_t slack = 0; slack < partition_page_count; slack++) {
  433. size_t system_page_count =
  434. partition_page_count * NumSystemPagesPerPartitionPage() - slack;
  435. size_t candidate_size = system_page_count * SystemPageSize();
  436. size_t waste = candidate_size % slot_size;
  437. if (waste < best_waste) {
  438. best_waste = waste;
  439. best_count = system_page_count;
  440. }
  441. }
  442. }
  443. return best_count;
  444. }
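// A worked example of the "2% of a single system page" fast path above,
// assuming the common constants of 4 KiB system pages, 16 KiB partition pages
// (4 system pages each) and kMaxPartitionPagesPerRegularSlotSpan == 4 (the
// real values are platform-dependent):
//
//   slot_size = 96
//   partition_page_count = 1  ->  candidate_size = 16384
//   waste = 16384 % 96 = 64, and 64 <= 0.02 * 4096 = 81.92
//   =>  returns 1 * 4 = 4 system pages per slot span
//
// When no candidate passes the 2% test, the second loop instead returns the
// system-page count (allowing up to 1/4 of the pages as slack) with the
// smallest absolute waste.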
  445. uint8_t ComputeSystemPagesPerSlotSpanInternal(size_t slot_size) {
  446. // This works out reasonably for the current bucket sizes of the generic
  447. // allocator, and the current values of partition page size and constants.
  448. // Specifically, we have enough room to always pack the slots perfectly into
  449. // some number of system pages. The only waste is the waste associated with
  450. // unfaulted pages (i.e. wasted address space).
  451. // TODO: we end up using a lot of system pages for very small sizes. For
  452. // example, we'll use 12 system pages for slot size 24. The slot size is so
  453. // small that the waste would be tiny with just 4, or 1, system pages. Later,
  454. // we can investigate whether there are anti-fragmentation benefits to using
  455. // fewer system pages.
  456. double best_waste_ratio = 1.0f;
  457. uint16_t best_pages = 0;
  458. if (slot_size > MaxRegularSlotSpanSize()) {
  459. // TODO(ajwong): Why is there a DCHECK here for this?
  460. // http://crbug.com/776537
  461. PA_DCHECK(!(slot_size % SystemPageSize()));
  462. best_pages = static_cast<uint16_t>(slot_size >> SystemPageShift());
  463. PA_CHECK(best_pages <= std::numeric_limits<uint8_t>::max());
  464. return static_cast<uint8_t>(best_pages);
  465. }
  466. PA_DCHECK(slot_size <= MaxRegularSlotSpanSize());
  467. for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
  468. i <= MaxSystemPagesPerRegularSlotSpan(); ++i) {
  469. size_t page_size = i << SystemPageShift();
  470. size_t num_slots = page_size / slot_size;
  471. size_t waste = page_size - (num_slots * slot_size);
  472. // Leaving a page unfaulted is not free; the page will occupy an empty page
  473. // table entry. Make a simple attempt to account for that.
  474. //
  475. // TODO(ajwong): This looks wrong. PTEs are allocated for all pages
  476. // regardless of whether or not they are wasted. Should it just
  477. // be waste += i * sizeof(void*)?
  478. // http://crbug.com/776537
  479. size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
  480. size_t num_unfaulted_pages =
  481. num_remainder_pages
  482. ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
  483. : 0;
  484. waste += sizeof(void*) * num_unfaulted_pages;
  485. double waste_ratio =
  486. static_cast<double>(waste) / static_cast<double>(page_size);
  487. if (waste_ratio < best_waste_ratio) {
  488. best_waste_ratio = waste_ratio;
  489. best_pages = i;
  490. }
  491. }
  492. PA_DCHECK(best_pages > 0);
  493. PA_CHECK(best_pages <= MaxSystemPagesPerRegularSlotSpan());
  494. return static_cast<uint8_t>(best_pages);
  495. }
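// A worked example of the waste-ratio search above, again assuming 4 KiB
// system pages and 4 system pages per partition page (so i starts at 3),
// for the slot_size == 24 case mentioned in the TODO:
//
//   i = 3:  page_size = 12288, 512 slots, slot waste 0, but 1 unfaulted page
//           adds sizeof(void*) = 8  ->  waste_ratio ~= 8 / 12288
//   i = 12: page_size = 49152, 2048 slots, slot waste 0, no remainder pages
//           ->  waste_ratio == 0, which wins
//
// Hence the 12 system pages per slot span noted in the TODO above.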
  496. } // namespace
  497. uint8_t ComputeSystemPagesPerSlotSpan(size_t slot_size,
  498. bool prefer_smaller_slot_spans) {
  499. if (prefer_smaller_slot_spans)
  500. return ComputeSystemPagesPerSlotSpanPreferSmall(slot_size);
  501. else
  502. return ComputeSystemPagesPerSlotSpanInternal(slot_size);
  503. }
  504. template <bool thread_safe>
  505. void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
  506. slot_size = new_slot_size;
  507. slot_size_reciprocal = kReciprocalMask / new_slot_size + 1;
  508. active_slot_spans_head =
  509. SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  510. empty_slot_spans_head = nullptr;
  511. decommitted_slot_spans_head = nullptr;
  512. num_full_slot_spans = 0;
  513. bool prefer_smaller_slot_spans =
  514. #if defined(PA_PREFER_SMALLER_SLOT_SPANS)
  515. true
  516. #else
  517. false
  518. #endif
  519. ;
  520. num_system_pages_per_slot_span =
  521. ComputeSystemPagesPerSlotSpan(slot_size, prefer_smaller_slot_spans);
  522. }
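// The reciprocal computed in Init() lets hot paths turn "offset / slot_size"
// into a multiply and a shift. A minimal sketch of the idea, using the
// kReciprocalMask constant referenced above and its companion shift constant
// (assumed here to be kReciprocalShift; see partition_bucket.h for the real
// definitions):
//
//   // reciprocal ~= (1 << kReciprocalShift) / slot_size, rounded up, so for
//   // offsets within a slot span:
//   //   offset / slot_size == (offset * reciprocal) >> kReciprocalShift
//   size_t SlotNumber(uintptr_t offset_in_span, uint64_t reciprocal) {
//     return (offset_in_span * reciprocal) >> kReciprocalShift;
//   }
//
// SlotNumber() above is purely illustrative, not a function in this file.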
  523. template <bool thread_safe>
  524. PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
  525. PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
  526. unsigned int flags,
  527. size_t slot_span_alignment) {
  528. PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
  529. PartitionPageSize()));
  530. PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
  531. PartitionPageSize()));
  532. size_t num_partition_pages = get_pages_per_slot_span();
  533. size_t slot_span_reservation_size = num_partition_pages
  534. << PartitionPageShift();
  535. size_t slot_span_committed_size = get_bytes_per_span();
  536. PA_DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
  537. PA_DCHECK(slot_span_committed_size % SystemPageSize() == 0);
  538. PA_DCHECK(slot_span_committed_size <= slot_span_reservation_size);
  539. uintptr_t adjusted_next_partition_page =
  540. base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
  541. if (PA_UNLIKELY(adjusted_next_partition_page + slot_span_reservation_size >
  542. root->next_partition_page_end)) {
  543. // AllocNewSuperPage() may crash (e.g. address space exhaustion), put data
  544. // on stack.
  545. PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
  546. PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
  547. // In this case, we can no longer hand out pages from the current super page
  548. // allocation. Get a new super page.
  549. if (!AllocNewSuperPage(root, flags)) {
  550. return nullptr;
  551. }
  552. // AllocNewSuperPage() updates root->next_partition_page, re-query.
  553. adjusted_next_partition_page =
  554. base::bits::AlignUp(root->next_partition_page, slot_span_alignment);
  555. PA_CHECK(adjusted_next_partition_page + slot_span_reservation_size <=
  556. root->next_partition_page_end);
  557. }
  558. auto* gap_start_page =
  559. PartitionPage<thread_safe>::FromAddr(root->next_partition_page);
  560. auto* gap_end_page =
  561. PartitionPage<thread_safe>::FromAddr(adjusted_next_partition_page);
  562. for (auto* page = gap_start_page; page < gap_end_page; ++page) {
  563. PA_DCHECK(!page->is_valid);
  564. page->has_valid_span_after_this = 1;
  565. }
  566. root->next_partition_page =
  567. adjusted_next_partition_page + slot_span_reservation_size;
  568. uintptr_t slot_span_start = adjusted_next_partition_page;
  569. auto* slot_span = &gap_end_page->slot_span_metadata;
  570. InitializeSlotSpan(slot_span);
  571. // Now that slot span is initialized, it's safe to call FromSlotStart.
  572. PA_DCHECK(slot_span ==
  573. SlotSpanMetadata<thread_safe>::FromSlotStart(slot_span_start));
  574. // System pages in the super page come in a decommitted state. Commit them
  575. // before vending them back.
  576. // If lazy commit is enabled, pages will be committed when provisioning slots,
  577. // in ProvisionMoreSlotsAndAllocOne(), not here.
  578. if (!kUseLazyCommit) {
  579. PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
  580. PA_DEBUG_DATA_ON_STACK("spansize", slot_span_reservation_size);
  581. PA_DEBUG_DATA_ON_STACK("spancmt", slot_span_committed_size);
  582. root->RecommitSystemPagesForData(
  583. slot_span_start, slot_span_committed_size,
  584. PageAccessibilityDisposition::kRequireUpdate);
  585. }
  586. PA_CHECK(get_slots_per_span() <=
  587. SlotSpanMetadata<ThreadSafe>::kMaxSlotsPerSlotSpan);
  588. // Double check that we had enough space in the super page for the new slot
  589. // span.
  590. PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
  591. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  592. PA_DCHECK(root->next_tag_bitmap_page);
  593. uintptr_t next_tag_bitmap_page =
  594. base::bits::AlignUp(reinterpret_cast<uintptr_t>(
  595. PartitionTagPointer(root->next_partition_page)),
  596. SystemPageSize());
  597. if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
  598. #if BUILDFLAG(PA_DCHECK_IS_ON)
  599. uintptr_t super_page =
  600. reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
  601. uintptr_t tag_bitmap = super_page + PartitionPageSize();
  602. PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
  603. PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
  604. #endif
  605. SetSystemPagesAccess(root->next_tag_bitmap_page,
  606. next_tag_bitmap_page - root->next_tag_bitmap_page,
  607. PageAccessibilityConfiguration::kReadWrite);
  608. root->next_tag_bitmap_page = next_tag_bitmap_page;
  609. }
  610. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  611. return slot_span;
  612. }
  613. template <bool thread_safe>
  614. PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
  615. PartitionRoot<thread_safe>* root,
  616. unsigned int flags) {
  617. // Need a new super page. We want to allocate super pages in a contiguous
  618. // address region as much as possible. This is important for not causing
  619. // page table bloat and not fragmenting address spaces in 32 bit
  620. // architectures.
  621. uintptr_t requested_address = root->next_super_page;
  622. // Allocate from GigaCage. Route to the appropriate GigaCage pool based on
  623. // BackupRefPtr support.
  624. pool_handle pool = root->ChoosePool();
  625. uintptr_t super_page =
  626. ReserveMemoryFromGigaCage(pool, requested_address, kSuperPageSize);
  627. if (PA_UNLIKELY(!super_page)) {
  628. if (flags & AllocFlags::kReturnNull)
  629. return 0;
  630. // Didn't manage to get a new uncommitted super page -> address space issue.
  631. ScopedUnlockGuard unlock{root->lock_};
  632. PartitionOutOfMemoryMappingFailure(root, kSuperPageSize);
  633. }
  634. *ReservationOffsetPointer(super_page) = kOffsetTagNormalBuckets;
  635. root->total_size_of_super_pages.fetch_add(kSuperPageSize,
  636. std::memory_order_relaxed);
  637. root->next_super_page = super_page + kSuperPageSize;
  638. // TODO(crbug.com/1307514): Add direct map support.
  639. uintptr_t state_bitmap = super_page + PartitionPageSize() +
  640. (is_direct_mapped() ? 0 : ReservedTagBitmapSize());
  641. PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
  642. const size_t state_bitmap_reservation_size =
  643. root->IsQuarantineAllowed() ? ReservedStateBitmapSize() : 0;
  644. const size_t state_bitmap_size_to_commit =
  645. root->IsQuarantineAllowed() ? CommittedStateBitmapSize() : 0;
  646. PA_DCHECK(state_bitmap_reservation_size % PartitionPageSize() == 0);
  647. PA_DCHECK(state_bitmap_size_to_commit % SystemPageSize() == 0);
  648. PA_DCHECK(state_bitmap_size_to_commit <= state_bitmap_reservation_size);
  649. uintptr_t payload = state_bitmap + state_bitmap_reservation_size;
  650. root->next_partition_page = payload;
  651. root->next_partition_page_end = root->next_super_page - PartitionPageSize();
  652. PA_DCHECK(payload ==
  653. SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed()));
  654. PA_DCHECK(root->next_partition_page_end == SuperPagePayloadEnd(super_page));
  655. // Keep the first partition page in the super page inaccessible to serve as a
  656. // guard page, except an "island" in the middle where we put page metadata and
  657. // also a tiny amount of extent metadata.
  658. {
  659. ScopedSyscallTimer timer{root};
  660. RecommitSystemPages(
  661. super_page + SystemPageSize(),
  662. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  663. // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is on, and if the BRP pool is used,
  664. // allocate 2 SystemPages, one for SuperPage metadata and the other for
  665. // RefCount bitmap.
  666. (pool == GetBRPPool()) ? SystemPageSize() * 2 : SystemPageSize(),
  667. #else
  668. SystemPageSize(),
  669. #endif
  670. PageAccessibilityConfiguration::kReadWrite,
  671. PageAccessibilityDisposition::kRequireUpdate);
  672. }
  673. // If we were after a specific address, but didn't get it, assume that
  674. // the system chose a lousy address. Here most OS'es have a default
  675. // algorithm that isn't randomized. For example, most Linux
  676. // distributions will allocate the mapping directly before the last
  677. // successful mapping, which is far from random. So we just get fresh
  678. // randomness for the next mapping attempt.
  679. if (requested_address && requested_address != super_page)
  680. root->next_super_page = 0;
  681. // We allocated a new super page so update super page metadata.
  682. // First check if this is a new extent or not.
  683. auto* latest_extent = PartitionSuperPageToExtent<thread_safe>(super_page);
  684. // By storing the root in every extent metadata object, we have a fast way
  685. // to go from a pointer within the partition to the root object.
  686. latest_extent->root = root;
  687. // Most new extents will be part of a larger extent, and these two fields
  688. // are unused, but we initialize them to 0 so that we get a clear signal
  689. // in case they are accidentally used.
  690. latest_extent->number_of_consecutive_super_pages = 0;
  691. latest_extent->next = nullptr;
  692. latest_extent->number_of_nonempty_slot_spans = 0;
  693. PartitionSuperPageExtentEntry<thread_safe>* current_extent =
  694. root->current_extent;
  695. const bool is_new_extent = super_page != requested_address;
  696. if (PA_UNLIKELY(is_new_extent)) {
  697. if (PA_UNLIKELY(!current_extent)) {
  698. PA_DCHECK(!root->first_extent);
  699. root->first_extent = latest_extent;
  700. } else {
  701. PA_DCHECK(current_extent->number_of_consecutive_super_pages);
  702. current_extent->next = latest_extent;
  703. }
  704. root->current_extent = latest_extent;
  705. latest_extent->number_of_consecutive_super_pages = 1;
  706. } else {
  707. // We allocated next to an existing extent so just nudge the size up a
  708. // little.
  709. PA_DCHECK(current_extent->number_of_consecutive_super_pages);
  710. ++current_extent->number_of_consecutive_super_pages;
  711. PA_DCHECK(payload > SuperPagesBeginFromExtent(current_extent) &&
  712. payload < SuperPagesEndFromExtent(current_extent));
  713. }
  714. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  715. // `root->next_partition_page` currently points at the start of the
  716. // super page payload. We point `root->next_tag_bitmap_page` to the
  717. // corresponding point in the tag bitmap and let the caller
  718. // (slot span allocation) take care of the rest.
  719. root->next_tag_bitmap_page =
  720. base::bits::AlignDown(reinterpret_cast<uintptr_t>(
  721. PartitionTagPointer(root->next_partition_page)),
  722. SystemPageSize());
  723. PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
  724. << "tag bitmap can never intrude on metadata partition page";
  725. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  726. // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
  727. // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
  728. // sure to register the super-page after it has been fully initialized.
  729. // Otherwise, the concurrent scanner may try to access |extent->root| which
  730. // might not be initialized yet.
  731. if (root->IsQuarantineEnabled()) {
  732. {
  733. ScopedSyscallTimer timer{root};
  734. RecommitSystemPages(state_bitmap, state_bitmap_size_to_commit,
  735. PageAccessibilityConfiguration::kReadWrite,
  736. PageAccessibilityDisposition::kRequireUpdate);
  737. }
  738. PCScan::RegisterNewSuperPage(root, super_page);
  739. }
  740. return payload;
  741. }
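// For orientation, the super page produced above ends up laid out roughly as
// follows (derived from the offsets computed in this function; the tag and
// state bitmaps exist only in the corresponding build configurations):
//
//   super_page
//     +- first partition page: guard system page, committed metadata system
//        page(s), remaining pages inaccessible
//     +- tag bitmap            (PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
//     +- state bitmap          (if quarantine/PCScan is allowed)
//     +- payload: slot spans handed out by AllocNewSlotSpan()
//     +- last partition page: guard
//
// with payload == root->next_partition_page and root->next_partition_page_end
// == root->next_super_page - PartitionPageSize(), as asserted above.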
  742. template <bool thread_safe>
  743. PA_ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
  744. SlotSpanMetadata<thread_safe>* slot_span) {
  745. new (slot_span) SlotSpanMetadata<thread_safe>(this);
  746. slot_span->Reset();
  747. uint16_t num_partition_pages = get_pages_per_slot_span();
  748. auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
  749. for (uint16_t i = 0; i < num_partition_pages; ++i, ++page) {
  750. PA_DCHECK(i <= PartitionPage<thread_safe>::kMaxSlotSpanMetadataOffset);
  751. page->slot_span_metadata_offset = i;
  752. page->is_valid = true;
  753. }
  754. }
  755. template <bool thread_safe>
  756. PA_ALWAYS_INLINE uintptr_t
  757. PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
  758. PartitionRoot<thread_safe>* root,
  759. SlotSpanMetadata<thread_safe>* slot_span) {
  760. PA_DCHECK(slot_span !=
  761. SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
  762. size_t num_slots = slot_span->num_unprovisioned_slots;
  763. PA_DCHECK(num_slots);
  764. PA_DCHECK(num_slots <= get_slots_per_span());
  765. // We should only get here when _every_ slot is either used or unprovisioned.
  766. // (The third possible state is "on the freelist". If we have a non-empty
  767. // freelist, we should not get here.)
  768. PA_DCHECK(num_slots + slot_span->num_allocated_slots == get_slots_per_span());
  769. // Similarly, make explicitly sure that the freelist is empty.
  770. PA_DCHECK(!slot_span->get_freelist_head());
  771. PA_DCHECK(!slot_span->is_full());
  772. uintptr_t slot_span_start =
  773. SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
  774. // If we got here, the first unallocated slot is either partially or fully on
  775. // an uncommitted page. If the latter, it must be at the start of that page.
  776. uintptr_t return_slot =
  777. slot_span_start + (slot_size * slot_span->num_allocated_slots);
  778. uintptr_t next_slot = return_slot + slot_size;
  779. uintptr_t commit_start = base::bits::AlignUp(return_slot, SystemPageSize());
  780. PA_DCHECK(next_slot > commit_start);
  781. uintptr_t commit_end = base::bits::AlignUp(next_slot, SystemPageSize());
  782. // If the slot was partially committed, |return_slot| and |next_slot| fall
  783. // in different pages. If the slot was fully uncommitted, |return_slot| points
  784. // to the page start and |next_slot| doesn't, thus only the latter gets
  785. // rounded up.
  786. PA_DCHECK(commit_end > commit_start);
  787. // The slot being returned is considered allocated.
  788. slot_span->num_allocated_slots++;
  789. // Round down, because a slot that doesn't fully fit in the new page(s) isn't
  790. // provisioned.
  791. size_t slots_to_provision = (commit_end - return_slot) / slot_size;
  792. slot_span->num_unprovisioned_slots -= slots_to_provision;
  793. PA_DCHECK(slot_span->num_allocated_slots +
  794. slot_span->num_unprovisioned_slots <=
  795. get_slots_per_span());
  796. // If lazy commit is enabled, meaning system pages in the slot span come
  797. // in an initially decommitted state, commit them here.
  798. // Note, we can't use PageAccessibilityDisposition::kAllowKeepForPerf, because
  799. // we have no knowledge which pages have been committed before (it doesn't
  800. // matter on Windows anyway).
  801. if (kUseLazyCommit) {
  802. // TODO(lizeb): Handle commit failure.
  803. root->RecommitSystemPagesForData(
  804. commit_start, commit_end - commit_start,
  805. PageAccessibilityDisposition::kRequireUpdate);
  806. }
  807. if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize)) {
  808. // Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
  809. TagMemoryRangeRandomly(return_slot, slot_size);
  810. }
  811. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  812. NormalBucketPartitionTagSetValue(return_slot, slot_size,
  813. root->GetNewPartitionTag());
  814. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  815. // Add all slots that fit within so far committed pages to the free list.
  816. PartitionFreelistEntry* prev_entry = nullptr;
  817. uintptr_t next_slot_end = next_slot + slot_size;
  818. size_t free_list_entries_added = 0;
  819. while (next_slot_end <= commit_end) {
  820. void* next_slot_ptr;
  821. if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize)) {
  822. // Ensure the MTE-tag of the memory pointed by other provisioned slot is
  823. // unguessable. They will be returned to the app as is, and the MTE-tag
  824. // will only change upon calling Free().
  825. next_slot_ptr = TagMemoryRangeRandomly(next_slot, slot_size);
  826. } else {
  827. // No MTE-tagging for larger slots, just cast.
  828. next_slot_ptr = reinterpret_cast<void*>(next_slot);
  829. }
  830. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  831. NormalBucketPartitionTagSetValue(next_slot, slot_size,
  832. root->GetNewPartitionTag());
  833. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  834. auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
  835. if (!slot_span->get_freelist_head()) {
  836. PA_DCHECK(!prev_entry);
  837. PA_DCHECK(!free_list_entries_added);
  838. slot_span->SetFreelistHead(entry);
  839. } else {
  840. PA_DCHECK(free_list_entries_added);
  841. prev_entry->SetNext(entry);
  842. }
  843. next_slot = next_slot_end;
  844. next_slot_end = next_slot + slot_size;
  845. prev_entry = entry;
  846. #if BUILDFLAG(PA_DCHECK_IS_ON)
  847. free_list_entries_added++;
  848. #endif
  849. }
  850. #if BUILDFLAG(PA_DCHECK_IS_ON)
  851. // The only provisioned slot not added to the free list is the one being
  852. // returned.
  853. PA_DCHECK(slots_to_provision == free_list_entries_added + 1);
  854. // We didn't necessarily provision more than one slot (e.g. if |slot_size|
  855. // is large), meaning that |slot_span->freelist_head| can be nullptr.
  856. if (slot_span->get_freelist_head()) {
  857. PA_DCHECK(free_list_entries_added);
  858. slot_span->get_freelist_head()->CheckFreeList(slot_size);
  859. }
  860. #endif
  861. // We had no free slots, and created some (potentially 0) in sorted order.
  862. slot_span->set_freelist_sorted();
  863. return return_slot;
  864. }
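// A worked example of the commit window computed above, assuming 4 KiB system
// pages and a hypothetical slot_size of 6144 (1.5 system pages), with lazy
// commit and one slot already allocated:
//
//   return_slot   = slot_span_start + 6144      (straddles pages 1 and 2)
//   next_slot     = slot_span_start + 12288
//   commit_start  = AlignUp(return_slot, 4096)  = slot_span_start + 8192
//   commit_end    = AlignUp(next_slot, 4096)    = slot_span_start + 12288
//   slots_to_provision = (12288 - 6144) / 6144  = 1
//
// Only the returned slot gets provisioned, so the freelist stays empty; this
// is the "didn't necessarily provision more than one slot" case checked in
// the DCHECK block above.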
  865. template <bool thread_safe>
  866. bool PartitionBucket<thread_safe>::SetNewActiveSlotSpan() {
  867. SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
  868. if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
  869. return false;
  870. SlotSpanMetadata<thread_safe>* next_slot_span;
  871. // The goal here is to find a suitable slot span in the active list. Suitable
  872. // slot spans are |is_active()|, i.e. they either have (a) freelist entries,
  873. // or (b) unprovisioned free space. The first case is preferable, since it
  874. // doesn't cost a system call, and doesn't cause new memory to become dirty.
  875. //
  876. // While looking for a new slot span, active list maintenance is performed,
  877. // that is:
  878. // - Empty and decommitted slot spans are moved to their respective lists.
  879. // - Full slot spans are removed from the active list but are not moved
  880. // anywhere. They could be tracked in a separate list, but this would
  881. // increase cost non trivially. Indeed, a full slot span is likely to become
  882. // non-full at some point (due to a free() hitting it). Since we only have
  883. // space in the metadata for a single linked list pointer, removing the
  884. // newly-non-full slot span from the "full" list would require walking it
  885. // (to know what's before it in the full list).
  886. //
  887. // Since we prefer slot spans with provisioned freelist entries, maintenance
  888. // happens in two stages:
  889. // 1. Walk the list to find candidates. Each of the skipped slot spans is
  890. //    moved to either:
  891. //    - one of the long-lived lists: empty, decommitted
  892. //    - the temporary "active slot spans with no freelist entry" list
  893. //    - Nowhere for full slot spans.
  894. // 2. Once we have a candidate:
  895. //    - Set it as the new active list head
  896. //    - Reattach the temporary list
  897. //
  898. // Note that in most cases, the whole list will not be walked and maintained
  899. // at this stage.
  900. SlotSpanMetadata<thread_safe>* to_provision_head = nullptr;
  901. SlotSpanMetadata<thread_safe>* to_provision_tail = nullptr;
  902. for (; slot_span; slot_span = next_slot_span) {
  903. next_slot_span = slot_span->next_slot_span;
  904. PA_DCHECK(slot_span->bucket == this);
  905. PA_DCHECK(slot_span != empty_slot_spans_head);
  906. PA_DCHECK(slot_span != decommitted_slot_spans_head);
  907. if (slot_span->is_active()) {
  908. // Has provisioned slots.
  909. if (slot_span->get_freelist_head()) {
  910. // Will use this slot span, no need to go further.
  911. break;
  912. } else {
  913. // Keeping head and tail because we don't want to reverse the list.
  914. if (!to_provision_head)
  915. to_provision_head = slot_span;
  916. if (to_provision_tail)
  917. to_provision_tail->next_slot_span = slot_span;
  918. to_provision_tail = slot_span;
  919. slot_span->next_slot_span = nullptr;
  920. }
  921. } else if (slot_span->is_empty()) {
  922. slot_span->next_slot_span = empty_slot_spans_head;
  923. empty_slot_spans_head = slot_span;
  924. } else if (PA_LIKELY(slot_span->is_decommitted())) {
  925. slot_span->next_slot_span = decommitted_slot_spans_head;
  926. decommitted_slot_spans_head = slot_span;
  927. } else {
  928. PA_DCHECK(slot_span->is_full());
  929. // Move this slot span... nowhere, and also mark it as full. We need it
  930. // marked so that free'ing can tell, and move it back into the active
  931. // list.
  932. slot_span->marked_full = 1;
  933. ++num_full_slot_spans;
  934. // Overflow. Most likely a correctness issue in the code. It is in theory
  935. // possible that the number of full slot spans really reaches (1 << 24),
  936. // but this is very unlikely (and not possible with most GigaCage
  937. // settings).
  938. PA_CHECK(num_full_slot_spans);
  939. // Not necessary but might help stop accidents.
  940. slot_span->next_slot_span = nullptr;
  941. }
  942. }
  943. bool usable_active_list_head = false;
  944. // Found an active slot span with provisioned entries on the freelist.
  945. if (slot_span) {
  946. usable_active_list_head = true;
  947. // We have active slot spans with unprovisioned entries. Re-attach them into
  948. // the active list, past the span with freelist entries.
  949. if (to_provision_head) {
  950. auto* next = slot_span->next_slot_span;
  951. slot_span->next_slot_span = to_provision_head;
  952. to_provision_tail->next_slot_span = next;
  953. }
  954. active_slot_spans_head = slot_span;
  955. } else if (to_provision_head) {
  956. usable_active_list_head = true;
  957. // Need to provision new slots.
  958. active_slot_spans_head = to_provision_head;
  959. } else {
  960. // Active list is now empty.
  961. active_slot_spans_head =
  962. SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  963. }
  964. return usable_active_list_head;
  965. }
  966. template <bool thread_safe>
  967. void PartitionBucket<thread_safe>::MaintainActiveList() {
  968. SlotSpanMetadata<thread_safe>* slot_span = active_slot_spans_head;
  969. if (slot_span == SlotSpanMetadata<thread_safe>::get_sentinel_slot_span())
  970. return;
  971. SlotSpanMetadata<thread_safe>* new_active_slot_spans_head = nullptr;
  972. SlotSpanMetadata<thread_safe>* new_active_slot_spans_tail = nullptr;
  973. SlotSpanMetadata<thread_safe>* next_slot_span;
  974. for (; slot_span; slot_span = next_slot_span) {
  975. next_slot_span = slot_span->next_slot_span;
  976. if (slot_span->is_active()) {
  977. // Ordering in the active slot span list matters, don't reverse it.
  978. if (!new_active_slot_spans_head)
  979. new_active_slot_spans_head = slot_span;
  980. if (new_active_slot_spans_tail)
  981. new_active_slot_spans_tail->next_slot_span = slot_span;
  982. new_active_slot_spans_tail = slot_span;
  983. slot_span->next_slot_span = nullptr;
  984. } else if (slot_span->is_empty()) {
  985. // For the empty and decommitted lists, LIFO ordering makes sense (since
  986. // it would lead to reusing memory which has been touched relatively
  987. // recently, which only matters for committed spans though).
  988. slot_span->next_slot_span = empty_slot_spans_head;
  989. empty_slot_spans_head = slot_span;
  990. } else if (slot_span->is_decommitted()) {
  991. slot_span->next_slot_span = decommitted_slot_spans_head;
  992. decommitted_slot_spans_head = slot_span;
  993. } else {
  994. // Full slot spans are not tracked, just accounted for.
  995. PA_DCHECK(slot_span->is_full());
  996. slot_span->marked_full = 1;
  997. ++num_full_slot_spans;
  998. PA_CHECK(num_full_slot_spans); // Overflow.
  999. slot_span->next_slot_span = nullptr;
  1000. }
  1001. }
  1002. if (!new_active_slot_spans_head) {
  1003. new_active_slot_spans_head =
  1004. SlotSpanMetadata<thread_safe>::get_sentinel_slot_span();
  1005. }
  1006. active_slot_spans_head = new_active_slot_spans_head;
  1007. }

template <bool thread_safe>
void PartitionBucket<thread_safe>::SortSlotSpanFreelists() {
  for (auto* slot_span = active_slot_spans_head; slot_span;
       slot_span = slot_span->next_slot_span) {
    // No need to sort the freelist if it's already sorted. Note that if the
    // freelist is sorted, this means that it didn't change at all since the
    // last call. This may be a good signal to shrink it if possible (if an
    // entire OS page is free, we can decommit it).
    //
    // Besides saving CPU, this also avoids touching memory of fully idle slot
    // spans, which may require paging.
    if (slot_span->num_allocated_slots > 0 && !slot_span->freelist_is_sorted())
      slot_span->SortFreelist();
  }
}

PA_COMPONENT_EXPORT(PARTITION_ALLOC)
bool CompareSlotSpans(SlotSpanMetadata<ThreadSafe>* a,
                      SlotSpanMetadata<ThreadSafe>* b) {
  auto criteria_tuple = [](SlotSpanMetadata<ThreadSafe> const* a) {
    size_t freelist_length = a->GetFreelistLength();
    // The criteria are, in order (hence the lexicographic comparison below):
    // 1. Prefer slot spans with freelist entries. The ones without freelist
    //    entries would be skipped in SetNewActiveSlotSpan() anyway.
    // 2. Then the ones with the fewest freelist entries. They are either close
    //    to being full (for the provisioned memory), or close to being pushed
    //    at the end of the list (since they would not have freelist entries
    //    anymore, and would either fall into the first case, or be skipped by
    //    SetNewActiveSlotSpan()).
    // 3. The ones with the fewest unprovisioned slots, meaning that they are
    //    close to being completely full.
    //
    // Note that this sorting order is not necessarily the best one when slot
    // spans are partially provisioned. From local testing, in steady-state,
    // most slot spans are entirely provisioned (or decommitted), which may be
    // a consequence of the lack of partial slot span decommit, or of fairly
    // effective fragmentation avoidance heuristics. Make sure to evaluate
    // whether an alternative sorting order (sorting according to freelist size
    // + unprovisioned slots) makes more sense.
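    //
    // Illustrative example: three spans with (freelist_length,
    // num_unprovisioned_slots) of (2, 0), (5, 1) and (0, 3) map to the tuples
    // {false, 2, 0} < {false, 5, 1} < {true, 0, 3}, so the span closest to
    // being full sorts first and the span without freelist entries sorts last.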
    return std::tuple<bool, size_t, size_t>{
        freelist_length == 0, freelist_length, a->num_unprovisioned_slots};
  };

  return criteria_tuple(a) < criteria_tuple(b);
}

template <bool thread_safe>
void PartitionBucket<thread_safe>::SortActiveSlotSpans() {
  // Sorting up to |kMaxSlotSpansToSort| slot spans. This is capped for two
  // reasons:
  // - Limiting execution time.
  // - Current code cannot allocate, hence the fixed-size array below.
  //
  // In practice though, it's rare to have that many active slot spans.
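  //
  // Slot spans beyond the cap are not sorted; they are re-attached, in their
  // existing order, after the sorted ones (see |overflow_spans_start| below).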
  SlotSpanMetadata<thread_safe>* active_spans_array[kMaxSlotSpansToSort];
  size_t index = 0;
  SlotSpanMetadata<thread_safe>* overflow_spans_start = nullptr;

  for (auto* slot_span = active_slot_spans_head; slot_span;
       slot_span = slot_span->next_slot_span) {
    if (index < kMaxSlotSpansToSort) {
      active_spans_array[index++] = slot_span;
    } else {
      // Starting from this one, slot spans are not sorted.
      overflow_spans_start = slot_span;
      break;
    }
  }

  // We sort the active slot spans so that allocations are preferably serviced
  // from the fullest ones. This way we hope to reduce fragmentation by packing
  // allocations into as few slot spans as possible.
  //
  // With perfect information on allocation lifespan, we would be able to pack
  // allocations and get almost no fragmentation. This is obviously not the
  // case, so we have partially full SlotSpans. Nevertheless, as a heuristic we
  // want to:
  // - Keep almost-empty slot spans as empty as possible
  // - Keep mostly-full slot spans as full as possible
  //
  // The first part is done in the hope that future free()s will make these
  // slot spans completely empty, allowing us to reclaim them. To that end, sort
  // SlotSpans periodically so that the fullest ones are preferred.
  //
  // std::sort() is not completely guaranteed to never allocate memory. However,
  // it is not allowed to throw std::bad_alloc, which constrains the
  // implementation. In addition, this is protected by the reentrancy guard, so
  // we would detect such an allocation.
  std::sort(active_spans_array, active_spans_array + index, CompareSlotSpans);

  active_slot_spans_head = overflow_spans_start;

  // Reverse order, since we insert at the head of the list.
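  //
  // For illustration: with sorted spans [s0, s1, s2] (fullest first) and an
  // unsorted overflow tail [o0, ...], the loop below pushes s2, then s1, then
  // s0 at the head, so the final list is s0 -> s1 -> s2 -> o0 -> ...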
  for (int i = index - 1; i >= 0; i--) {
    active_spans_array[i]->next_slot_span = active_slot_spans_head;
    active_slot_spans_head = active_spans_array[i];
  }
}

template <bool thread_safe>
uintptr_t PartitionBucket<thread_safe>::SlowPathAlloc(
    PartitionRoot<thread_safe>* root,
    unsigned int flags,
    size_t raw_size,
    size_t slot_span_alignment,
    bool* is_already_zeroed) {
  PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
            base::bits::IsPowerOfTwo(slot_span_alignment));

  // The slow path is called when the freelist is empty. The only exception is
  // when a higher-order alignment is requested, in which case the freelist
  // logic is bypassed and we go directly for slot span allocation.
  bool allocate_aligned_slot_span = slot_span_alignment > PartitionPageSize();
  PA_DCHECK(!active_slot_spans_head->get_freelist_head() ||
            allocate_aligned_slot_span);

  SlotSpanMetadata<thread_safe>* new_slot_span = nullptr;
  // |new_slot_span->bucket| will always be |this|, except when |this| is the
  // sentinel bucket, which is used to signal a direct mapped allocation. In
  // this case |new_bucket| will be set properly later. This avoids a read for
  // most allocations.
  PartitionBucket* new_bucket = this;
  *is_already_zeroed = false;

  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
  // marked as special cases. We bounce them through to the slow path so that
  // we can still have a blazing fast hot path due to lack of corner-case
  // branches.
  //
  // Note: The ordering of the conditionals matters! In particular,
  // SetNewActiveSlotSpan() has a side effect even when it returns false: it
  // sweeps the active list and may move slot spans into the empty or
  // decommitted lists, which affects the subsequent conditional.
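  //
  // The cases handled below are, in order:
  // 1. Direct-mapped allocation (|this| is the sentinel bucket).
  // 2. An active slot span with provisioned freelist entries.
  // 3. An empty slot span, falling back to a decommitted one.
  // 4. A brand new slot span.
  // A higher-order alignment request skips straight to the last case.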
  if (PA_UNLIKELY(is_direct_mapped())) {
    PA_DCHECK(raw_size > kMaxBucketed);
    PA_DCHECK(this == &root->sentinel_bucket);
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());

    // No fast path for direct-mapped allocations.
    if (flags & AllocFlags::kFastPathOrReturnNull)
      return 0;

    new_slot_span =
        PartitionDirectMap(root, flags, raw_size, slot_span_alignment);
    if (new_slot_span)
      new_bucket = new_slot_span->bucket;
    // Memory from PageAllocator is always zeroed.
    *is_already_zeroed = true;
  } else if (PA_LIKELY(!allocate_aligned_slot_span && SetNewActiveSlotSpan())) {
    // First, did we find an active slot span in the active list?
    new_slot_span = active_slot_spans_head;
    PA_DCHECK(new_slot_span->is_active());
  } else if (PA_LIKELY(!allocate_aligned_slot_span &&
                       (empty_slot_spans_head != nullptr ||
                        decommitted_slot_spans_head != nullptr))) {
    // Second, look in our lists of empty and decommitted slot spans.
    // Check empty slot spans first, which are preferred, but beware that an
    // empty slot span might have been decommitted.
    while (PA_LIKELY((new_slot_span = empty_slot_spans_head) != nullptr)) {
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_empty() || new_slot_span->is_decommitted());
      empty_slot_spans_head = new_slot_span->next_slot_span;

      // Accept the empty slot span unless it got decommitted.
      if (new_slot_span->get_freelist_head()) {
        new_slot_span->next_slot_span = nullptr;
        new_slot_span->ToSuperPageExtent()
            ->IncrementNumberOfNonemptySlotSpans();

        // Re-activating an empty slot span, update accounting.
        size_t dirty_size = base::bits::AlignUp(
            new_slot_span->GetProvisionedSize(), SystemPageSize());
        PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
        root->empty_slot_spans_dirty_bytes -= dirty_size;

        break;
      }

      PA_DCHECK(new_slot_span->is_decommitted());
      new_slot_span->next_slot_span = decommitted_slot_spans_head;
      decommitted_slot_spans_head = new_slot_span;
    }

    if (PA_UNLIKELY(!new_slot_span) &&
        PA_LIKELY(decommitted_slot_spans_head != nullptr)) {
      // Commit can be expensive, don't do it.
      if (flags & AllocFlags::kFastPathOrReturnNull)
        return 0;

      new_slot_span = decommitted_slot_spans_head;
      PA_DCHECK(new_slot_span->bucket == this);
      PA_DCHECK(new_slot_span->is_decommitted());
      decommitted_slot_spans_head = new_slot_span->next_slot_span;

      // If lazy commit is enabled, pages will be recommitted when provisioning
      // slots, in ProvisionMoreSlotsAndAllocOne(), not here.
      if (!kUseLazyCommit) {
        uintptr_t slot_span_start =
            SlotSpanMetadata<thread_safe>::ToSlotSpanStart(new_slot_span);
        // Since lazy commit isn't used, we have a guarantee that all slot span
        // pages have been previously committed, and then decommitted using
        // PageAccessibilityDisposition::kAllowKeepForPerf, so use the same
        // option as an optimization.
        // TODO(lizeb): Handle commit failure.
        root->RecommitSystemPagesForData(
            slot_span_start, new_slot_span->bucket->get_bytes_per_span(),
            PageAccessibilityDisposition::kAllowKeepForPerf);
      }

      new_slot_span->Reset();
      *is_already_zeroed = DecommittedMemoryIsAlwaysZeroed();
    }
    PA_DCHECK(new_slot_span);
  } else {
    // Getting a new slot span is expensive, don't do it.
    if (flags & AllocFlags::kFastPathOrReturnNull)
      return 0;

    // Third. If we get here, we need a brand new slot span.
    // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
    // as slot_span_committed_size.
    new_slot_span = AllocNewSlotSpan(root, flags, slot_span_alignment);
    // New memory from PageAllocator is always zeroed.
    *is_already_zeroed = true;
  }

  // Bail if we had a memory allocation failure.
  if (PA_UNLIKELY(!new_slot_span)) {
    PA_DCHECK(active_slot_spans_head ==
              SlotSpanMetadata<thread_safe>::get_sentinel_slot_span());
    if (flags & AllocFlags::kReturnNull)
      return 0;
    // See comment in PartitionDirectMap() for unlocking.
    ScopedUnlockGuard unlock{root->lock_};
    root->OutOfMemory(raw_size);
    PA_IMMEDIATE_CRASH();  // Not required, kept as documentation.
  }

  PA_DCHECK(new_bucket != &root->sentinel_bucket);
  new_bucket->active_slot_spans_head = new_slot_span;
  if (new_slot_span->CanStoreRawSize())
    new_slot_span->SetRawSize(raw_size);

  // If we found an active slot span with free slots, or an empty slot span, we
  // have a usable freelist head.
  if (PA_LIKELY(new_slot_span->get_freelist_head() != nullptr)) {
    PartitionFreelistEntry* entry =
        new_slot_span->PopForAlloc(new_bucket->slot_size);

    // We may have set *is_already_zeroed to true above; make sure that the
    // freelist entry doesn't contain data. Either way, it wouldn't be a good
    // idea to let users see our internal data.
    uintptr_t slot_start = entry->ClearForAllocation();
    return slot_start;
  }

  // Otherwise, we need to provision more slots by committing more pages. Build
  // the free list for the newly provisioned slots.
  PA_DCHECK(new_slot_span->num_unprovisioned_slots);
  return ProvisionMoreSlotsAndAllocOne(root, new_slot_span);
}

template struct PartitionBucket<ThreadSafe>;

}  // namespace partition_alloc::internal