// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_

#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
#include "base/allocator/partition_allocator/partition_ref_count.h"
#endif

namespace partition_alloc::internal {

// An "extent" is a span of consecutive superpages. We link the partition's
// next extent (if there is one) to the very start of a superpage's metadata
// area.
template <bool thread_safe>
struct PartitionSuperPageExtentEntry {
  PartitionRoot<thread_safe>* root;
  PartitionSuperPageExtentEntry<thread_safe>* next;
  uint16_t number_of_consecutive_super_pages;
  uint16_t number_of_nonempty_slot_spans;

  PA_ALWAYS_INLINE void IncrementNumberOfNonemptySlotSpans();
  PA_ALWAYS_INLINE void DecrementNumberOfNonemptySlotSpans();
};

static_assert(
    sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
    "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
static_assert(
    kMaxSuperPagesInPool / kSuperPageSize <=
        std::numeric_limits<
            decltype(PartitionSuperPageExtentEntry<
                     ThreadSafe>::number_of_consecutive_super_pages)>::max(),
    "number_of_consecutive_super_pages must be big enough");

// Returns the base of the first super page in the range of consecutive super
// pages.
//
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
template <bool thread_safe>
PA_ALWAYS_INLINE uintptr_t SuperPagesBeginFromExtent(
    const PartitionSuperPageExtentEntry<thread_safe>* extent) {
  PA_DCHECK(0 < extent->number_of_consecutive_super_pages);
  uintptr_t extent_as_uintptr = reinterpret_cast<uintptr_t>(extent);
  PA_DCHECK(IsManagedByNormalBuckets(extent_as_uintptr));
  return base::bits::AlignDown(extent_as_uintptr, kSuperPageAlignment);
}

// Returns the end of the last super page in the range of consecutive super
// pages.
//
// CAUTION! |extent| must point to the extent of the first super page in the
// range of consecutive super pages.
template <bool thread_safe>
PA_ALWAYS_INLINE uintptr_t SuperPagesEndFromExtent(
    const PartitionSuperPageExtentEntry<thread_safe>* extent) {
  return SuperPagesBeginFromExtent(extent) +
         (extent->number_of_consecutive_super_pages * kSuperPageSize);
}
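
// Illustrative sketch (not part of the header): given the extent entry of the
// first super page in a run, the two helpers above bound the run as the
// half-open range
//   [SuperPagesBeginFromExtent(extent), SuperPagesEndFromExtent(extent)),
// which spans number_of_consecutive_super_pages * kSuperPageSize bytes.
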
using AllocationStateMap =
    StateBitmap<kSuperPageSize, kSuperPageAlignment, kAlignment>;

// Metadata of the slot span.
//
// Some notes on slot span states. It can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active slot span has available free slots, as well as allocated ones.
// A full slot span has no free slots. An empty slot span has no allocated
// slots, and a decommitted slot span is an empty one that had its backing
// memory released back to the system.
//
// There are three linked lists tracking slot spans. The "active" list is an
// approximation of a list of active slot spans. It is an approximation because
// full, empty and decommitted slot spans may briefly be present in the list
// until we next do a scan over it. The "empty" list holds mostly empty slot
// spans, but may briefly hold decommitted ones too. The "decommitted" list
// holds only decommitted slot spans.
//
// The significant slot span transitions are:
// - Free() will detect when a full slot span has a slot freed and immediately
//   return the slot span to the head of the active list.
// - Free() will detect when a slot span is fully emptied. It _may_ add it to
//   the empty list or it _may_ leave it on the active list until a future
//   list scan.
// - Alloc() _may_ scan the active page list in order to fulfil the request.
//   If it does this, full, empty and decommitted slot spans encountered will
//   be booted out of the active list. If there are no suitable active slot
//   spans found, an empty or decommitted slot span (if one exists) will be
//   pulled from the empty/decommitted list onto the active list.
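//
// For orientation, the state getters defined further below classify a slot
// span roughly as follows (sketch derived from their implementations in this
// file):
//   is_full():        num_allocated_slots equals the slots per span.
//   is_active():      some slots allocated, and free or unprovisioned slots
//                     remain.
//   is_empty():       no slots allocated, freelist still present (memory
//                     committed).
//   is_decommitted(): no slots allocated, no freelist (memory released).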
#pragma pack(push, 1)
template <bool thread_safe>
struct SlotSpanMetadata {
 private:
  PartitionFreelistEntry* freelist_head = nullptr;

 public:
  // TODO(lizeb): Make as many fields as possible private or const, to
  // encapsulate things more clearly.
  SlotSpanMetadata<thread_safe>* next_slot_span = nullptr;
  PartitionBucket<thread_safe>* const bucket = nullptr;

  // CHECK()ed in AllocNewSlotSpan().
#if defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)
  // System page size is not a constant on Apple OSes, but is either 4 or 16kiB
  // (1 << 12 or 1 << 14), as checked in PartitionRoot::Init(). And
  // PartitionPageSize() is 4 times the OS page size.
  static constexpr size_t kMaxSlotsPerSlotSpan =
      4 * (1 << 14) / kSmallestBucket;
#elif BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)
  // System page size can be 4, 16, or 64 kiB on Linux on arm64. 64 kiB is
  // currently (kMaxSlotsPerSlotSpanBits == 13) not supported by the code,
  // so we use the 16 kiB maximum (64 kiB will crash).
  static constexpr size_t kMaxSlotsPerSlotSpan =
      4 * (1 << 14) / kSmallestBucket;
#else
  // A slot span can "span" multiple PartitionPages, but then its slot size is
  // larger, so it doesn't have as many slots.
  static constexpr size_t kMaxSlotsPerSlotSpan =
      PartitionPageSize() / kSmallestBucket;
#endif  // defined(PA_HAS_64_BITS_POINTERS) && BUILDFLAG(IS_APPLE)

  // The maximum number of bits needed to cover all currently supported OSes.
  static constexpr size_t kMaxSlotsPerSlotSpanBits = 13;
  static_assert(kMaxSlotsPerSlotSpan < (1 << kMaxSlotsPerSlotSpanBits), "");

  // |marked_full| isn't equivalent to being full. A slot span is marked as
  // full iff it isn't on the active slot span list (or any other list).
  uint32_t marked_full : 1;
  // |num_allocated_slots| is 0 for empty or decommitted slot spans, which can
  // be further differentiated by checking existence of the freelist.
  uint32_t num_allocated_slots : kMaxSlotsPerSlotSpanBits;
  uint32_t num_unprovisioned_slots : kMaxSlotsPerSlotSpanBits;

 private:
  const uint32_t can_store_raw_size_ : 1;
  uint32_t freelist_is_sorted_ : 1;
  uint32_t unused1_ : (32 - 1 - 2 * kMaxSlotsPerSlotSpanBits - 1 - 1);
  // If |in_empty_cache_|==0, |empty_cache_index_| is undefined and mustn't be
  // used.
  uint16_t in_empty_cache_ : 1;
  uint16_t empty_cache_index_ : kEmptyCacheIndexBits;  // < kMaxFreeableSpans.
  uint16_t unused2_ : (16 - 1 - kEmptyCacheIndexBits);
  // Can use only 48 bits (6B) in this bitfield, as this structure is embedded
  // in PartitionPage which has 2B worth of fields and must fit in 32B.

 public:
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  explicit SlotSpanMetadata(PartitionBucket<thread_safe>* bucket);

  // Public API
  // Note the matching Alloc() functions are in PartitionPage.
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  PA_NOINLINE void FreeSlowPath(size_t number_of_freed);
  PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
  PA_ALWAYS_INLINE void Free(uintptr_t ptr);
  // Appends the passed freelist to the slot-span's freelist. Please note that
  // the function doesn't increment the tags of the passed freelist entries,
  // since FreeNoHooks() did it already.
  PA_ALWAYS_INLINE void AppendFreeList(PartitionFreelistEntry* head,
                                       PartitionFreelistEntry* tail,
                                       size_t number_of_freed);

  void Decommit(PartitionRoot<thread_safe>* root);
  void DecommitIfPossible(PartitionRoot<thread_safe>* root);

  // Sorts the freelist in ascending addresses order.
  void SortFreelist();
  // Inserts the slot span into the empty ring, making space for the new slot
  // span, and potentially shrinking the ring.
  void RegisterEmpty();

  // Pointer/address manipulation functions. These must be static as the input
  // |slot_span| pointer may be the result of an offset calculation and
  // therefore cannot be trusted. The objective of these functions is to
  // sanitize this input.
  PA_ALWAYS_INLINE static uintptr_t ToSlotSpanStart(
      const SlotSpanMetadata* slot_span);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromAddr(uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromSlotStart(uintptr_t slot_start);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObject(void* object);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerAddr(
      uintptr_t address);
  PA_ALWAYS_INLINE static SlotSpanMetadata* FromObjectInnerPtr(void* ptr);

  PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
  ToSuperPageExtent() const;

  // Checks if it is feasible to store raw_size.
  PA_ALWAYS_INLINE bool CanStoreRawSize() const { return can_store_raw_size_; }
  // The caller is responsible for ensuring that raw_size can be stored before
  // calling Set/GetRawSize.
  PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
  PA_ALWAYS_INLINE size_t GetRawSize() const;

  // Only meaningful when `this` refers to a slot span in a direct map
  // bucket.
  PA_ALWAYS_INLINE PartitionTag* DirectMapMTETag();

  PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
    return freelist_head;
  }
  PA_ALWAYS_INLINE void SetFreelistHead(PartitionFreelistEntry* new_head);

  // Returns the size of the region used within a slot. The used region
  // consists of actual allocated data, extras, and possibly empty space in
  // the middle.
  PA_ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
    // The returned size can be:
    // - The slot size for small buckets.
    // - Exact size needed to satisfy allocation (incl. extras), for large
    //   buckets and direct-mapped allocations (see also the comment in
    //   CanStoreRawSize() for more info).
    if (PA_LIKELY(!CanStoreRawSize())) {
      return bucket->slot_size;
    }
    return GetRawSize();
  }

  // This includes padding due to rounding done at allocation; we don't know
  // the requested size at deallocation, so we use this in both places.
  PA_ALWAYS_INLINE size_t GetSlotSizeForBookkeeping() const {
    // This could be more precise for allocations where CanStoreRawSize()
    // returns true (large allocations). However this is called for *every*
    // allocation, so we don't want an extra branch there.
    return bucket->slot_size;
  }

  // Returns the size available to the app. It can be equal to or higher than
  // the requested size. If higher, the overage won't exceed what's actually
  // usable by the app without a risk of running past the end of the allocated
  // region or into PartitionAlloc's internal data (like extras).
  PA_ALWAYS_INLINE size_t
  GetUsableSize(PartitionRoot<thread_safe>* root) const {
    // The returned size can be:
    // - The slot size minus extras, for small buckets. This could be more than
    //   requested size.
    // - Raw size minus extras, for large buckets and direct-mapped allocations
    //   (see also the comment in CanStoreRawSize() for more info). This is
    //   equal to requested size.
    return root->AdjustSizeForExtrasSubtract(GetUtilizedSlotSize());
  }
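
  // Worked example with hypothetical numbers (slot size, extras and request
  // size are made up for illustration): for a small bucket with
  // slot_size == 64 and 16 bytes of extras, a 40-byte request has
  // GetUtilizedSlotSize() == 64 and GetUsableSize() == 48; for a large,
  // raw-size-storing allocation of 1000 bytes, GetUtilizedSlotSize() is the
  // raw size (1000 + extras) and GetUsableSize() is exactly 1000.
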
  // Returns the total size of the slots that are currently provisioned.
  PA_ALWAYS_INLINE size_t GetProvisionedSize() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    size_t provisioned_size = num_provisioned_slots * bucket->slot_size;
    PA_DCHECK(provisioned_size <= bucket->get_bytes_per_span());
    return provisioned_size;
  }

  // Return the number of entries in the freelist.
  size_t GetFreelistLength() const {
    size_t num_provisioned_slots =
        bucket->get_slots_per_span() - num_unprovisioned_slots;
    return num_provisioned_slots - num_allocated_slots;
  }
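
  // Worked example with hypothetical numbers: if get_slots_per_span() == 64,
  // num_unprovisioned_slots == 16 and num_allocated_slots == 30, the span has
  // 48 provisioned slots, GetProvisionedSize() == 48 * slot_size, and
  // GetFreelistLength() == 48 - 30 == 18.
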
  PA_ALWAYS_INLINE void Reset();

  // TODO(ajwong): Can this be made private? https://crbug.com/787153
  PA_COMPONENT_EXPORT(PARTITION_ALLOC)
  static SlotSpanMetadata* get_sentinel_slot_span();

  // Slot span state getters.
  PA_ALWAYS_INLINE bool is_active() const;
  PA_ALWAYS_INLINE bool is_full() const;
  PA_ALWAYS_INLINE bool is_empty() const;
  PA_ALWAYS_INLINE bool is_decommitted() const;
  PA_ALWAYS_INLINE bool in_empty_cache() const { return in_empty_cache_; }
  PA_ALWAYS_INLINE bool freelist_is_sorted() const {
    return freelist_is_sorted_;
  }
  PA_ALWAYS_INLINE void set_freelist_sorted() { freelist_is_sorted_ = true; }

 private:
  // sentinel_slot_span_ is used as a sentinel to indicate that there is no
  // slot span in the active list. We could use nullptr, but in that case we
  // would need to add a null-check branch to the hot allocation path. We want
  // to avoid that.
  //
  // Note, this declaration is kept in the header as opposed to an anonymous
  // namespace so the getter can be fully inlined.
  static inline SlotSpanMetadata sentinel_slot_span_;
  // For the sentinel.
  constexpr SlotSpanMetadata() noexcept
      : marked_full(0),
        num_allocated_slots(0),
        num_unprovisioned_slots(0),
        can_store_raw_size_(false),
        freelist_is_sorted_(true),
        unused1_(0),
        in_empty_cache_(0),
        empty_cache_index_(0),
        unused2_(0) {}
};
#pragma pack(pop)
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
              "SlotSpanMetadata must fit into a Page Metadata slot.");

// Metadata of a non-first partition page in a slot span.
struct SubsequentPageMetadata {
  // Raw size is the size needed to satisfy the allocation (requested size +
  // extras). If available, it can be used to report better statistics or to
  // bring protective cookie closer to the allocated memory.
  //
  // It can be used only if:
  // - there is no more than one slot in the slot span (otherwise we wouldn't
  //   know which slot the raw size applies to)
  // - there is more than one partition page in the slot span (the metadata of
  //   the first one is used to store slot information, but the second one is
  //   available for extra information)
  size_t raw_size;

  // Specific to when `this` is used in a direct map bucket. Since direct
  // maps don't have as many tags as the typical normal bucket slot span,
  // we can get away with just hiding the sole tag in here.
  //
  // See `//base/memory/mtecheckedptr.md` for details.
  PartitionTag direct_map_tag;
};

// Each partition page has metadata associated with it. The metadata of the
// first page of a slot span describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional
// information.
// "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".)
#pragma pack(push, 1)
template <bool thread_safe>
struct PartitionPage {
  union {
    SlotSpanMetadata<thread_safe> slot_span_metadata;

    SubsequentPageMetadata subsequent_page_metadata;

    // sizeof(PartitionPageMetadata) must always be:
    // - a power of 2 (for fast modulo operations)
    // - below kPageMetadataSize
    //
    // This makes sure that this is respected no matter the architecture.
    char optional_padding[kPageMetadataSize - sizeof(uint8_t) - sizeof(bool)];
  };

  // The first PartitionPage of the slot span holds its metadata. This offset
  // tells how many pages in from that first page we are.
  // For direct maps, the first page metadata (that isn't a super page extent
  // entry) uses this field to tell how many pages to the right the direct map
  // metadata starts.
  //
  // 6 bits is enough to represent all possible offsets, given that the
  // smallest partition page is 16kiB and the offset won't exceed 1MiB.
  static constexpr uint16_t kMaxSlotSpanMetadataBits = 6;
  static constexpr uint16_t kMaxSlotSpanMetadataOffset =
      (1 << kMaxSlotSpanMetadataBits) - 1;
  uint8_t slot_span_metadata_offset : kMaxSlotSpanMetadataBits;

  // |is_valid| tells whether the page is part of a slot span. If |false|,
  // |has_valid_span_after_this| tells whether it's an unused region in between
  // slot spans within the super page.
  // Note, |is_valid| has been added for clarity, but if we ever need to save
  // this bit, it can be inferred from:
  //   |!slot_span_metadata_offset && slot_span_metadata->bucket|.
  bool is_valid : 1;
  bool has_valid_span_after_this : 1;
  uint8_t unused;

  PA_ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
};
#pragma pack(pop)
static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
              "PartitionPage must be able to fit in a metadata slot");

// Certain functions rely on PartitionPage being either SlotSpanMetadata or
// SubsequentPageMetadata, and therefore freely casting between each other.
static_assert(offsetof(PartitionPage<ThreadSafe>, slot_span_metadata) == 0, "");
static_assert(offsetof(PartitionPage<ThreadSafe>, subsequent_page_metadata) ==
                  0,
              "");

template <bool thread_safe>
PA_ALWAYS_INLINE PartitionPage<thread_safe>* PartitionSuperPageToMetadataArea(
    uintptr_t super_page) {
  // This can't be just any super page, but it has to be the first super page
  // of the reservation, as we assume here that the metadata is near its
  // beginning.
  PA_DCHECK(IsReservationStart(super_page));
  PA_DCHECK(!(super_page & kSuperPageOffsetMask));
  // The metadata area is exactly one system page (the guard page) into the
  // super page.
  return reinterpret_cast<PartitionPage<thread_safe>*>(super_page +
                                                       SystemPageSize());
}

PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
    const PartitionPage<ThreadSafe>* page) {
  return &(page + 1)->subsequent_page_metadata;
}

PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
    PartitionPage<ThreadSafe>* page) {
  return &(page + 1)->subsequent_page_metadata;
}

template <bool thread_safe>
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
PartitionSuperPageToExtent(uintptr_t super_page) {
  // The very first entry of the metadata is the super page extent entry.
  return reinterpret_cast<PartitionSuperPageExtentEntry<thread_safe>*>(
      PartitionSuperPageToMetadataArea<thread_safe>(super_page));
}
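
// Illustrative sketch (not part of the header): since the extent entry is the
// first metadata entry and carries a |root| pointer, the owning root of a
// super page can be looked up as
//   PartitionSuperPageToExtent<ThreadSafe>(super_page)->root;
// which is what PartitionPage<>::FromAddr() does below when DCHECKs are on.
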
// Size that should be reserved for the state bitmap (if present) inside a
// super page. Elements of a super page are partition-page-aligned, hence the
// returned size is a multiple of partition page size.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
}

// Size that should be committed for the state bitmap (if present) inside a
// super page. It is a multiple of system page size.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedStateBitmapSize() {
  return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
}

// Returns the address/pointer to the state bitmap in the super page. It's the
// caller's responsibility to ensure that the bitmaps even exist.
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
}

PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
    uintptr_t super_page) {
  return reinterpret_cast<AllocationStateMap*>(
      SuperPageStateBitmapAddr(super_page));
}

// Returns the address of the tag bitmap of the `super_page`. Caller must
// ensure that the bitmap exists.
PA_ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
  PA_DCHECK(IsReservationStart(super_page));
  // Skip over the guard pages / metadata.
  return super_page + PartitionPageSize();
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadBegin(uintptr_t super_page,
                                                 bool with_quarantine) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + PartitionPageSize() +
         (IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0) +
         (with_quarantine ? ReservedStateBitmapSize() : 0);
}

PA_ALWAYS_INLINE uintptr_t SuperPagePayloadEnd(uintptr_t super_page) {
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  return super_page + kSuperPageSize - PartitionPageSize();
}

PA_ALWAYS_INLINE size_t SuperPagePayloadSize(uintptr_t super_page,
                                             bool with_quarantine) {
  return SuperPagePayloadEnd(super_page) -
         SuperPagePayloadBegin(super_page, with_quarantine);
}
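
// Illustrative sketch of how the helpers above compose for a normal-bucket
// super page with quarantine enabled (derived from the functions above, not
// an additional guarantee):
//   payload_begin = super_page + PartitionPageSize()   // guard + metadata
//                 + ReservedTagBitmapSize()            // MTE tag bitmap
//                 + ReservedStateBitmapSize();         // *Scan state bitmap
//   payload_end   = super_page + kSuperPageSize - PartitionPageSize();
//   SuperPagePayloadSize(super_page, /*with_quarantine=*/true)
//       == payload_end - payload_begin;
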
template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionSuperPageExtentEntry<
    thread_safe>::IncrementNumberOfNonemptySlotSpans() {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  uintptr_t super_page = base::bits::AlignDown(
      reinterpret_cast<uintptr_t>(this), kSuperPageAlignment);
  PA_DCHECK((SuperPagePayloadSize(super_page, root->IsQuarantineAllowed()) /
             PartitionPageSize()) > number_of_nonempty_slot_spans);
#endif
  ++number_of_nonempty_slot_spans;
}

template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionSuperPageExtentEntry<
    thread_safe>::DecrementNumberOfNonemptySlotSpans() {
  PA_DCHECK(number_of_nonempty_slot_spans);
  --number_of_nonempty_slot_spans;
}

template <bool thread_safe>
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
SlotSpanMetadata<thread_safe>::ToSuperPageExtent() const {
  uintptr_t super_page = reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask;
  return PartitionSuperPageToExtent<thread_safe>(super_page);
}

// Returns whether the address lies within the super page's payload area (i.e.
// the area devoted to slot spans). It doesn't check whether it's within a
// valid slot span. It merely ensures it doesn't fall in a meta-data region
// that would surely never contain user data.
PA_ALWAYS_INLINE bool IsWithinSuperPagePayload(uintptr_t address,
                                               bool with_quarantine) {
  // Quarantine can only be enabled for normal buckets in the current code.
  PA_DCHECK(!with_quarantine || IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  uintptr_t payload_start = SuperPagePayloadBegin(super_page, with_quarantine);
  uintptr_t payload_end = SuperPagePayloadEnd(super_page);
  return address >= payload_start && address < payload_end;
}

// Converts from an address inside a super page into a pointer to the
// PartitionPage object (within the super page's metadata) that describes the
// partition page where |address| is located. |address| doesn't have to be
// located within a valid (i.e. allocated) slot span, but must be within the
// super page's payload area (i.e. the area devoted to slot spans).
//
// While it is generally valid for |address| to be in the middle of an
// allocation, care has to be taken with direct maps that span multiple super
// pages. This function's behavior is undefined if |address| lies in a
// subsequent super page.
template <bool thread_safe>
PA_ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromAddr(uintptr_t address) {
  uintptr_t super_page = address & kSuperPageBaseMask;

#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(IsReservationStart(super_page));
  auto* extent = PartitionSuperPageToExtent<thread_safe>(super_page);
  PA_DCHECK(IsWithinSuperPagePayload(address,
                                     IsManagedByNormalBuckets(address) &&
                                         extent->root->IsQuarantineAllowed()));
#endif

  uintptr_t partition_page_index =
      (address & kSuperPageOffsetMask) >> PartitionPageShift();
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages. This repeats part of the payload PA_DCHECK above, which also checks
  // for other exclusions.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  return PartitionSuperPageToMetadataArea<thread_safe>(super_page) +
         partition_page_index;
}
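
// Worked sketch of the index arithmetic above, assuming 16 KiB partition
// pages (the actual value comes from PartitionPageSize()): an address at
// super_page + 0x10000 yields partition_page_index == 4, so the returned
// entry is the 5th PartitionPage in the metadata area; entry 0 is always the
// super page extent entry.
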
// Converts from a pointer to the SlotSpanMetadata object (within a super
// page's metadata) into a pointer to the beginning of the slot span. This
// works on direct maps too.
template <bool thread_safe>
PA_ALWAYS_INLINE uintptr_t SlotSpanMetadata<thread_safe>::ToSlotSpanStart(
    const SlotSpanMetadata* slot_span) {
  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(slot_span);
  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);

  // A valid |slot_span| must be past the first guard system page and within
  // the following metadata region.
  PA_DCHECK(super_page_offset > SystemPageSize());
  // Must be less than total metadata region.
  PA_DCHECK(super_page_offset <
            SystemPageSize() +
                (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
  uintptr_t partition_page_index =
      (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
  // Index 0 is invalid because it is the super page extent metadata and the
  // last index is invalid because the whole PartitionPage is set as guard
  // pages.
  PA_DCHECK(partition_page_index);
  PA_DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
  return super_page_base + (partition_page_index << PartitionPageShift());
}
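
// Worked sketch of the mapping above: the k-th PartitionPage metadata entry
// lives at super_page + SystemPageSize() + k * kPageMetadataSize, while the
// partition page it describes starts at super_page + k * PartitionPageSize().
// ToSlotSpanStart() recovers k from the metadata pointer (first formula) and
// then applies the second formula.
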
// Converts an address inside a slot span into a pointer to the
// SlotSpanMetadata object (within the super page's metadata) that describes
// the slot span containing that slot.
//
// CAUTION! For direct-mapped allocation, |address| has to be within the first
// partition page.
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromAddr(uintptr_t address) {
  auto* page = PartitionPage<thread_safe>::FromAddr(address);
  PA_DCHECK(page->is_valid);
  // Partition pages in the same slot span share the same SlotSpanMetadata
  // object (located in the first PartitionPage object of that span). Adjust
  // for that.
  page -= page->slot_span_metadata_offset;
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
  PA_DCHECK(PartitionRoot<thread_safe>::IsValidSlotSpan(slot_span));
  // For direct map, if |address| doesn't point within the first partition
  // page, |slot_span_metadata_offset| will be 0, |page| won't get shifted,
  // leaving |slot_size| at 0.
  PA_DCHECK(slot_span->bucket->slot_size);
  return slot_span;
}

// Like |FromAddr|, but asserts that |slot_start| indeed points to the
// beginning of a slot. It doesn't check if the slot is actually allocated.
//
// This works on direct maps too.
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromSlotStart(uintptr_t slot_start) {
  auto* slot_span = FromAddr(slot_start);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the offset from the slot span start is a multiple of the slot
  // size.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  PA_DCHECK(!((slot_start - slot_span_start) % slot_span->bucket->slot_size));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

// Like |FromAddr|, but asserts that |object| indeed points to the beginning of
// an object. It doesn't check if the object is actually allocated.
//
// This works on direct maps too.
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromObject(void* object) {
  uintptr_t object_addr = ObjectPtr2Addr(object);
  auto* slot_span = FromAddr(object_addr);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the object is exactly |extras_offset| away from a multiple of
  // slot size (i.e. from a slot start).
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  PA_DCHECK((object_addr - slot_span_start) % slot_span->bucket->slot_size ==
            root->flags.extras_offset);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

// Like |FromAddr|, but asserts that |address| indeed points within an object.
// It doesn't check if the object is actually allocated.
//
// CAUTION! For direct-mapped allocation, |address| has to be within the first
// partition page.
template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromObjectInnerAddr(uintptr_t address) {
  auto* slot_span = FromAddr(address);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // Checks that the address is within the expected object boundaries.
  uintptr_t slot_span_start = ToSlotSpanStart(slot_span);
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  uintptr_t shift_from_slot_start =
      (address - slot_span_start) % slot_span->bucket->slot_size;
  PA_DCHECK(shift_from_slot_start >= root->flags.extras_offset);
  // Use <= to allow an address immediately past the object.
  PA_DCHECK(shift_from_slot_start <=
            root->flags.extras_offset + slot_span->GetUsableSize(root));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_span;
}

template <bool thread_safe>
PA_ALWAYS_INLINE SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::FromObjectInnerPtr(void* ptr) {
  return FromObjectInnerAddr(ObjectInnerPtr2Addr(ptr));
}
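
// Illustrative sketch of which converter fits which input (hypothetical
// |object|, |slot_start| and |addr| values; the preconditions above are
// assumed to hold):
//   auto* a = SlotSpanMetadata<ThreadSafe>::FromObject(object);
//   auto* b = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
//   auto* c = SlotSpanMetadata<ThreadSafe>::FromObjectInnerAddr(addr);
// All three return the same SlotSpanMetadata* when their inputs refer to the
// same slot; they differ only in which DCHECKs they apply to the input.
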
template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(
    size_t raw_size) {
  PA_DCHECK(CanStoreRawSize());
  auto* subsequent_page_metadata = GetSubsequentPageMetadata(
      reinterpret_cast<PartitionPage<thread_safe>*>(this));
  subsequent_page_metadata->raw_size = raw_size;
}

template <bool thread_safe>
PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
  PA_DCHECK(CanStoreRawSize());
  const auto* subsequent_page_metadata = GetSubsequentPageMetadata(
      reinterpret_cast<const PartitionPage<thread_safe>*>(this));
  return subsequent_page_metadata->raw_size;
}

template <bool thread_safe>
PA_ALWAYS_INLINE PartitionTag*
SlotSpanMetadata<thread_safe>::DirectMapMTETag() {
  PA_DCHECK(bucket->is_direct_mapped());
  auto* subsequent_page_metadata = GetSubsequentPageMetadata(
      reinterpret_cast<PartitionPage<thread_safe>*>(this));
  return &subsequent_page_metadata->direct_map_tag;
}

template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
    PartitionFreelistEntry* new_head) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // |this| is in the metadata region, hence isn't MTE-tagged. Untag |new_head|
  // as well.
  uintptr_t new_head_untagged = UntagPtr(new_head);
  PA_DCHECK(!new_head ||
            (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
                (new_head_untagged & kSuperPageBaseMask));
#endif
  freelist_head = new_head;
  // Inserted something new in the freelist, assume that it is not sorted
  // anymore.
  freelist_is_sorted_ = false;
}

template <bool thread_safe>
PA_ALWAYS_INLINE PartitionFreelistEntry*
SlotSpanMetadata<thread_safe>::PopForAlloc(size_t size) {
  // Not using bucket->slot_size directly as the compiler doesn't know that
  // |bucket->slot_size| is the same as |size|.
  PA_DCHECK(size == bucket->slot_size);
  PartitionFreelistEntry* result = freelist_head;
  // Not setting freelist_is_sorted_ to false since this doesn't destroy
  // ordering.
  freelist_head = freelist_head->GetNext(size);
  num_allocated_slots++;
  return result;
}

template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(uintptr_t slot_start)
    PA_EXCLUSIVE_LOCKS_REQUIRED(
        PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif

  auto* entry = static_cast<internal::PartitionFreelistEntry*>(
      SlotStartAddr2Ptr(slot_start));
  // Catches an immediate double free.
  PA_CHECK(entry != freelist_head);
  // Look for double free one level deeper in debug.
  PA_DCHECK(!freelist_head ||
            entry != freelist_head->GetNext(bucket->slot_size));
  entry->SetNext(freelist_head);
  SetFreelistHead(entry);
  // A best effort double-free check. Works only on empty slot spans.
  PA_CHECK(num_allocated_slots);
  --num_allocated_slots;

  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(1);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}

template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
    PartitionFreelistEntry* head,
    PartitionFreelistEntry* tail,
    size_t number_of_freed)
    PA_EXCLUSIVE_LOCKS_REQUIRED(
        PartitionRoot<thread_safe>::FromSlotSpan(this)->lock_) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
  PA_DCHECK(!tail->GetNext(bucket->slot_size));
  PA_DCHECK(number_of_freed);
  PA_DCHECK(num_allocated_slots);
  if (CanStoreRawSize()) {
    PA_DCHECK(number_of_freed == 1);
  }
  {
    size_t number_of_entries = 0;
    for (auto* entry = head; entry;
         entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
      uintptr_t untagged_entry = UntagPtr(entry);
      // Check that all entries belong to this slot span.
      PA_DCHECK(ToSlotSpanStart(this) <= untagged_entry);
      PA_DCHECK(untagged_entry <
                ToSlotSpanStart(this) + bucket->get_bytes_per_span());
    }
    PA_DCHECK(number_of_entries == number_of_freed);
  }
#endif

  tail->SetNext(freelist_head);
  SetFreelistHead(head);
  PA_DCHECK(num_allocated_slots >= number_of_freed);
  num_allocated_slots -= number_of_freed;

  // If the span is marked full, or became empty, take the slow path to update
  // internal state.
  if (PA_UNLIKELY(marked_full || num_allocated_slots == 0)) {
    FreeSlowPath(number_of_freed);
  } else {
    // All single-slot allocations must go through the slow path to
    // correctly update the raw size.
    PA_DCHECK(!CanStoreRawSize());
  }
}

template <bool thread_safe>
PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_active() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret =
      (num_allocated_slots > 0 && (freelist_head || num_unprovisioned_slots));
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(num_allocated_slots < bucket->get_slots_per_span());
  }
  return ret;
}

template <bool thread_safe>
PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_full() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (num_allocated_slots == bucket->get_slots_per_span());
  if (ret) {
    PA_DCHECK(!freelist_head);
    PA_DCHECK(!num_unprovisioned_slots);
    // May or may not be marked full, so don't check for that.
  }
  return ret;
}

template <bool thread_safe>
PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_empty() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
  }
  return ret;
}

template <bool thread_safe>
PA_ALWAYS_INLINE bool SlotSpanMetadata<thread_safe>::is_decommitted() const {
  PA_DCHECK(this != get_sentinel_slot_span());
  bool ret = (!num_allocated_slots && !freelist_head);
  if (ret) {
    PA_DCHECK(!marked_full);
    PA_DCHECK(!num_unprovisioned_slots);
    PA_DCHECK(!in_empty_cache_);
  }
  return ret;
}

template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Reset() {
  PA_DCHECK(is_decommitted());
  num_unprovisioned_slots = bucket->get_slots_per_span();
  PA_DCHECK(num_unprovisioned_slots);
  ToSuperPageExtent()->IncrementNumberOfNonemptySlotSpans();
  next_slot_span = nullptr;
}

// Returns the state bitmap from an address within a normal-bucket super page.
// It's the caller's responsibility to ensure that the bitmap exists.
PA_ALWAYS_INLINE AllocationStateMap* StateBitmapFromAddr(uintptr_t address) {
  PA_DCHECK(IsManagedByNormalBuckets(address));
  uintptr_t super_page = address & kSuperPageBaseMask;
  return SuperPageStateBitmap(super_page);
}

// Iterates over all slot spans in a super-page. |Callback| must return true if
// early return is needed.
template <bool thread_safe, typename Callback>
void IterateSlotSpans(uintptr_t super_page,
                      bool with_quarantine,
                      Callback callback) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  PA_DCHECK(!(super_page % kSuperPageAlignment));
  auto* extent_entry = PartitionSuperPageToExtent<thread_safe>(super_page);
  extent_entry->root->lock_.AssertAcquired();
#endif

  using Page = PartitionPage<thread_safe>;
  using SlotSpan = SlotSpanMetadata<thread_safe>;
  auto* const first_page =
      Page::FromAddr(SuperPagePayloadBegin(super_page, with_quarantine));
  auto* const last_page =
      Page::FromAddr(SuperPagePayloadEnd(super_page) - PartitionPageSize());
  Page* page;
  SlotSpan* slot_span;
  for (page = first_page; page <= last_page;) {
    PA_DCHECK(!page->slot_span_metadata_offset);  // Ensure slot span beginning.
    if (!page->is_valid) {
      if (page->has_valid_span_after_this) {
        // The page doesn't represent a valid slot span, but there is another
        // one somewhere after this. Keep iterating to find it.
        ++page;
        continue;
      }
      // There are currently no valid spans from here on. No need to iterate
      // the rest of the super page.
      break;
    }
    slot_span = &page->slot_span_metadata;
    if (callback(slot_span))
      return;
    page += slot_span->bucket->get_pages_per_slot_span();
  }
  // Each super page must have at least one valid slot span.
  PA_DCHECK(page > first_page);
  // Just a quick check that the search ended at a valid slot span and there
  // was no unnecessary iteration over gaps afterwards.
  PA_DCHECK(page == reinterpret_cast<Page*>(slot_span) +
                        slot_span->bucket->get_pages_per_slot_span());
}
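
// Example use (illustrative sketch, not part of the header; assumes the
// root's lock_ is held, as required by the DCHECK above):
//   size_t nonempty = 0;
//   IterateSlotSpans<ThreadSafe>(
//       super_page, /*with_quarantine=*/false,
//       [&](SlotSpanMetadata<ThreadSafe>* slot_span) {
//         if (!slot_span->is_empty() && !slot_span->is_decommitted())
//           ++nonempty;
//         return false;  // Keep iterating; returning true stops early.
//       });
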
}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_