// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

#include <algorithm>
#include <climits>
#include <cstddef>
#include <limits>

#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
#endif

namespace partition_alloc {

// Bit flag constants used as the `flag` argument of
// PartitionRoot::AllocWithFlags, AlignedAllocWithFlags, etc.
struct AllocFlags {
  static constexpr unsigned int kReturnNull = 1 << 0;
  static constexpr unsigned int kZeroFill = 1 << 1;
  // Don't allow allocation override hooks. Override hooks are expected to
  // check for the presence of this flag and return false if it is active.
  static constexpr unsigned int kNoOverrideHooks = 1 << 2;
  // Never let a memory tool like ASan (if active) perform the allocation.
  static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
  // Don't allow any hooks (override or observers).
  static constexpr unsigned int kNoHooks = 1 << 4;  // Internal.
  // If the allocation requires a "slow path" (such as allocating/committing a
  // new slot span), return nullptr instead. Note that this makes all large
  // allocations return nullptr, such as direct-mapped ones, and even for
  // smaller ones, a nullptr value is common.
  static constexpr unsigned int kFastPathOrReturnNull = 1 << 5;  // Internal.

  static constexpr unsigned int kLastFlag = kFastPathOrReturnNull;
};
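
// Illustrative only (a sketch, not part of this header): these flags are meant
// to be OR-ed together and passed to the allocation entry points declared
// elsewhere, along the lines of
//   void* p = root->AllocWithFlags(
//       AllocFlags::kReturnNull | AllocFlags::kZeroFill, size, "type name");
// in which case the caller must handle a nullptr return because of
// kReturnNull.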

// Bit flag constants used as the `flag` argument of
// PartitionRoot::FreeWithFlags.
struct FreeFlags {
  // See AllocFlags::kNoMemoryToolOverride.
  static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;

  static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
};

namespace internal {

// Size of a cache line. Not all CPUs in the world have a 64-byte cache line
// size, but as of 2021, most do. This is in particular the case for almost all
// x86_64 and almost all ARM CPUs supported by Chromium. As this is used for
// static alignment, we cannot query the CPU at runtime to determine the actual
// alignment, so we use 64 bytes everywhere. Since this is only used to avoid
// false sharing, getting this wrong only results in lower performance, not
// incorrect code.
constexpr size_t kPartitionCachelineSize = 64;

// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
//
// *Super pages* are the underlying system allocations we make. Super pages
// contain multiple partition pages and include space for a small amount of
// metadata per partition page.
//
// Inside super pages, we store *slot spans*. A slot span is a contiguous range
// of one or more `PartitionPage`s that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64 KiB
// and other constant values, we pack _all_ `PartitionRoot::Alloc` sizes
// perfectly up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
  return 16;  // 64 KiB
}
#elif defined(ARCH_CPU_PPC64)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
  return 18;  // 256 KiB
}
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
    (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
  return PageAllocationGranularityShift() + 2;
}
#else
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() {
  return 14;  // 16 KiB
}
#endif

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageSize() {
  return 1 << PartitionPageShift();
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageOffsetMask() {
  return PartitionPageSize() - 1;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageBaseMask() {
  return ~PartitionPageOffsetMask();
}
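
// Example (illustrative): for an address `a` inside a partition page,
//   a & PartitionPageBaseMask()    is the base address of that page, and
//   a & PartitionPageOffsetMask()  is the offset of `a` within it.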

// Number of system pages per regular slot span. Above this limit, we call it
// a single-slot span, as the span literally hosts only one slot, and has a
// somewhat different implementation. At run time, single-slot spans can be
// differentiated with a call to CanStoreRawSize().
// TODO: Should this be 1 on platforms with a page size larger than 4 KiB, e.g.
// ARM macOS or defined(_MIPS_ARCH_LOONGSON)?
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;

// To avoid fragmentation via never-used freelist entries, we hand out
// partition freelist sections gradually, in units of the dominant system page
// size. What we're actually doing is avoiding filling the full `PartitionPage`
// (16 KiB) with freelist pointers right away. Writing freelist pointers will
// fault and dirty a private page, which is very wasteful if we never actually
// store objects there.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() {
  return PartitionPageSize() >> SystemPageShift();
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxSystemPagesPerRegularSlotSpan() {
  return NumSystemPagesPerPartitionPage() *
         kMaxPartitionPagesPerRegularSlotSpan;
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxRegularSlotSpanSize() {
  return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}
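
// Worked example (assuming the default 14-bit partition page shift and 4 KiB
// system pages; the platforms special-cased above differ): PartitionPageSize()
// is 16 KiB, NumSystemPagesPerPartitionPage() is 4, and
// MaxRegularSlotSpanSize() is 4 * 16 KiB = 64 KiB, which is the maximum slot
// span size mentioned earlier in this file.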

// The maximum size that is used in an alternate bucket distribution. Above
// this threshold, we have only 1 slot per slot span, so external fragmentation
// doesn't matter. Using the alternate bucket distribution past this threshold
// therefore has no benefit, and only increases internal fragmentation.
//
// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
// that is not constexpr on all platforms, so on those platforms we hardcode
// it, even though this may be too low, e.g. on systems with a page size
// larger than 4 KiB.
constexpr size_t kHighThresholdForAlternateDistribution =
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
    MaxRegularSlotSpanSize();
#else
    1 << 16;
#endif

// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
// freeing memory very fast. The 2 MiB size & alignment were chosen because
// this virtual address block represents a full but single page-table
// allocation on ARM, ia32 and x64, which may be slightly more performance- and
// memory-efficient. (Note that these super pages are backed by 4 KiB system
// pages and have nothing to do with the OS concept of "huge pages"/"large
// pages", even though the size coincides.)
//
// The layout of the super page is as follows. The sizes below are the same for
// 32- and 64-bit platforms.
//
// +-----------------------+
// | Guard page (4 KiB)    |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB)   |
// | TagBitmap             |
// | *Scan State Bitmap    |
// | Slot span             |
// | Slot span             |
// | ...                   |
// | Slot span             |
// | Guard pages (16 KiB)  |
// +-----------------------+
//
// TagBitmap is only present when
// defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS) is true. The State
// Bitmap is inserted for partitions that may have quarantine enabled.
//
// If refcount_at_end_allocation is enabled, a RefcountBitmap (4 KiB) is
// inserted after the Metadata page for BackupRefPtr. The guard pages after the
// bitmap will be 4 KiB.
//
// ...
// | Metadata page (4 KiB)  |
// | RefcountBitmap (4 KiB) |
// | Guard pages (4 KiB)    |
// ...
//
// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
// that slot spans of different sizes may co-exist within one super page. Even
// slot spans of the same span size may host different slot sizes. However, all
// slots within a single span have the same size.
//
// The metadata page has the following format. Note that a `PartitionPage`
// that is not at the head of a slot span is "unused" (for the most part, it
// only stores the offset from the head page). In other words, the metadata for
// a slot span is stored only in the first `PartitionPage` of the slot span.
// Metadata accesses to other `PartitionPage`s are redirected to the first
// `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B)                 |
// | PartitionPage of slot span 1 (32 B, used)   |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 1 (32 B, unused) |
// | PartitionPage of slot span 2 (32 B, used)   |
// | PartitionPage of slot span 3 (32 B, used)   |
// | ...                                         |
// | PartitionPage of slot span N (32 B, used)   |
// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
// A direct-mapped page has an identical layout at the beginning to fake it
// looking like a super page:
//
// +---------------------------------+
// | Guard page (4 KiB)              |
// | Metadata page (4 KiB)           |
// | Guard pages (8 KiB)             |
// | Direct mapped object            |
// | Guard page (4 KiB, 32-bit only) |
// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout (on 64-bit
// architectures; on 32-bit ones the layout is identical, but some sizes differ
// due to smaller pointers):
//
// +----------------------------------+
// | SuperPageExtentEntry (32 B)      |
// | PartitionPage (32 B)             |
// | PartitionBucket (40 B)           |
// | PartitionDirectMapExtent (32 B)  |
// +----------------------------------+
//
// See |PartitionDirectMapMetadata| for details.
constexpr size_t kGiB = 1024 * 1024 * 1024ull;
constexpr size_t kSuperPageShift = 21;  // 2 MiB
constexpr size_t kSuperPageSize = 1 << kSuperPageShift;
constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
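
// Example (illustrative): for an address `a` inside a super page,
//   a & kSuperPageBaseMask    is the 2 MiB-aligned start of that super page,
//   a & kSuperPageOffsetMask  is the offset of `a` within it.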

// GigaCage is generally split into two pools, one which supports BackupRefPtr
// (BRP) and one that doesn't.
#if defined(PA_HAS_64_BITS_POINTERS)
// The 3rd, Configurable Pool is only available in 64-bit mode.
constexpr size_t kNumPools = 3;
// Maximum GigaCage pool size. With the exception of the Configurable Pool, it
// is also the actual size, unless PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE is set,
// which allows choosing a different size at initialization time for certain
// configurations.
//
// Special-case Android and iOS, which incur test failures with a larger
// GigaCage. Regardless, allocating >8 GiB with malloc() on these platforms is
// unrealistic as of 2022.
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
constexpr size_t kPoolMaxSize = 16 * kGiB;
#endif
#else  // defined(PA_HAS_64_BITS_POINTERS)
constexpr size_t kNumPools = 2;
constexpr size_t kPoolMaxSize = 4 * kGiB;
#endif
constexpr size_t kMaxSuperPagesInPool = kPoolMaxSize / kSuperPageSize;

static constexpr pool_handle kRegularPoolHandle = 1;
static constexpr pool_handle kBRPPoolHandle = 2;
static constexpr pool_handle kConfigurablePoolHandle = 3;

// Slots larger than this size will not receive MTE protection. Pages intended
// for allocations larger than this constant should not be backed with PROT_MTE
// (which saves shadow tag memory). We also save CPU cycles by skipping tagging
// of large areas which are less likely to benefit from MTE protection.
// TODO(Richard.Townsend@arm.com): adjust RecommitSystemPagesForData to skip
// PROT_MTE.
constexpr size_t kMaxMemoryTaggingSize = 1024;

#if defined(PA_HAS_MEMORY_TAGGING)
// Returns whether the tag of |object| overflowed, meaning the containing slot
// needs to be moved to quarantine.
PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
  // The tag with which the slot is put to quarantine.
  constexpr uintptr_t kOverflowTag = 0x0f00000000000000uLL;
  static_assert((kOverflowTag & kPtrTagMask) != 0,
                "Overflow tag must be in tag bits");
  return (reinterpret_cast<uintptr_t>(object) & kPtrTagMask) == kOverflowTag;
}
#endif  // defined(PA_HAS_MEMORY_TAGGING)

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() {
  return kSuperPageSize >> PartitionPageShift();
}
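
// With the default 16 KiB partition pages, this is 2 MiB / 16 KiB = 128
// partition pages per super page.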

constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
  return kMaxSuperPagesInPool;
}

#if defined(PA_HAS_64_BITS_POINTERS)
// In 64-bit mode, the direct map allocation granularity is the super page
// size, because this is the reservation granularity of the GigaCage.
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
  return kSuperPageSize;
}

constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
  return kSuperPageShift;
}
#else  // defined(PA_HAS_64_BITS_POINTERS)
// In 32-bit mode, address space is a scarce resource. Use the system
// allocation granularity, which is the lowest possible address space
// allocation unit. However, don't go below partition page size, so that
// GigaCage bitmaps don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularity() {
  return std::max(PageAllocationGranularity(), PartitionPageSize());
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() {
  return std::max(PageAllocationGranularityShift(), PartitionPageShift());
}
#endif  // defined(PA_HAS_64_BITS_POINTERS)

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() {
  return DirectMapAllocationGranularity() - 1;
}
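
// Illustrative only (a sketch, not the reservation code itself): rounding a
// direct-map request up to the reservation granularity follows the usual
// align-up pattern for power-of-2 granularities, e.g.
//   size_t reserved_size =
//       (requested_size + DirectMapAllocationGranularityOffsetMask()) &
//       ~DirectMapAllocationGranularityOffsetMask();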

// The "order" of an allocation is closely related to the power-of-2 size of
// the allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbering starts
// at index 1 for the least-significant-bit.
//
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
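// For example, a 48-byte allocation (0b110000) has its most-significant bit at
// index 6, so it is order 6, consistent with order 6 covering 32->63.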

// PartitionAlloc should return memory properly aligned for any type, to behave
// properly as a generic allocator. This is not strictly required as long as
// types are explicitly allocated with PartitionAlloc, but is required in order
// to use it as a malloc() implementation, and generally to match malloc()'s
// behavior.
//
// In practice, this means 8-byte alignment on 32-bit architectures, and
// 16-byte alignment on 64-bit ones.
//
// Keep in sync with //tools/memory/partition_allocator/objects_per_size.py.
constexpr size_t kMinBucketedOrder =
    kAlignment == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 20; its smallest bucketed size is
// 1 << (20 - 1), and the order covers [512 KiB, 1 MiB):
constexpr size_t kMaxBucketedOrder = 20;
constexpr size_t kNumBucketedOrders =
    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
// 4 buckets per order (for the higher orders).
constexpr size_t kNumBucketsPerOrderBits = 2;
constexpr size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
constexpr size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
constexpr size_t kMaxBucketSpacing =
    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
constexpr size_t kMaxBucketed = (1 << (kMaxBucketedOrder - 1)) +
                                ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
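
// Worked example (assuming kAlignment == 16, i.e. a typical 64-bit platform):
// kMinBucketedOrder is 5, so kNumBucketedOrders is 16 and kNumBuckets is 64;
// kSmallestBucket is 16 bytes, kMaxBucketSpacing is 1 << 17 = 128 KiB, and
// kMaxBucketed is 512 KiB + 3 * 128 KiB = 896 KiB.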

// Limit when downsizing a direct mapping using `realloc`:
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// Intentionally set to less than 2 GiB to make sure that a 2 GiB allocation
// fails. This is a security choice in Chrome, to help make size_t vs. int bugs
// harder to exploit.
// The definition of MaxDirectMapped() depends only on constants that are
// unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
constexpr PA_ALWAYS_INLINE size_t MaxDirectMapped() {
  // Subtract kSuperPageSize to accommodate the granularity inside
  // PartitionRoot::GetDirectMapReservationSize.
  return (1UL << 31) - kSuperPageSize;
}
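
// With kSuperPageSize == 2 MiB, MaxDirectMapped() is 2 GiB - 2 MiB.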

// Max alignment supported by AlignedAllocWithFlags().
// kSuperPageSize alignment can't be easily supported, because each super page
// starts with guard pages & metadata.
constexpr size_t kMaxSupportedAlignment = kSuperPageSize / 2;

constexpr size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;

// When a SlotSpan becomes empty, the allocator tries to avoid re-using it
// immediately, to help with fragmentation. At this point, it becomes dirty
// committed memory, which we want to minimize. This could be decommitted
// immediately, but that would imply doing a lot of system calls. In particular,
// for single-slot SlotSpans, a malloc() / free() loop would cause a *lot* of
// system calls.
//
// As an intermediate step, empty SlotSpans are placed into a per-partition
// global ring buffer, giving the newly-empty SlotSpan a chance to be re-used
// before getting decommitted. A new entry (i.e. a newly-empty SlotSpan) taking
// the place used by a previous one will cause the previous SlotSpan to be
// decommitted immediately, provided that it is still empty.
//
// Setting this value higher means giving more time for reuse to happen, at the
// cost of possibly increasing peak committed memory usage (and increasing the
// size of PartitionRoot a bit, since the ring buffer is there). Note that the
// ring buffer doesn't necessarily contain an empty SlotSpan, as SlotSpans are
// *not* removed from it when re-used. So the ring buffer really is a buffer of
// *possibly* empty SlotSpans.
//
// In all cases, PartitionRoot::PurgeMemory() with the
// PurgeFlags::kDecommitEmptySlotSpans flag will eagerly decommit all entries
// in the ring buffer, so with periodic purge enabled, this typically happens
// every few seconds.
constexpr size_t kEmptyCacheIndexBits = 7;
// kMaxFreeableSpans is the buffer size, but is never used as an index value,
// hence <= is appropriate.
constexpr size_t kMaxFreeableSpans = 1 << kEmptyCacheIndexBits;
constexpr size_t kDefaultEmptySlotSpanRingSize = 16;
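
// With kEmptyCacheIndexBits == 7, the ring buffer can hold up to 1 << 7 = 128
// SlotSpans; the default ring size is 16 entries.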

// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably indicating an "out of virtual address space" condition), a
// special crash stack trace is generated at
// `PartitionOutOfMemoryWithLotsOfUncommitedPages`. This is to distinguish "out
// of virtual address space" from "out of physical memory" in crash reports.
constexpr size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB

// These byte values match tcmalloc.
constexpr unsigned char kUninitializedByte = 0xAB;
constexpr unsigned char kFreedByte = 0xCD;
constexpr unsigned char kQuarantinedByte = 0xEF;

// 1 is smaller than anything we can use, as it is not properly aligned. Not
// using a large size, since PartitionBucket::slot_size is a uint32_t, and
// static_cast<uint32_t>(-1) is too close to a "real" size.
constexpr size_t kInvalidBucketSize = 1;

}  // namespace internal

// These constants are used outside PartitionAlloc itself, so we provide
// non-internal aliases here.
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize;

}  // namespace partition_alloc

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_