// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_

// DESCRIPTION
// PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
// to malloc() and free().
//
// The main difference is that a PartitionRoot object must be supplied to these
// functions, representing a specific "heap partition" that will be used to
// satisfy the allocation. Different partitions are guaranteed to exist in
// separate address spaces, including being separate from the main system
// heap. If the contained objects are all freed, physical memory is returned to
// the system but the address space remains reserved. See PartitionAlloc.md for
// other security properties PartitionAlloc provides.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
// PartitionAllocator classes. To minimize the instruction count to the fullest
// extent possible, the PartitionRoot is really just a header adjacent to other
// data areas provided by the allocator class.
//
// The constraints for PartitionRoot::Alloc() are:
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
//   INT_MAX bytes for security reasons).
// - Bucketing is by approximate size, for example an allocation of 4000 bytes
//   might be placed into a 4096-byte bucket. Bucket sizes are chosen to try
//   and keep worst-case waste to ~10%.
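//
// Illustrative usage sketch (not part of this header; it assumes a
// `PartitionAllocator` instance named `allocator` that has already been
// initialized with suitable PartitionOptions, and that its root() accessor is
// used to reach the PartitionRoot):
//
//   auto* root = allocator.root();
//   void* buffer = root->Alloc(4000, "MyType");  // lands in a ~4096-byte
//                                                // bucket
//   // ... use |buffer| ...
//   partition_alloc::PartitionRoot<
//       partition_alloc::internal::ThreadSafe>::Free(buffer);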
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/chromecast_buildflags.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_hooks.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/partition_bucket_lookup.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "build/build_config.h"

// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags)       \
  if (size > partition_alloc::internal::MaxDirectMapped()) { \
    if (flags & AllocFlags::kReturnNull) {                    \
      return nullptr;                                         \
    }                                                         \
    PA_CHECK(false);                                          \
  }
namespace partition_alloc::internal {

// This type trait verifies a type can be used as a pointer offset.
//
// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t)
// values. Smaller types are also allowed.
template <typename Z>
static constexpr bool offset_type =
    std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);

// We want this size to be big enough that we have time to start up other
// scripts _before_ we wrap around.
static constexpr size_t kAllocInfoSize = 1 << 24;

struct AllocInfo {
  std::atomic<size_t> index{0};
  struct {
    uintptr_t addr;
    size_t size;
  } allocs[kAllocInfoSize] = {};
};

#if BUILDFLAG(RECORD_ALLOC_INFO)
extern AllocInfo g_allocs;

void RecordAllocOrFree(uintptr_t addr, size_t size);
#endif  // BUILDFLAG(RECORD_ALLOC_INFO)

}  // namespace partition_alloc::internal
namespace partition_alloc {

namespace internal {

// Avoid including partition_address_space.h from this .h file, by moving the
// call to IsManagedByPartitionAllocBRPPool into the .cc file.
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address);
#else
PA_ALWAYS_INLINE void DCheckIfManagedByPartitionAllocBRPPool(
    uintptr_t address) {}
#endif

#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
class PartitionRootEnumerator;
#endif

}  // namespace internal

// Bit flag constants used to purge memory. See PartitionRoot::PurgeMemory.
//
// In order to support bit operations like `flag_a | flag_b`, the old-fashioned
// enum (+ surrounding named struct) is used instead of enum class.
struct PurgeFlags {
  enum : int {
    // Decommitting the ring list of empty slot spans is reasonably fast.
    kDecommitEmptySlotSpans = 1 << 0,
    // Discarding unused system pages is slower, because it involves walking
    // all freelists in all active slot spans of all buckets >= system page
    // size. It often frees a similar amount of memory to decommitting the
    // empty slot spans, though.
    kDiscardUnusedSystemPages = 1 << 1,
    // Aggressively reclaim memory. This is meant to be used in low-memory
    // situations, not for periodic memory reclaiming.
    kAggressiveReclaim = 1 << 2,
  };
};
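// Illustrative sketch of combining the flags (|root| is assumed to be a
// pointer to an initialized PartitionRoot); a periodic reclaim task would
// typically OR the two cheaper flags together:
//
//   root->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans |
//                     PurgeFlags::kDiscardUnusedSystemPages);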
// Options struct used to configure PartitionRoot and PartitionAllocator.
struct PartitionOptions {
  enum class AlignedAlloc : uint8_t {
    // By default all allocations will be aligned to `kAlignment`,
    // likely to be 8B or 16B depending on platforms and toolchains.
    // AlignedAlloc() allows enforcing a higher alignment.
    // This option determines whether it is supported for the partition.
    // Allowing AlignedAlloc() comes at a cost of disallowing extras in front
    // of the allocation.
    kDisallowed,
    kAllowed,
  };

  enum class ThreadCache : uint8_t {
    kDisabled,
    kEnabled,
  };

  enum class Quarantine : uint8_t {
    kDisallowed,
    kAllowed,
  };

  enum class Cookie : uint8_t {
    kDisallowed,
    kAllowed,
  };

  enum class BackupRefPtr : uint8_t {
    kDisabled,
    kEnabled,
  };

  enum class BackupRefPtrZapping : uint8_t {
    kDisabled,
    kEnabled,
  };

  enum class UseConfigurablePool : uint8_t {
    kNo,
    kIfAvailable,
  };

  // Constructor to suppress aggregate initialization.
  constexpr PartitionOptions(AlignedAlloc aligned_alloc,
                             ThreadCache thread_cache,
                             Quarantine quarantine,
                             Cookie cookie,
                             BackupRefPtr backup_ref_ptr,
                             BackupRefPtrZapping backup_ref_ptr_zapping,
                             UseConfigurablePool use_configurable_pool)
      : aligned_alloc(aligned_alloc),
        thread_cache(thread_cache),
        quarantine(quarantine),
        cookie(cookie),
        backup_ref_ptr(backup_ref_ptr),
        backup_ref_ptr_zapping(backup_ref_ptr_zapping),
        use_configurable_pool(use_configurable_pool) {}

  AlignedAlloc aligned_alloc;
  ThreadCache thread_cache;
  Quarantine quarantine;
  Cookie cookie;
  BackupRefPtr backup_ref_ptr;
  BackupRefPtrZapping backup_ref_ptr_zapping;
  UseConfigurablePool use_configurable_pool;
};
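// Illustrative sketch of building an options object (the specific values are
// an assumption for the example, not a recommendation); the object is then
// handed to a PartitionAllocator, which owns the PartitionRoot:
//
//   constexpr partition_alloc::PartitionOptions kOpts(
//       partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
//       partition_alloc::PartitionOptions::ThreadCache::kEnabled,
//       partition_alloc::PartitionOptions::Quarantine::kDisallowed,
//       partition_alloc::PartitionOptions::Cookie::kAllowed,
//       partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
//       partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
//       partition_alloc::PartitionOptions::UseConfigurablePool::kNo);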
// Never instantiate a PartitionRoot directly, instead use
// PartitionAllocator.
template <bool thread_safe>
struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
  using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
  using Page = internal::PartitionPage<thread_safe>;
  using Bucket = internal::PartitionBucket<thread_safe>;
  using FreeListEntry = internal::PartitionFreelistEntry;
  using SuperPageExtentEntry =
      internal::PartitionSuperPageExtentEntry<thread_safe>;
  using DirectMapExtent = internal::PartitionDirectMapExtent<thread_safe>;
  using PCScan = internal::PCScan;

  enum class QuarantineMode : uint8_t {
    kAlwaysDisabled,
    kDisabledByDefault,
    kEnabled,
  };

  enum class ScanMode : uint8_t {
    kDisabled,
    kEnabled,
  };

  // Flags accessed on fast paths.
  //
  // Careful! PartitionAlloc's performance is sensitive to its layout. Please
  // put the fast-path objects in the struct below, and the other ones after
  // the union.
  struct Flags {
    // Defines whether objects should be quarantined for this root.
    QuarantineMode quarantine_mode;
    // Defines whether the root should be scanned.
    ScanMode scan_mode;
    bool with_thread_cache = false;
    bool with_denser_bucket_distribution = false;
    bool allow_aligned_alloc;
    bool allow_cookie;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    bool brp_enabled_;
    bool brp_zapping_enabled_;
#endif
    bool use_configurable_pool;

#if defined(PA_EXTRAS_REQUIRED)
    uint32_t extras_size;
    uint32_t extras_offset;
#else
    // Teach the compiler that code can be optimized in builds that use no
    // extras.
    static inline constexpr uint32_t extras_size = 0;
    static inline constexpr uint32_t extras_offset = 0;
#endif  // defined(PA_EXTRAS_REQUIRED)
  };
  // Read-mostly flags.
  union {
    Flags flags;

    // The flags above are accessed for all (de)allocations, and are mostly
    // read-only. They should not share a cacheline with the data below, which
    // is only touched when the lock is taken.
    uint8_t one_cacheline[internal::kPartitionCachelineSize];
  };

  // Not used on the fastest path (thread cache allocations), but on the fast
  // path of the central allocator.
  static_assert(thread_safe, "Only the thread-safe root is supported.");
  ::partition_alloc::internal::Lock lock_;

  Bucket buckets[internal::kNumBuckets] = {};
  Bucket sentinel_bucket{};

  // All fields below this comment are not accessed on the fast path.
  bool initialized = false;

  // Bookkeeping.
  // - total_size_of_super_pages - total virtual address space for normal
  //   bucket super pages
  // - total_size_of_direct_mapped_pages - total virtual address space for
  //   direct-map regions
  // - total_size_of_committed_pages - total committed pages for slots (doesn't
  //   include metadata, bitmaps (if any), or any data outside of the regions
  //   described in #1 and #2)
  // Invariant: total_size_of_allocated_bytes <=
  //            total_size_of_committed_pages <
  //                total_size_of_super_pages +
  //                total_size_of_direct_mapped_pages.
  // Invariant: total_size_of_committed_pages <= max_size_of_committed_pages.
  // Invariant: total_size_of_allocated_bytes <= max_size_of_allocated_bytes.
  // Invariant: max_size_of_allocated_bytes <= max_size_of_committed_pages.
  // Since all operations on the atomic variables have relaxed semantics, we
  // don't check these invariants with DCHECKs.
  std::atomic<size_t> total_size_of_committed_pages{0};
  std::atomic<size_t> max_size_of_committed_pages{0};
  std::atomic<size_t> total_size_of_super_pages{0};
  std::atomic<size_t> total_size_of_direct_mapped_pages{0};
  size_t total_size_of_allocated_bytes PA_GUARDED_BY(lock_) = 0;
  size_t max_size_of_allocated_bytes PA_GUARDED_BY(lock_) = 0;
  // Atomic, because system calls can be made without the lock held.
  std::atomic<uint64_t> syscall_count{};
  std::atomic<uint64_t> syscall_total_time_ns{};
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  std::atomic<size_t> total_size_of_brp_quarantined_bytes{0};
  std::atomic<size_t> total_count_of_brp_quarantined_slots{0};
  std::atomic<size_t> cumulative_size_of_brp_quarantined_bytes{0};
  std::atomic<size_t> cumulative_count_of_brp_quarantined_slots{0};
#endif
  // Slot span memory which has been provisioned, and is currently unused as
  // it's part of an empty SlotSpan. This is not clean memory, since it has
  // either been used for a memory allocation, and/or contains freelist
  // entries. But it might have been moved to swap. Note that all this memory
  // can be decommitted at any time.
  size_t empty_slot_spans_dirty_bytes PA_GUARDED_BY(lock_) = 0;

  // Only tolerate up to |total_size_of_committed_pages >>
  // max_empty_slot_spans_dirty_bytes_shift| dirty bytes in empty slot
  // spans. That is, the default value of 3 tolerates up to 1/8. Since
  // |empty_slot_spans_dirty_bytes| is never strictly larger than
  // total_size_of_committed_pages, setting this to 0 removes the cap. This is
  // useful to make tests deterministic and easier to reason about.
  int max_empty_slot_spans_dirty_bytes_shift = 3;

  uintptr_t next_super_page = 0;
  uintptr_t next_partition_page = 0;
  uintptr_t next_partition_page_end = 0;
  SuperPageExtentEntry* current_extent = nullptr;
  SuperPageExtentEntry* first_extent = nullptr;
  DirectMapExtent* direct_map_list PA_GUARDED_BY(lock_) = nullptr;
  SlotSpan*
      global_empty_slot_span_ring[internal::kMaxFreeableSpans] PA_GUARDED_BY(
          lock_) = {};
  int16_t global_empty_slot_span_ring_index PA_GUARDED_BY(lock_) = 0;
  int16_t global_empty_slot_span_ring_size PA_GUARDED_BY(lock_) =
      internal::kDefaultEmptySlotSpanRingSize;

  // Integrity check = ~reinterpret_cast<uintptr_t>(this).
  uintptr_t inverted_self = 0;
  std::atomic<int> thread_caches_being_constructed_{0};

  bool quarantine_always_for_testing = false;

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  partition_alloc::PartitionTag current_partition_tag = 0;
  // Points to the end of the committed tag bitmap region.
  uintptr_t next_tag_bitmap_page = 0;
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  PartitionRoot()
      : flags{.quarantine_mode = QuarantineMode::kAlwaysDisabled,
              .scan_mode = ScanMode::kDisabled} {}
  explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
  ~PartitionRoot();

  // This will unreserve any space in the GigaCage that the PartitionRoot is
  // using. This is needed because many tests create and destroy many
  // PartitionRoots over the lifetime of a process, which can exhaust the
  // GigaCage and cause tests to fail.
  void DestructForTesting();

  // Public API
  //
  // Allocates out of the given bucket. Properly, this function should probably
  // be in PartitionBucket, but because the implementation needs to be inlined
  // for performance, and because it needs to inspect SlotSpanMetadata,
  // it becomes impossible to have it in PartitionBucket as this causes a
  // cyclical dependency on SlotSpanMetadata function implementations.
  //
  // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
  // preserves the layering of the includes.
  void Init(PartitionOptions);

  void EnableThreadCacheIfSupported();

  PA_ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
  PA_ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
  // These two functions work unconditionally for normal buckets.
  // For direct map, they only work for the first super page of a reservation,
  // (see partition_alloc_constants.h for the direct map allocation layout).
  // In particular, the functions always work for a pointer to the start of a
  // reservation.
  PA_ALWAYS_INLINE static PartitionRoot* FromFirstSuperPage(
      uintptr_t super_page);
  PA_ALWAYS_INLINE static PartitionRoot* FromAddrInFirstSuperpage(
      uintptr_t address);

  PA_ALWAYS_INLINE void DecreaseTotalSizeOfAllocatedBytes(SlotSpan* slot_span)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void IncreaseTotalSizeOfAllocatedBytes(SlotSpan* slot_span,
                                                          size_t raw_size)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
                                                          size_t len)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
                                                          size_t len,
                                                          size_t raw_size)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
  PA_ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
  PA_ALWAYS_INLINE void DecommitSystemPagesForData(
      uintptr_t address,
      size_t length,
      PageAccessibilityDisposition accessibility_disposition)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void RecommitSystemPagesForData(
      uintptr_t address,
      size_t length,
      PageAccessibilityDisposition accessibility_disposition)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE bool TryRecommitSystemPagesForData(
      uintptr_t address,
      size_t length,
      PageAccessibilityDisposition accessibility_disposition)
      PA_LOCKS_EXCLUDED(lock_);

  [[noreturn]] PA_NOINLINE void OutOfMemory(size_t size);

  // Returns a pointer aligned on |alignment|, or nullptr.
  //
  // |alignment| has to be a power of two and a multiple of sizeof(void*) (as
  // in posix_memalign() for POSIX systems). The returned pointer may include
  // padding, and can be passed to |Free()| later.
  //
  // NOTE: This is incompatible with anything that adds extras before the
  // returned pointer, such as ref-count.
  PA_ALWAYS_INLINE void* AlignedAllocWithFlags(unsigned int flags,
                                               size_t alignment,
                                               size_t requested_size);
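  // Illustrative sketch (hypothetical call site, not part of the API
  // documentation): requesting a 64-byte-aligned, 100-byte allocation and
  // returning nullptr on failure instead of crashing:
  //
  //   void* p = root->AlignedAllocWithFlags(AllocFlags::kReturnNull,
  //                                         /*alignment=*/64,
  //                                         /*requested_size=*/100);
  //   if (p) {
  //     // ... use |p| ..., then:
  //     PartitionRoot<internal::ThreadSafe>::Free(p);
  //   }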
  // PartitionAlloc supports multiple partitions, and hence multiple callers to
  // these functions. Setting PA_ALWAYS_INLINE bloats code, and can be
  // detrimental to performance, for instance if multiple callers are hot (by
  // increasing cache footprint). Set PA_NOINLINE on the "basic" top-level
  // functions to mitigate that for "vanilla" callers.
  PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t requested_size,
                                       const char* type_name)
      PA_MALLOC_ALIGNED;
  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlags(unsigned int flags,
                                                     size_t requested_size,
                                                     const char* type_name)
      PA_MALLOC_ALIGNED;
  // Same as |AllocWithFlags()|, but allows specifying |slot_span_alignment|.
  // It has to be a multiple of partition page size, greater than 0 and no
  // greater than kMaxSupportedAlignment. If it equals exactly 1 partition
  // page, no special action is taken as PartitionAlloc naturally guarantees
  // this alignment, otherwise a sub-optimal allocation strategy is used to
  // guarantee the higher-order alignment.
  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsInternal(
      unsigned int flags,
      size_t requested_size,
      size_t slot_span_alignment,
      const char* type_name) PA_MALLOC_ALIGNED;
  // Same as |AllocWithFlags()|, but bypasses the allocator hooks.
  //
  // This is separate from AllocWithFlags() because other callers of
  // AllocWithFlags() should not have the extra branch checking whether the
  // hooks should be ignored or not. This is the same reason why
  // |FreeNoHooks()| exists. However, |AlignedAlloc()| and |Realloc()| have few
  // callers, so taking the extra branch in the non-malloc() case doesn't hurt.
  // In addition, for the malloc() case, the compiler correctly removes the
  // branch, since this is marked |PA_ALWAYS_INLINE|.
  PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlagsNoHooks(
      unsigned int flags,
      size_t requested_size,
      size_t slot_span_alignment) PA_MALLOC_ALIGNED;

  PA_NOINLINE void* Realloc(void* ptr,
                            size_t new_size,
                            const char* type_name) PA_MALLOC_ALIGNED;
  // Overload that may return nullptr if reallocation isn't possible. In this
  // case, |ptr| remains valid.
  PA_NOINLINE void* TryRealloc(void* ptr,
                               size_t new_size,
                               const char* type_name) PA_MALLOC_ALIGNED;
  PA_NOINLINE void* ReallocWithFlags(unsigned int flags,
                                     void* ptr,
                                     size_t new_size,
                                     const char* type_name) PA_MALLOC_ALIGNED;
  PA_NOINLINE static void Free(void* object);
  PA_ALWAYS_INLINE static void FreeWithFlags(unsigned int flags, void* object);
  // Same as |Free()|, bypasses the allocator hooks.
  PA_ALWAYS_INLINE static void FreeNoHooks(void* object);
  // Immediately frees the pointer bypassing the quarantine. |slot_start| is
  // the beginning of the slot that contains |object|.
  PA_ALWAYS_INLINE void FreeNoHooksImmediate(void* object,
                                             SlotSpan* slot_span,
                                             uintptr_t slot_start);

  PA_ALWAYS_INLINE static size_t GetUsableSize(void* ptr);

  PA_ALWAYS_INLINE size_t
  AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
  PA_ALWAYS_INLINE size_t
  AllocationCapacityFromRequestedSize(size_t size) const;
  // Frees memory from this partition, if possible, by decommitting pages or
  // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
  void PurgeMemory(int flags);

  // Reduces the size of the empty slot spans ring, until the dirty size is <=
  // |limit|.
  void ShrinkEmptySlotSpansRing(size_t limit)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // The empty slot span ring starts "small", can be enlarged later. This
  // improves performance by performing fewer system calls, at the cost of more
  // memory usage.
  void EnableLargeEmptySlotSpanRing() {
    ::partition_alloc::internal::ScopedGuard locker{lock_};
    global_empty_slot_span_ring_size = internal::kMaxFreeableSpans;
  }

  void DumpStats(const char* partition_name,
                 bool is_light_dump,
                 PartitionStatsDumper* partition_stats_dumper);

  static void DeleteForTesting(PartitionRoot* partition_root);
  void ResetBookkeepingForTesting();

  static uint16_t SizeToBucketIndex(size_t size,
                                    bool with_denser_bucket_distribution);

  PA_ALWAYS_INLINE void FreeInSlotSpan(uintptr_t slot_start,
                                       SlotSpan* slot_span)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Frees memory, with |slot_start| as returned by |RawAlloc()|.
  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start);
  PA_ALWAYS_INLINE void RawFree(uintptr_t slot_start, SlotSpan* slot_span)
      PA_LOCKS_EXCLUDED(lock_);

  PA_ALWAYS_INLINE void RawFreeBatch(FreeListEntry* head,
                                     FreeListEntry* tail,
                                     size_t size,
                                     SlotSpan* slot_span)
      PA_LOCKS_EXCLUDED(lock_);

  PA_ALWAYS_INLINE void RawFreeWithThreadCache(uintptr_t slot_start,
                                               SlotSpan* slot_span);

  // This is safe to do because we are switching to a bucket distribution with
  // more buckets, meaning any allocations we have done before the switch are
  // guaranteed to have a bucket under the new distribution when they are
  // eventually deallocated. We do not need synchronization here or below.
  void SwitchToDenserBucketDistribution() {
    flags.with_denser_bucket_distribution = true;
  }
  // Switching back to the less dense bucket distribution is ok during tests.
  // At worst, we end up with deallocations that are sent to a bucket that we
  // cannot allocate from, which will not cause problems besides wasting
  // memory.
  void ResetBucketDistributionForTesting() {
    flags.with_denser_bucket_distribution = false;
  }

  ThreadCache* thread_cache_for_testing() const {
    return flags.with_thread_cache ? ThreadCache::Get() : nullptr;
  }
  size_t get_total_size_of_committed_pages() const {
    return total_size_of_committed_pages.load(std::memory_order_relaxed);
  }
  size_t get_max_size_of_committed_pages() const {
    return max_size_of_committed_pages.load(std::memory_order_relaxed);
  }

  size_t get_total_size_of_allocated_bytes() const {
    // Since this is only used for bookkeeping, we don't care if the value is
    // stale, so no need to get a lock here.
    return PA_TS_UNCHECKED_READ(total_size_of_allocated_bytes);
  }

  size_t get_max_size_of_allocated_bytes() const {
    // Since this is only used for bookkeeping, we don't care if the value is
    // stale, so no need to get a lock here.
    return PA_TS_UNCHECKED_READ(max_size_of_allocated_bytes);
  }

  internal::pool_handle ChoosePool() const {
    if (flags.use_configurable_pool) {
      return internal::GetConfigurablePool();
    }
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    return brp_enabled() ? internal::GetBRPPool() : internal::GetRegularPool();
#else
    return internal::GetRegularPool();
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  }

  PA_ALWAYS_INLINE bool IsQuarantineAllowed() const {
    return flags.quarantine_mode != QuarantineMode::kAlwaysDisabled;
  }

  PA_ALWAYS_INLINE bool IsQuarantineEnabled() const {
    return flags.quarantine_mode == QuarantineMode::kEnabled;
  }

  PA_ALWAYS_INLINE bool ShouldQuarantine(void* object) const {
    if (PA_UNLIKELY(flags.quarantine_mode != QuarantineMode::kEnabled))
      return false;
#if defined(PA_HAS_MEMORY_TAGGING)
    if (PA_UNLIKELY(quarantine_always_for_testing))
      return true;
    // If quarantine is enabled and the tag overflows, move the containing slot
    // to quarantine, to prevent the attacker from exploiting a pointer that
    // has an old tag.
    return internal::HasOverflowTag(object);
#else
    return true;
#endif
  }

  PA_ALWAYS_INLINE void SetQuarantineAlwaysForTesting(bool value) {
    quarantine_always_for_testing = value;
  }

  PA_ALWAYS_INLINE bool IsScanEnabled() const {
    // Enabled scan implies enabled quarantine.
    PA_DCHECK(flags.scan_mode != ScanMode::kEnabled || IsQuarantineEnabled());
    return flags.scan_mode == ScanMode::kEnabled;
  }
  static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
  GetDirectMapMetadataAndGuardPagesSize() {
    // Because we need to fake a direct-map region to look like a super page,
    // we need to allocate more pages around the payload:
    // - The first partition page is a combination of metadata and guard
    //   region.
    // - We also add a trailing guard page. In most cases, a system page would
    //   suffice. But on 32-bit systems when BRP is on, we need a partition
    //   page to match granularity of the BRP pool bitmap. For consistency,
    //   we'll use a partition page everywhere, which is cheap as it's
    //   uncommitted address space anyway.
    return 2 * internal::PartitionPageSize();
  }
  static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
  GetDirectMapSlotSize(size_t raw_size) {
    // Caller must check that the size is not above the MaxDirectMapped()
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    PA_DCHECK(raw_size <= internal::MaxDirectMapped());
    return partition_alloc::internal::base::bits::AlignUp(
        raw_size, internal::SystemPageSize());
  }

  static PA_ALWAYS_INLINE size_t
  GetDirectMapReservationSize(size_t padded_raw_size) {
    // Caller must check that the size is not above the MaxDirectMapped()
    // limit before calling. This also guards against integer overflow in the
    // calculation here.
    PA_DCHECK(padded_raw_size <= internal::MaxDirectMapped());
    return partition_alloc::internal::base::bits::AlignUp(
        padded_raw_size + GetDirectMapMetadataAndGuardPagesSize(),
        internal::DirectMapAllocationGranularity());
  }
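  // Worked example of the two helpers above (numbers are purely illustrative
  // and assume 4 KiB system pages and 16 KiB partition pages): a raw size of
  // 100,000 bytes is rounded up to a 102,400-byte slot by
  // GetDirectMapSlotSize(); GetDirectMapReservationSize() then adds
  // 2 * 16 KiB of metadata/guard pages (102,400 + 32,768 = 135,168 bytes) and
  // rounds that up to the direct-map allocation granularity.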
  PA_ALWAYS_INLINE size_t AdjustSize0IfNeeded(size_t size) const {
    // There are known cases where allowing size 0 would lead to problems:
    // 1. If extras are present only before allocation (e.g. BRP ref-count),
    //    the extras will fill the entire kAlignment-sized slot, leading to
    //    returning a pointer to the next slot. ReallocWithFlags() calls
    //    SlotSpanMetadata::FromObject() prior to subtracting extras, thus
    //    potentially getting a wrong slot span.
    // 2. If we put BRP ref-count in the previous slot, that slot may be free.
    //    In this case, the slot needs to fit both, a free-list entry and a
    //    ref-count. If sizeof(PartitionRefCount) is 8, it fills the entire
    //    smallest slot on 32-bit systems (kSmallestBucket is 8), thus not
    //    leaving space for the free-list entry.
    // 3. On macOS and iOS, PartitionGetSizeEstimate() is used for two
    //    purposes: as a zone dispatcher and as an underlying implementation
    //    of malloc_size(3). As a zone dispatcher, zero has a special meaning
    //    of "doesn't belong to this zone". When extras fill out the entire
    //    slot, the usable size is 0, thus confusing the zone dispatcher.
    //
    // To save ourselves a branch on this hot path, we could eliminate this
    // check at compile time for cases not listed above. The #if statement
    // would be rather complex. Then there is also the fear of the unknown.
    // The existing cases were discovered through obscure, painful-to-debug
    // crashes. Better save ourselves trouble with not-yet-discovered cases.
    if (PA_UNLIKELY(size == 0))
      return 1;
    return size;
  }
  // Adjusts the size by adding extras. Also includes the 0->1 adjustment if
  // needed.
  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasAdd(size_t size) const {
    size = AdjustSize0IfNeeded(size);
    PA_DCHECK(size + flags.extras_size >= size);
    return size + flags.extras_size;
  }

  // Adjusts the size by subtracting extras. Doesn't include the 0->1
  // adjustment, which leads to an asymmetry with AdjustSizeForExtrasAdd, but
  // callers of AdjustSizeForExtrasSubtract either expect the adjustment to be
  // included, or are indifferent.
  PA_ALWAYS_INLINE size_t AdjustSizeForExtrasSubtract(size_t size) const {
    return size - flags.extras_size;
  }
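  // Worked example of the pair above (the extras size is an assumption for
  // illustration only, e.g. a configuration where cookie and ref-count add up
  // to extras_size == 16): AdjustSizeForExtrasAdd(0) first becomes 1 via
  // AdjustSize0IfNeeded(), then 1 + 16 = 17, which bucketing later rounds up;
  // conversely, AdjustSizeForExtrasSubtract(32) reports 32 - 16 = 16 usable
  // bytes.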
  PA_ALWAYS_INLINE uintptr_t SlotStartToObjectAddr(uintptr_t slot_start) const {
    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
    return slot_start + flags.extras_offset;
  }

  PA_ALWAYS_INLINE void* SlotStartToObject(uintptr_t slot_start) const {
    // TODO(bartekn): Check that |slot_start| is indeed a slot start.
    return internal::TagAddr(SlotStartToObjectAddr(slot_start));
  }

  PA_ALWAYS_INLINE uintptr_t ObjectToSlotStart(void* object) const {
    return UntagPtr(object) - flags.extras_offset;
    // TODO(bartekn): Check that the result is indeed a slot start.
  }

  bool brp_enabled() const {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    return flags.brp_enabled_;
#else
    return false;
#endif
  }

  bool brp_zapping_enabled() const {
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    return flags.brp_zapping_enabled_;
#else
    return false;
#endif
  }

  PA_ALWAYS_INLINE bool uses_configurable_pool() const {
    return flags.use_configurable_pool;
  }

  // To make tests deterministic, it is necessary to uncap the amount of memory
  // waste incurred by empty slot spans. Otherwise, the size of various
  // freelists, and committed memory becomes harder to reason about (and
  // brittle) with a single thread, and non-deterministic with several.
  void UncapEmptySlotSpanMemoryForTesting() {
    max_empty_slot_spans_dirty_bytes_shift = 0;
  }

#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  PA_ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
    // TODO(crbug.com/1298696): performance is not an issue. We can use
    // random tags in lieu of sequential ones.
    auto tag = ++current_partition_tag;
    tag += !tag;  // Avoid 0.
    current_partition_tag = tag;
    return tag;
  }
#endif  // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)

  // Enables the sorting of active slot spans in PurgeMemory().
  static void EnableSortActiveSlotSpans();
 private:
  static inline bool sort_active_slot_spans_ = false;

  // |buckets| has `kNumBuckets` elements, but we sometimes access it at index
  // `kNumBuckets`, which is occupied by the sentinel bucket. The correct
  // layout is enforced by a static_assert() in partition_root.cc, so this is
  // fine. However, UBSAN is correctly pointing out that there is an
  // out-of-bounds access, so disable it for these accesses.
  //
  // See crbug.com/1150772 for an instance of Clusterfuzz / UBSAN detecting
  // this.
  PA_ALWAYS_INLINE const Bucket& PA_NO_SANITIZE("undefined")
      bucket_at(size_t i) const {
    PA_DCHECK(i <= internal::kNumBuckets);
    return buckets[i];
  }

  // Returns whether a |bucket| from |this| root is direct-mapped. This
  // function does not touch |bucket|, contrary to
  // PartitionBucket::is_direct_mapped().
  //
  // This is meant to be used in hot paths, and particularly *before* going
  // into the thread cache fast path. Indeed, real-world profiles show that
  // accessing an allocation's bucket is responsible for a sizable fraction of
  // *total* deallocation time. This can be understood because
  // - All deallocations have to access the bucket to know whether it is
  //   direct-mapped. If not (vast majority of allocations), it can go through
  //   the fast path, i.e. thread cache.
  // - The bucket is relatively frequently written to, by *all* threads
  //   (e.g. every time a slot span becomes full or empty), so accessing it
  //   will result in some amount of cacheline ping-pong.
  PA_ALWAYS_INLINE bool IsDirectMappedBucket(Bucket* bucket) const {
    // All regular allocations are associated with a bucket in the |buckets_|
    // array. A range check is then sufficient to identify direct-mapped
    // allocations.
    bool ret = !(bucket >= this->buckets && bucket <= &this->sentinel_bucket);
    PA_DCHECK(ret == bucket->is_direct_mapped());
    return ret;
  }

  // Allocates a memory slot, without initializing extras.
  //
  // - |flags| are as in AllocWithFlags().
  // - |raw_size| accommodates for extras on top of AllocWithFlags()'s
  //   |requested_size|.
  // - |usable_size| and |is_already_zeroed| are output only. |usable_size| is
  //   guaranteed to be larger or equal to AllocWithFlags()'s
  //   |requested_size|.
  PA_ALWAYS_INLINE uintptr_t RawAlloc(Bucket* bucket,
                                      unsigned int flags,
                                      size_t raw_size,
                                      size_t slot_span_alignment,
                                      size_t* usable_size,
                                      bool* is_already_zeroed);
  PA_ALWAYS_INLINE uintptr_t AllocFromBucket(Bucket* bucket,
                                             unsigned int flags,
                                             size_t raw_size,
                                             size_t slot_span_alignment,
                                             size_t* usable_size,
                                             bool* is_already_zeroed)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);

  bool TryReallocInPlaceForNormalBuckets(void* object,
                                         SlotSpan* slot_span,
                                         size_t new_size);
  bool TryReallocInPlaceForDirectMap(
      internal::SlotSpanMetadata<thread_safe>* slot_span,
      size_t requested_size) PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DecommitEmptySlotSpans() PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  PA_ALWAYS_INLINE void RawFreeLocked(uintptr_t slot_start)
      PA_EXCLUSIVE_LOCKS_REQUIRED(lock_);
  uintptr_t MaybeInitThreadCacheAndAlloc(uint16_t bucket_index,
                                         size_t* slot_size);

#if defined(PA_USE_PARTITION_ROOT_ENUMERATOR)
  static internal::Lock& GetEnumeratorLock();

  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) next_root = nullptr;
  PartitionRoot* PA_GUARDED_BY(GetEnumeratorLock()) prev_root = nullptr;

  friend class internal::PartitionRootEnumerator;
#endif  // defined(PA_USE_PARTITION_ROOT_ENUMERATOR)

  friend class ThreadCache;
};
namespace internal {

class ScopedSyscallTimer {
 public:
#if defined(PA_COUNT_SYSCALL_TIME)
  explicit ScopedSyscallTimer(PartitionRoot<>* root)
      : root_(root), tick_(base::TimeTicks::Now()) {}

  ~ScopedSyscallTimer() {
    root_->syscall_count.fetch_add(1, std::memory_order_relaxed);

    uint64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
    root_->syscall_total_time_ns.fetch_add(elapsed_nanos,
                                           std::memory_order_relaxed);
  }

 private:
  PartitionRoot<>* root_;
  const base::TimeTicks tick_;
#else
  explicit ScopedSyscallTimer(PartitionRoot<>* root) {
    root->syscall_count.fetch_add(1, std::memory_order_relaxed);
  }
#endif
};
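// Illustrative sketch of how the RAII timer is used inside the allocator (a
// hypothetical call site, not a real one): the timer is stack-allocated
// around a system call so that the call count, and the elapsed time when
// PA_COUNT_SYSCALL_TIME is defined, are attributed to the root:
//
//   {
//     ScopedSyscallTimer timer{root};
//     DecommitSystemPages(/* ... */);
//   }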
#if BUILDFLAG(USE_BACKUP_REF_PTR)
PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetDirectMapSlotStartInBRPPool(uintptr_t address) {
  PA_DCHECK(IsManagedByPartitionAllocBRPPool(address));
#if defined(PA_HAS_64_BITS_POINTERS)
  // Use this variant of GetDirectMapReservationStart as it has better
  // performance.
  uintptr_t offset = OffsetInBRPPool(address);
  uintptr_t reservation_start =
      GetDirectMapReservationStart(address, kBRPPoolHandle, offset);
#else
  uintptr_t reservation_start = GetDirectMapReservationStart(address);
#endif
  if (!reservation_start)
    return 0;

  // The direct map allocation may not start exactly from the first page, as
  // there may be padding for alignment. The first page metadata holds an
  // offset to where direct map metadata, and thus direct map start, are
  // located.
  auto* first_page = PartitionPage<ThreadSafe>::FromAddr(reservation_start +
                                                         PartitionPageSize());
  auto* page = first_page + first_page->slot_span_metadata_offset;
  PA_DCHECK(page->is_valid);
  PA_DCHECK(!page->slot_span_metadata_offset);
  auto* slot_span = &page->slot_span_metadata;
  uintptr_t slot_start =
      SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  auto* metadata =
      PartitionDirectMapMetadata<ThreadSafe>::FromSlotSpan(slot_span);
  size_t padding_for_alignment =
      metadata->direct_map_extent.padding_for_alignment;
  PA_DCHECK(padding_for_alignment ==
            static_cast<size_t>(page - first_page) * PartitionPageSize());
  PA_DCHECK(slot_start ==
            reservation_start + PartitionPageSize() + padding_for_alignment);
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
  return slot_start;
}

// Gets the address to the beginning of the allocated slot. The input |address|
// can point anywhere in the slot, including the slot start as well as
// immediately past the slot.
//
// This isn't a general purpose function, it is used specifically for obtaining
// BackupRefPtr's ref-count. The caller is responsible for ensuring that the
// ref-count is in place for this allocation.
PA_ALWAYS_INLINE uintptr_t
PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
  // Adjust to support pointers right past the end of an allocation, which in
  // some cases appear to point outside the designated allocation slot.
  //
  // If ref-count is present before the allocation, then adjusting a valid
  // pointer down will not cause us to go down to the previous slot, otherwise
  // no adjustment is needed (and likely wouldn't be correct as there is
  // a risk of going down to the previous slot). Either way,
  // kPartitionPastAllocationAdjustment takes care of that detail.
  address -= kPartitionPastAllocationAdjustment;
  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(address));
  DCheckIfManagedByPartitionAllocBRPPool(address);

  uintptr_t directmap_slot_start =
      PartitionAllocGetDirectMapSlotStartInBRPPool(address);
  if (PA_UNLIKELY(directmap_slot_start))
    return directmap_slot_start;
  auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromAddr(address);
  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
  // Double check that ref-count is indeed present.
  PA_DCHECK(root->brp_enabled());

  // Get the offset from the beginning of the slot span.
  uintptr_t slot_span_start =
      SlotSpanMetadata<ThreadSafe>::ToSlotSpanStart(slot_span);
  size_t offset_in_slot_span = address - slot_span_start;

  auto* bucket = slot_span->bucket;
  return slot_span_start +
         bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
}
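// Worked example of the normal-bucket branch above (numbers are purely
// illustrative): for a slot span starting at 0x4000 with slot_size == 256, an
// |address| of 0x4105 has offset_in_slot_span == 0x105 (261 bytes), so
// GetSlotNumber() returns 1 and the computed slot start is
// 0x4000 + 256 * 1 == 0x4100.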
// Checks whether a given address stays within the same allocation slot after
// modification.
//
// This isn't a general purpose function. The caller is responsible for
// ensuring that the ref-count is in place for this allocation.
template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
PA_ALWAYS_INLINE bool PartitionAllocIsValidPtrDelta(uintptr_t address,
                                                    Z delta_in_bytes) {
  // Required for pointers right past an allocation. See
  // |PartitionAllocGetSlotStartInBRPPool()|.
  uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
  PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
  DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);

  uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
  // Don't use |adjusted_address| beyond this point at all. It was needed to
  // pick the right slot, but now we're dealing with very concrete addresses.
  // Zero it just in case, to catch errors.
  adjusted_address = 0;
  auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
  // Double check that ref-count is indeed present.
  PA_DCHECK(root->brp_enabled());

  uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
  uintptr_t new_address = address + static_cast<uintptr_t>(delta_in_bytes);
  return object_addr <= new_address &&
         // We use "greater than or equal" below because we want to include
         // pointers right past the end of an allocation.
         new_address <= object_addr + slot_span->GetUsableSize(root);
}

PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
  PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());

  auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
  // PartitionRefCount is required to be allocated inside a `PartitionRoot`
  // that supports reference counts.
  PA_DCHECK(root->brp_enabled());

  // memset() can be really expensive.
#if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
  DebugMemset(SlotStartAddr2Ptr(slot_start), kFreedByte,
              slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
                  - sizeof(PartitionRefCount)
#endif
  );
#endif

  root->total_size_of_brp_quarantined_bytes.fetch_sub(
      slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
  root->total_count_of_brp_quarantined_slots.fetch_sub(
      1, std::memory_order_relaxed);

  root->RawFreeWithThreadCache(slot_start, slot_span);
}
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)

}  // namespace internal
template <bool thread_safe>
PA_ALWAYS_INLINE uintptr_t
PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
                                            unsigned int flags,
                                            size_t raw_size,
                                            size_t slot_span_alignment,
                                            size_t* usable_size,
                                            bool* is_already_zeroed) {
  PA_DCHECK((slot_span_alignment >= internal::PartitionPageSize()) &&
            internal::base::bits::IsPowerOfTwo(slot_span_alignment));
  SlotSpan* slot_span = bucket->active_slot_spans_head;
  // There always must be a slot span on the active list (could be a sentinel).
  PA_DCHECK(slot_span);
  // Check that it isn't marked full, which could only be true if the span was
  // removed from the active list.
  PA_DCHECK(!slot_span->marked_full);

  uintptr_t slot_start =
      internal::SlotStartPtr2Addr(slot_span->get_freelist_head());
  // Use the fast path when a slot is readily available on the free list of
  // the first active slot span. However, fall back to the slow path if a
  // higher-order alignment is requested, because an inner slot of an existing
  // slot span is unlikely to satisfy it.
  if (PA_LIKELY(slot_span_alignment <= internal::PartitionPageSize() &&
                slot_start)) {
    *is_already_zeroed = false;
    // This is a fast path, so avoid calling GetUsableSize() on Release builds
    // as it is more costly. Copy its small bucket path instead.
    *usable_size = AdjustSizeForExtrasSubtract(bucket->slot_size);
    PA_DCHECK(*usable_size == slot_span->GetUsableSize(this));

    // If these DCHECKs fire, you probably corrupted memory.
    // TODO(crbug.com/1257655): See if we can afford to make these CHECKs.
    PA_DCHECK(IsValidSlotSpan(slot_span));

    // All large allocations must go through the slow path to correctly update
    // the size metadata.
    PA_DCHECK(!slot_span->CanStoreRawSize());
    PA_DCHECK(!slot_span->bucket->is_direct_mapped());
    void* entry = slot_span->PopForAlloc(bucket->slot_size);
    PA_DCHECK(internal::SlotStartPtr2Addr(entry) == slot_start);

    PA_DCHECK(slot_span->bucket == bucket);
  } else {
    slot_start = bucket->SlowPathAlloc(this, flags, raw_size,
                                       slot_span_alignment, is_already_zeroed);
    if (PA_UNLIKELY(!slot_start))
      return 0;

    slot_span = SlotSpan::FromSlotStart(slot_start);
    // TODO(crbug.com/1257655): See if we can afford to make this a CHECK.
    PA_DCHECK(IsValidSlotSpan(slot_span));
    // For direct mapped allocations, |bucket| is the sentinel.
    PA_DCHECK((slot_span->bucket == bucket) ||
              (slot_span->bucket->is_direct_mapped() &&
               (bucket == &sentinel_bucket)));

    *usable_size = slot_span->GetUsableSize(this);
  }
  PA_DCHECK(slot_span->GetUtilizedSlotSize() <= slot_span->bucket->slot_size);
  IncreaseTotalSizeOfAllocatedBytes(slot_span, raw_size);
  return slot_start;
}
// static
template <bool thread_safe>
PA_NOINLINE void PartitionRoot<thread_safe>::Free(void* object) {
  return FreeWithFlags(0, object);
}

// static
template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
    unsigned int flags,
    void* object) {
  PA_DCHECK(flags < FreeFlags::kLastFlag << 1);

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  if (!(flags & FreeFlags::kNoMemoryToolOverride)) {
    free(object);
    return;
  }
#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  if (PA_UNLIKELY(!object))
    return;

  if (PartitionAllocHooks::AreHooksEnabled()) {
    PartitionAllocHooks::FreeObserverHookIfEnabled(object);
    if (PartitionAllocHooks::FreeOverrideHookIfEnabled(object))
      return;
  }

  FreeNoHooks(object);
}
  993. // static
  994. template <bool thread_safe>
  995. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
  996. if (PA_UNLIKELY(!object))
  997. return;
998. // Almost all calls to FreeNoHooks() will end up writing to |*object|; the only
999. // cases where we don't are delayed free()s in PCScan. Either way, |*object| can
1000. // be cold in cache, so prefetch it.
  1001. PA_PREFETCH(object);
  1002. uintptr_t object_addr = internal::ObjectPtr2Addr(object);
  1003. // On Android, malloc() interception is more fragile than on other
  1004. // platforms, as we use wrapped symbols. However, the GigaCage allows us to
  1005. // quickly tell that a pointer was allocated with PartitionAlloc.
  1006. //
1007. // We crash here (the PA_CHECK below) to detect imperfect symbol interception.
1008. // Alternatively, allocations we don't own could be forwarded to the system
1009. // malloc() implementation in these rare cases, assuming that some remain.
  1010. //
  1011. // On Android Chromecast devices, this is already checked in PartitionFree()
  1012. // in the shim.
  1013. #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
  1014. (BUILDFLAG(IS_ANDROID) && !BUILDFLAG(PA_IS_CAST_ANDROID))
  1015. PA_CHECK(IsManagedByPartitionAlloc(object_addr));
  1016. #endif
  1017. // Fetch the root from the address, and not SlotSpanMetadata. This is
  1018. // important, as obtaining it from SlotSpanMetadata is a slow operation
  1019. // (looking into the metadata area, and following a pointer), which can induce
  1020. // cache coherency traffic (since they're read on every free(), and written to
  1021. // on any malloc()/free() that is not a hit in the thread cache). This way we
  1022. // change the critical path from object -> slot_span -> root into two
  1023. // *parallel* ones:
  1024. // 1. object -> root
  1025. // 2. object -> slot_span
  1026. auto* root = FromAddrInFirstSuperpage(object_addr);
  1027. SlotSpan* slot_span = SlotSpan::FromObject(object);
  1028. PA_DCHECK(FromSlotSpan(slot_span) == root);
  1029. uintptr_t slot_start = root->ObjectToSlotStart(object);
  1030. PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
  1031. #if defined(PA_HAS_MEMORY_TAGGING)
  1032. const size_t slot_size = slot_span->bucket->slot_size;
  1033. if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
  1034. internal::TagMemoryRangeIncrement(slot_start, slot_size);
  1035. // Incrementing the MTE-tag in the memory range invalidates the |object|'s
  1036. // tag, so it must be retagged.
  1037. object = internal::TagPtr(object);
  1038. }
  1039. #else
  1040. // We are going to read from |*slot_span| in all branches, but haven't done it
  1041. // yet.
  1042. //
  1043. // TODO(crbug.com/1207307): It would be much better to avoid touching
  1044. // |*slot_span| at all on the fast path, or at least to separate its read-only
  1045. // parts (i.e. bucket pointer) from the rest. Indeed, every thread cache miss
  1046. // (or batch fill) will *write* to |slot_span->freelist_head|, leading to
  1047. // cacheline ping-pong.
  1048. //
  1049. // Don't do it when memory tagging is enabled, as |*slot_span| has already
  1050. // been touched above.
  1051. PA_PREFETCH(slot_span);
  1052. #endif // defined(PA_HAS_MEMORY_TAGGING)
  1053. #if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  1054. if (!root->IsDirectMappedBucket(slot_span->bucket)) {
  1055. partition_alloc::internal::PartitionTagIncrementValue(
  1056. slot_start, slot_span->bucket->slot_size);
  1057. }
  1058. #endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
  1059. // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
  1060. // default.
  1061. if (PA_UNLIKELY(root->ShouldQuarantine(object))) {
  1062. // PCScan safepoint. Call before potentially scheduling scanning task.
  1063. PCScan::JoinScanIfNeeded();
  1064. if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
  1065. PCScan::MoveToQuarantine(object, slot_span->GetUsableSize(root),
  1066. slot_start, slot_span->bucket->slot_size);
  1067. return;
  1068. }
  1069. }
  1070. root->FreeNoHooksImmediate(object, slot_span, slot_start);
  1071. }
  1072. template <bool thread_safe>
  1073. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
  1074. void* object,
  1075. SlotSpan* slot_span,
  1076. uintptr_t slot_start) {
  1077. // The thread cache is added "in the middle" of the main allocator, that is:
  1078. // - After all the cookie/ref-count management
  1079. // - Before the "raw" allocator.
  1080. //
  1081. // On the deallocation side:
  1082. // 1. Check cookie/ref-count, adjust the pointer
  1083. // 2. Deallocation
  1084. // a. Return to the thread cache if possible. If it succeeds, return.
  1085. // b. Otherwise, call the "raw" allocator <-- Locking
  1086. PA_DCHECK(object);
  1087. PA_DCHECK(slot_span);
  1088. PA_DCHECK(IsValidSlotSpan(slot_span));
  1089. PA_DCHECK(slot_start);
  1090. // Layout inside the slot:
  1091. // |[refcnt]|...object...|[empty]|[cookie]|[unused]|
  1092. // <--------(a)--------->
  1093. // <--(b)---> + <--(b)--->
  1094. // <-----------------(c)------------------>
  1095. // (a) usable_size
  1096. // (b) extras
  1097. // (c) utilized_slot_size
  1098. //
  1099. // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
  1100. // |...object...|[empty]|[cookie]|[unused]|[refcnt]|
  1101. // <--------(a)--------->
  1102. // <--(b)---> + <--(b)--->
  1103. // <-------------(c)-------------> + <--(c)--->
  1104. //
  1105. // Note: ref-count and cookie can be 0-sized.
  1106. //
  1107. // For more context, see the other "Layout inside the slot" comment inside
  1108. // AllocWithFlagsNoHooks().
  1109. #if BUILDFLAG(PA_DCHECK_IS_ON)
  1110. if (flags.allow_cookie) {
  1111. // Verify the cookie after the allocated region.
  1112. // If this assert fires, you probably corrupted memory.
  1113. internal::PartitionCookieCheckValue(static_cast<unsigned char*>(object) +
  1114. slot_span->GetUsableSize(this));
  1115. }
  1116. #endif
  1117. // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
  1118. // default.
  1119. if (PA_UNLIKELY(IsQuarantineEnabled())) {
  1120. if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
  1121. // Mark the state in the state bitmap as freed.
  1122. internal::StateBitmapFromAddr(slot_start)->Free(slot_start);
  1123. }
  1124. }
  1125. #if BUILDFLAG(USE_BACKUP_REF_PTR)
  1126. // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
  1127. // be false only for the aligned partition.
  1128. if (brp_enabled()) {
  1129. auto* ref_count = internal::PartitionRefCountPointer(slot_start);
  1130. // If there are no more references to the allocation, it can be freed
  1131. // immediately. Otherwise, defer the operation and zap the memory to turn
  1132. // potential use-after-free issues into unexploitable crashes.
  1133. if (PA_UNLIKELY(!ref_count->IsAliveWithNoKnownRefs() &&
  1134. brp_zapping_enabled()))
  1135. internal::SecureMemset(object, internal::kQuarantinedByte,
  1136. slot_span->GetUsableSize(this));
  1137. if (PA_UNLIKELY(!(ref_count->ReleaseFromAllocator()))) {
  1138. total_size_of_brp_quarantined_bytes.fetch_add(
  1139. slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
  1140. total_count_of_brp_quarantined_slots.fetch_add(1,
  1141. std::memory_order_relaxed);
  1142. cumulative_size_of_brp_quarantined_bytes.fetch_add(
  1143. slot_span->GetSlotSizeForBookkeeping(), std::memory_order_relaxed);
  1144. cumulative_count_of_brp_quarantined_slots.fetch_add(
  1145. 1, std::memory_order_relaxed);
  1146. return;
  1147. }
  1148. }
  1149. #endif // BUILDFLAG(USE_BACKUP_REF_PTR)
  1150. // memset() can be really expensive.
  1151. #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
  1152. internal::DebugMemset(internal::SlotStartAddr2Ptr(slot_start),
  1153. internal::kFreedByte,
  1154. slot_span->GetUtilizedSlotSize()
  1155. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  1156. - sizeof(internal::PartitionRefCount)
  1157. #endif
  1158. );
  1159. #elif defined(PA_ZERO_RANDOMLY_ON_FREE)
  1160. // `memset` only once in a while: we're trading off safety for time
  1161. // efficiency.
  1162. if (PA_UNLIKELY(internal::RandomPeriod()) &&
  1163. !IsDirectMappedBucket(slot_span->bucket)) {
  1164. internal::SecureMemset(internal::SlotStartAddr2Ptr(slot_start), 0,
  1165. slot_span->GetUtilizedSlotSize()
  1166. #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
  1167. - sizeof(internal::PartitionRefCount)
  1168. #endif
  1169. );
  1170. }
  1171. #endif // defined(PA_ZERO_RANDOMLY_ON_FREE)
  1172. RawFreeWithThreadCache(slot_start, slot_span);
  1173. }
  1174. template <bool thread_safe>
  1175. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeInSlotSpan(
  1176. uintptr_t slot_start,
  1177. SlotSpan* slot_span) {
  1178. DecreaseTotalSizeOfAllocatedBytes(slot_span);
  1179. return slot_span->Free(slot_start);
  1180. }
  1181. template <bool thread_safe>
  1182. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(
  1183. uintptr_t slot_start) {
  1184. SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
  1185. RawFree(slot_start, slot_span);
  1186. }
  1187. template <bool thread_safe>
  1188. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(uintptr_t slot_start,
  1189. SlotSpan* slot_span) {
  1190. // At this point we are about to acquire the lock, so we try to minimize the
  1191. // risk of blocking inside the locked section.
  1192. //
  1193. // For allocations that are not direct-mapped, there will always be a store at
  1194. // the beginning of |*slot_start|, to link the freelist. This is why there is
  1195. // a prefetch of it at the beginning of the free() path.
  1196. //
  1197. // However, the memory which is being freed can be very cold (for instance
  1198. // during browser shutdown, when various caches are finally completely freed),
  1199. // and so moved to either compressed memory or swap. This means that touching
1200. // it here can cause a major page fault. This in turn will cause
  1201. // descheduling of the thread *while locked*. Since we don't have priority
  1202. // inheritance locks on most platforms, avoiding long locked periods relies on
  1203. // the OS having proper priority boosting. There is evidence
  1204. // (crbug.com/1228523) that this is not always the case on Windows, and a very
  1205. // low priority background thread can block the main one for a long time,
  1206. // leading to hangs.
  1207. //
  1208. // To mitigate that, make sure that we fault *before* locking. Note that this
  1209. // is useless for direct-mapped allocations (which are very rare anyway), and
  1210. // that this path is *not* taken for thread cache bucket purge (since it calls
  1211. // RawFreeLocked()). This is intentional, as the thread cache is purged often,
1212. // and the memory has, as a consequence, been touched recently (to link the
1213. // thread cache freelist).
  1214. *static_cast<volatile uintptr_t*>(internal::SlotStartAddr2Ptr(slot_start)) =
  1215. 0;
1216. // Note: the freelist write will also touch slot_start + sizeof(void*), but due
1217. // to alignment constraints the two locations always fall within the same OS
1218. // page, so there is no need to pre-fault the second one.
  1219. //
  1220. // Do not move the store above inside the locked section.
  1221. __asm__ __volatile__("" : : "r"(slot_start) : "memory");
  1222. ::partition_alloc::internal::ScopedGuard guard{lock_};
  1223. FreeInSlotSpan(slot_start, slot_span);
  1224. }
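// The pre-faulting idiom above, shown in isolation (an informational sketch,
// not allocator API; |ptr| and |lock| are placeholder names): a volatile store
// touches the page outside the critical section, and the empty asm statement
// with a "memory" clobber acts as a compiler barrier so the store cannot be
// sunk past the lock acquisition.
//
//   *static_cast<volatile uintptr_t*>(ptr) = 0;          // fault the page in
//   __asm__ __volatile__("" : : "r"(ptr) : "memory");    // keep it here...
//   {
//     ::partition_alloc::internal::ScopedGuard guard{lock};
//     // ...so the touch never happens inside this locked section.
//   }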
  1225. template <bool thread_safe>
  1226. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeBatch(
  1227. FreeListEntry* head,
  1228. FreeListEntry* tail,
  1229. size_t size,
  1230. SlotSpan* slot_span) {
  1231. PA_DCHECK(head);
  1232. PA_DCHECK(tail);
  1233. PA_DCHECK(size > 0);
  1234. PA_DCHECK(slot_span);
  1235. PA_DCHECK(IsValidSlotSpan(slot_span));
  1236. // The passed freelist is likely to be just built up, which means that the
  1237. // corresponding pages were faulted in (without acquiring the lock). So there
  1238. // is no need to touch pages manually here before the lock.
  1239. ::partition_alloc::internal::ScopedGuard guard{lock_};
  1240. DecreaseTotalSizeOfAllocatedBytes(slot_span);
  1241. slot_span->AppendFreeList(head, tail, size);
  1242. }
  1243. template <bool thread_safe>
  1244. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
  1245. uintptr_t slot_start,
  1246. SlotSpan* slot_span) {
  1247. // TLS access can be expensive, do a cheap local check first.
  1248. //
  1249. // PA_LIKELY: performance-sensitive partitions have a thread cache,
  1250. // direct-mapped allocations are uncommon.
  1251. if (PA_LIKELY(flags.with_thread_cache &&
  1252. !IsDirectMappedBucket(slot_span->bucket))) {
  1253. size_t bucket_index =
  1254. static_cast<size_t>(slot_span->bucket - this->buckets);
  1255. auto* thread_cache = ThreadCache::Get();
  1256. if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
  1257. thread_cache->MaybePutInCache(slot_start, bucket_index))) {
  1258. return;
  1259. }
  1260. }
  1261. RawFree(slot_start, slot_span);
  1262. }
  1263. template <bool thread_safe>
  1264. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeLocked(
  1265. uintptr_t slot_start) {
  1266. SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
  1267. // Direct-mapped deallocation releases then re-acquires the lock. The caller
  1268. // may not expect that, but we never call this function on direct-mapped
  1269. // allocations.
  1270. PA_DCHECK(!IsDirectMappedBucket(slot_span->bucket));
  1271. FreeInSlotSpan(slot_start, slot_span);
  1272. }
  1273. // static
  1274. template <bool thread_safe>
  1275. PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidSlotSpan(
  1276. SlotSpan* slot_span) {
  1277. PartitionRoot* root = FromSlotSpan(slot_span);
  1278. return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
  1279. }
  1280. template <bool thread_safe>
  1281. PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
  1282. PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
  1283. auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
  1284. reinterpret_cast<uintptr_t>(slot_span) & internal::SystemPageBaseMask());
  1285. return extent_entry->root;
  1286. }
  1287. template <bool thread_safe>
  1288. PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
  1289. PartitionRoot<thread_safe>::FromFirstSuperPage(uintptr_t super_page) {
  1290. PA_DCHECK(internal::IsReservationStart(super_page));
  1291. auto* extent_entry =
  1292. internal::PartitionSuperPageToExtent<thread_safe>(super_page);
  1293. PartitionRoot* root = extent_entry->root;
  1294. PA_DCHECK(root->inverted_self == ~reinterpret_cast<uintptr_t>(root));
  1295. return root;
  1296. }
  1297. template <bool thread_safe>
  1298. PA_ALWAYS_INLINE PartitionRoot<thread_safe>*
  1299. PartitionRoot<thread_safe>::FromAddrInFirstSuperpage(uintptr_t address) {
  1300. uintptr_t super_page = address & internal::kSuperPageBaseMask;
  1301. PA_DCHECK(internal::IsReservationStart(super_page));
  1302. return FromFirstSuperPage(super_page);
  1303. }
  1304. template <bool thread_safe>
  1305. PA_ALWAYS_INLINE void
  1306. PartitionRoot<thread_safe>::IncreaseTotalSizeOfAllocatedBytes(
  1307. SlotSpan* slot_span,
  1308. size_t raw_size) {
  1309. IncreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
  1310. slot_span->GetSlotSizeForBookkeeping(),
  1311. raw_size);
  1312. }
  1313. template <bool thread_safe>
  1314. PA_ALWAYS_INLINE void
  1315. PartitionRoot<thread_safe>::DecreaseTotalSizeOfAllocatedBytes(
  1316. SlotSpan* slot_span) {
  1317. DecreaseTotalSizeOfAllocatedBytes(reinterpret_cast<uintptr_t>(slot_span),
  1318. slot_span->GetSlotSizeForBookkeeping());
  1319. }
  1320. template <bool thread_safe>
  1321. PA_ALWAYS_INLINE void
  1322. PartitionRoot<thread_safe>::IncreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
  1323. size_t len,
  1324. size_t raw_size) {
  1325. total_size_of_allocated_bytes += len;
  1326. max_size_of_allocated_bytes =
  1327. std::max(max_size_of_allocated_bytes, total_size_of_allocated_bytes);
  1328. #if BUILDFLAG(RECORD_ALLOC_INFO)
  1329. partition_alloc::internal::RecordAllocOrFree(addr | 0x01, raw_size);
  1330. #endif // BUILDFLAG(RECORD_ALLOC_INFO)
  1331. }
  1332. template <bool thread_safe>
  1333. PA_ALWAYS_INLINE void
  1334. PartitionRoot<thread_safe>::DecreaseTotalSizeOfAllocatedBytes(uintptr_t addr,
  1335. size_t len) {
  1336. // An underflow here means we've miscounted |total_size_of_allocated_bytes|
  1337. // somewhere.
  1338. PA_DCHECK(total_size_of_allocated_bytes >= len);
  1339. total_size_of_allocated_bytes -= len;
  1340. #if BUILDFLAG(RECORD_ALLOC_INFO)
  1341. partition_alloc::internal::RecordAllocOrFree(addr | 0x00, len);
  1342. #endif // BUILDFLAG(RECORD_ALLOC_INFO)
  1343. }
  1344. template <bool thread_safe>
  1345. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
  1346. size_t len) {
  1347. const auto old_total =
  1348. total_size_of_committed_pages.fetch_add(len, std::memory_order_relaxed);
  1349. const auto new_total = old_total + len;
  1350. // This function is called quite frequently; to avoid performance problems, we
  1351. // don't want to hold a lock here, so we use compare and exchange instead.
  1352. size_t expected = max_size_of_committed_pages.load(std::memory_order_relaxed);
  1353. size_t desired;
  1354. do {
  1355. desired = std::max(expected, new_total);
  1356. } while (!max_size_of_committed_pages.compare_exchange_weak(
  1357. expected, desired, std::memory_order_relaxed, std::memory_order_relaxed));
  1358. }
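// The compare-exchange loop above is the usual lock-free "update the maximum"
// pattern. A generic sketch of the same idea (illustrative only, not part of
// the allocator):
//
//   std::atomic<size_t> max_value;
//   void UpdateMax(size_t candidate) {
//     size_t expected = max_value.load(std::memory_order_relaxed);
//     while (expected < candidate &&
//            !max_value.compare_exchange_weak(expected, candidate,
//                                             std::memory_order_relaxed)) {
//       // |expected| was reloaded by compare_exchange_weak(); retry.
//     }
//   }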
  1359. template <bool thread_safe>
  1360. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
  1361. size_t len) {
  1362. total_size_of_committed_pages.fetch_sub(len, std::memory_order_relaxed);
  1363. }
  1364. template <bool thread_safe>
  1365. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::DecommitSystemPagesForData(
  1366. uintptr_t address,
  1367. size_t length,
  1368. PageAccessibilityDisposition accessibility_disposition) {
  1369. internal::ScopedSyscallTimer timer{this};
  1370. DecommitSystemPages(address, length, accessibility_disposition);
  1371. DecreaseCommittedPages(length);
  1372. }
  1373. // Not unified with TryRecommitSystemPagesForData() to preserve error codes.
  1374. template <bool thread_safe>
  1375. PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPagesForData(
  1376. uintptr_t address,
  1377. size_t length,
  1378. PageAccessibilityDisposition accessibility_disposition) {
  1379. internal::ScopedSyscallTimer timer{this};
  1380. bool ok = TryRecommitSystemPages(
  1381. address, length, PageAccessibilityConfiguration::kReadWriteTagged,
  1382. accessibility_disposition);
  1383. if (PA_UNLIKELY(!ok)) {
  1384. // Decommit some memory and retry. The alternative is crashing.
  1385. DecommitEmptySlotSpans();
  1386. RecommitSystemPages(address, length,
  1387. PageAccessibilityConfiguration::kReadWriteTagged,
  1388. accessibility_disposition);
  1389. }
  1390. IncreaseCommittedPages(length);
  1391. }
  1392. template <bool thread_safe>
  1393. PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
  1394. uintptr_t address,
  1395. size_t length,
  1396. PageAccessibilityDisposition accessibility_disposition) {
  1397. internal::ScopedSyscallTimer timer{this};
  1398. bool ok = TryRecommitSystemPages(
  1399. address, length, PageAccessibilityConfiguration::kReadWriteTagged,
  1400. accessibility_disposition);
  1401. #if defined(PA_COMMIT_CHARGE_IS_LIMITED)
  1402. if (PA_UNLIKELY(!ok)) {
  1403. {
  1404. ::partition_alloc::internal::ScopedGuard guard(lock_);
  1405. DecommitEmptySlotSpans();
  1406. }
  1407. ok = TryRecommitSystemPages(
  1408. address, length, PageAccessibilityConfiguration::kReadWriteTagged,
  1409. accessibility_disposition);
  1410. }
  1411. #endif // defined(PA_COMMIT_CHARGE_IS_LIMITED)
  1412. if (ok)
  1413. IncreaseCommittedPages(length);
  1414. return ok;
  1415. }
  1416. // static
  1417. //
1418. // Returns the size available to the app. It can be equal to or higher than the
  1419. // requested size. If higher, the overage won't exceed what's actually usable
  1420. // by the app without a risk of running out of an allocated region or into
  1421. // PartitionAlloc's internal data. Used as malloc_usable_size and malloc_size.
  1422. //
  1423. // |ptr| should preferably point to the beginning of an object returned from
  1424. // malloc() et al., but it doesn't have to. crbug.com/1292646 shows an example
  1425. // where this isn't the case. Note, an inner object pointer won't work for
  1426. // direct map, unless it is within the first partition page.
  1427. template <bool thread_safe>
  1428. PA_ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
  1429. // malloc_usable_size() is expected to handle NULL gracefully and return 0.
  1430. if (!ptr)
  1431. return 0;
  1432. auto* slot_span = SlotSpan::FromObjectInnerPtr(ptr);
  1433. auto* root = FromSlotSpan(slot_span);
  1434. return slot_span->GetUsableSize(root);
  1435. }
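// Usage sketch (illustrative; |my_root| is a hypothetical, already-initialized
// partition root): GetUsableSize() mirrors malloc_usable_size()/malloc_size()
// semantics, so the result is at least the requested size and nullptr maps
// to 0.
//
//   void* object = my_root->Alloc(24, "example");
//   size_t usable = ThreadSafePartitionRoot::GetUsableSize(object);  // >= 24
//   PA_DCHECK(ThreadSafePartitionRoot::GetUsableSize(nullptr) == 0);
//   ThreadSafePartitionRoot::Free(object);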
  1436. // Return the capacity of the underlying slot (adjusted for extras). This
  1437. // doesn't mean this capacity is readily available. It merely means that if
  1438. // a new allocation (or realloc) happened with that returned value, it'd use
  1439. // the same amount of underlying memory.
  1440. template <bool thread_safe>
  1441. PA_ALWAYS_INLINE size_t
  1442. PartitionRoot<thread_safe>::AllocationCapacityFromSlotStart(
  1443. uintptr_t slot_start) const {
  1444. auto* slot_span = SlotSpan::FromSlotStart(slot_start);
  1445. return AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
  1446. }
  1447. // static
  1448. template <bool thread_safe>
  1449. PA_ALWAYS_INLINE uint16_t PartitionRoot<thread_safe>::SizeToBucketIndex(
  1450. size_t size,
  1451. bool with_denser_bucket_distribution) {
  1452. if (with_denser_bucket_distribution)
  1453. return internal::BucketIndexLookup::GetIndexForDenserBuckets(size);
  1454. return internal::BucketIndexLookup::GetIndex(size);
  1455. }
  1456. template <bool thread_safe>
  1457. PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlags(
  1458. unsigned int flags,
  1459. size_t requested_size,
  1460. const char* type_name) {
  1461. return AllocWithFlagsInternal(flags, requested_size,
  1462. internal::PartitionPageSize(), type_name);
  1463. }
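// Usage sketch (illustrative; |my_root| is a hypothetical partition root):
// flags combine with bitwise OR, kReturnNull turns allocation failure into a
// nullptr return instead of a crash, and kZeroFill guarantees the usable bytes
// are zeroed.
//
//   void* p = my_root->AllocWithFlags(
//       AllocFlags::kReturnNull | AllocFlags::kZeroFill, 128, "MyType");
//   if (p) {
//     // 128 zero-initialized bytes are usable here.
//     ThreadSafePartitionRoot::Free(p);
//   }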
  1464. template <bool thread_safe>
  1465. PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsInternal(
  1466. unsigned int flags,
  1467. size_t requested_size,
  1468. size_t slot_span_alignment,
  1469. const char* type_name) {
  1470. PA_DCHECK(
  1471. (slot_span_alignment >= internal::PartitionPageSize()) &&
  1472. partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
  1473. PA_DCHECK(flags < AllocFlags::kLastFlag << 1);
  1474. PA_DCHECK((flags & AllocFlags::kNoHooks) == 0); // Internal only.
  1475. PA_DCHECK(initialized);
  1476. #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  1477. if (!(flags & AllocFlags::kNoMemoryToolOverride)) {
  1478. CHECK_MAX_SIZE_OR_RETURN_NULLPTR(requested_size, flags);
  1479. const bool zero_fill = flags & AllocFlags::kZeroFill;
  1480. void* result =
  1481. zero_fill ? calloc(1, requested_size) : malloc(requested_size);
  1482. PA_CHECK(result || flags & AllocFlags::kReturnNull);
  1483. return result;
  1484. }
  1485. #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  1486. void* object = nullptr;
  1487. const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  1488. if (PA_UNLIKELY(hooks_enabled)) {
  1489. if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
  1490. &object, flags, requested_size, type_name)) {
  1491. PartitionAllocHooks::AllocationObserverHookIfEnabled(
  1492. object, requested_size, type_name);
  1493. return object;
  1494. }
  1495. }
  1496. object = AllocWithFlagsNoHooks(flags, requested_size, slot_span_alignment);
  1497. if (PA_UNLIKELY(hooks_enabled)) {
  1498. PartitionAllocHooks::AllocationObserverHookIfEnabled(object, requested_size,
  1499. type_name);
  1500. }
  1501. return object;
  1502. }
  1503. template <bool thread_safe>
  1504. PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
  1505. unsigned int flags,
  1506. size_t requested_size,
  1507. size_t slot_span_alignment) {
  1508. PA_DCHECK(
  1509. (slot_span_alignment >= internal::PartitionPageSize()) &&
  1510. partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
  1511. // The thread cache is added "in the middle" of the main allocator, that is:
  1512. // - After all the cookie/ref-count management
  1513. // - Before the "raw" allocator.
  1514. //
  1515. // That is, the general allocation flow is:
  1516. // 1. Adjustment of requested size to make room for extras
  1517. // 2. Allocation:
  1518. // a. Call to the thread cache, if it succeeds, go to step 3.
  1519. // b. Otherwise, call the "raw" allocator <-- Locking
  1520. // 3. Handle cookie/ref-count, zero allocation if required
  1521. size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
  1522. PA_CHECK(raw_size >= requested_size); // check for overflows
  1523. // We should only call |SizeToBucketIndex| at most once when allocating.
  1524. // Otherwise, we risk having |with_denser_bucket_distribution| changed
  1525. // underneath us (between calls to |SizeToBucketIndex| during the same call),
  1526. // which would result in an inconsistent state.
  1527. uint16_t bucket_index =
  1528. SizeToBucketIndex(raw_size, this->flags.with_denser_bucket_distribution);
  1529. size_t usable_size;
  1530. bool is_already_zeroed = false;
  1531. uintptr_t slot_start = 0;
  1532. size_t slot_size;
  1533. const bool is_quarantine_enabled = IsQuarantineEnabled();
  1534. // PCScan safepoint. Call before trying to allocate from cache.
  1535. // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
  1536. // default.
  1537. if (PA_UNLIKELY(is_quarantine_enabled)) {
  1538. PCScan::JoinScanIfNeeded();
  1539. }
  1540. // Don't use thread cache if higher order alignment is requested, because the
  1541. // thread cache will not be able to satisfy it.
  1542. //
  1543. // PA_LIKELY: performance-sensitive partitions use the thread cache.
  1544. if (PA_LIKELY(this->flags.with_thread_cache &&
  1545. slot_span_alignment <= internal::PartitionPageSize())) {
  1546. auto* tcache = ThreadCache::Get();
  1547. // PA_LIKELY: Typically always true, except for the very first allocation of
  1548. // this thread.
  1549. if (PA_LIKELY(ThreadCache::IsValid(tcache))) {
  1550. slot_start = tcache->GetFromCache(bucket_index, &slot_size);
  1551. } else {
  1552. slot_start = MaybeInitThreadCacheAndAlloc(bucket_index, &slot_size);
  1553. }
  1554. // PA_LIKELY: median hit rate in the thread cache is 95%, from metrics.
  1555. if (PA_LIKELY(slot_start)) {
  1556. // This follows the logic of SlotSpanMetadata::GetUsableSize for small
  1557. // buckets, which is too expensive to call here.
  1558. // Keep it in sync!
  1559. usable_size = AdjustSizeForExtrasSubtract(slot_size);
  1560. #if BUILDFLAG(PA_DCHECK_IS_ON)
  1561. // Make sure that the allocated pointer comes from the same place it would
  1562. // for a non-thread cache allocation.
  1563. SlotSpan* slot_span = SlotSpan::FromSlotStart(slot_start);
  1564. PA_DCHECK(IsValidSlotSpan(slot_span));
  1565. PA_DCHECK(slot_span->bucket == &bucket_at(bucket_index));
  1566. PA_DCHECK(slot_span->bucket->slot_size == slot_size);
  1567. PA_DCHECK(usable_size == slot_span->GetUsableSize(this));
  1568. // All large allocations must go through the RawAlloc path to correctly
  1569. // set |usable_size|.
  1570. PA_DCHECK(!slot_span->CanStoreRawSize());
  1571. PA_DCHECK(!slot_span->bucket->is_direct_mapped());
  1572. #endif
  1573. } else {
  1574. slot_start =
  1575. RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
  1576. &usable_size, &is_already_zeroed);
  1577. }
  1578. } else {
  1579. slot_start =
  1580. RawAlloc(buckets + bucket_index, flags, raw_size, slot_span_alignment,
  1581. &usable_size, &is_already_zeroed);
  1582. }
  1583. if (PA_UNLIKELY(!slot_start))
  1584. return nullptr;
  1585. // Layout inside the slot:
  1586. // |[refcnt]|...object...|[empty]|[cookie]|[unused]|
  1587. // <----(a)----->
  1588. // <--------(b)--------->
  1589. // <--(c)---> + <--(c)--->
  1590. // <---------(d)---------> + <--(d)--->
  1591. // <-----------------(e)------------------>
  1592. // <----------------------(f)---------------------->
  1593. // (a) requested_size
  1594. // (b) usable_size
  1595. // (c) extras
  1596. // (d) raw_size
  1597. // (e) utilized_slot_size
  1598. // (f) slot_size
  1599. // Notes:
  1600. // - Ref-count may or may not exist in the slot, depending on brp_enabled().
  1601. // - Cookie exists only in the BUILDFLAG(PA_DCHECK_IS_ON) case.
  1602. // - Think of raw_size as the minimum size required internally to satisfy
  1603. // the allocation request (i.e. requested_size + extras)
  1604. // - Note, at most one "empty" or "unused" space can occur at a time. It
  1605. // occurs when slot_size is larger than raw_size. "unused" applies only to
  1606. // large allocations (direct-mapped and single-slot slot spans) and "empty"
  1607. // only to small allocations.
  1608. // Why either-or, one might ask? We make an effort to put the trailing
  1609. // cookie as close to data as possible to catch overflows (often
  1610. // off-by-one), but that's possible only if we have enough space in metadata
  1611. // to save raw_size, i.e. only for large allocations. For small allocations,
  1612. // we have no other choice than putting the cookie at the very end of the
  1613. // slot, thus creating the "empty" space.
  1614. //
  1615. // If PUT_REF_COUNT_IN_PREVIOUS_SLOT is set, the layout is:
  1616. // |...object...|[empty]|[cookie]|[unused]|[refcnt]|
  1617. // <----(a)----->
  1618. // <--------(b)--------->
  1619. // <--(c)---> + <--(c)--->
  1620. // <----(d)-----> + <--(d)---> + <--(d)--->
  1621. // <-------------(e)-------------> + <--(e)--->
  1622. // <----------------------(f)---------------------->
  1623. // Notes:
  1624. // If |slot_start| is not SystemPageSize()-aligned (possible only for small
1625. // allocations), the ref-count of this slot is stored at the end of the
1626. // previous slot. Otherwise it is stored in the ref-count table placed after
1627. // the super page metadata. For simplicity, the space for the ref-count is
1628. // still reserved at the end of the previous slot, even though redundant.
  1629. void* object = SlotStartToObject(slot_start);
  1630. #if BUILDFLAG(PA_DCHECK_IS_ON)
  1631. // Add the cookie after the allocation.
  1632. if (this->flags.allow_cookie) {
  1633. internal::PartitionCookieWriteValue(static_cast<unsigned char*>(object) +
  1634. usable_size);
  1635. }
  1636. #endif
1637. // Fill the region with kUninitializedByte (on debug builds, if zero-fill wasn't
1638. // requested) or with 0 (if requested and not already zeroed).
  1639. bool zero_fill = flags & AllocFlags::kZeroFill;
  1640. // PA_LIKELY: operator new() calls malloc(), not calloc().
  1641. if (PA_LIKELY(!zero_fill)) {
  1642. // memset() can be really expensive.
  1643. #if BUILDFLAG(PA_EXPENSIVE_DCHECKS_ARE_ON)
  1644. internal::DebugMemset(object, internal::kUninitializedByte, usable_size);
  1645. #endif
  1646. } else if (!is_already_zeroed) {
  1647. memset(object, 0, usable_size);
  1648. }
  1649. #if BUILDFLAG(USE_BACKUP_REF_PTR)
  1650. // TODO(keishi): Add PA_LIKELY when brp is fully enabled as |brp_enabled| will
  1651. // be false only for the aligned partition.
  1652. if (brp_enabled()) {
  1653. auto* ref_count = new (internal::PartitionRefCountPointer(slot_start))
  1654. internal::PartitionRefCount();
  1655. #if defined(PA_REF_COUNT_STORE_REQUESTED_SIZE)
  1656. ref_count->SetRequestedSize(requested_size);
  1657. #else
  1658. (void)ref_count;
  1659. #endif
  1660. }
  1661. #endif // BUILDFLAG(USE_BACKUP_REF_PTR)
  1662. // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
  1663. // default.
  1664. if (PA_UNLIKELY(is_quarantine_enabled)) {
  1665. if (PA_LIKELY(internal::IsManagedByNormalBuckets(slot_start))) {
  1666. // Mark the corresponding bits in the state bitmap as allocated.
  1667. internal::StateBitmapFromAddr(slot_start)->Allocate(slot_start);
  1668. }
  1669. }
  1670. return object;
  1671. }
  1672. template <bool thread_safe>
  1673. PA_ALWAYS_INLINE uintptr_t
  1674. PartitionRoot<thread_safe>::RawAlloc(Bucket* bucket,
  1675. unsigned int flags,
  1676. size_t raw_size,
  1677. size_t slot_span_alignment,
  1678. size_t* usable_size,
  1679. bool* is_already_zeroed) {
  1680. ::partition_alloc::internal::ScopedGuard guard{lock_};
  1681. return AllocFromBucket(bucket, flags, raw_size, slot_span_alignment,
  1682. usable_size, is_already_zeroed);
  1683. }
  1684. template <bool thread_safe>
  1685. PA_ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocWithFlags(
  1686. unsigned int flags,
  1687. size_t alignment,
  1688. size_t requested_size) {
  1689. // Aligned allocation support relies on the natural alignment guarantees of
  1690. // PartitionAlloc. Specifically, it relies on the fact that slots within a
  1691. // slot span are aligned to slot size, from the beginning of the span.
  1692. //
  1693. // For alignments <=PartitionPageSize(), the code below adjusts the request
  1694. // size to be a power of two, no less than alignment. Since slot spans are
  1695. // aligned to PartitionPageSize(), which is also a power of two, this will
  1696. // automatically guarantee alignment on the adjusted size boundary, thanks to
  1697. // the natural alignment described above.
  1698. //
  1699. // For alignments >PartitionPageSize(), we need to pass the request down the
  1700. // stack to only give us a slot span aligned to this more restrictive
  1701. // boundary. In the current implementation, this code path will always
  1702. // allocate a new slot span and hand us the first slot, so no need to adjust
  1703. // the request size. As a consequence, allocating many small objects with
1704. // such a high alignment can cause non-negligible fragmentation,
  1705. // particularly if these allocations are back to back.
  1706. // TODO(bartekn): We should check that this is not causing issues in practice.
  1707. //
  1708. // Extras before the allocation are forbidden as they shift the returned
  1709. // allocation from the beginning of the slot, thus messing up alignment.
  1710. // Extras after the allocation are acceptable, but they have to be taken into
  1711. // account in the request size calculation to avoid crbug.com/1185484.
  1712. PA_DCHECK(this->flags.allow_aligned_alloc);
  1713. PA_DCHECK(!this->flags.extras_offset);
  1714. // This is mandated by |posix_memalign()|, so should never fire.
  1715. PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
  1716. // Catch unsupported alignment requests early.
  1717. PA_CHECK(alignment <= internal::kMaxSupportedAlignment);
  1718. size_t raw_size = AdjustSizeForExtrasAdd(requested_size);
  1719. size_t adjusted_size = requested_size;
  1720. if (alignment <= internal::PartitionPageSize()) {
  1721. // Handle cases such as size = 16, alignment = 64.
  1722. // Wastes memory when a large alignment is requested with a small size, but
  1723. // this is hard to avoid, and should not be too common.
  1724. if (PA_UNLIKELY(raw_size < alignment)) {
  1725. raw_size = alignment;
  1726. } else {
  1727. // PartitionAlloc only guarantees alignment for power-of-two sized
  1728. // allocations. To make sure this applies here, round up the allocation
  1729. // size.
  1730. raw_size =
  1731. static_cast<size_t>(1)
  1732. << (int{sizeof(size_t) * 8} -
  1733. partition_alloc::internal::base::bits::CountLeadingZeroBits(
  1734. raw_size - 1));
  1735. }
  1736. PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
  1737. // Adjust back, because AllocWithFlagsNoHooks/Alloc will adjust it again.
  1738. adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
  1739. // Overflow check. adjusted_size must be larger or equal to requested_size.
  1740. if (PA_UNLIKELY(adjusted_size < requested_size)) {
  1741. if (flags & AllocFlags::kReturnNull)
  1742. return nullptr;
  1743. // OutOfMemoryDeathTest.AlignedAlloc requires
  1744. // base::TerminateBecauseOutOfMemory (invoked by
  1745. // PartitionExcessiveAllocationSize).
  1746. internal::PartitionExcessiveAllocationSize(requested_size);
  1747. // internal::PartitionExcessiveAllocationSize(size) causes OOM_CRASH.
  1748. PA_NOTREACHED();
  1749. }
  1750. }
  1751. // Slot spans are naturally aligned on partition page size, but make sure you
  1752. // don't pass anything less, because it'll mess up callee's calculations.
  1753. size_t slot_span_alignment =
  1754. std::max(alignment, internal::PartitionPageSize());
  1755. bool no_hooks = flags & AllocFlags::kNoHooks;
  1756. void* object =
  1757. no_hooks
  1758. ? AllocWithFlagsNoHooks(0, adjusted_size, slot_span_alignment)
  1759. : AllocWithFlagsInternal(0, adjusted_size, slot_span_alignment, "");
  1760. // |alignment| is a power of two, but the compiler doesn't necessarily know
1761. // that. A regular % operation is very slow; make sure to use the equivalent,
  1762. // faster form.
  1763. // No need to MTE-untag, as it doesn't change alignment.
  1764. PA_CHECK(!(reinterpret_cast<uintptr_t>(object) & (alignment - 1)));
  1765. return object;
  1766. }
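// Worked example for the alignment <= PartitionPageSize() branch above
// (illustrative; the exact extras size depends on build flags): with
// requested_size = 20 and alignment = 64, suppose AdjustSizeForExtrasAdd()
// yields raw_size = 24. Since 24 < 64, raw_size becomes 64, already a power of
// two no smaller than the alignment, so the bucket's natural slot alignment
// yields a 64-byte-aligned object. Had raw_size been, say, 80, it would
// instead be rounded up to the next power of two, 128.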
  1767. template <bool thread_safe>
  1768. PA_NOINLINE void* PartitionRoot<thread_safe>::Alloc(size_t requested_size,
  1769. const char* type_name) {
  1770. return AllocWithFlags(0, requested_size, type_name);
  1771. }
  1772. template <bool thread_safe>
  1773. PA_NOINLINE void* PartitionRoot<thread_safe>::Realloc(void* ptr,
  1774. size_t new_size,
  1775. const char* type_name) {
  1776. return ReallocWithFlags(0, ptr, new_size, type_name);
  1777. }
  1778. template <bool thread_safe>
  1779. PA_NOINLINE void* PartitionRoot<thread_safe>::TryRealloc(
  1780. void* ptr,
  1781. size_t new_size,
  1782. const char* type_name) {
  1783. return ReallocWithFlags(AllocFlags::kReturnNull, ptr, new_size, type_name);
  1784. }
  1785. // Return the capacity of the underlying slot (adjusted for extras) that'd be
  1786. // used to satisfy a request of |size|. This doesn't mean this capacity would be
  1787. // readily available. It merely means that if an allocation happened with that
  1788. // returned value, it'd use the same amount of underlying memory as the
  1789. // allocation with |size|.
  1790. template <bool thread_safe>
  1791. PA_ALWAYS_INLINE size_t
  1792. PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
  1793. size_t size) const {
  1794. #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
  1795. return size;
  1796. #else
  1797. PA_DCHECK(PartitionRoot<thread_safe>::initialized);
  1798. size = AdjustSizeForExtrasAdd(size);
  1799. auto& bucket =
  1800. bucket_at(SizeToBucketIndex(size, flags.with_denser_bucket_distribution));
  1801. PA_DCHECK(!bucket.slot_size || bucket.slot_size >= size);
  1802. PA_DCHECK(!(bucket.slot_size % internal::kSmallestBucket));
  1803. if (PA_LIKELY(!bucket.is_direct_mapped())) {
  1804. size = bucket.slot_size;
  1805. } else if (size > internal::MaxDirectMapped()) {
  1806. // Too large to allocate => return the size unchanged.
  1807. } else {
  1808. size = GetDirectMapSlotSize(size);
  1809. }
  1810. size = AdjustSizeForExtrasSubtract(size);
  1811. return size;
  1812. #endif
  1813. }
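// Example (illustrative, assuming a configuration with no extras): if a
// 20-byte request maps to a normal bucket whose slot_size is 32, the reported
// capacity is 32, and a later allocation of 32 bytes lands in that same
// bucket, i.e. consumes the same amount of underlying memory. Requests above
// internal::MaxDirectMapped() are reported back unchanged.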
  1814. using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
  1815. static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
  1816. internal::kPartitionCachelineSize,
  1817. "Padding is incorrect");
  1818. #if BUILDFLAG(USE_BACKUP_REF_PTR)
  1819. // Usage in `raw_ptr.cc` is notable enough to merit a non-internal alias.
  1820. using ::partition_alloc::internal::PartitionAllocGetSlotStartInBRPPool;
  1821. #endif // BUILDFLAG(USE_BACKUP_REF_PTR)
  1822. } // namespace partition_alloc
  1823. #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_H_