partition_page.cc

// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_page.h"

#include <algorithm>
#include <cstdint>

#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"

namespace partition_alloc::internal {

namespace {

void UnmapNow(uintptr_t reservation_start,
              size_t reservation_size,
              pool_handle pool);
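
// Descriptive note (derived from the code below): unlinks a direct-mapped
// slot span from the root's bookkeeping and releases its reservation. The
// metadata is updated under the root lock; the actual unmapping is deferred
// to UnmapNow(), which runs after the lock has been released.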
template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionDirectUnmap(
    SlotSpanMetadata<thread_safe>* slot_span) {
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
  root->lock_.AssertAcquired();

  auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);

  // Maintain the doubly-linked list of all direct mappings.
  if (extent->prev_extent) {
    PA_DCHECK(extent->prev_extent->next_extent == extent);
    extent->prev_extent->next_extent = extent->next_extent;
  } else {
    root->direct_map_list = extent->next_extent;
  }
  if (extent->next_extent) {
    PA_DCHECK(extent->next_extent->prev_extent == extent);
    extent->next_extent->prev_extent = extent->prev_extent;
  }

  // The actual decommit is deferred below, after releasing the lock.
  root->DecreaseCommittedPages(slot_span->bucket->slot_size);

  size_t reservation_size = extent->reservation_size;
  PA_DCHECK(!(reservation_size & DirectMapAllocationGranularityOffsetMask()));
  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reservation_size);
  root->total_size_of_direct_mapped_pages -= reservation_size;

  uintptr_t reservation_start =
      SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
  // The mapping may start at an unspecified location within a super page, but
  // we always reserve memory aligned to the super page size.
  reservation_start = base::bits::AlignDown(reservation_start, kSuperPageSize);

  // All the metadata have been updated above; in particular, the mapping has
  // been unlinked. We can safely release the memory outside the lock, which is
  // important as decommitting memory can be expensive.
  //
  // This can create a fake "address space exhaustion" OOM, in the case where
  // e.g. a large allocation is freed on one thread, and another large one is
  // made from another thread *before* UnmapNow() has finished running. In
  // this case the second one may not find enough space in the GigaCage, and
  // fail. This is expected to be very rare though, and likely preferable to
  // holding the lock while releasing the address space.
  ScopedUnlockGuard unlock{root->lock_};
  ScopedSyscallTimer timer{root};
  UnmapNow(reservation_start, reservation_size, root->ChoosePool());
}

}  // namespace
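
// Descriptive note (derived from the code below): registers an empty slot
// span in the root's global ring of recently-emptied slot spans. The entry
// previously occupying the target ring position is decommitted first, so an
// empty span gets a grace period during which it can be reused before its
// memory is actually released. Called with the root lock held.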
template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
  PA_DCHECK(is_empty());
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();

  root->empty_slot_spans_dirty_bytes +=
      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());

  ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();

  // If the slot span is already registered as empty, give it another life.
  if (in_empty_cache_) {
    PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
    PA_DCHECK(root->global_empty_slot_span_ring[empty_cache_index_] == this);
    root->global_empty_slot_span_ring[empty_cache_index_] = nullptr;
  }

  int16_t current_index = root->global_empty_slot_span_ring_index;
  SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
      root->global_empty_slot_span_ring[current_index];
  // The slot span might well have been re-activated, filled up, etc. before we
  // get around to looking at it here.
  if (slot_span_to_decommit)
    slot_span_to_decommit->DecommitIfPossible(root);

  // We put the empty slot span on our global list of "slot spans that were
  // once empty", thus providing it a bit of breathing room to get re-used
  // before we really free it. This reduces the number of system calls.
  // Otherwise any free() from a single-slot slot span would lead to a
  // syscall, for instance.
  root->global_empty_slot_span_ring[current_index] = this;
  empty_cache_index_ = current_index;
  in_empty_cache_ = 1;
  ++current_index;
  if (current_index == root->global_empty_slot_span_ring_size)
    current_index = 0;
  root->global_empty_slot_span_ring_index = current_index;

  // Avoid wasting too much memory on empty slot spans. Note that we only
  // divide by powers of two, since division can be very slow, and this path
  // is taken for every single-slot slot span deallocation.
  //
  // Empty slot spans are also all decommitted with MemoryReclaimer, but it may
  // never run, be delayed arbitrarily, and/or miss large memory spikes.
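  //
  // For illustration only (the shift value here is hypothetical, not the
  // configured default): with 64 MiB of committed pages and a shift of 5, the
  // cap below is 64 MiB >> 5 = 2 MiB of dirty memory allowed to sit in empty
  // slot spans before the ring is shrunk.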
  size_t max_empty_dirty_bytes =
      root->total_size_of_committed_pages.load(std::memory_order_relaxed) >>
      root->max_empty_slot_spans_dirty_bytes_shift;
  if (root->empty_slot_spans_dirty_bytes > max_empty_dirty_bytes) {
    root->ShrinkEmptySlotSpansRing(std::min(
        root->empty_slot_spans_dirty_bytes / 2, max_empty_dirty_bytes));
  }
}
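
// Descriptive note (consistent with its use in FreeSlowPath() below): the
// sentinel slot span stands in for bucket->active_slot_spans_head when a
// bucket has no active slot span, so callers never have to null-check the
// head pointer.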
// static
template <bool thread_safe>
SlotSpanMetadata<thread_safe>*
SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
  return &sentinel_slot_span_;
}

template <bool thread_safe>
SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
    PartitionBucket<thread_safe>* bucket)
    : bucket(bucket), can_store_raw_size_(bucket->CanStoreRawSize()) {}

template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
  root->lock_.AssertAcquired();
#endif
  PA_DCHECK(this != get_sentinel_slot_span());

  // The caller has already modified |num_allocated_slots|. It is the
  // responsibility of this function to react to it, and update the state. We
  // can get here only if the slot span is marked full and/or is now empty.
  // Both are possible at the same time, which can happen when the caller
  // lowered |num_allocated_slots| from "all" to 0 (common for single-slot
  // spans). First execute the "is marked full" path, as it sets up
  // |active_slot_spans_head| in a way later needed for the "is empty" path.
  if (marked_full) {
    // Direct map slot spans aren't added to any lists, hence never marked
    // full.
    PA_DCHECK(!bucket->is_direct_mapped());
    // Double check that the slot span was full.
    PA_DCHECK(num_allocated_slots ==
              bucket->get_slots_per_span() - number_of_freed);
    marked_full = 0;
    // A fully used slot span became partially used. It must be put back on
    // the non-full list. Also make it the current slot span, to increase the
    // chances of it being filled up again. The old current slot span will be
    // the next slot span.
    PA_DCHECK(!next_slot_span);
    if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
      next_slot_span = bucket->active_slot_spans_head;
    bucket->active_slot_spans_head = this;
    PA_CHECK(bucket->num_full_slot_spans);  // Underflow.
    --bucket->num_full_slot_spans;
  }

  if (PA_LIKELY(num_allocated_slots == 0)) {
    // The slot span became fully unused.
    if (PA_UNLIKELY(bucket->is_direct_mapped())) {
      PartitionDirectUnmap(this);
      return;
    }
#if BUILDFLAG(PA_DCHECK_IS_ON)
    freelist_head->CheckFreeList(bucket->slot_size);
#endif
    // If it's the current active slot span, change it. We bounce the slot
    // span to the empty list as a force towards defragmentation.
    if (PA_LIKELY(this == bucket->active_slot_spans_head))
      bucket->SetNewActiveSlotSpan();
    PA_DCHECK(bucket->active_slot_spans_head != this);

    if (CanStoreRawSize())
      SetRawSize(0);

    RegisterEmpty();
  }
}

template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(is_empty());
  PA_DCHECK(!bucket->is_direct_mapped());
  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(this);
  // If lazy commit is enabled, only provisioned slots are committed.
  size_t dirty_size =
      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
  size_t size_to_decommit =
      kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();

  PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
  root->empty_slot_spans_dirty_bytes -= dirty_size;

  // A slot span that hasn't been decommitted yet must have had at least one
  // allocation, hence a non-zero size to decommit.
  PA_DCHECK(size_to_decommit > 0);
  root->DecommitSystemPagesForData(
      slot_span_start, size_to_decommit,
      PageAccessibilityDisposition::kAllowKeepForPerf);

  // We actually leave the decommitted slot span in the active list. We'll
  // sweep it on to the decommitted list when we next walk the active list.
  // Pulling this trick enables us to use a singly-linked list for all cases,
  // which is critical in keeping the slot span metadata structure down to 32
  // bytes in size.
  SetFreelistHead(nullptr);
  num_unprovisioned_slots = 0;
  PA_DCHECK(is_decommitted());
  PA_DCHECK(bucket);
}
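
// Descriptive note (derived from the code below): removes the slot span from
// the empty-span cache and decommits it, but only if it is still empty; the
// span may have been reused, and possibly refilled, since it was registered.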
template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
    PartitionRoot<thread_safe>* root) {
  root->lock_.AssertAcquired();
  PA_DCHECK(in_empty_cache_);
  PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
  in_empty_cache_ = 0;
  if (is_empty())
    Decommit(root);
}
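
// Descriptive note (derived from the code below): rebuilds the freelist in
// ascending slot-address order. It first records which provisioned slots are
// free in a bitset, then re-links the entries from the lowest slot number to
// the highest, so subsequent allocations pop the lowest-address free slot
// first.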
template <bool thread_safe>
void SlotSpanMetadata<thread_safe>::SortFreelist() {
  std::bitset<kMaxSlotsPerSlotSpan> free_slots;
  uintptr_t slot_span_start = ToSlotSpanStart(this);

  size_t num_provisioned_slots =
      bucket->get_slots_per_span() - num_unprovisioned_slots;
  PA_CHECK(num_provisioned_slots <= kMaxSlotsPerSlotSpan);

  size_t num_free_slots = 0;
  size_t slot_size = bucket->slot_size;
  for (PartitionFreelistEntry* head = freelist_head; head;
       head = head->GetNext(slot_size)) {
    ++num_free_slots;
    size_t offset_in_slot_span = SlotStartPtr2Addr(head) - slot_span_start;
    size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
    PA_DCHECK(slot_number < num_provisioned_slots);
    free_slots[slot_number] = true;
  }
  PA_DCHECK(num_free_slots == GetFreelistLength());

  // Empty or single-element list is always sorted.
  if (num_free_slots > 1) {
    PartitionFreelistEntry* back = nullptr;
    PartitionFreelistEntry* head = nullptr;

    for (size_t slot_number = 0; slot_number < num_provisioned_slots;
         slot_number++) {
      if (free_slots[slot_number]) {
        uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
        auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);

        if (!head)
          head = entry;
        else
          back->SetNext(entry);

        back = entry;
      }
    }
    SetFreelistHead(head);
  }

  freelist_is_sorted_ = true;
}

namespace {

void UnmapNow(uintptr_t reservation_start,
              size_t reservation_size,
              pool_handle pool) {
  PA_DCHECK(reservation_start && reservation_size > 0);
#if BUILDFLAG(PA_DCHECK_IS_ON)
  // When USE_BACKUP_REF_PTR is off, the BRP pool isn't used.
#if BUILDFLAG(USE_BACKUP_REF_PTR)
  if (pool == GetBRPPool()) {
    // In 32-bit mode, the beginning of a reservation may be excluded from the
    // BRP pool, so shift the pointer. Other pools don't have this logic.
    PA_DCHECK(IsManagedByPartitionAllocBRPPool(
#if defined(PA_HAS_64_BITS_POINTERS)
        reservation_start
#else
        reservation_start +
        AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
#endif
        ));
  } else
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
  {
    PA_DCHECK(pool == GetRegularPool() ||
              (IsConfigurablePoolAvailable() && pool == GetConfigurablePool()));
    // Non-BRP pools don't need the adjustment that the BRP pool requires in
    // 32-bit mode.
    PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
              IsManagedByPartitionAllocConfigurablePool(reservation_start));
  }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)

  PA_DCHECK((reservation_start & kSuperPageOffsetMask) == 0);
  uintptr_t reservation_end = reservation_start + reservation_size;
  auto* offset_ptr = ReservationOffsetPointer(reservation_start);
  // Reset the offset table entries for the given memory before unreserving it.
  // Since the memory has not yet been unreserved, and is therefore not
  // available to other threads, the table entries for it cannot be modified by
  // other threads either, so we can update them without a race condition.
  uint16_t i = 0;
  for (uintptr_t address = reservation_start; address < reservation_end;
       address += kSuperPageSize) {
    PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address));
    PA_DCHECK(*offset_ptr == i++);
    *offset_ptr++ = kOffsetTagNotAllocated;
  }

#if !defined(PA_HAS_64_BITS_POINTERS)
  AddressPoolManager::GetInstance().MarkUnused(pool, reservation_start,
                                               reservation_size);
#endif

  // After resetting the table entries, unreserve and decommit the memory.
  AddressPoolManager::GetInstance().UnreserveAndDecommit(
      pool, reservation_start, reservation_size);
}

}  // namespace

template struct SlotSpanMetadata<ThreadSafe>;

}  // namespace partition_alloc::internal