page_allocator.cc

// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/page_allocator.h"

#include <atomic>
#include <cstdint>

#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/thread_annotations.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

#if BUILDFLAG(IS_WIN)
#include "base/allocator/partition_allocator/page_allocator_internals_win.h"
#elif BUILDFLAG(IS_POSIX)
#include "base/allocator/partition_allocator/page_allocator_internals_posix.h"
#elif BUILDFLAG(IS_FUCHSIA)
#include "base/allocator/partition_allocator/page_allocator_internals_fuchsia.h"
#else
#error Platform not supported.
#endif

namespace partition_alloc {

namespace {

internal::Lock g_reserve_lock;

// We may reserve/release address space on different threads.
internal::Lock& GetReserveLock() {
  return g_reserve_lock;
}
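
// Running total of address space mapped through SystemAllocPages(), in bytes.
// Incremented on successful allocation, decremented in FreePages(), and read
// by GetTotalMappedSize() below.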
std::atomic<size_t> g_total_mapped_address_space;

// We only support a single block of reserved address space.
uintptr_t s_reservation_address PA_GUARDED_BY(GetReserveLock()) = 0;
size_t s_reservation_size PA_GUARDED_BY(GetReserveLock()) = 0;
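
// Attempts the allocation directly; if that fails and retrying could plausibly
// help (the hint is only advisory, or no hint was given), releases the single
// reserved address space block, if any, and tries exactly once more.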
uintptr_t AllocPagesIncludingReserved(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag) {
  uintptr_t ret =
      internal::SystemAllocPages(address, length, accessibility, page_tag);
  if (!ret) {
    const bool cant_alloc_length = internal::kHintIsAdvisory || !address;
    if (cant_alloc_length) {
      // The system cannot allocate |length| bytes. Release any reserved
      // address space and try once more.
      ReleaseReservation();
      ret =
          internal::SystemAllocPages(address, length, accessibility, page_tag);
    }
  }
  return ret;
}

// Trims memory at |base_address| to given |trim_length| and |alignment|.
//
// On failure, on Windows, this function returns 0 and frees memory at
// |base_address|.
uintptr_t TrimMapping(uintptr_t base_address,
                      size_t base_length,
                      size_t trim_length,
                      uintptr_t alignment,
                      uintptr_t alignment_offset,
                      PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(base_length >= trim_length);
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(alignment_offset < alignment);
  uintptr_t new_base =
      NextAlignedWithOffset(base_address, alignment, alignment_offset);
  PA_DCHECK(new_base >= base_address);
  size_t pre_slack = new_base - base_address;
  size_t post_slack = base_length - pre_slack - trim_length;
  PA_DCHECK(base_length == trim_length || pre_slack || post_slack);
  PA_DCHECK(pre_slack < base_length);
  PA_DCHECK(post_slack < base_length);
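  // The platform-specific implementation is expected to release the
  // |pre_slack| bytes below the aligned base and the |post_slack| bytes past
  // the trimmed region (on Windows this may instead require freeing and
  // re-mapping, hence the failure mode described above).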
  return internal::TrimMappingInternal(base_address, base_length, trim_length,
                                       accessibility, pre_slack, post_slack);
}

}  // namespace

// Align |address| up to the closest, non-smaller address, that gives
// |requested_offset| remainder modulo |alignment|.
//
// Examples for alignment=1024 and requested_offset=64:
//   64 -> 64
//   65 -> 1088
//   1024 -> 1088
//   1088 -> 1088
//   1089 -> 2112
//   2048 -> 2112
uintptr_t NextAlignedWithOffset(uintptr_t address,
                                uintptr_t alignment,
                                uintptr_t requested_offset) {
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(alignment));
  PA_DCHECK(requested_offset < alignment);

  uintptr_t actual_offset = address & (alignment - 1);
  uintptr_t new_address;
  if (actual_offset <= requested_offset)
    new_address = address + requested_offset - actual_offset;
  else
    new_address = address + alignment + requested_offset - actual_offset;
  PA_DCHECK(new_address >= address);
  PA_DCHECK(new_address - address < alignment);
  PA_DCHECK(new_address % alignment == requested_offset);

  return new_address;
}

namespace internal {
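
// All address-space mappings in this file funnel through this wrapper, which
// is what keeps g_total_mapped_address_space in sync with the matching
// fetch_sub in FreePages().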
uintptr_t SystemAllocPages(uintptr_t hint,
                           size_t length,
                           PageAccessibilityConfiguration accessibility,
                           PageTag page_tag) {
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
  uintptr_t ret =
      internal::SystemAllocPagesInternal(hint, length, accessibility, page_tag);
  if (ret)
    g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);

  return ret;
}

}  // namespace internal

uintptr_t AllocPages(size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag) {
  return AllocPagesWithAlignOffset(0, length, align, 0, accessibility,
                                   page_tag);
}

uintptr_t AllocPages(uintptr_t address,
                     size_t length,
                     size_t align,
                     PageAccessibilityConfiguration accessibility,
                     PageTag page_tag) {
  return AllocPagesWithAlignOffset(address, length, align, 0, accessibility,
                                   page_tag);
}

void* AllocPages(void* address,
                 size_t length,
                 size_t align,
                 PageAccessibilityConfiguration accessibility,
                 PageTag page_tag) {
  return reinterpret_cast<void*>(
      AllocPages(reinterpret_cast<uintptr_t>(address), length, align,
                 accessibility, page_tag));
}

uintptr_t AllocPagesWithAlignOffset(
    uintptr_t address,
    size_t length,
    size_t align,
    size_t align_offset,
    PageAccessibilityConfiguration accessibility,
    PageTag page_tag) {
  PA_DCHECK(length >= internal::PageAllocationGranularity());
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(align >= internal::PageAllocationGranularity());
  // Alignment must be power of 2 for masking math to work.
  PA_DCHECK(internal::base::bits::IsPowerOfTwo(align));
  PA_DCHECK(align_offset < align);
  PA_DCHECK(!(align_offset & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));

  uintptr_t align_offset_mask = align - 1;
  uintptr_t align_base_mask = ~align_offset_mask;
  PA_DCHECK(!address || (address & align_offset_mask) == align_offset);

  // If the client passed null as the address, choose a good one.
  if (!address) {
    address = (GetRandomPageBase() & align_base_mask) + align_offset;
  }

  // First try to force an exact-size, aligned allocation from our random base.
#if defined(ARCH_CPU_32_BITS)
  // On 32 bit systems, first try one random aligned address, and then try an
  // aligned address derived from the value of |ret|.
  constexpr int kExactSizeTries = 2;
#else
  // On 64 bit systems, try 3 random aligned addresses.
  constexpr int kExactSizeTries = 3;
#endif

  for (int i = 0; i < kExactSizeTries; ++i) {
    uintptr_t ret =
        AllocPagesIncludingReserved(address, length, accessibility, page_tag);
    if (ret) {
      // If the alignment is to our liking, we're done.
      if ((ret & align_offset_mask) == align_offset)
        return ret;
      // Free the memory and try again.
      FreePages(ret, length);
    } else {
      // |ret| is null; if this try was unhinted, we're OOM.
      if (internal::kHintIsAdvisory || !address)
        return 0;
    }

#if defined(ARCH_CPU_32_BITS)
    // For small address spaces, try the first aligned address >= |ret|. Note
    // |ret| may be null, in which case |address| becomes null. If
    // |align_offset| is non-zero, this calculation may get us not the first,
    // but the next matching address.
    address = ((ret + align_offset_mask) & align_base_mask) + align_offset;
#else  // defined(ARCH_CPU_64_BITS)
    // Keep trying random addresses on systems that have a large address space.
    address = NextAlignedWithOffset(GetRandomPageBase(), align, align_offset);
#endif
  }

  // Make a larger allocation so we can force alignment.
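  // Over-allocating by |align| - PageAllocationGranularity() guarantees that
  // the resulting region contains an address with the requested offset modulo
  // |align|, which TrimMapping() then carves out.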
  size_t try_length = length + (align - internal::PageAllocationGranularity());
  PA_CHECK(try_length >= length);
  uintptr_t ret;

  do {
    // Continue randomizing only on POSIX.
    address = internal::kHintIsAdvisory ? GetRandomPageBase() : 0;
    ret = AllocPagesIncludingReserved(address, try_length, accessibility,
                                      page_tag);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret && (ret = TrimMapping(ret, try_length, length, align,
                                     align_offset, accessibility)) == 0);

  return ret;
}

void FreePages(uintptr_t address, size_t length) {
  PA_DCHECK(!(address & internal::PageAllocationGranularityOffsetMask()));
  PA_DCHECK(!(length & internal::PageAllocationGranularityOffsetMask()));
  internal::FreePagesInternal(address, length);
  PA_DCHECK(g_total_mapped_address_space.load(std::memory_order_relaxed) > 0);
  g_total_mapped_address_space.fetch_sub(length, std::memory_order_relaxed);
}

void FreePages(void* address, size_t length) {
  FreePages(reinterpret_cast<uintptr_t>(address), length);
}

bool TrySetSystemPagesAccess(uintptr_t address,
                             size_t length,
                             PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  return internal::TrySetSystemPagesAccessInternal(address, length,
                                                   accessibility);
}

bool TrySetSystemPagesAccess(void* address,
                             size_t length,
                             PageAccessibilityConfiguration accessibility) {
  return TrySetSystemPagesAccess(reinterpret_cast<uintptr_t>(address), length,
                                 accessibility);
}

void SetSystemPagesAccess(uintptr_t address,
                          size_t length,
                          PageAccessibilityConfiguration accessibility) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::SetSystemPagesAccessInternal(address, length, accessibility);
}
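
// Note that decommitted pages stay reserved as address space; whether they are
// also made inaccessible depends on |accessibility_disposition| and on the
// platform. See page_allocator.h for the exact contract.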
void DecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DecommitSystemPagesInternal(address, length,
                                        accessibility_disposition);
}

void DecommitSystemPages(
    void* address,
    size_t length,
    PageAccessibilityDisposition accessibility_disposition) {
  DecommitSystemPages(reinterpret_cast<uintptr_t>(address), length,
                      accessibility_disposition);
}

void DecommitAndZeroSystemPages(uintptr_t address, size_t length) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DecommitAndZeroSystemPagesInternal(address, length);
}

void DecommitAndZeroSystemPages(void* address, size_t length) {
  DecommitAndZeroSystemPages(reinterpret_cast<uintptr_t>(address), length);
}

void RecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
  internal::RecommitSystemPagesInternal(address, length, accessibility,
                                        accessibility_disposition);
}

bool TryRecommitSystemPages(
    uintptr_t address,
    size_t length,
    PageAccessibilityConfiguration accessibility,
    PageAccessibilityDisposition accessibility_disposition) {
  // Duplicated because we want errors to be reported at a lower level in the
  // crashing case.
  PA_DCHECK(!(address & internal::SystemPageOffsetMask()));
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  PA_DCHECK(accessibility != PageAccessibilityConfiguration::kInaccessible);
  return internal::TryRecommitSystemPagesInternal(
      address, length, accessibility, accessibility_disposition);
}

void DiscardSystemPages(uintptr_t address, size_t length) {
  PA_DCHECK(!(length & internal::SystemPageOffsetMask()));
  internal::DiscardSystemPagesInternal(address, length);
}

void DiscardSystemPages(void* address, size_t length) {
  DiscardSystemPages(reinterpret_cast<uintptr_t>(address), length);
}
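
// The reservation below is a block of inaccessible address space that acts as
// an emergency cushion: AllocPagesIncludingReserved() releases it via
// ReleaseReservation() when an allocation fails, freeing up room to retry.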
bool ReserveAddressSpace(size_t size) {
  // To avoid deadlock, call only SystemAllocPages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address) {
    uintptr_t mem = internal::SystemAllocPages(
        0, size, PageAccessibilityConfiguration::kInaccessible,
        PageTag::kChromium);
    if (mem) {
      // We guarantee this alignment when reserving address space.
      PA_DCHECK(!(mem & internal::PageAllocationGranularityOffsetMask()));
      s_reservation_address = mem;
      s_reservation_size = size;
      return true;
    }
  }
  return false;
}

bool ReleaseReservation() {
  // To avoid deadlock, call only FreePages.
  internal::ScopedGuard guard(GetReserveLock());
  if (!s_reservation_address)
    return false;

  FreePages(s_reservation_address, s_reservation_size);
  s_reservation_address = 0;
  s_reservation_size = 0;
  return true;
}

bool HasReservationForTesting() {
  internal::ScopedGuard guard(GetReserveLock());
  return s_reservation_address;
}
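
// Returns the platform error code captured by the internals when a page
// allocation last failed (presumably errno on POSIX and GetLastError() on
// Windows; see the page_allocator_internals_* headers).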
uint32_t GetAllocPageErrorCode() {
  return internal::s_allocPageErrorCode;
}
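
// Reports the counter maintained above: total bytes of address space currently
// mapped via SystemAllocPages() and not yet returned through FreePages().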
size_t GetTotalMappedSize() {
  return g_total_mapped_address_space;
}

}  // namespace partition_alloc