// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_

#include <algorithm>
#include <array>
#include <limits>

#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

// The feature is not applicable to 32-bit address space.
#if defined(PA_HAS_64_BITS_POINTERS)

namespace partition_alloc {

namespace internal {

// Reserves address space for PartitionAllocator.
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
 public:
  // BRP stands for BackupRefPtr. GigaCage is split into pools, one that
  // supports BackupRefPtr and one that doesn't.
  static PA_ALWAYS_INLINE internal::pool_handle GetRegularPool() {
    return setup_.regular_pool_;
  }

#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
    return setup_.regular_pool_base_mask_;
  }
#else
  static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
    return kRegularPoolBaseMask;
  }
#endif

  static PA_ALWAYS_INLINE internal::pool_handle GetBRPPool() {
    return setup_.brp_pool_;
  }

  // The Configurable Pool can be created inside an existing mapping and so
  // will be located outside PartitionAlloc's GigaCage.
  static PA_ALWAYS_INLINE internal::pool_handle GetConfigurablePool() {
    return setup_.configurable_pool_;
  }

  static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
      uintptr_t address) {
    // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
    PA_DCHECK(!IsInBRPPool(address));
#endif
    pool_handle pool = 0;
    uintptr_t base = 0;
    if (IsInRegularPool(address)) {
      pool = GetRegularPool();
      base = setup_.regular_pool_base_address_;
#if BUILDFLAG(USE_BACKUP_REF_PTR)
    } else if (IsInBRPPool(address)) {
      pool = GetBRPPool();
      base = setup_.brp_pool_base_address_;
#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
    } else if (IsInConfigurablePool(address)) {
      pool = GetConfigurablePool();
      base = setup_.configurable_pool_base_address_;
    } else {
      PA_NOTREACHED();
    }
    return std::make_pair(pool, address - base);
  }
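
  // Usage sketch (illustrative only; the variable names below are made up and
  // `address` is assumed to already lie inside one of the initialized pools,
  // otherwise the PA_NOTREACHED() above fires in debug builds):
  //
  //   uintptr_t address = ...;  // an address known to be pool-managed
  //   auto [pool, offset_in_pool] =
  //       PartitionAddressSpace::GetPoolAndOffset(address);
  //   // `offset_in_pool` is relative to the base address of `pool`.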

  static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
    return kConfigurablePoolMaxSize;
  }
  static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
    return kConfigurablePoolMinSize;
  }

  // Initialize the GigaCage and the Pools inside of it.
  // This function must only be called from the main thread.
  static void Init();

  // Initialize the ConfigurablePool at the given address |pool_base|. It must
  // be aligned to the size of the pool. The size must be a power of two and
  // must be within [ConfigurablePoolMinSize(), ConfigurablePoolMaxSize()].
  // This function must only be called from the main thread.
  static void InitConfigurablePool(uintptr_t pool_base, size_t size);
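  // A hypothetical call sequence satisfying the constraints above (the size
  // and the way the reservation is obtained are made up for illustration):
  //
  //   size_t pool_size = 4 * kGiB;  // power of two, within [min, max]
  //   uintptr_t pool_base =
  //       /* a pool_size-aligned reservation provided by the embedder */;
  //   PartitionAddressSpace::InitConfigurablePool(pool_base, pool_size);
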
  static void UninitForTesting();
  static void UninitConfigurablePoolForTesting();

  static PA_ALWAYS_INLINE bool IsInitialized() {
    // The regular and BRP pools are initialized together: either both are set
    // up, or neither is. The configurable pool is initialized separately.
    if (setup_.regular_pool_) {
      PA_DCHECK(setup_.brp_pool_ != 0);
      return true;
    }
    PA_DCHECK(setup_.brp_pool_ == 0);
    return false;
  }

  static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
    return setup_.configurable_pool_base_address_ !=
           kUninitializedPoolBaseAddress;
  }

  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
    const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else
    constexpr uintptr_t regular_pool_base_mask = kRegularPoolBaseMask;
#endif
    return (address & regular_pool_base_mask) ==
           setup_.regular_pool_base_address_;
  }

  static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
    return setup_.regular_pool_base_address_;
  }

  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
    const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else
    constexpr uintptr_t brp_pool_base_mask = kBRPPoolBaseMask;
#endif
    return (address & brp_pool_base_mask) == setup_.brp_pool_base_address_;
  }

  // Returns false for nullptr.
  static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
    return (address & setup_.configurable_pool_base_mask_) ==
           setup_.configurable_pool_base_address_;
  }
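
  // How the mask checks above work (numbers are illustrative, not the real
  // pool placement): each pool has a power-of-two size and is aligned to that
  // size, so `address & base_mask` clears the offset bits and leaves the pool
  // base. For a hypothetical 4 GiB pool based at 0x7'0000'0000:
  //
  //   offset mask = 4 GiB - 1     = 0x0'FFFF'FFFF
  //   base mask   = ~offset mask
  //   0x7'1234'5678 & base mask   = 0x7'0000'0000  -> inside the pool
  //   0x8'0000'0000 & base mask   = 0x8'0000'0000  -> outside the pool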

  static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
    return setup_.configurable_pool_base_address_;
  }

  static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
    PA_DCHECK(IsInBRPPool(address));
    return address - setup_.brp_pool_base_address_;
  }

  // PartitionAddressSpace is a static-only class.
  PartitionAddressSpace() = delete;
  PartitionAddressSpace(const PartitionAddressSpace&) = delete;
  void* operator new(size_t) = delete;
  void* operator new(size_t, void*) = delete;

 private:
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  static PA_ALWAYS_INLINE size_t RegularPoolSize();
  static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else
  // The pool sizes should be as large as the maximum whenever possible.
  constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
    return kRegularPoolSize;
  }
  constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
    return kBRPPoolSize;
  }
#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

  // On 64-bit systems, GigaCage is split into disjoint pools. The BRP pool is
  // where all allocations have a BRP ref-count, so pointers into it can use
  // BRP protection against use-after-free. Allocations in the other pools
  // don't have that.
  //
  // Pool sizes have to be powers of two. Each pool will be aligned at its own
  // size boundary.
  //
  // NOTE! The BRP pool must be preceded by a reserved region, where
  // allocations are forbidden. This is to prevent a pointer immediately past a
  // non-GigaCage allocation from falling into the BRP pool, thus triggering
  // the BRP mechanism and likely crashing. This "forbidden zone" can be as
  // small as 1B, but it's simpler to just reserve an allocation granularity
  // unit.
  //
  // The ConfigurablePool is an optional Pool that can be created inside an
  // existing mapping by the embedder, and so will be outside of the GigaCage.
  // This Pool can be used when certain PA allocations must be located inside a
  // given virtual address region. One use case for this Pool is V8's virtual
  // memory cage, which requires that ArrayBuffers be located inside of it.
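
  // Illustration of the forbidden zone (addresses are made up): if the BRP
  // pool started at 0x8'0000'0000 and an unrelated mapping ended exactly
  // there, a one-past-the-end pointer into that mapping would have the value
  // 0x8'0000'0000 and would satisfy IsInBRPPool(), i.e. it would be treated
  // as if it pointed at an allocation with a ref-count. Reserving one
  // allocation granularity unit in front of the pool prevents any mapping
  // from ending at the pool base.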

  static constexpr size_t kRegularPoolSize = kPoolMaxSize;
  static constexpr size_t kBRPPoolSize = kPoolMaxSize;
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
                base::bits::IsPowerOfTwo(kBRPPoolSize));
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  // We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
  // crbug.com/1101421 and crbug.com/1217759).
  static constexpr size_t kRegularPoolSizeForLegacyWindows = 4 * kGiB;
  static constexpr size_t kBRPPoolSizeForLegacyWindows = 4 * kGiB;
  static_assert(kRegularPoolSizeForLegacyWindows < kRegularPoolSize);
  static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows) &&
                base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
#endif  // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
  static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
  static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);
  static_assert(base::bits::IsPowerOfTwo(kConfigurablePoolMaxSize) &&
                base::bits::IsPowerOfTwo(kConfigurablePoolMinSize));

#if BUILDFLAG(IS_IOS)
#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#error iOS is only supported with a dynamically sized GigaCage.
#endif
  // We can't afford pool sizes as large as kPoolMaxSize in iOS EarlGrey tests,
  // since the test process cannot use an extended virtual address space (see
  // crbug.com/1250788).
  static constexpr size_t kRegularPoolSizeForIOSTestProcess = kGiB / 4;
  static constexpr size_t kBRPPoolSizeForIOSTestProcess = kGiB / 4;
  static_assert(kRegularPoolSizeForIOSTestProcess < kRegularPoolSize);
  static_assert(kBRPPoolSizeForIOSTestProcess < kBRPPoolSize);
  static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForIOSTestProcess) &&
                base::bits::IsPowerOfTwo(kBRPPoolSizeForIOSTestProcess));
#endif  // BUILDFLAG(IS_IOS)

#if !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
  // Masks used to easily determine whether an address belongs to a pool.
  static constexpr uintptr_t kRegularPoolOffsetMask =
      static_cast<uintptr_t>(kRegularPoolSize) - 1;
  static constexpr uintptr_t kRegularPoolBaseMask = ~kRegularPoolOffsetMask;
  static constexpr uintptr_t kBRPPoolOffsetMask =
      static_cast<uintptr_t>(kBRPPoolSize) - 1;
  static constexpr uintptr_t kBRPPoolBaseMask = ~kBRPPoolOffsetMask;
#endif  // !defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

  // This must be set to such a value that IsIn*Pool() always returns false
  // when the pool isn't initialized.
  static constexpr uintptr_t kUninitializedPoolBaseAddress =
      static_cast<uintptr_t>(-1);

  struct GigaCageSetup {
    // Before PartitionAddressSpace::Init(), no allocations are made from a
    // reserved address space. Therefore, set *_pool_base_address_ initially to
    // -1, so that PartitionAddressSpace::IsIn*Pool() always returns false.
    constexpr GigaCageSetup()
        : regular_pool_base_address_(kUninitializedPoolBaseAddress),
          brp_pool_base_address_(kUninitializedPoolBaseAddress),
          configurable_pool_base_address_(kUninitializedPoolBaseAddress),
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
          regular_pool_base_mask_(0),
          brp_pool_base_mask_(0),
#endif
          configurable_pool_base_mask_(0),
          regular_pool_(0),
          brp_pool_(0),
          configurable_pool_(0) {
    }

    // Using a union to enforce padding.
    union {
      struct {
        uintptr_t regular_pool_base_address_;
        uintptr_t brp_pool_base_address_;
        uintptr_t configurable_pool_base_address_;
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
        uintptr_t regular_pool_base_mask_;
        uintptr_t brp_pool_base_mask_;
#endif
        uintptr_t configurable_pool_base_mask_;
        pool_handle regular_pool_;
        pool_handle brp_pool_;
        pool_handle configurable_pool_;
      };

      char one_cacheline_[kPartitionCachelineSize];
    };
  };
  static_assert(sizeof(GigaCageSetup) % kPartitionCachelineSize == 0,
                "GigaCageSetup has to fill a cacheline(s)");

  // See the comment describing the address layout above.
  //
  // These are write-once fields, frequently accessed thereafter. Make sure
  // they don't share a cacheline with other, potentially writeable data,
  // through alignment and padding.
  alignas(kPartitionCachelineSize) static GigaCageSetup setup_;
};

PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
    uintptr_t address) {
  return PartitionAddressSpace::GetPoolAndOffset(address);
}

PA_ALWAYS_INLINE pool_handle GetPool(uintptr_t address) {
  return std::get<0>(GetPoolAndOffset(address));
}

PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
  return PartitionAddressSpace::OffsetInBRPPool(address);
}

}  // namespace internal

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAlloc(uintptr_t address) {
  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(USE_BACKUP_REF_PTR)
  PA_DCHECK(!internal::PartitionAddressSpace::IsInBRPPool(address));
#endif
  return internal::PartitionAddressSpace::IsInRegularPool(address)
#if BUILDFLAG(USE_BACKUP_REF_PTR)
         || internal::PartitionAddressSpace::IsInBRPPool(address)
#endif
         || internal::PartitionAddressSpace::IsInConfigurablePool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocRegularPool(uintptr_t address) {
  return internal::PartitionAddressSpace::IsInRegularPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocBRPPool(uintptr_t address) {
  return internal::PartitionAddressSpace::IsInBRPPool(address);
}

// Returns false for nullptr.
PA_ALWAYS_INLINE bool IsManagedByPartitionAllocConfigurablePool(
    uintptr_t address) {
  return internal::PartitionAddressSpace::IsInConfigurablePool(address);
}

PA_ALWAYS_INLINE bool IsConfigurablePoolAvailable() {
  return internal::PartitionAddressSpace::IsConfigurablePoolInitialized();
}
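
// Caller-side sketch (hypothetical; `DescribeAddress` is not part of this
// header, it only shows how the predicates above compose):
//
//   const char* DescribeAddress(uintptr_t address) {
//     if (IsManagedByPartitionAllocBRPPool(address))
//       return "BRP pool";
//     if (IsManagedByPartitionAllocRegularPool(address))
//       return "regular pool";
//     if (IsManagedByPartitionAllocConfigurablePool(address))
//       return "configurable pool";
//     return "not managed by PartitionAlloc";
//   }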

}  // namespace partition_alloc

#endif  // defined(PA_HAS_64_BITS_POINTERS)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_