// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_

#include <cstddef>
#include <cstdint>

#include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_base/sys_byteorder.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "build/build_config.h"

namespace partition_alloc::internal {

namespace {

[[noreturn]] PA_NOINLINE void FreelistCorruptionDetected(size_t extra) {
  // Make it visible in minidumps.
  PA_DEBUG_DATA_ON_STACK("extra", extra);
  PA_IMMEDIATE_CRASH();
}

}  // namespace

class PartitionFreelistEntry;

class EncodedPartitionFreelistEntryPtr {
 private:
  explicit PA_ALWAYS_INLINE constexpr EncodedPartitionFreelistEntryPtr(
      std::nullptr_t)
      : encoded_(Transform(0)) {}
  explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
      // The encoded pointer stays MTE-tagged.
      : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}

  PA_ALWAYS_INLINE PartitionFreelistEntry* Decode() const {
    return reinterpret_cast<PartitionFreelistEntry*>(Transform(encoded_));
  }

  PA_ALWAYS_INLINE constexpr uintptr_t Inverted() const { return ~encoded_; }

  PA_ALWAYS_INLINE constexpr void Override(uintptr_t encoded) {
    encoded_ = encoded;
  }

  explicit PA_ALWAYS_INLINE constexpr operator bool() const {
    return encoded_;
  }

  // Transform() works the same in both directions, so it can be used for
  // both encoding and decoding.
  PA_ALWAYS_INLINE static constexpr uintptr_t Transform(uintptr_t address) {
    // We use bswap on little endian as a fast transformation for two reasons:
    // 1) On 64-bit architectures, the swapped pointer is very unlikely to be
    //    a canonical address. Therefore, if an object is freed and its vtable
    //    is used before the attacker gets a chance to run allocations between
    //    the free and the use, the vtable dereference is likely to fault.
    // 2) If the attacker has a linear buffer overflow and tries to corrupt a
    //    freelist pointer, partial pointer overwrite attacks are thwarted.
    // For big endian, similar guarantees are arrived at with a negation.
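    //
    // As a worked illustration (hypothetical address, not from the source):
    // on little-endian x86-64, Transform(0x00007ffedcba9876) byte-swaps to
    // 0x7698badcfe7f0000, which is not a canonical user-space address, so a
    // stale dereference through it is likely to fault rather than be usable.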
#if defined(ARCH_CPU_BIG_ENDIAN)
    uintptr_t transformed = ~address;
#else
    uintptr_t transformed = base::ByteSwapUintPtrT(address);
#endif
    return transformed;
  }

  uintptr_t encoded_;

  friend PartitionFreelistEntry;
};

// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
 private:
  explicit constexpr PartitionFreelistEntry(std::nullptr_t)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  explicit PartitionFreelistEntry(PartitionFreelistEntry* next)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(encoded_next_.Inverted())
#endif
  {
  }
  // For testing only.
  PartitionFreelistEntry(void* next, bool make_shadow_match)
      : encoded_next_(EncodedPartitionFreelistEntryPtr(next))
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
        ,
        shadow_(make_shadow_match ? encoded_next_.Inverted() : 12345)
#endif
  {
  }

 public:
  ~PartitionFreelistEntry() = delete;

  // Emplaces the freelist entry at the beginning of the given slot, and
  // initializes it as null-terminated.
  static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
      void* slot_start_tagged) {
    // |slot_start_tagged| is MTE-tagged.
    auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
    return entry;
  }
  static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
      uintptr_t slot_start) {
    return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
  }

  // Emplaces the freelist entry at the beginning of the given slot, and
  // initializes it with the given |next| pointer, in encoded form.
  //
  // This freelist is built for the purpose of the thread cache. This means we
  // can't check that |this| and the next pointer belong to the same super
  // page, as thread-cache spans may chain slots across super pages.
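  //
  // Hypothetical illustration (addresses not from the source): the thread
  // cache may link a slot at 0x7f0000000040 in one super page to a slot at
  // 0x7f0000200040 in another, which is why IsSane() skips the
  // same-super-page check for thread-cache freelists.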
  static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitForThreadCache(
      uintptr_t slot_start,
      PartitionFreelistEntry* next) {
    auto* entry =
        new (SlotStartAddr2Ptr(slot_start)) PartitionFreelistEntry(next);
    return entry;
  }

  // Emplaces the freelist entry at the beginning of the given slot, and
  // initializes it with the given |next| pointer.
  //
  // This is for testing purposes only! |make_shadow_match| allows you to
  // choose whether the shadow matches the next pointer properly, or is trash.
  static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
                                                     void* next,
                                                     bool make_shadow_match) {
    new (SlotStartAddr2Ptr(slot_start))
        PartitionFreelistEntry(next, make_shadow_match);
  }

  void CorruptNextForTesting(uintptr_t v) {
    // We just need a value that can never be a valid pointer here.
    encoded_next_.Override(EncodedPartitionFreelistEntryPtr::Transform(v));
  }

  // Puts |extra| on the stack before crashing in case of memory
  // corruption. Meant to be used to report the failed allocation size.
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
      size_t extra) const;
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t extra) const;

  PA_NOINLINE void CheckFreeList(size_t extra) const {
    for (auto* entry = this; entry; entry = entry->GetNext(extra)) {
      // |GetNext()| checks freelist integrity.
    }
  }

  PA_NOINLINE void CheckFreeListForThreadCache(size_t extra) const {
    for (auto* entry = this; entry;
         entry = entry->GetNextForThreadCache<true>(extra)) {
      // |GetNextForThreadCache()| checks freelist integrity.
    }
  }

  PA_ALWAYS_INLINE void SetNext(PartitionFreelistEntry* entry) {
    // SetNext() is either called on the freelist head when provisioning new
    // slots, or after GetNext() has already been called; there is thus no
    // need to pass the size.
#if BUILDFLAG(PA_DCHECK_IS_ON)
    // Regular freelists always point to an entry within the same super page.
    //
    // This is most likely a PartitionAlloc bug if this triggers.
    if (PA_UNLIKELY(entry &&
                    (SlotStartPtr2Addr(this) & kSuperPageBaseMask) !=
                        (SlotStartPtr2Addr(entry) & kSuperPageBaseMask))) {
      FreelistCorruptionDetected(0);
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON)
    encoded_next_ = EncodedPartitionFreelistEntryPtr(entry);
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = encoded_next_.Inverted();
#endif
  }

  // Zeroes out |this| before returning the slot. The pointer to this memory
  // will be returned to the user (the caller of Alloc()), so it can't hold
  // internal data.
  PA_ALWAYS_INLINE uintptr_t ClearForAllocation() {
    encoded_next_.Override(0);
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
    shadow_ = 0;
#endif
    return SlotStartPtr2Addr(this);
  }

  PA_ALWAYS_INLINE constexpr bool IsEncodedNextPtrZero() const {
    return !encoded_next_;
  }

 private:
  template <bool crash_on_corruption>
  PA_ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
      size_t extra,
      bool for_thread_cache) const;

  static PA_ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
                                      const PartitionFreelistEntry* next,
                                      bool for_thread_cache) {
    // Don't allow the freelist to be blindly followed to any location.
    // Checks two constraints:
    // - |here| and |next| must belong to the same super page (in fact,
    //   regular freelist entries always belong to the same slot span), unless
    //   this is in the thread cache.
    // - |next| cannot point inside the metadata area.
    //
    // Also checks the lightweight UaF detection (the pointer shadow).
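    //
    // For illustration, assuming 2 MiB super pages, kSuperPageBaseMask clears
    // the low 21 bits: hypothetical addresses 0x7f0000100040 and
    // 0x7f00001fffc0 both reduce to the base 0x7f0000000000 and pass the
    // same-super-page check, while 0x7f0000200040 does not.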
    uintptr_t here_address = SlotStartPtr2Addr(here);
    uintptr_t next_address = SlotStartPtr2Addr(next);

#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
    bool shadow_ptr_ok = here->encoded_next_.Inverted() == here->shadow_;
#else
    bool shadow_ptr_ok = true;
#endif

    bool same_superpage = (here_address & kSuperPageBaseMask) ==
                          (next_address & kSuperPageBaseMask);
    // This is necessary but not sufficient when quarantine is enabled, see
    // SuperPagePayloadBegin() in partition_page.h. However, we don't want to
    // fetch anything from the root in this function.
    bool not_in_metadata =
        (next_address & kSuperPageOffsetMask) >= PartitionPageSize();

    if (for_thread_cache)
      return shadow_ptr_ok & not_in_metadata;
    else
      return shadow_ptr_ok & same_superpage & not_in_metadata;
  }

  EncodedPartitionFreelistEntryPtr encoded_next_;
  // This is intended to detect unintentional corruptions of the freelist.
  // These can happen due to a use-after-free, or an overflow of the previous
  // allocation in the slot span.
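  //
  // As a hypothetical example: if |encoded_next_| holds 0x00007ffe00001230,
  // |shadow_| holds its bitwise complement 0xffff8001ffffedcf. A linear
  // overwrite that reaches |encoded_next_| but leaves |shadow_| stale fails
  // the Inverted() comparison in IsSane() on the next GetNext() call.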
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
  uintptr_t shadow_;
#endif
};
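
// A minimal usage sketch of the public API above (hypothetical slot-start
// addresses |slot_a| and |slot_b|, assumed to lie in the same super page;
// not part of this header):
//
//   auto* head = PartitionFreelistEntry::EmplaceAndInitNull(slot_a);
//   auto* second = PartitionFreelistEntry::EmplaceAndInitNull(slot_b);
//   head->SetNext(second);         // Stores |slot_b| encoded, updates shadow.
//   auto* next = head->GetNext(/*extra=*/0);  // Decodes and sanity-checks.
//   uintptr_t slot = next->ClearForAllocation();  // Zeroes the entry and
//                                                 // returns the slot address.
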
static_assert(kSmallestBucket >= sizeof(PartitionFreelistEntry),
              "Need enough space for freelist entries in the smallest slot");

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// The smallest bucket actually used. Note that the smallest request is 1 (if
// it's 0, it gets patched to 1), and that the ref-count gets added to it.
namespace {
constexpr size_t kSmallestUsedBucket =
    base::bits::AlignUp(1 + sizeof(PartitionRefCount), kSmallestBucket);
}  // namespace
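// For instance, assuming hypothetical values sizeof(PartitionRefCount) == 4
// and kSmallestBucket == 16 (neither is guaranteed by this header),
// AlignUp(1 + 4, 16) == 16, i.e. the smallest used bucket would be a single
// kSmallestBucket-sized slot.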
static_assert(kSmallestUsedBucket >=
                  sizeof(PartitionFreelistEntry) + sizeof(PartitionRefCount),
              "Need enough space for freelist entries and the ref-count in "
              "the smallest *used* slot");
#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextInternal(size_t extra,
                                        bool for_thread_cache) const {
  // GetNext() can be called on discarded memory, in which case
  // |encoded_next_| is 0, and none of the checks apply. Don't prefetch
  // nullptr either.
  if (IsEncodedNextPtrZero())
    return nullptr;

  auto* ret = encoded_next_.Decode();
  // We rely on constant propagation to remove the branches coming from
  // |for_thread_cache|, since the argument is always a compile-time constant.
  if (PA_UNLIKELY(!IsSane(this, ret, for_thread_cache))) {
    if constexpr (crash_on_corruption) {
      // Put the corrupted data on the stack; it may tell us more about the
      // kind of corruption that occurred.
      PA_DEBUG_DATA_ON_STACK("first",
                             static_cast<size_t>(encoded_next_.encoded_));
#if defined(PA_HAS_FREELIST_SHADOW_ENTRY)
      PA_DEBUG_DATA_ON_STACK("second", static_cast<size_t>(shadow_));
#endif
      FreelistCorruptionDetected(extra);
    } else {
      return nullptr;
    }
  }

  // In real-world profiles, the load of |encoded_next_| above is responsible
  // for a large fraction of the allocation cost. However, we cannot prefetch
  // it early enough, since its address is only known immediately before the
  // access.
  //
  // In the case of repeated allocations, we can instead prefetch the entry
  // that the *next* allocation will touch, namely *ret. So prefetch it now.
  PA_PREFETCH(ret);

  return ret;
}

template <bool crash_on_corruption>
PA_ALWAYS_INLINE PartitionFreelistEntry*
PartitionFreelistEntry::GetNextForThreadCache(size_t extra) const {
  return GetNextInternal<crash_on_corruption>(extra, true);
}

PA_ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
    size_t extra) const {
  return GetNextInternal<true>(extra, false);
}

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_