allocator_shim.cc

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <errno.h>

#include <atomic>
#include <new>

#include "base/allocator/buildflags.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/memory/page_size.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

#if !BUILDFLAG(IS_WIN)
#include <unistd.h>
#else
#include "base/allocator/winheap_stubs_win.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>

#include "base/allocator/allocator_interception_mac.h"
#include "base/mac/mach_logging.h"
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif

// No calls to malloc / new in this file. They would cause re-entrancy of
// the shim, which is hard to deal with. Keep this code as simple as possible
// and don't use any external C++ object here, not even //base ones. Even if
// they are safe to use today, in future they might be refactored.

namespace {

std::atomic<const base::allocator::AllocatorDispatch*> g_chain_head{
    &base::allocator::AllocatorDispatch::default_dispatch};

bool g_call_new_handler_on_malloc_failure = false;

ALWAYS_INLINE size_t GetCachedPageSize() {
  static size_t pagesize = 0;
  if (!pagesize)
    pagesize = base::GetPageSize();
  return pagesize;
}

// Calls the std::new handler thread-safely. Returns true if a new_handler was
// set and called, false if no new_handler was set.
bool CallNewHandler(size_t size) {
#if BUILDFLAG(IS_WIN)
  return base::allocator::WinCallNewHandler(size);
#else
  std::new_handler nh = std::get_new_handler();
  if (!nh)
    return false;
  (*nh)();
  // Assume the new_handler will abort if it fails. Exceptions are disabled and
  // we don't support the case of a new_handler throwing std::bad_alloc.
  return true;
#endif
}
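// Illustrative only (not code from this file): a process can install a
// handler via std::set_new_handler so that the retry loops below get a chance
// to recover memory. The helper named here is hypothetical.
//
//   std::set_new_handler([] {
//     if (!TryReleaseSomeCachedMemory())  // hypothetical reclaim helper
//       std::abort();  // a handler must make progress or terminate
//   });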
ALWAYS_INLINE const base::allocator::AllocatorDispatch* GetChainHead() {
  return g_chain_head.load(std::memory_order_relaxed);
}

}  // namespace

namespace base {
namespace allocator {

void SetCallNewHandlerOnMallocFailure(bool value) {
  g_call_new_handler_on_malloc_failure = value;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  base::internal::PartitionAllocSetCallNewHandlerOnMallocFailure(value);
#endif
}
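// Illustrative usage (the real call sites live outside this file): embedders
// that want malloc()-family failures to go through the C++ new_handler retry
// logic in the Shim* functions below opt in explicitly:
//
//   base::allocator::SetCallNewHandlerOnMallocFailure(true);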
void* UncheckedAlloc(size_t size) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, nullptr);
}

void UncheckedFree(void* ptr) {
  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, ptr, nullptr);
}

void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
  // Loop in case of (an unlikely) race on setting the list head.
  constexpr size_t kMaxRetries = 7;
  for (size_t i = 0; i < kMaxRetries; ++i) {
    const AllocatorDispatch* chain_head = GetChainHead();
    dispatch->next = chain_head;

    // This function guarantees to be thread-safe w.r.t. concurrent
    // insertions. It also has to guarantee that all the threads always
    // see a consistent chain, hence the atomic_thread_fence() below.
    // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
    // we don't really want this to be a release-store with a corresponding
    // acquire-load during malloc().
    std::atomic_thread_fence(std::memory_order_seq_cst);

    // Set the chain head to the new dispatch atomically. If we lose the race,
    // retry.
    if (g_chain_head.compare_exchange_strong(chain_head, dispatch,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed)) {
      // Success.
      return;
    }
  }

  CHECK(false);  // Too many retries, this shouldn't happen.
}
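// Illustrative sketch (all names hypothetical, not part of this file): a
// dispatch hook that forwards to the next entry in the chain, e.g. to count
// allocations. Every hook must forward, or the chain below it is bypassed.
//
//   void* CountingAlloc(const AllocatorDispatch* self,
//                       size_t size,
//                       void* context) {
//     g_alloc_count.fetch_add(1, std::memory_order_relaxed);  // hypothetical
//     return self->next->alloc_function(self->next, size, context);
//   }
//   AllocatorDispatch g_counting_dispatch = {&CountingAlloc, /* ... */};
//   base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);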
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
  DCHECK_EQ(GetChainHead(), dispatch);
  g_chain_head.store(dispatch->next, std::memory_order_relaxed);
}

}  // namespace allocator
}  // namespace base

// The Shim* functions below are the entry-points into the shim-layer and
// are supposed to be invoked by the allocator_shim_override_*
// headers to route the malloc / new symbols through the shim layer.
// They are defined as ALWAYS_INLINE in order to remove a level of indirection
// between the system-defined entry points and the shim implementations.
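// For instance, an override header may define something along these lines
// (illustrative sketch of the pattern; the exact macros and declarations live
// in the allocator_shim_override_* headers):
//
//   SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW {
//     return ShimMalloc(size, nullptr);
//   }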
extern "C" {

// The general pattern for allocations is:
// - Try to allocate; if it succeeded, return the pointer.
// - If the allocation failed:
//   - Call the std::new_handler if it was a C++ allocation.
//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
//     AND SetCallNewHandlerOnMallocFailure(true) has been called.
//   - If the std::new_handler is NOT set just return nullptr.
//   - If the std::new_handler is set:
//     - Assume it will abort() if it fails (very likely the new_handler will
//       just terminate the process, printing a message).
//     - Assume it did succeed if it returns, in which case reattempt the alloc.

ALWAYS_INLINE void* ShimCppNew(size_t size) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCppNewNoThrow(size_t size) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->alloc_unchecked_function(chain_head, size, context);
}

ALWAYS_INLINE void* ShimCppAlignedNew(size_t size, size_t alignment) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
    context = malloc_default_zone();
#endif
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimCppDelete(void* address) {
  void* context = nullptr;
#if BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  context = malloc_default_zone();
#endif
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE void* ShimMalloc(size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_function(chain_head, size, context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimCalloc(size_t n, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size,
                                                      context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimRealloc(void* address, size_t size, void* context) {
  // realloc(size == 0) means free() and might return a nullptr. We should
  // not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->realloc_function(chain_head, address, size, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimMemalign(size_t alignment, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr;
  do {
    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size,
                                             context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
  // in tc_malloc.cc.
  if (((alignment % sizeof(void*)) != 0) ||
      !base::bits::IsPowerOfTwo(alignment)) {
    return EINVAL;
  }
  void* ptr = ShimMemalign(alignment, size, nullptr);
  *res = ptr;
  return ptr ? 0 : ENOMEM;
}
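// Illustrative of the contract above: an alignment must be a power of two and
// a multiple of sizeof(void*), and failures are reported via the return value
// rather than errno:
//
//   void* p = nullptr;
//   ShimPosixMemalign(&p, 3, 128);   // EINVAL: 3 is not a power of two
//   ShimPosixMemalign(&p, 64, 128);  // 0: p points at a 64-byte-aligned block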
ALWAYS_INLINE void* ShimValloc(size_t size, void* context) {
  return ShimMemalign(GetCachedPageSize(), size, context);
}

ALWAYS_INLINE void* ShimPvalloc(size_t size) {
  // pvalloc(0) should allocate one page, according to its man page.
  if (size == 0) {
    size = GetCachedPageSize();
  } else {
    size = base::bits::AlignUp(size, GetCachedPageSize());
  }
  // The third argument is nullptr because pvalloc is glibc only and does not
  // exist on OSX/BSD systems.
  return ShimMemalign(GetCachedPageSize(), size, nullptr);
}
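// Worked example of the rounding above, assuming 4096-byte pages:
//   pvalloc(0)     -> ShimMemalign(4096, 4096, nullptr)  (one full page)
//   pvalloc(1)     -> ShimMemalign(4096, 4096, nullptr)
//   pvalloc(4097)  -> ShimMemalign(4096, 8192, nullptr)  (rounded up)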
ALWAYS_INLINE void ShimFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_function(chain_head, address, context);
}

ALWAYS_INLINE size_t ShimGetSizeEstimate(const void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->get_size_estimate_function(
      chain_head, const_cast<void*>(address), context);
}

ALWAYS_INLINE unsigned ShimBatchMalloc(size_t size,
                                       void** results,
                                       unsigned num_requested,
                                       void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_malloc_function(chain_head, size, results,
                                           num_requested, context);
}

ALWAYS_INLINE void ShimBatchFree(void** to_be_freed,
                                 unsigned num_to_be_freed,
                                 void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->batch_free_function(chain_head, to_be_freed,
                                         num_to_be_freed, context);
}

ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->free_definite_size_function(chain_head, ptr, size,
                                                 context);
}

ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_malloc_function(chain_head, size, alignment,
                                              context);
  } while (!ptr && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  // _aligned_realloc(size == 0) means _aligned_free() and might return a
  // nullptr. We should not call the std::new_handler in that case, though.
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  void* ptr = nullptr;
  do {
    ptr = chain_head->aligned_realloc_function(chain_head, address, size,
                                               alignment, context);
  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
           CallNewHandler(size));
  return ptr;
}

ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const base::allocator::AllocatorDispatch* const chain_head = GetChainHead();
  return chain_head->aligned_free_function(chain_head, address, context);
}

}  // extern "C"

#if !BUILDFLAG(IS_WIN) && \
    !(BUILDFLAG(IS_APPLE) && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC))
// Cpp symbols (new / delete) should always be routed through the shim layer,
// except on Windows and macOS (unless PartitionAlloc-Everywhere is enabled),
// where the malloc intercept is deep enough that it also catches the cpp
// calls.
//
// In case of PartitionAlloc-Everywhere on macOS, malloc backed by
// base::internal::PartitionMalloc crashes on OOM, and we need to avoid crashes
// in case of operator new() noexcept. Thus, operator new() noexcept needs to
// be routed to base::internal::PartitionMallocUnchecked through the shim layer.
#include "base/allocator/allocator_shim_override_cpp_symbols.h"
#endif

#if BUILDFLAG(IS_ANDROID)
// Android does not support symbol interposition. The way malloc symbols are
// intercepted on Android is by using link-time -wrap flags.
#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
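// Sketch of that mechanism (build configuration, not code from this file):
// passing flags such as
//
//   ldflags = [ "-Wl,--wrap=malloc", "-Wl,--wrap=free" ]  // illustrative
//
// makes the linker rewrite calls to malloc() into __wrap_malloc(), which the
// wrapped-symbols header routes into ShimMalloc(); __real_malloc() remains
// available to reach the system allocator.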
#elif BUILDFLAG(IS_WIN)
// On Windows we use plain link-time overriding of the CRT symbols.
#include "base/allocator/allocator_shim_override_ucrt_symbols_win.h"
#elif BUILDFLAG(IS_APPLE)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_default_zone.h"
#else  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_mac_symbols.h"
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#else
#include "base/allocator/allocator_shim_override_libc_symbols.h"
#endif

// Some glibc versions (until commit 6c444ad6e953dbdf9c7be065308a0a777)
// incorrectly call __libc_memalign() to allocate memory (see elf/dl-tls.c in
// glibc 2.23 for instance), and free() to free it. This causes issues for us,
// as we are then asked to free memory we didn't allocate.
//
// This only happened in glibc to allocate TLS storage metadata, and there are
// no other callers of __libc_memalign() there as of September 2020. To work
// around this issue, intercept this internal libc symbol to make sure that
// both the allocation and the free() are caught by the shim.
//
// This seems fragile, and is, but there is ample precedent for it, making it
// quite likely to keep working in the future. For instance, LLVM for LSAN uses
// this mechanism.
#if defined(LIBC_GLIBC) && BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
#endif

#if BUILDFLAG(IS_APPLE)
namespace base {
namespace allocator {

void InitializeAllocatorShim() {
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Prepares the default dispatch. After the intercepted malloc calls have
  // traversed the shim this will route them to the default malloc zone.
  InitializeDefaultDispatchToMacAllocator();

  MallocZoneFunctions functions = MallocZoneFunctionsToReplaceDefault();

  // This replaces the default malloc zone, causing calls to malloc & friends
  // from the codebase to be routed to ShimMalloc() above.
  base::allocator::ReplaceFunctionsForStoredZones(&functions);
#endif  // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
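// Illustrative usage (the real call site lives outside this file): the shim
// is assumed to be initialized as early as possible during startup, before
// other threads allocate through the zone being replaced:
//
//   int main(int argc, char** argv) {
//     base::allocator::InitializeAllocatorShim();  // macOS only
//     // ... rest of startup ...
//   }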
}  // namespace allocator
}  // namespace base
#endif

// Cross-checks.

#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#error The allocator shim should not be compiled when building for memory tools.
#endif

#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
    (defined(_MSC_VER) && defined(_CPPUNWIND))
#error This code cannot be used when exceptions are turned on.
#endif