// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"

#include <stdio.h>  // For snprintf().
#include <string.h>

#include <algorithm>
#include <iterator>
#include <ostream>

#include "base/atomicops.h"
#include "base/check_op.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/no_destructor.h"
#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"  // For base::checked_cast.
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#include <sys/prctl.h>
#endif

namespace base {
namespace trace_event {
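
// |capture_mode_| is written rarely via SetCaptureMode() and read on the
// allocation hot path, e.g. with a relaxed load in GetContextSnapshot().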
std::atomic<AllocationContextTracker::CaptureMode>
    AllocationContextTracker::capture_mode_{
        AllocationContextTracker::CaptureMode::DISABLED};

namespace {

const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
  static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
      &DestructAllocationContextTracker);
  return *tls_alloc_ctx_tracker;
}

// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
// a deadlock when the lock is already held by ThreadIdNameManager before the
// current allocation. Gets the thread name from the kernel if available, or
// returns a string with the thread id. This function intentionally leaks the
// allocated strings since they are used to tag allocations even after the
// thread dies.
const char* GetAndLeakThreadName() {
  char name[16];
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
  // If the thread name is not set, try to get it from prctl. The thread name
  // might not be set in cases where the thread started before heap profiling
  // was enabled.
  int err = prctl(PR_GET_NAME, name);
  if (!err) {
    return strdup(name);
  }
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
        // BUILDFLAG(IS_ANDROID)

  // Use the tid if we don't have a thread name.
  snprintf(name, sizeof(name), "%lu",
           static_cast<unsigned long>(PlatformThread::CurrentId()));
  return strdup(name);
}

}  // namespace

// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
  AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
      AllocationContextTrackerTLS().Get());
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (!tracker) {
    AllocationContextTrackerTLS().Set(kInitializingSentinel);
    tracker = new AllocationContextTracker();
    AllocationContextTrackerTLS().Set(tracker);
  }

  return tracker;
}
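
// The pseudo stack and the task-context stack reserve their capacity up front
// so pushes on the allocation path normally do not reallocate. The task-context
// stack is seeded with the "UntrackedTask" placeholder so that it is never
// empty (see PopCurrentTaskContext).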
AllocationContextTracker::AllocationContextTracker() {
  tracked_stack_.reserve(kMaxStackDepth);
  task_contexts_.reserve(kMaxTaskDepth);
  task_contexts_.push_back("UntrackedTask");
}

AllocationContextTracker::~AllocationContextTracker() = default;

// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
  if (name && capture_mode() != CaptureMode::DISABLED) {
    GetInstanceForCurrentThread()->thread_name_ = name;
  }
}

// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // Release ordering ensures that when a thread observes a non-DISABLED
  // |capture_mode_| through an acquire load, the TLS slot has been
  // initialized.
  capture_mode_.store(mode, std::memory_order_release);
}
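
// The tracked pseudo stack is capped at kMaxStackDepth entries; a push beyond
// that limit is dropped and reported via NOTREACHED(). A pop is checked
// against the recorded program counter in debug builds.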
void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
  if (tracked_stack_.size() < kMaxStackDepth)
    tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
  else
    NOTREACHED();
}

void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
  if (tracked_stack_.empty())
    return;

  DCHECK_EQ(pc, tracked_stack_.back().value);
  tracked_stack_.pop_back();
}
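
// Task contexts are string literals naming the task currently being executed.
// The current context is recorded as |type_name| in GetContextSnapshot() via
// TaskContext(); contexts beyond kMaxTaskDepth are dropped.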
void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
  DCHECK(context);
  if (task_contexts_.size() < kMaxTaskDepth)
    task_contexts_.push_back(context);
  else
    NOTREACHED();
}

void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
  // scope, the context was never pushed, so it is possible that pop is called
  // on an empty stack. Note that the context stack always contains
  // "UntrackedTask".
  if (task_contexts_.size() == 1)
    return;

  DCHECK_EQ(context, task_contexts_.back())
      << "Encountered an unmatched context end";
  task_contexts_.pop_back();
}
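
// Fills |ctx| with a backtrace (the thread-name pseudo frame, plus native
// frames when the capture mode is NATIVE_STACK, ordered from main() outward)
// and the current task context. Returns false while an ignore scope is active,
// e.g. during the thread-name allocation made by this function itself.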
bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
  if (ignore_scope_depth_)
    return false;

  CaptureMode mode = capture_mode_.load(std::memory_order_relaxed);

  auto* backtrace = std::begin(ctx->backtrace.frames);
#if !BUILDFLAG(IS_NACL)
  auto* backtrace_end = std::end(ctx->backtrace.frames);
#endif

  if (!thread_name_) {
    // Ignore the string allocation made by GetAndLeakThreadName to avoid
    // reentrancy.
    ignore_scope_depth_++;
    thread_name_ = GetAndLeakThreadName();
    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
    DCHECK(thread_name_);
    ignore_scope_depth_--;
  }

  // Add the thread name as the first entry in the pseudo stack.
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED: {
      break;
    }

    case CaptureMode::NATIVE_STACK: {
      // Backtrace contract requires us to return bottom frames, i.e.
      // from main() and up. Stack unwinding produces top frames, i.e.
      // from this point and up until main(). We intentionally request
      // kMaxFrameCount + 1 frames, so that we know if there are more frames
      // than our backtrace capacity.
#if !BUILDFLAG(IS_NACL)  // We don't build base/debug/stack_trace.cc for NaCl.
#if BUILDFLAG(IS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
      const void* frames[Backtrace::kMaxFrameCount + 1];
      static_assert(std::size(frames) >= Backtrace::kMaxFrameCount,
                    "not requesting enough frames to fill Backtrace");
      size_t frame_count =
          CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
              frames, std::size(frames));
#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
      const void* frames[Backtrace::kMaxFrameCount + 1];
      static_assert(std::size(frames) >= Backtrace::kMaxFrameCount,
                    "not requesting enough frames to fill Backtrace");
      size_t frame_count = debug::TraceStackFramePointers(
          frames, std::size(frames),
          1 /* exclude this function from the trace */);
#else
      // Fall back to capturing the stack with base::debug::StackTrace,
      // which is likely slower, but more reliable.
      base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
      size_t frame_count = 0u;
      const void* const* frames = stack_trace.Addresses(&frame_count);
#endif

      // If there are too many frames, keep the ones furthest from main().
      ptrdiff_t backtrace_capacity = backtrace_end - backtrace;
      ptrdiff_t starting_frame_index =
          base::checked_cast<ptrdiff_t>(frame_count);
      if (starting_frame_index > backtrace_capacity) {
        starting_frame_index = backtrace_capacity - 1;
        *backtrace++ = StackFrame::FromProgramCounter(nullptr);
      }
      for (ptrdiff_t i = starting_frame_index - 1; i >= 0; --i) {
        const void* frame = frames[i];
        *backtrace++ = StackFrame::FromProgramCounter(frame);
      }
#endif  // !BUILDFLAG(IS_NACL)
      break;
    }
  }

  ctx->backtrace.frame_count =
      static_cast<size_t>(backtrace - std::begin(ctx->backtrace.frames));

  ctx->type_name = TaskContext();

  return true;
}

}  // namespace trace_event
}  // namespace base