stack_sampler_impl.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_sampler_impl.h"

#include <iterator>
#include <utility>

#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr.h"
#include "base/metrics/histogram_functions.h"
#include "base/numerics/safe_conversions.h"
#include "base/profiler/metadata_recorder.h"
#include "base/profiler/profile_builder.h"
#include "base/profiler/sample_metadata.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_copier.h"
#include "base/profiler/suspendable_thread_delegate.h"
#include "base/profiler/unwinder.h"
#include "base/ranges/algorithm.h"

// IMPORTANT NOTE: Some functions within this implementation are invoked while
// the target thread is suspended, so they must not do any allocation from the
// heap, including indirectly via use of DCHECK/CHECK or other logging
// statements. Otherwise this code can deadlock on heap locks acquired by the
// target thread before it was suspended. These functions are commented with
// "NO HEAP ALLOCATIONS".

namespace base {

namespace {

// Notifies the unwinders about the stack capture, and records metadata, while
// the thread is suspended.
class StackCopierDelegate : public StackCopier::Delegate {
 public:
  StackCopierDelegate(
      const base::circular_deque<std::unique_ptr<Unwinder>>* unwinders,
      ProfileBuilder* profile_builder,
      MetadataRecorder::MetadataProvider* metadata_provider)
      : unwinders_(unwinders),
        profile_builder_(profile_builder),
        metadata_provider_(metadata_provider) {}

  StackCopierDelegate(const StackCopierDelegate&) = delete;
  StackCopierDelegate& operator=(const StackCopierDelegate&) = delete;

  // StackCopier::Delegate:
  // IMPORTANT NOTE: to avoid deadlock this function must not invoke any
  // non-reentrant code that is also invoked by the target thread. In
  // particular, it may not perform any heap allocation or deallocation,
  // including indirectly via use of DCHECK/CHECK or other logging statements.
  void OnStackCopy() override {
    for (const auto& unwinder : *unwinders_)
      unwinder->OnStackCapture();

    profile_builder_->RecordMetadata(*metadata_provider_);
  }

 private:
  raw_ptr<const base::circular_deque<std::unique_ptr<Unwinder>>> unwinders_;
  const raw_ptr<ProfileBuilder> profile_builder_;
  const raw_ptr<const MetadataRecorder::MetadataProvider> metadata_provider_;
};

}  // namespace

StackSamplerImpl::StackSamplerImpl(std::unique_ptr<StackCopier> stack_copier,
                                   UnwindersFactory core_unwinders_factory,
                                   ModuleCache* module_cache,
                                   RepeatingClosure record_sample_callback,
                                   StackSamplerTestDelegate* test_delegate)
    : stack_copier_(std::move(stack_copier)),
      unwinders_factory_(std::move(core_unwinders_factory)),
      module_cache_(module_cache),
      record_sample_callback_(std::move(record_sample_callback)),
      test_delegate_(test_delegate) {
  DCHECK(unwinders_factory_);
}

StackSamplerImpl::~StackSamplerImpl() = default;

void StackSamplerImpl::Initialize() {
  std::vector<std::unique_ptr<Unwinder>> unwinders =
      std::move(unwinders_factory_).Run();

  // |unwinders| is iterated backward since |unwinders_factory_| generates
  // unwinders in increasing priority order. |unwinders_| is stored in
  // decreasing priority order for ease of use within the class.
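  // For example (unwinder names illustrative only): a factory returning
  // {native unwinder, aux unwinder} in increasing priority order results in
  // |unwinders_| = {aux unwinder, native unwinder}, so higher-priority
  // unwinders are consulted first during the stack walk.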
  unwinders_.insert(unwinders_.end(),
                    std::make_move_iterator(unwinders.rbegin()),
                    std::make_move_iterator(unwinders.rend()));

  for (const auto& unwinder : unwinders_)
    unwinder->Initialize(module_cache_);

  was_initialized_ = true;
}

void StackSamplerImpl::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
  // Initialize() invokes Initialize() on the unwinders that are present at
  // the time. If it hasn't run yet it will initialize this unwinder as well;
  // otherwise we do it here.
  if (was_initialized_)
    unwinder->Initialize(module_cache_);
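
  // |unwinders_| is kept in decreasing priority order, so placing the aux
  // unwinder at the front gives it the highest priority.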
  unwinders_.push_front(std::move(unwinder));
}

void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer,
                                         ProfileBuilder* profile_builder,
                                         PlatformThreadId thread_id) {
  DCHECK(stack_buffer);

  if (record_sample_callback_)
    record_sample_callback_.Run();

  RegisterContext thread_context;
  uintptr_t stack_top;
  TimeTicks timestamp;

  bool copy_stack_succeeded;
  {
    // Make this scope as small as possible because |metadata_provider| is
    // holding a lock.
    MetadataRecorder::MetadataProvider metadata_provider(
        GetSampleMetadataRecorder(), thread_id);
    StackCopierDelegate delegate(&unwinders_, profile_builder,
                                 &metadata_provider);
    copy_stack_succeeded = stack_copier_->CopyStack(
        stack_buffer, &stack_top, &timestamp, &thread_context, &delegate);
  }
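  // If the stack copy failed, record an empty sample, falling back to the
  // current time if the copier did not provide a timestamp.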
  if (!copy_stack_succeeded) {
    profile_builder->OnSampleCompleted(
        {}, timestamp.is_null() ? TimeTicks::Now() : timestamp);
    return;
  }
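
  // Give each unwinder a chance to refresh its module state before walking
  // the stack.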
  for (const auto& unwinder : unwinders_)
    unwinder->UpdateModules();

  if (test_delegate_)
    test_delegate_->OnPreStackWalk();

  profile_builder->OnSampleCompleted(
      WalkStack(module_cache_, &thread_context, stack_top, unwinders_),
      timestamp);

#if BUILDFLAG(IS_CHROMEOS)
  ptrdiff_t stack_size = reinterpret_cast<uint8_t*>(stack_top) -
                         reinterpret_cast<uint8_t*>(stack_buffer->buffer());
  constexpr int kBytesPerKilobyte = 1024;
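  // Downsample: only one in every kUMAHistogramDownsampleAmount samples is
  // recorded to the histogram.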
  if ((++stack_size_histogram_sampling_counter_ %
       kUMAHistogramDownsampleAmount) == 0) {
    // Record the size of the stack to tune kLargeStackSize.
    UmaHistogramMemoryKB("Memory.StackSamplingProfiler.StackSampleSize",
                         saturated_cast<int>(stack_size / kBytesPerKilobyte));
  }
  // We expect to very rarely see stacks larger than kLargeStackSize. If we
  // see a stack larger than kLargeStackSize, we tell the kernel to discard
  // the contents of the buffer (using madvise(MADV_DONTNEED)) after the first
  // kLargeStackSize bytes to avoid permanently allocating memory that we
  // won't use again. We don't want kLargeStackSize to be too small, however;
  // if we are constantly calling madvise(MADV_DONTNEED) and then writing to
  // the same parts of the buffer, we're not saving memory and we'll cause
  // extra page faults.
  constexpr ptrdiff_t kLargeStackSize = 512 * kBytesPerKilobyte;
  if (stack_size > kLargeStackSize) {
    stack_buffer->MarkUpperBufferContentsAsUnneeded(kLargeStackSize);
  }
#endif  // #if BUILDFLAG(IS_CHROMEOS)
}

// static
std::vector<Frame> StackSamplerImpl::WalkStackForTesting(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  return WalkStack(module_cache, thread_context, stack_top, unwinders);
}

// static
std::vector<Frame> StackSamplerImpl::WalkStack(
    ModuleCache* module_cache,
    RegisterContext* thread_context,
    uintptr_t stack_top,
    const base::circular_deque<std::unique_ptr<Unwinder>>& unwinders) {
  std::vector<Frame> stack;
  // Reserve enough memory for most stacks, to avoid repeated
  // allocations. Approximately 99.9% of recorded stacks are 128 frames or
  // fewer.
  stack.reserve(128);

  // Record the first frame from the context values.
  stack.emplace_back(RegisterContextInstructionPointer(thread_context),
                     module_cache->GetModuleForAddress(
                         RegisterContextInstructionPointer(thread_context)));

  size_t prior_stack_size;
  UnwindResult result;
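  // Repeatedly delegate to the highest-priority unwinder that can unwind from
  // the current frame, stopping when the walk completes, is aborted, or fails
  // to make progress.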
  do {
    // Choose an authoritative unwinder for the current module. Use the first
    // unwinder that thinks it can unwind from the current frame.
    auto unwinder = ranges::find_if(
        unwinders, [&stack](const std::unique_ptr<Unwinder>& unwinder) {
          return unwinder->CanUnwindFrom(stack.back());
        });
    if (unwinder == unwinders.end())
      return stack;

    prior_stack_size = stack.size();
    result = unwinder->get()->TryUnwind(thread_context, stack_top, &stack);

    // The unwinder with the lowest priority should be the only one that
    // returns COMPLETED since the stack starts in native code.
    DCHECK(result != UnwindResult::kCompleted ||
           unwinder->get() == unwinders.back().get());
  } while (result != UnwindResult::kAborted &&
           result != UnwindResult::kCompleted &&
           // Give up if the authoritative unwinder for the module was unable
           // to unwind.
           stack.size() > prior_stack_size);

  return stack;
}

}  // namespace base