stack_sampling_profiler.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_sampling_profiler.h"

#include <algorithm>
#include <cmath>
#include <map>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/atomicops.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/singleton.h"
#include "base/profiler/profiler_buildflags.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_sampler.h"
#include "base/profiler/unwinder.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/thread_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

#if BUILDFLAG(IS_WIN)
#include "base/win/static_constants.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/mac/mac_util.h"
#endif

namespace base {
// Allows StackSamplingProfiler to recall a thread which should already pretty
// much be dead (thus it should be a fast Join()).
class ScopedAllowThreadRecallForStackSamplingProfiler
    : public ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {};

namespace {

// This value is used to initialize the WaitableEvent object. This MUST BE set
// to MANUAL for correct operation of the IsSignaled() call in Start(). See the
// comment there for why.
constexpr WaitableEvent::ResetPolicy kResetPolicy =
    WaitableEvent::ResetPolicy::MANUAL;

// This value is used when there is no collection in progress and thus no ID
// for referencing the active collection to the SamplingThread.
const int kNullProfilerId = -1;

TimeTicks GetNextSampleTimeImpl(TimeTicks scheduled_current_sample_time,
                                TimeDelta sampling_interval,
                                TimeTicks now) {
  // Schedule the next sample at the next sampling_interval-aligned time in
  // the future that's sufficiently far from the current sample. In the
  // general case this will be one sampling_interval from the current
  // sample. In cases where sample tasks were unable to be executed, such as
  // during system suspend or bad system-wide jank, we may have missed some
  // samples. The right thing to do for those cases is to skip the missed
  // samples, since the rest of the system also wasn't executing.

  // Ensure that the next sample time is at least half a sampling interval
  // away. This causes the second sample after resume to be taken between 0.5
  // and 1.5 samples after the first, or 1 sample interval on average. The
  // delay also serves to provide a grace period in the normal sampling case
  // where the current sample may be taken slightly later than its scheduled
  // time.
  const TimeTicks earliest_next_sample_time = now + sampling_interval / 2;

  const TimeDelta minimum_time_delta_to_next_sample =
      earliest_next_sample_time - scheduled_current_sample_time;

  // The minimum number of sampling intervals required to get from the
  // scheduled current sample time to the earliest next sample time.
  const int64_t required_sampling_intervals = static_cast<int64_t>(
      std::ceil(minimum_time_delta_to_next_sample / sampling_interval));
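  // Worked example (illustrative): with a 100 ms sampling_interval, a current
  // sample scheduled at t=0 and now=250 ms, earliest_next_sample_time is
  // 300 ms, so three intervals are required and the next sample is scheduled
  // at t=300 ms, staying aligned with the original schedule.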
  return scheduled_current_sample_time +
         required_sampling_intervals * sampling_interval;
}

}  // namespace
// StackSamplingProfiler::SamplingThread --------------------------------------

class StackSamplingProfiler::SamplingThread : public Thread {
 public:
  class TestPeer {
   public:
    // Reset the existing sampler. This will unfortunately create the object
    // unnecessarily if it doesn't already exist but there's no way around
    // that.
    static void Reset();

    // Disables inherent idle-shutdown behavior.
    static void DisableIdleShutdown();

    // Begins an idle shutdown as if the idle-timer had expired and waits for
    // it to execute. Since the timer would have only been started at a time
    // when the sampling thread actually was idle, this must be called only
    // when it is known that there are no active sampling threads. If
    // |simulate_intervening_add| is true then, when executed, the shutdown
    // task will believe that a new collection has been added since it was
    // posted.
    static void ShutdownAssumingIdle(bool simulate_intervening_add);

   private:
    // Calls the sampling thread's ShutdownTask and then signals an event.
    static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
                                           int add_events,
                                           WaitableEvent* event);
  };
  struct CollectionContext {
    CollectionContext(PlatformThreadId thread_id,
                      const SamplingParams& params,
                      WaitableEvent* finished,
                      std::unique_ptr<StackSampler> sampler,
                      std::unique_ptr<ProfileBuilder> profile_builder)
        : collection_id(next_collection_id.GetNext()),
          thread_id(thread_id),
          params(params),
          finished(finished),
          profile_builder(std::move(profile_builder)),
          sampler(std::move(sampler)) {}
    ~CollectionContext() = default;

    // An identifier for this collection, used to uniquely identify the
    // collection to outside interests.
    const int collection_id;

    const PlatformThreadId thread_id;  // Thread id of the sampled thread.

    const SamplingParams params;  // Information about how to sample.
    const raw_ptr<WaitableEvent>
        finished;  // Signaled when all sampling complete.

    // Receives the sampling data and builds a CallStackProfile.
    std::unique_ptr<ProfileBuilder> profile_builder;

    // Platform-specific module that does the actual sampling.
    std::unique_ptr<StackSampler> sampler;

    // The absolute time for the next sample.
    TimeTicks next_sample_time;

    // The time that a profile was started, for calculating the total duration.
    TimeTicks profile_start_time;

    // Counter that indicates the current sample position along the
    // acquisition.
    int sample_count = 0;

    // Sequence number for generating new collection ids.
    static AtomicSequenceNumber next_collection_id;
  };
  // Gets the single instance of this class.
  static SamplingThread* GetInstance();

  SamplingThread(const SamplingThread&) = delete;
  SamplingThread& operator=(const SamplingThread&) = delete;

  // Adds a new CollectionContext to the thread. This can be called externally
  // from any thread. This returns a collection id that can later be used to
  // stop the sampling.
  int Add(std::unique_ptr<CollectionContext> collection);

  // Adds an auxiliary unwinder to be used for the collection, to handle
  // additional, non-native-code unwind scenarios.
  void AddAuxUnwinder(int collection_id, std::unique_ptr<Unwinder> unwinder);

  // Applies the metadata to already recorded samples in all collections.
  void ApplyMetadataToPastSamples(base::TimeTicks period_start,
                                  base::TimeTicks period_end,
                                  uint64_t name_hash,
                                  absl::optional<int64_t> key,
                                  int64_t value,
                                  absl::optional<PlatformThreadId> thread_id);

  // Removes an active collection based on its collection id, forcing it to run
  // its callback if any data has been collected. This can be called externally
  // from any thread.
  void Remove(int collection_id);

 private:
  friend struct DefaultSingletonTraits<SamplingThread>;

  // The different states in which the sampling-thread can be.
  enum ThreadExecutionState {
    // The thread is not running because it has never been started. It will be
    // started when a sampling request is received.
    NOT_STARTED,

    // The thread is running and processing tasks. This is the state when any
    // sampling requests are active and during the "idle" period afterward
    // before the thread is stopped.
    RUNNING,

    // Once all sampling requests have finished and the "idle" period has
    // expired, the thread will be set to this state and its shutdown
    // initiated. A call to Stop() must be made to ensure the previous thread
    // has completely exited before calling Start() and moving back to the
    // RUNNING state.
    EXITING,
  };
  SamplingThread();
  ~SamplingThread() override;

  // Get task runner that is usable from the outside.
  scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
      ThreadExecutionState* out_state);

  // Get task runner that is usable from the sampling thread itself.
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();

  // Finishes a collection. The collection's |finished| waitable event will be
  // signaled. The |collection| should already have been removed from
  // |active_collections_| by the caller, as this is needed to avoid flakiness
  // in unit tests.
  void FinishCollection(std::unique_ptr<CollectionContext> collection);

  // Check if the sampling thread is idle and begin a shutdown if it is.
  void ScheduleShutdownIfIdle();

  // These methods are tasks that get posted to the internal message queue.
  void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
  void AddAuxUnwinderTask(int collection_id,
                          std::unique_ptr<Unwinder> unwinder);
  void ApplyMetadataToPastSamplesTask(
      base::TimeTicks period_start,
      base::TimeTicks period_end,
      uint64_t name_hash,
      absl::optional<int64_t> key,
      int64_t value,
      absl::optional<PlatformThreadId> thread_id);
  void RemoveCollectionTask(int collection_id);
  void RecordSampleTask(int collection_id);
  void ShutdownTask(int add_events);

  // Thread:
  void CleanUp() override;

  // A stack-buffer used by the sampler for its work. This buffer is re-used
  // across multiple sampler objects since their execution is serialized on the
  // sampling thread.
  std::unique_ptr<StackBuffer> stack_buffer_;

  // A map of collection ids to collection contexts. Because this class is a
  // singleton that is never destroyed, context objects will never be
  // destructed except by explicit action. Thus, it's acceptable to pass
  // unretained pointers to these objects when posting tasks.
  std::map<int, std::unique_ptr<CollectionContext>> active_collections_;

  // State maintained about the current execution (or non-execution) of
  // the thread. This state must always be accessed while holding the
  // lock. A copy of the task-runner is maintained here for use by any
  // calling thread; this is necessary because Thread's accessor for it is
  // not itself thread-safe. The lock is also used to order calls to the
  // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
  // multiple threads may make those calls.
  Lock thread_execution_state_lock_;  // Protects all thread_execution_state_*

  ThreadExecutionState thread_execution_state_
      GUARDED_BY(thread_execution_state_lock_) = NOT_STARTED;

  scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_
      GUARDED_BY(thread_execution_state_lock_);

  bool thread_execution_state_disable_idle_shutdown_for_testing_
      GUARDED_BY(thread_execution_state_lock_) = false;

  // A counter that notes adds of new collection requests. It is incremented
  // when changes occur so that delayed shutdown tasks are able to detect if
  // something new has happened while they were waiting. Like all
  // "execution_state" vars, this must be accessed while holding
  // |thread_execution_state_lock_|.
  int thread_execution_state_add_events_
      GUARDED_BY(thread_execution_state_lock_) = 0;
};
// static
void StackSamplingProfiler::SamplingThread::TestPeer::Reset() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    state = sampler->thread_execution_state_;
    DCHECK(sampler->active_collections_.empty());
  }

  // Stop the thread and wait for it to exit. This has to be done by the
  // thread itself because it has taken ownership of its own lifetime.
  if (state == RUNNING) {
    ShutdownAssumingIdle(false);
    state = EXITING;
  }

  // Make sure thread is cleaned up since state will be reset to NOT_STARTED.
  if (state == EXITING)
    sampler->Stop();

  // Reset internal variables to the just-initialized state.
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_ = NOT_STARTED;
    sampler->thread_execution_state_task_runner_ = nullptr;
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
    sampler->thread_execution_state_add_events_ = 0;
  }
}
// static
void StackSamplingProfiler::SamplingThread::TestPeer::DisableIdleShutdown() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
  }
}

// static
void StackSamplingProfiler::SamplingThread::TestPeer::ShutdownAssumingIdle(
    bool simulate_intervening_add) {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      sampler->GetTaskRunner(&state);
  DCHECK_EQ(RUNNING, state);
  DCHECK(task_runner);

  int add_events;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    add_events = sampler->thread_execution_state_add_events_;
    if (simulate_intervening_add)
      ++sampler->thread_execution_state_add_events_;
  }

  WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
                         WaitableEvent::InitialState::NOT_SIGNALED);
  // PostTaskAndReply won't work because the thread and its associated message
  // loop may be shut down.
  task_runner->PostTask(
      FROM_HERE, BindOnce(&ShutdownTaskAndSignalEvent, Unretained(sampler),
                          add_events, Unretained(&executed)));
  executed.Wait();
}

// static
void StackSamplingProfiler::SamplingThread::TestPeer::
    ShutdownTaskAndSignalEvent(SamplingThread* sampler,
                               int add_events,
                               WaitableEvent* event) {
  sampler->ShutdownTask(add_events);
  event->Signal();
}
AtomicSequenceNumber StackSamplingProfiler::SamplingThread::CollectionContext::
    next_collection_id;

StackSamplingProfiler::SamplingThread::SamplingThread()
    : Thread("StackSamplingProfiler") {}

StackSamplingProfiler::SamplingThread::~SamplingThread() = default;

StackSamplingProfiler::SamplingThread*
StackSamplingProfiler::SamplingThread::GetInstance() {
  return Singleton<SamplingThread,
                   LeakySingletonTraits<SamplingThread>>::get();
}

int StackSamplingProfiler::SamplingThread::Add(
    std::unique_ptr<CollectionContext> collection) {
  // This is not to be run on the sampling thread.

  int collection_id = collection->collection_id;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      GetOrCreateTaskRunnerForAdd();

  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
                          std::move(collection)));

  return collection_id;
}

void StackSamplingProfiler::SamplingThread::AddAuxUnwinder(
    int collection_id,
    std::unique_ptr<Unwinder> unwinder) {
  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);
  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddAuxUnwinderTask,
                          Unretained(this), collection_id, std::move(unwinder)));
}
void StackSamplingProfiler::SamplingThread::ApplyMetadataToPastSamples(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    absl::optional<int64_t> key,
    int64_t value,
    absl::optional<PlatformThreadId> thread_id) {
  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);
  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::ApplyMetadataToPastSamplesTask,
                          Unretained(this), period_start, period_end,
                          name_hash, key, value, thread_id));
}

void StackSamplingProfiler::SamplingThread::Remove(int collection_id) {
  // This is not to be run on the sampling thread.

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);

  // This can fail if the thread were to exit between acquisition of the task
  // runner above and the call below. In that case, however, everything has
  // stopped so there's no need to try to stop it.
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&SamplingThread::RemoveCollectionTask,
                                 Unretained(this), collection_id));
}
scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
  AutoLock lock(thread_execution_state_lock_);

  // The increment of the "add events" count is why this method may only be
  // called from Add().
  ++thread_execution_state_add_events_;

  if (thread_execution_state_ == RUNNING) {
    DCHECK(thread_execution_state_task_runner_);
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    return thread_execution_state_task_runner_;
  }

  if (thread_execution_state_ == EXITING) {
    // StopSoon() was previously called to shut down the thread
    // asynchronously. Stop() must now be called before calling Start() again
    // to reset the thread state.
    //
    // We must allow blocking here to satisfy the Thread implementation, but in
    // practice the Stop() call is unlikely to actually block. For this to
    // happen a new profiling request would have to be made within the narrow
    // window between StopSoon() and thread exit following the end of the 60
    // second idle period.
    ScopedAllowThreadRecallForStackSamplingProfiler allow_thread_join;
    Stop();
  }

  DCHECK(!stack_buffer_);
  stack_buffer_ = StackSampler::CreateStackBuffer();

  // The thread is not running. Start it and get its associated task runner.
  // The task runner has to be saved for future use because, though it can be
  // used from any thread, it can be acquired via task_runner() only on the
  // created thread or the thread that creates it (i.e. this thread), for
  // thread-safety reasons. Those restrictions are alleviated in SamplingThread
  // by gating access to it with the |thread_execution_state_lock_|.
  Start();
  thread_execution_state_ = RUNNING;
  thread_execution_state_task_runner_ = Thread::task_runner();

  // Detach the sampling thread from the "sequence" (i.e. thread) that
  // started it so that it can be self-managed or stopped by another thread.
  DetachFromSequence();

  return thread_execution_state_task_runner_;
}
scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunner(
    ThreadExecutionState* out_state) {
  AutoLock lock(thread_execution_state_lock_);
  if (out_state)
    *out_state = thread_execution_state_;
  if (thread_execution_state_ == RUNNING) {
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    DCHECK(thread_execution_state_task_runner_);
  } else {
    DCHECK(!thread_execution_state_task_runner_);
  }

  return thread_execution_state_task_runner_;
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
  // This should be called only from the sampling thread as it has limited
  // accessibility.
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  return Thread::task_runner();
}

void StackSamplingProfiler::SamplingThread::FinishCollection(
    std::unique_ptr<CollectionContext> collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  DCHECK_EQ(0u, active_collections_.count(collection->collection_id));

  TimeDelta profile_duration = TimeTicks::Now() -
                               collection->profile_start_time +
                               collection->params.sampling_interval;

  collection->profile_builder->OnProfileCompleted(
      profile_duration, collection->params.sampling_interval);

  // Signal that this collection is finished.
  WaitableEvent* collection_finished = collection->finished;
  // Ensure the collection is destroyed before signaling, so that it may
  // not outlive StackSamplingProfiler.
  collection.reset();
  collection_finished->Signal();

  ScheduleShutdownIfIdle();
}
void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  if (!active_collections_.empty())
    return;

  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle");

  int add_events;
  {
    AutoLock lock(thread_execution_state_lock_);
    if (thread_execution_state_disable_idle_shutdown_for_testing_)
      return;
    add_events = thread_execution_state_add_events_;
  }
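  // The delay below implements the "idle" period referenced in the
  // ThreadExecutionState comments: shutdown only proceeds if no new
  // collections arrive (tracked via |add_events|) before the task runs.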
  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::ShutdownTask, Unretained(this), add_events),
      Seconds(60));
}
void StackSamplingProfiler::SamplingThread::AddAuxUnwinderTask(
    int collection_id,
    std::unique_ptr<Unwinder> unwinder) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto loc = active_collections_.find(collection_id);
  if (loc == active_collections_.end())
    return;

  loc->second->sampler->AddAuxUnwinder(std::move(unwinder));
}

void StackSamplingProfiler::SamplingThread::ApplyMetadataToPastSamplesTask(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    absl::optional<int64_t> key,
    int64_t value,
    absl::optional<PlatformThreadId> thread_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  MetadataRecorder::Item item(name_hash, key, thread_id, value);
  for (auto& id_collection_pair : active_collections_) {
    if (thread_id && id_collection_pair.second->thread_id != thread_id)
      continue;
    id_collection_pair.second->profile_builder->ApplyMetadataRetrospectively(
        period_start, period_end, item);
  }
}
void StackSamplingProfiler::SamplingThread::AddCollectionTask(
    std::unique_ptr<CollectionContext> collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  const int collection_id = collection->collection_id;
  const TimeDelta initial_delay = collection->params.initial_delay;

  collection->sampler->Initialize();

  active_collections_.insert(
      std::make_pair(collection_id, std::move(collection)));

  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
               collection_id),
      initial_delay);

  // Another increment of "add events" serves to invalidate any pending
  // shutdown tasks that may have been initiated between the Add() and this
  // task running.
  {
    AutoLock lock(thread_execution_state_lock_);
    ++thread_execution_state_add_events_;
  }
}

void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);
  if (found == active_collections_.end())
    return;

  // Remove |collection| from |active_collections_|.
  std::unique_ptr<CollectionContext> collection = std::move(found->second);
  size_t count = active_collections_.erase(collection_id);
  DCHECK_EQ(1U, count);

  FinishCollection(std::move(collection));
}
void StackSamplingProfiler::SamplingThread::RecordSampleTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);

  // The task won't be found if it has been stopped.
  if (found == active_collections_.end())
    return;

  CollectionContext* collection = found->second.get();

  // If this is the first sample, the collection params need to be filled.
  if (collection->sample_count == 0) {
    collection->profile_start_time = TimeTicks::Now();
    collection->next_sample_time = TimeTicks::Now();
  }

  // Record a single sample.
  collection->sampler->RecordStackFrames(stack_buffer_.get(),
                                         collection->profile_builder.get(),
                                         collection->thread_id);

  // Schedule the next sample recording if there is one.
  if (++collection->sample_count < collection->params.samples_per_profile) {
    collection->next_sample_time = GetNextSampleTimeImpl(
        collection->next_sample_time, collection->params.sampling_interval,
        TimeTicks::Now());
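    // The delay passed to PostDelayedTask is relative to now and clamped to
    // zero, so an already-overdue sample is recorded as soon as possible
    // while |next_sample_time| stays on the original schedule.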
    bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
        FROM_HERE,
        BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
                 collection_id),
        std::max(collection->next_sample_time - TimeTicks::Now(),
                 TimeDelta()));
    DCHECK(success);
    return;
  }

  // Take ownership of |collection| and remove it from the map.
  std::unique_ptr<CollectionContext> owned_collection =
      std::move(found->second);
  size_t count = active_collections_.erase(collection_id);
  DCHECK_EQ(1U, count);

  // All capturing has completed so finish the collection.
  FinishCollection(std::move(owned_collection));
}
void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // Holding this lock ensures that any attempt to start another job will
  // get postponed until |thread_execution_state_| is updated, thus eliminating
  // the race in starting a new thread while the previous one is exiting.
  AutoLock lock(thread_execution_state_lock_);

  // If the current count of creation requests doesn't match the passed count
  // then other tasks have been created since this was posted. Abort shutdown.
  if (thread_execution_state_add_events_ != add_events)
    return;

  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::SamplingThread::ShutdownTask");

  // There can be no new AddCollectionTasks at this point because creating
  // those always increments "add events". There may be other requests, like
  // Remove, but it's okay to schedule the thread to stop once they've been
  // executed (i.e. "soon").
  DCHECK(active_collections_.empty());
  StopSoon();

  // StopSoon will have set the owning sequence (again) so it must be detached
  // (again) in order for Stop/Start to be called (again) should more work
  // come in. Holding the |thread_execution_state_lock_| ensures the necessary
  // happens-after with regard to this detach and future Thread API calls.
  DetachFromSequence();

  // Set the thread_state variable so the thread will be restarted when new
  // work comes in. Remove the |thread_execution_state_task_runner_| to avoid
  // confusion.
  thread_execution_state_ = EXITING;
  thread_execution_state_task_runner_ = nullptr;
  stack_buffer_.reset();
}

void StackSamplingProfiler::SamplingThread::CleanUp() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // There should be no collections remaining when the thread stops.
  DCHECK(active_collections_.empty());

  // Let the parent clean up.
  Thread::CleanUp();
}
// StackSamplingProfiler ------------------------------------------------------

// static
void StackSamplingProfiler::TestPeer::Reset() {
  SamplingThread::TestPeer::Reset();
}

// static
bool StackSamplingProfiler::TestPeer::IsSamplingThreadRunning() {
  return SamplingThread::GetInstance()->IsRunning();
}

// static
void StackSamplingProfiler::TestPeer::DisableIdleShutdown() {
  SamplingThread::TestPeer::DisableIdleShutdown();
}

// static
void StackSamplingProfiler::TestPeer::PerformSamplingThreadIdleShutdown(
    bool simulate_intervening_start) {
  SamplingThread::TestPeer::ShutdownAssumingIdle(simulate_intervening_start);
}

// static
TimeTicks StackSamplingProfiler::TestPeer::GetNextSampleTime(
    TimeTicks scheduled_current_sample_time,
    TimeDelta sampling_interval,
    TimeTicks now) {
  return GetNextSampleTimeImpl(scheduled_current_sample_time,
                               sampling_interval, now);
}

// static
// The profiler is currently supported for Windows x64, macOS, iOS 64-bit, and
// Android ARM32.
bool StackSamplingProfiler::IsSupportedForCurrentPlatform() {
#if (BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86_64)) || BUILDFLAG(IS_MAC) || \
    (BUILDFLAG(IS_IOS) && defined(ARCH_CPU_64_BITS)) ||                     \
    (BUILDFLAG(IS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE))
#if BUILDFLAG(IS_WIN)
  // Do not start the profiler when Application Verifier is in use; running
  // them simultaneously can cause crashes and has no known use case.
  if (GetModuleHandleA(base::win::kApplicationVerifierDllName))
    return false;
  // Checks if Trend Micro DLLs are loaded in process, so we can disable the
  // profiler to avoid hitting their performance bug. See
  // https://crbug.com/1018291 and https://crbug.com/1113832.
  if (GetModuleHandleA("tmmon64.dll") || GetModuleHandleA("tmmonmgr64.dll"))
    return false;
#endif
  return true;
#else
  return false;
#endif
}
StackSamplingProfiler::StackSamplingProfiler(
    SamplingProfilerThreadToken thread_token,
    const SamplingParams& params,
    std::unique_ptr<ProfileBuilder> profile_builder,
    UnwindersFactory core_unwinders_factory,
    RepeatingClosure record_sample_callback,
    StackSamplerTestDelegate* test_delegate)
    : StackSamplingProfiler(thread_token,
                            params,
                            std::move(profile_builder),
                            std::unique_ptr<StackSampler>()) {
  sampler_ =
      StackSampler::Create(thread_token, profile_builder_->GetModuleCache(),
                           std::move(core_unwinders_factory),
                           std::move(record_sample_callback), test_delegate);
}

StackSamplingProfiler::StackSamplingProfiler(
    SamplingProfilerThreadToken thread_token,
    const SamplingParams& params,
    std::unique_ptr<ProfileBuilder> profile_builder,
    std::unique_ptr<StackSampler> sampler)
    : thread_token_(thread_token),
      params_(params),
      profile_builder_(std::move(profile_builder)),
      sampler_(std::move(sampler)),
      // The event starts "signaled" so code knows it's safe to start the
      // thread, and "manual" so that it can be waited on in multiple places.
      profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
      profiler_id_(kNullProfilerId) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::StackSamplingProfiler");
  DCHECK(profile_builder_);
}

StackSamplingProfiler::~StackSamplingProfiler() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::~StackSamplingProfiler");

  // Stop returns immediately but the shutdown runs asynchronously. There is a
  // non-zero probability that one more sample will be taken after this call
  // returns.
  Stop();

  // The behavior of sampling a thread that has exited is undefined and could
  // cause Bad Things(tm) to occur. The safety model provided by this class is
  // that an instance of this object is expected to live at least as long as
  // the thread it is sampling. However, because the sampling is performed
  // asynchronously by the SamplingThread, there is no way to guarantee this
  // is true without waiting for it to signal that it has finished.
  //
  // The wait time should, at most, be only as long as it takes to collect one
  // sample (~200us) or none at all if sampling has already completed.
  ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
  profiling_inactive_.Wait();
}
void StackSamplingProfiler::Start() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Start");

  // Multiple calls to Start() for a single StackSamplingProfiler object are
  // not allowed. If profile_builder_ is nullptr, then Start() has been called
  // already.
  DCHECK(profile_builder_);

  // |sampler_| will be null if sampling isn't supported on the current
  // platform.
  if (!sampler_)
    return;

  // The IsSignaled() check below requires that the WaitableEvent be manually
  // reset, to avoid signaling the event in IsSignaled() itself.
  static_assert(kResetPolicy == WaitableEvent::ResetPolicy::MANUAL,
                "The reset policy must be set to MANUAL");

  // If a previous profiling phase is still winding down, wait for it to
  // complete. We can't use task posting for this coordination because the
  // thread owning the profiler may not have a message loop.
  if (!profiling_inactive_.IsSignaled())
    profiling_inactive_.Wait();
  profiling_inactive_.Reset();
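  // Add() hands |sampler_| and |profile_builder_| off to the sampling thread;
  // the collection signals |profiling_inactive_| when it finishes (see
  // FinishCollection()), which is what the destructor and any subsequent
  // Start() wait on.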
  DCHECK_EQ(kNullProfilerId, profiler_id_);
  profiler_id_ = SamplingThread::GetInstance()->Add(
      std::make_unique<SamplingThread::CollectionContext>(
          thread_token_.id, params_, &profiling_inactive_, std::move(sampler_),
          std::move(profile_builder_)));
  DCHECK_NE(kNullProfilerId, profiler_id_);

  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Started", "profiler_id", profiler_id_);
}

void StackSamplingProfiler::Stop() {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Stop", "profiler_id", profiler_id_);

  SamplingThread::GetInstance()->Remove(profiler_id_);
  profiler_id_ = kNullProfilerId;
}
void StackSamplingProfiler::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
  if (profiler_id_ == kNullProfilerId) {
    // We haven't started sampling, so we can add |unwinder| to the sampler
    // directly.
    if (sampler_)
      sampler_->AddAuxUnwinder(std::move(unwinder));
    return;
  }

  SamplingThread::GetInstance()->AddAuxUnwinder(profiler_id_,
                                                std::move(unwinder));
}

// static
void StackSamplingProfiler::ApplyMetadataToPastSamples(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    absl::optional<int64_t> key,
    int64_t value,
    absl::optional<PlatformThreadId> thread_id) {
  SamplingThread::GetInstance()->ApplyMetadataToPastSamples(
      period_start, period_end, name_hash, key, value, thread_id);
}

}  // namespace base