memory_dump_manager.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <inttypes.h>
#include <stdio.h>

#include <algorithm>
#include <memory>
#include <tuple>
#include <utility>

#include "base/allocator/buildflags.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_util.h"
#include "base/task/sequenced_task_runner.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"

#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif

#endif  // BUILDFLAG(IS_ANDROID)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/trace_event/address_space_dump_provider.h"
#endif

namespace base {
namespace trace_event {

namespace {

MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;

// Temporary (until the scheduler is moved outside of here) trampoline function
// to match the |request_dump_function| passed to Initialize to the callback
// expected by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
    MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
    MemoryDumpType dump_type,
    MemoryDumpLevelOfDetail level_of_detail) {
  global_dump_fn.Run(dump_type, level_of_detail);
}

}  // namespace

// static
constexpr const char* MemoryDumpManager::kTraceCategory;

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
    MallocDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_memory_dump_manager_for_testing)
    return g_memory_dump_manager_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
  DCHECK(!g_memory_dump_manager_for_testing);
  std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
  g_memory_dump_manager_for_testing = instance.get();
  return instance;
}

MemoryDumpManager::MemoryDumpManager() = default;

MemoryDumpManager::~MemoryDumpManager() {
  Thread* dump_thread = nullptr;
  {
    AutoLock lock(lock_);
    if (dump_thread_) {
      dump_thread = dump_thread_.get();
    }
  }
  if (dump_thread) {
    dump_thread->Stop();
  }
  AutoLock lock(lock_);
  dump_thread_.reset();
  g_memory_dump_manager_for_testing = nullptr;
}

void MemoryDumpManager::Initialize(
    RequestGlobalDumpFunction request_dump_function,
    bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(!request_dump_function.is_null());
    DCHECK(!can_request_global_dumps());
    request_dump_function_ = request_dump_function;
    is_coordinator_ = is_coordinator;
  }

  // Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
  RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  RegisterDumpProvider(AddressSpaceDumpProvider::GetInstance(),
                       "PartitionAlloc.AddressSpace", nullptr);
#endif

#if BUILDFLAG(IS_ANDROID)
  RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                       nullptr);
#endif
}
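
// Usage sketch (not part of this file): how an embedder might wire up the
// manager at startup. The callback name |OnGlobalDumpRequested| is
// hypothetical; the only requirement, per Initialize() above, is a non-null
// RequestGlobalDumpFunction and a single call before tracing is set up.
//
//   void OnGlobalDumpRequested(
//       base::trace_event::MemoryDumpType dump_type,
//       base::trace_event::MemoryDumpLevelOfDetail level_of_detail) {
//     // Forward the request to whatever service coordinates global dumps.
//   }
//
//   base::trace_event::MemoryDumpManager::GetInstance()->Initialize(
//       base::BindRepeating(&OnGlobalDumpRequested),
//       /*is_coordinator=*/true);
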
void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  // Set |dumps_on_single_thread_task_runner| to true because all providers
  // without a task runner are run on the dump thread.
  MemoryDumpProvider::Options options;
  options.dumps_on_single_thread_task_runner = true;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}

void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    MemoryDumpProvider::Options options) {
  DCHECK(task_runner);
  options.dumps_on_single_thread_task_runner = false;
  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
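
// Registration sketch (hypothetical provider objects, not part of this file):
// a provider that must dump on a specific thread passes its task runner; a
// provider with no affinity passes nullptr and is invoked on the MemoryInfra
// dump thread, as the overloads above describe.
//
//   auto* mdm = base::trace_event::MemoryDumpManager::GetInstance();
//
//   // Thread-affine provider: OnMemoryDump() will be called on |my_runner|.
//   mdm->RegisterDumpProvider(my_provider, "MyProvider", my_runner);
//
//   // Unbound provider: OnMemoryDump() runs on the internal dump thread.
//   mdm->RegisterDumpProvider(my_unbound_provider, "MyUnboundProvider",
//                             nullptr);
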
void MemoryDumpManager::RegisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    const char* name,
    scoped_refptr<SequencedTaskRunner> task_runner,
    const MemoryDumpProvider::Options& options) {
  if (dumper_registrations_ignored_for_testing_)
    return;

  // Only a handful of MDPs are required to compute the memory metrics. These
  // have small enough performance overhead that it is reasonable to run them
  // in the background while the user is doing other things. Those MDPs are
  // 'allowed in background mode'.
  bool allowed_in_background_mode = IsMemoryDumpProviderInAllowlist(name);

  scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
      mdp, name, std::move(task_runner), options, allowed_in_background_mode);

  {
    AutoLock lock(lock_);
    bool already_registered = !dump_providers_.insert(mdpinfo).second;
    // This actually happens in some tests which don't have a clean tear-down
    // path for RenderThreadImpl::Init().
    if (already_registered)
      return;
  }
}

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}

void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
    std::unique_ptr<MemoryDumpProvider> mdp) {
  UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}

void MemoryDumpManager::UnregisterDumpProviderInternal(
    MemoryDumpProvider* mdp,
    bool take_mdp_ownership_and_delete_async) {
  std::unique_ptr<MemoryDumpProvider> owned_mdp;
  if (take_mdp_ownership_and_delete_async)
    owned_mdp.reset(mdp);

  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if ((*mdp_iter)->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;  // Not registered / already unregistered.

  if (take_mdp_ownership_and_delete_async) {
    // The MDP will be deleted whenever the MDPInfo struct is, that is either:
    // - At the end of this function, if no dump is in progress.
    // - In ContinueAsyncProcessDump(), when the MDPInfo is removed from
    //   |pending_dump_providers|.
    DCHECK(!(*mdp_iter)->owned_dump_provider);
    (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
  } else {
    // If you hit this DCHECK, your dump provider has a bug.
    // Unregistration of a MemoryDumpProvider is safe only if:
    // - The MDP has specified a sequenced task runner affinity AND the
    //   unregistration happens on that same task runner, so that the MDP
    //   cannot unregister while it is in the middle of an OnMemoryDump() call.
    // - The MDP has NOT specified a task runner affinity and its ownership is
    //   transferred via UnregisterAndDeleteDumpProviderSoon().
    // In all other cases, it is not possible to guarantee that the
    // unregistration will not race with OnMemoryDump() calls.
    DCHECK((*mdp_iter)->task_runner &&
           (*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
        << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
        << "unregister itself in a racy way. Please file a crbug.";
  }

  // The MDPInfo instance can still be referenced by
  // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
  // the MDPInfo is flagged as disabled. This causes InvokeOnMemoryDump() to
  // simply skip it, without actually invoking the |mdp|, which might be
  // destroyed by the caller soon after this method returns.
  (*mdp_iter)->disabled = true;
  dump_providers_.erase(mdp_iter);
}
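
// Unregistration sketch (hypothetical call sites, not part of this file):
// the two patterns the DCHECK above allows.
//
//   // Pattern 1: the provider declared affinity for |my_runner| at
//   // registration time, so unregistration must happen on that sequence.
//   DCHECK(my_runner->RunsTasksInCurrentSequence());
//   base::trace_event::MemoryDumpManager::GetInstance()
//       ->UnregisterDumpProvider(my_provider);
//
//   // Pattern 2: no task runner affinity; transfer ownership so deletion is
//   // deferred until any in-flight dump has finished with the provider.
//   base::trace_event::MemoryDumpManager::GetInstance()
//       ->UnregisterAndDeleteDumpProviderSoon(std::move(my_owned_provider));
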
bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
    MemoryDumpProvider* provider) {
  AutoLock lock(lock_);

  for (const auto& info : dump_providers_) {
    if (info->dump_provider == provider)
      return true;
  }
  return false;
}

scoped_refptr<SequencedTaskRunner>
MemoryDumpManager::GetDumpThreadTaskRunner() {
  base::AutoLock lock(lock_);
  return GetOrCreateBgTaskRunnerLocked();
}

scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
  if (dump_thread_)
    return dump_thread_->task_runner();

  dump_thread_ = std::make_unique<Thread>("MemoryInfra");
  bool started = dump_thread_->Start();
  CHECK(started);

  return dump_thread_->task_runner();
}

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          ProcessMemoryDumpCallback callback) {
  char guid_str[20];
  snprintf(guid_str, std::size(guid_str), "0x%" PRIx64, args.dump_guid);
  TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
                                    TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
                                    TRACE_STR_COPY(guid_str));

  // If the argument filter is enabled then only background mode dumps should
  // be allowed. If the trace config passed for a background tracing session
  // is missing the allowed-modes argument, crash here instead of creating
  // unexpected dumps.
  if (TraceLog::GetInstance()
          ->GetCurrentTraceConfig()
          .IsArgumentFilterEnabled()) {
    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
  }

  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);

    pmd_async_state = std::make_unique<ProcessMemoryDumpAsyncState>(
        args, dump_providers_, std::move(callback),
        GetOrCreateBgTaskRunnerLocked());
  }

  // Start the process dump. This involves task runner hops as specified by the
  // MemoryDumpProvider(s) in RegisterDumpProvider().
  ContinueAsyncProcessDump(pmd_async_state.release());
}
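
// Callback sketch (hypothetical caller, not part of this file): the callback
// passed to CreateProcessDump() is eventually run by FinishAsyncProcessDump()
// below with the success flag, the dump guid and the assembled
// ProcessMemoryDump.
//
//   void OnProcessDumpDone(
//       bool success,
//       uint64_t dump_guid,
//       std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd) {
//     // Inspect or serialize |pmd| here.
//   }
//
//   base::trace_event::MemoryDumpManager::GetInstance()->CreateProcessDump(
//       args, base::BindOnce(&OnProcessDumpDone));
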
// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, switches to that task runner and continues.
// All OnMemoryDump() invocations are linearized. |lock_| is used in these
// functions purely to ensure consistency w.r.t. (un)registrations of
// |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;

  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
  // why it isn't is the corner-case logic around |did_post_task| below, which
  // needs to take back ownership of the |pmd_async_state| when the PostTask()
  // fails.
  // Unfortunately, PostTask() destroys the unique_ptr arguments upon failure
  // to prevent accidental leaks. Using a unique_ptr would prevent us from
  // skipping the hop and moving on. Hence the manual naked -> unique ptr
  // juggling.
  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
  owned_pmd_async_state = nullptr;

  while (!pmd_async_state->pending_dump_providers.empty()) {
    // Read the MemoryDumpProviderInfo thread safety considerations in
    // memory_dump_manager.h when accessing |mdpinfo| fields.
    MemoryDumpProviderInfo* mdpinfo =
        pmd_async_state->pending_dump_providers.back().get();

    // If we are in background mode, we should invoke only the allowed
    // providers. Ignore other providers and continue.
    if (pmd_async_state->req_args.level_of_detail ==
            MemoryDumpLevelOfDetail::BACKGROUND &&
        !mdpinfo->allowed_in_background_mode) {
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    // If the dump provider did not specify a task runner affinity, dump on
    // |dump_thread_|.
    scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
    if (!task_runner) {
      DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
      task_runner = pmd_async_state->dump_thread_task_runner;
      DCHECK(task_runner);
    }

    // If |RunsTasksInCurrentSequence()| is true then no PostTask is
    // required since we are on the right SequencedTaskRunner.
    if (task_runner->RunsTasksInCurrentSequence()) {
      InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
      pmd_async_state->pending_dump_providers.pop_back();
      continue;
    }

    bool did_post_task = task_runner->PostTask(
        FROM_HERE,
        BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
                 Unretained(pmd_async_state.get())));

    if (did_post_task) {
      // Ownership is transferred to the posted task.
      std::ignore = pmd_async_state.release();
      return;
    }

    // PostTask usually fails only if the process or thread is shut down, so
    // the dump provider is disabled here. But don't disable unbound dump
    // providers, since the |dump_thread_| is controlled by MDM.
    if (mdpinfo->task_runner) {
      // A locked access is required to R/W |disabled| (for the
      // UnregisterAndDeleteDumpProviderSoon() case).
      AutoLock lock(lock_);
      mdpinfo->disabled = true;
    }

    // PostTask failed. Ignore the dump provider and continue.
    pmd_async_state->pending_dump_providers.pop_back();
  }

  FinishAsyncProcessDump(std::move(pmd_async_state));
}

// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify a task runner. Invokes the dump provider's
// OnMemoryDump() (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
                                           ProcessMemoryDump* pmd) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(!mdpinfo->task_runner ||
         mdpinfo->task_runner->RunsTasksInCurrentSequence());

  TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
               "dump_provider.name", mdpinfo->name);

  // Do not add any other TRACE_EVENT macro (or function that might have them)
  // below this point. Under some rare circumstances, they can re-initialize
  // and invalidate the current ThreadLocalEventBuffer, making the
  // |mdpinfo->disabled| check below susceptible to TOCTTOU bugs
  // (https://crbug.com/763365).

  bool is_thread_bound;
  {
    // A locked access is required to R/W |disabled| (for the
    // UnregisterAndDeleteDumpProviderSoon() case).
    AutoLock lock(lock_);

    // Unregister the dump provider if it failed too many times consecutively.
    if (!mdpinfo->disabled &&
        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
      mdpinfo->disabled = true;
      DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
                  << "\". Dump failed multiple times consecutively.";
    }
    if (mdpinfo->disabled)
      return;

    is_thread_bound = mdpinfo->task_runner != nullptr;
  }  // AutoLock lock(lock_);

  // Invoke the dump provider.

  // A stack-allocated copy of the dump provider name is useful to debug
  // crashes that happen when a dump is invoked after a |dump_provider| was
  // unregistered in an unsafe way.
  char provider_name_for_debugging[16];
  strncpy(provider_name_for_debugging, mdpinfo->name,
          sizeof(provider_name_for_debugging) - 1);
  provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
  base::debug::Alias(provider_name_for_debugging);

  ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
  CHECK(!is_thread_bound ||
        !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
  bool dump_successful =
      mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
  mdpinfo->consecutive_failures =
      dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}
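
// Provider sketch (hypothetical class, not part of this file): what the
// OnMemoryDump() contract invoked above looks like from the provider side.
// Returning false counts as a failure; after kMaxConsecutiveFailuresCount
// consecutive failures the provider is disabled, as implemented above. The
// allocator dump name and |cache_size_bytes_| member are illustrative only.
//
//   class MyDumpProvider : public base::trace_event::MemoryDumpProvider {
//    public:
//     bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
//                       base::trace_event::ProcessMemoryDump* pmd) override {
//       auto* dump = pmd->CreateAllocatorDump("my_component/my_cache");
//       dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
//                       base::trace_event::MemoryAllocatorDump::kUnitsBytes,
//                       cache_size_bytes_);
//       return true;  // Return false to report a failed dump.
//     }
//
//    private:
//     uint64_t cache_size_bytes_ = 0;
//   };
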
void MemoryDumpManager::FinishAsyncProcessDump(
    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  HEAP_PROFILER_SCOPED_IGNORE;
  DCHECK(pmd_async_state->pending_dump_providers.empty());
  const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
  if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
        pmd_async_state->callback_task_runner;
    callback_task_runner->PostTask(
        FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
                            Unretained(this), std::move(pmd_async_state)));
    return;
  }
  TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");

  if (!pmd_async_state->callback.is_null()) {
    std::move(pmd_async_state->callback)
        .Run(true /* success */, dump_guid,
             std::move(pmd_async_state->process_memory_dump));
  }

  TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                  TRACE_ID_LOCAL(dump_guid));
}

void MemoryDumpManager::SetupForTracing(
    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
  AutoLock lock(lock_);

  // At this point we must have the ability to request global dumps.
  DCHECK(can_request_global_dumps());

  MemoryDumpScheduler::Config periodic_config;
  for (const auto& trigger : memory_dump_config.triggers) {
    if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
      if (periodic_config.triggers.empty()) {
        periodic_config.callback =
            BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
                          MemoryDumpType::PERIODIC_INTERVAL);
      }
      periodic_config.triggers.push_back(
          {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
    }
  }

  // Only the coordinator process triggers periodic memory dumps.
  if (is_coordinator_ && !periodic_config.triggers.empty()) {
    MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                              GetOrCreateBgTaskRunnerLocked());
  }
}
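
// Config sketch (hypothetical caller, not part of this file): the shape of a
// periodic trigger consumed by the loop above. Field names follow the
// accesses in SetupForTracing(); the exact Trigger type name is assumed, and
// Initialize() must have been called first (see the DCHECK above).
//
//   using base::trace_event::MemoryDumpLevelOfDetail;
//   using base::trace_event::MemoryDumpType;
//   using base::trace_event::TraceConfig;
//
//   TraceConfig::MemoryDumpConfig memory_dump_config;
//   TraceConfig::MemoryDumpConfig::Trigger trigger;
//   trigger.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
//   trigger.level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
//   trigger.min_time_between_dumps_ms = 10000;
//   memory_dump_config.triggers.push_back(trigger);
//
//   base::trace_event::MemoryDumpManager::GetInstance()->SetupForTracing(
//       memory_dump_config);
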
void MemoryDumpManager::TeardownForTracing() {
  // There might be a memory dump in progress while this happens. Therefore,
  // ensure that the MDM state which depends on the tracing enabled / disabled
  // state is always accessed by the dumping methods holding the |lock_|.
  AutoLock lock(lock_);
  MemoryDumpScheduler::GetInstance()->Stop();
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    const MemoryDumpProviderInfo::OrderedSet& dump_providers,
    ProcessMemoryDumpCallback callback,
    scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
    : req_args(req_args),
      callback(std::move(callback)),
      callback_task_runner(ThreadTaskRunnerHandle::Get()),
      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
  pending_dump_providers.reserve(dump_providers.size());
  pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
  MemoryDumpArgs args = {req_args.level_of_detail, req_args.determinism,
                         req_args.dump_guid};
  process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
    default;

}  // namespace trace_event
}  // namespace base