process_memory_dump.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <memory>
#include <vector>

#include "base/bits.h"
#include "base/logging.h"
#include "base/memory/page_size.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/perfetto/protos/perfetto/trace/memory_graph.pbzero.h"
#include "third_party/perfetto/protos/perfetto/trace/trace_packet.pbzero.h"

#if BUILDFLAG(IS_IOS)
#include <mach/vm_page_size.h>
#endif

#if BUILDFLAG(IS_POSIX)
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>  // Must be in front of other Windows header files

#include <Psapi.h>
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <tuple>

#include "base/notreached.h"
#endif

using ProcessSnapshot =
    ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot;

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

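// Returns a token that identifies the current process. The token is created
// once on first use and reused, so all dumps in this process share it.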
UnguessableToken GetTokenForCurrentProcess() {
  static UnguessableToken instance = UnguessableToken::Create();
  return instance;
}

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if BUILDFLAG(IS_IOS)
  // On iOS, getpagesize() returns the user page size, but the arrays passed
  // to mincore() must be sized using the kernel page size. Use
  // vm_kernel_page_size as recommended by Apple,
  // https://forums.developer.apple.com/thread/47532/.
  // Refer to http://crbug.com/542671 and Apple rdar://23651782
  return vm_kernel_page_size;
#else
  return base::GetPageSize();
#endif  // BUILDFLAG(IS_IOS)
}

// static
absl::optional<size_t> ProcessMemoryDump::CountResidentBytes(
    void* start_address,
    size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_pages = 0;
  bool failure = false;

  // An array as large as the number of pages in the memory segment needs to
  // be passed to the query function. To avoid allocating a large array, the
  // given block of memory is split into chunks of size |kMaxChunkSize|.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
#if BUILDFLAG(IS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif BUILDFLAG(IS_APPLE)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;
#if BUILDFLAG(IS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif BUILDFLAG(IS_FUCHSIA)
    // TODO(crbug.com/851760): Implement counting resident bytes.
    // For now, log and avoid unused variable warnings.
    NOTIMPLEMENTED_LOG_ONCE();
    std::ignore = chunk_start;
    std::ignore = page_count;
#elif BUILDFLAG(IS_APPLE)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif BUILDFLAG(IS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
    do {
      result =
#if BUILDFLAG(IS_AIX)
          mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
                  reinterpret_cast<char*>(vec.get()));
#else
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
#endif
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

    total_resident_pages += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
    return absl::nullopt;
  }
  return total_resident_pages;
}

// static
absl::optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
    void* start_address,
    size_t mapped_size) {
  // `MapAt()` performs some internal arithmetic to allow non-page-aligned
  // offsets, but the memory accounting still expects to work with page-aligned
  // allocations.
  //
  // TODO(dcheng): one peculiarity here is that the shmem implementation uses
  // `base::SysInfo::VMAllocationGranularity()` while this file uses
  // `GetSystemPageSize()`. It'd be nice not to have two names for the same
  // thing...
  uint8_t* aligned_start_address = base::bits::AlignDown(
      static_cast<uint8_t*>(start_address), GetSystemPageSize());
  size_t adjusted_size =
      mapped_size + static_cast<size_t>(static_cast<uint8_t*>(start_address) -
                                        aligned_start_address);

#if BUILDFLAG(IS_MAC)
  // On macOS, use mach_vm_region instead of mincore for performance
  // (crbug.com/742042).
  mach_vm_size_t dummy_size = 0;
  mach_vm_address_t address =
      reinterpret_cast<mach_vm_address_t>(aligned_start_address);
  vm_region_top_info_data_t info;
  MachVMRegionResult result =
      GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
  if (result == MachVMRegionResult::Error) {
    LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
                  "is invalid";
    return absl::optional<size_t>();
  }

  size_t resident_pages =
      info.private_pages_resident + info.shared_pages_resident;

  // On macOS, measurements for private memory footprint overcount by
  // faulted pages in anonymous shared memory. To discount for this, we touch
  // all the resident pages in anonymous shared memory here, thus making them
  // faulted as well. This relies on two assumptions:
  //
  // 1) Consumers use shared memory from front to back. Thus, if there are
  // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes
  // in the shared memory region.
  //
  // 2) This logic is run shortly before the logic that calculates
  // phys_footprint, thus ensuring that the discrepancy between faulted and
  // resident pages is minimal.
  //
  // The performance penalty is expected to be small.
  //
  // * Most of the time, we expect the pages to already be resident and
  // faulted, thus incurring a cache penalty read hit [since we read from each
  // resident page].
  //
  // * Rarely, we expect the pages to be resident but not faulted, resulting
  // in soft faults + cache penalty.
  //
  // * If assumption (1) is invalid, this will potentially fault some
  // previously non-resident pages, thus increasing memory usage, without
  // fixing the accounting.
  //
  // Sanity check in case the mapped size is less than the total size of the
  // region.
  size_t pages_to_fault =
      std::min(resident_pages, (adjusted_size + PAGE_SIZE - 1) / PAGE_SIZE);

  volatile uint8_t* base_address = const_cast<uint8_t*>(aligned_start_address);
  for (size_t i = 0; i < pages_to_fault; ++i) {
    // Reading from a volatile is a visible side-effect for the purposes of
    // optimization. This guarantees that the optimizer will not kill this
    // line.
    base_address[i * PAGE_SIZE];
  }

  return resident_pages * PAGE_SIZE;
#else
  return CountResidentBytes(aligned_start_address, adjusted_size);
#endif  // BUILDFLAG(IS_MAC)
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(const MemoryDumpArgs& dump_args)
    : process_token_(GetTokenForCurrentProcess()), dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
    default;

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, guid));
}

MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black-hole dump if an invalid dump name is
  // given.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
      !IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
    return GetBlackHoleMad(mad->absolute_name());
  }
  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second)
      << "Duplicate name: " << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // A shared allocator dump can be shared within a process and the guid could
  // have been created already.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get()) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get())
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

void ProcessMemoryDump::DumpHeapUsage(
    const std::unordered_map<base::trace_event::AllocationContext,
                             base::trace_event::AllocationMetrics>&
        metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  std::string base_name =
      base::StringPrintf("tracing/heap_profiler_%s", allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
    std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
  DCHECK(allocator_dumps_.empty());
  for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
    AddAllocatorDumpInternal(std::move(dump));
}

std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
ProcessMemoryDump::GetAllEdgesForSerialization() const {
  std::vector<MemoryAllocatorDumpEdge> edges;
  edges.reserve(allocator_dumps_edges_.size());
  for (const auto& it : allocator_dumps_edges_)
    edges.push_back(it.second);
  return edges;
}

void ProcessMemoryDump::SetAllEdgesForSerialization(
    const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
  DCHECK(allocator_dumps_edges_.empty());
  for (const MemoryAllocatorDumpEdge& edge : edges) {
    auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
    DCHECK(it_and_inserted.second);
  }
}

void ProcessMemoryDump::Clear() {
  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();
}

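// Serializes the allocator dumps into |value| as an "allocators" dictionary
// and the ownership edges as an "allocators_graph" array.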
void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
  if (allocator_dumps_.size() > 0) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  value->BeginArray("allocators_graph");
  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", kEdgeTypeOwnership);
    value->EndDictionary();
  }
  value->EndArray();
}

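// Proto counterpart of the serializer above: adds one ProcessSnapshot for
// |pid| to |memory_snapshot| and fills it with this dump's nodes and edges.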
void ProcessMemoryDump::SerializeAllocatorDumpsInto(
    perfetto::protos::pbzero::MemoryTrackerSnapshot* memory_snapshot,
    const base::ProcessId pid) const {
  ProcessSnapshot* process_snapshot =
      memory_snapshot->add_process_memory_dumps();
  process_snapshot->set_pid(static_cast<int>(pid));

  for (const auto& allocator_dump_it : allocator_dumps_) {
    ProcessSnapshot::MemoryNode* memory_node =
        process_snapshot->add_allocator_dumps();
    allocator_dump_it.second->AsProtoInto(memory_node);
  }

  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    ProcessSnapshot::MemoryEdge* memory_edge =
        process_snapshot->add_memory_edges();
    memory_edge->set_source_id(edge.source.ToUint64());
    memory_edge->set_target_id(edge.target.ToUint64());
    // TODO(crbug.com/1333557): Fix .proto and remove this cast.
    memory_edge->set_importance(static_cast<uint32_t>(edge.importance));
  }
}

void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  // This will either override an existing edge or create a new one.
  auto it = allocator_dumps_edges_.find(source);
  int max_importance = importance;
  if (it != allocator_dumps_edges_.end()) {
    DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
    max_importance = std::max(importance, it->second.importance);
  }
  allocator_dumps_edges_[source] = {source, target, max_importance,
                                    false /* overridable */};
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

void ProcessMemoryDump::AddOverridableOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target,
    int importance) {
  if (allocator_dumps_edges_.count(source) == 0) {
    allocator_dumps_edges_[source] = {source, target, importance,
                                      true /* overridable */};
  } else {
    // An edge between the source and target already exists, so do nothing
    // here: the new overridable edge is implicitly overridden by the strong
    // edge which was created earlier.
    DCHECK(!allocator_dumps_edges_[source].overridable);
  }
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
                                          shared_memory_guid, importance,
                                          false /*is_weak*/);
}

void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(
      client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance,
    bool is_weak) {
  DCHECK(!shared_memory_guid.is_empty());
  // New model where the global dumps created by SharedMemoryTracker are used
  // for the clients.

  // The guid of the local dump created by SharedMemoryTracker for the memory
  // segment.
  auto local_shm_guid =
      GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));

  // The dump guid of the global dump created by the tracker for the memory
  // segment.
  auto global_shm_guid =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);

  // Create an edge between the local dump of the client and the local dump of
  // the SharedMemoryTracker. There is no need to create the dumps here since
  // the tracker would create them. The importance is also required here for
  // the case of single process mode.
  AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);

  // TODO(ssid): Handle the case of weak dumps here. This needs a new function
  // GetOrCreateGlobalDump() in PMD since we need to change the behavior of
  // the created global dump.

  // Create an edge that overrides the edge created by SharedMemoryTracker.
  AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
}

void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return;

  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad(
    const std::string& absolute_name) {
  DCHECK(is_black_hole_non_fatal_for_testing_)
      << " unknown dump name " << absolute_name
      << " this likely means kAllocatorDumpNameAllowlist needs to be updated";
  if (!black_hole_mad_) {
    std::string name = "discarded";
    black_hole_mad_ = std::make_unique<MemoryAllocatorDump>(
        name, dump_args_.level_of_detail, GetDumpId(name));
  }
  return black_hole_mad_.get();
}

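// Builds the GUID for |absolute_name| from the per-process token plus the
// name, so identical dump names in different processes get distinct GUIDs.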
MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
    const std::string& absolute_name) {
  return MemoryAllocatorDumpGuid(StringPrintf(
      "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
    const MemoryAllocatorDumpEdge& other) const {
  return source == other.source && target == other.target &&
         importance == other.importance && overridable == other.overridable;
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator!=(
    const MemoryAllocatorDumpEdge& other) const {
  return !(*this == other);
}

}  // namespace trace_event
}  // namespace base