process_memory_dump_unittest.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <stddef.h>
#include <string.h>

#include <memory>

#include "base/memory/aligned_memory.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_log.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
#include "winbase.h"
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include <sys/mman.h>
#endif

namespace base {
namespace trace_event {

namespace {

const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
const char* const kTestDumpNameAllowlist[] = {
    "Allowlisted/TestName", "Allowlisted/TestName_0x?",
    "Allowlisted/0x?/TestName", "Allowlisted/0x?", nullptr};

void* Map(size_t size) {
#if BUILDFLAG(IS_WIN)
  return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_READWRITE);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
                0, 0);
#endif
}

void Unmap(void* addr, size_t size) {
#if BUILDFLAG(IS_WIN)
  ::VirtualFree(addr, 0, MEM_DECOMMIT);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  ::munmap(addr, size);
#else
#error This architecture is not (yet) supported.
#endif
}

}  // namespace
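
// Usage sketch for the helpers above (illustrative only): the resident-byte
// tests below map a scratch region, touch it so that its pages are faulted in
// and become resident, and then unmap it again:
//
//   const size_t size = 2 * ProcessMemoryDump::GetSystemPageSize();
//   void* mem = Map(size);   // VirtualAlloc on Windows, mmap elsewhere.
//   memset(mem, 0, size);    // Fault the pages in.
//   Unmap(mem, size);        // Release them.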

TEST(ProcessMemoryDumpTest, MoveConstructor) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2(std::move(pmd1));

  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}

TEST(ProcessMemoryDumpTest, MoveAssignment) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::BACKGROUND});
  pmd2.CreateAllocatorDump("malloc");

  pmd2 = std::move(pmd1);
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  // The "malloc" dump created before the assignment must have been replaced.
  EXPECT_EQ(0u, pmd2.allocator_dumps().count("malloc"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}

TEST(ProcessMemoryDumpTest, Clear) {
  std::unique_ptr<ProcessMemoryDump> pmd1(
      new ProcessMemoryDump(kDetailedDumpArgs));
  pmd1->CreateAllocatorDump("mad1");
  pmd1->CreateAllocatorDump("mad2");
  ASSERT_FALSE(pmd1->allocator_dumps().empty());

  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->Clear();
  ASSERT_TRUE(pmd1->allocator_dumps().empty());
  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  // Check that the pmd can be reused and behaves as expected.
  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());

  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}
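
// Sketch of how a dump created via CreateAllocatorDump() typically gets
// populated before serialization. This assumes MemoryAllocatorDump's
// AddScalar() helper and its kNameSize/kUnitsBytes constants (declared in
// memory_allocator_dump.h), which are not otherwise exercised in this file.
TEST(ProcessMemoryDumpTest, PopulateAllocatorDumpExample) {
  ProcessMemoryDump pmd(kDetailedDumpArgs);
  MemoryAllocatorDump* mad = pmd.CreateAllocatorDump("example/objects");
  // Record a size for the dump; a byte count under "size" is the
  // conventional scalar.
  mad->AddScalar(MemoryAllocatorDump::kNameSize,
                 MemoryAllocatorDump::kUnitsBytes, 1024);
  EXPECT_EQ(mad, pmd.GetAllocatorDump("example/objects"));

  // Serialization should work on the populated dump as well.
  auto traced_value = std::make_unique<TracedValue>();
  pmd.SerializeAllocatorDumpsInto(traced_value.get());
}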

TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
  std::unique_ptr<TracedValue> traced_value(new TracedValue);
  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
  metrics_by_context[AllocationContext()] = {1, 1};
  TraceEventMemoryOverhead overhead;

  std::unique_ptr<ProcessMemoryDump> pmd1(
      new ProcessMemoryDump(kDetailedDumpArgs));
  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
  pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");

  std::unique_ptr<ProcessMemoryDump> pmd2(
      new ProcessMemoryDump(kDetailedDumpArgs));
  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->TakeAllDumpsFrom(pmd2.get());

  // Make sure that pmd2 is empty but still usable after it has been emptied.
  ASSERT_TRUE(pmd2->allocator_dumps().empty());
  ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
  pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
  ASSERT_EQ(1u, pmd2->allocator_dumps().size());
  ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
  pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  // Check that calling serialization routines doesn't cause a crash.
  pmd2->SerializeAllocatorDumpsInto(traced_value.get());

  // Free |pmd2| to check that ownership of the transferred dumps has really
  // moved to |pmd1|.
  pmd2.reset();

  // Now check that |pmd1| has been effectively merged.
  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad2"));
  ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());

  // Check that calling serialization routines doesn't cause a crash.
  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}
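
// Note that TakeAllDumpsFrom() moves dumps, ownership edges, and shared
// global dumps wholesale: the MemoryAllocatorDump pointers obtained from the
// source PMD remain valid afterwards because the dump objects themselves are
// transferred, not copied.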

TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));

  auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
  auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
  auto* shm_dump3 = pmd->CreateAllocatorDump("shared_mem/seg3");
  auto* shm_dump4 = pmd->CreateAllocatorDump("shared_mem/seg4");

  // Create a mix of overridable and non-overridable ownership edges towards
  // the shared memory segments above.
  auto* child1_dump = pmd->CreateAllocatorDump("shared_mem/child/seg1");
  pmd->AddOverridableOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                                   0 /* importance */);
  auto* child2_dump = pmd->CreateAllocatorDump("shared_mem/child/seg2");
  pmd->AddOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                        3 /* importance */);
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  pmd->AddOverridableOwnershipEdge(shm_dump3->guid(), shared_mad_guid,
                                   0 /* importance */);
  auto* child4_dump = pmd->CreateAllocatorDump("shared_mem/child/seg4");
  pmd->AddOverridableOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                                   4 /* importance */);

  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();
  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(0, edges.find(child1_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(0, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_TRUE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child4_dump->guid())->second.overridable);

  // These should override old edges:
  pmd->AddOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                        1 /* importance */);
  pmd->AddOwnershipEdge(shm_dump3->guid(), shared_mad_guid, 2 /* importance */);
  // This should not change the old edges.
  pmd->AddOverridableOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                                   0 /* importance */);
  pmd->AddOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                        0 /* importance */);

  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(1, edges.find(child1_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
}
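
// To summarize the semantics exercised above: AddOwnershipEdge() replaces an
// existing overridable edge, keeps the higher of the two importance values,
// and clears the overridable bit, whereas AddOverridableOwnershipEdge() never
// displaces an edge that already exists.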

TEST(ProcessMemoryDumpTest, Suballocations) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  const std::string allocator_dump_name = "fakealloc/allocated_objects";
  pmd->CreateAllocatorDump(allocator_dump_name);

  // Create one allocation with an auto-assigned guid and mark it as a
  // suballocation of "fakealloc/allocated_objects".
  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);

  // Same here, but this time create an allocation with an explicit guid.
  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
                                             MemoryAllocatorDumpGuid(0x42));
  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);

  // Now check that AddSuballocation() has created anonymous child dumps under
  // "fakealloc/allocated_objects".
  auto anon_node_1_it = pmd->allocator_dumps().find(
      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);
  auto anon_node_2_it =
      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);

  // Finally check that AddSuballocation() has also created the edges between
  // the pictures and the anonymous allocator child dumps.
  bool found_edge[2]{false, false};
  for (const auto& e : pmd->allocator_dumps_edges()) {
    found_edge[0] |= (e.first == pic1_dump->guid() &&
                      e.second.target == anon_node_1_it->second->guid());
    found_edge[1] |= (e.first == pic2_dump->guid() &&
                      e.second.target == anon_node_2_it->second->guid());
  }
  ASSERT_TRUE(found_edge[0]);
  ASSERT_TRUE(found_edge[1]);

  // Check that calling serialization routines doesn't cause a crash.
  std::unique_ptr<TracedValue> traced_value(new TracedValue);
  pmd->SerializeAllocatorDumpsInto(traced_value.get());

  pmd.reset();
}
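
// The anonymous child dumps created by AddSuballocation() follow the naming
// scheme "<owner dump>/__<guid>", e.g. "fakealloc/allocated_objects/__42" for
// the explicit 0x42 guid above.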

TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad2);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad3);
  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());

  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad4);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());

  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad5);
  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
}
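
// In other words: repeated creations with the same guid always return the
// same dump; a strong creation permanently upgrades a weak dump, and later
// weak creations never downgrade it back.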

TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();

  auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
  auto shm_token2 = UnguessableToken::Create();
  MemoryAllocatorDumpGuid shm_local_guid2 =
      pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
  MemoryAllocatorDumpGuid shm_global_guid2 =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
  pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
                                   0 /* importance */);

  pmd->CreateSharedMemoryOwnershipEdge(client_dump2->guid(), shm_token2,
                                       1 /* importance */);
  EXPECT_EQ(2u, edges.size());
  EXPECT_EQ(shm_global_guid2, edges.find(shm_local_guid2)->second.target);
  EXPECT_EQ(1, edges.find(shm_local_guid2)->second.importance);
  EXPECT_FALSE(edges.find(shm_local_guid2)->second.overridable);
  EXPECT_EQ(shm_local_guid2, edges.find(client_dump2->guid())->second.target);
  EXPECT_EQ(1, edges.find(client_dump2->guid())->second.importance);
  EXPECT_FALSE(edges.find(client_dump2->guid())->second.overridable);
}
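
// As the assertions above show, CreateSharedMemoryOwnershipEdge() does two
// things: it links the client dump to the tracker's local shared-memory dump,
// and it overrides the tracker's own (overridable) edge to the global dump so
// that both edges carry the caller's importance.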

TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(background_args));
  ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
  SetAllocatorDumpNameAllowlistForTesting(kTestDumpNameAllowlist);
  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad(std::string());

  // GetAllocatorDump works for uncreated dumps.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("Allowlisted/TestName"));

  // Invalid dump names.
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Allowlisted/Test"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Not/Allowlisted/TestName"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/0x1a2Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/__12/Google"));

  // Suballocations.
  MemoryAllocatorDumpGuid guid(1);
  pmd->AddSuballocation(guid, "malloc/allocated_objects");
  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
  EXPECT_EQ(0u, pmd->allocator_dumps_.size());

  // Global dumps.
  EXPECT_NE(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));

  // Valid dump names.
  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Allowlisted/TestName"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName_0xA1b2"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/0xaB/TestName"));

  // GetAllocatorDump is consistent.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Allowlisted/TestName"));

  // Test allowed entries.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Allowlisted/TestName"));

  // Global dumps should be allowed.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("global/13456"));

  // Global dumps with non-guids should not be allowed.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("global/random"));

  // Random names should not be allowed either.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("NotAllowlisted/TestName"));

  // Check hex processing.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Allowlisted/0xA1b2"));
}
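
// Note on the allowlist patterns at the top of this file: a "0x?" component
// acts as a wildcard for a hexadecimal suffix, which is why
// "Allowlisted/TestName_0xA1b2" and "Allowlisted/0xaB/TestName" pass while
// "Allowlisted/TestName/0x1a2Google" does not.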

TEST(ProcessMemoryDumpTest, GuidsTest) {
  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};

  const auto process_token_one = UnguessableToken::Create();
  const auto process_token_two = UnguessableToken::Create();

  ProcessMemoryDump pmd1(dump_args);
  pmd1.set_process_token_for_testing(process_token_one);
  MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");

  ProcessMemoryDump pmd2(dump_args);
  pmd2.set_process_token_for_testing(process_token_one);
  MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");

  // If we don't set a process token, the PMD gets a random one:
  ProcessMemoryDump pmd3(dump_args);
  MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");

  // PMDs for different processes produce different GUIDs even for the same
  // names:
  ProcessMemoryDump pmd4(dump_args);
  pmd4.set_process_token_for_testing(process_token_two);
  MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");

  ASSERT_EQ(mad1->guid(), mad2->guid());
  ASSERT_NE(mad2->guid(), mad3->guid());
  ASSERT_NE(mad3->guid(), mad4->guid());
  ASSERT_NE(mad4->guid(), mad2->guid());

  ASSERT_EQ(mad1->guid(), pmd1.GetDumpId("foo"));
}
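
// The assertions above show that a dump's GUID is a deterministic function of
// the (process token, dump name) pair: two PMDs with the same token assign
// the same GUID to "foo", while different (or randomly assigned) tokens yield
// different GUIDs for the same name.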

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
#define MAYBE_CountResidentBytes DISABLED_CountResidentBytes
#else
#define MAYBE_CountResidentBytes CountResidentBytes
#endif
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // Allocate a few pages of dirty memory and check that they are resident.
  const size_t size1 = 5 * page_size;
  void* memory1 = Map(size1);
  memset(memory1, 0, size1);
  absl::optional<size_t> res1 =
      ProcessMemoryDump::CountResidentBytes(memory1, size1);
  ASSERT_TRUE(res1.has_value());
  ASSERT_EQ(res1.value(), size1);
  Unmap(memory1, size1);

  // Allocate a large memory segment (> 8 MiB).
  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
  void* memory2 = Map(kVeryLargeMemorySize);
  memset(memory2, 0, kVeryLargeMemorySize);
  absl::optional<size_t> res2 =
      ProcessMemoryDump::CountResidentBytes(memory2, kVeryLargeMemorySize);
  ASSERT_TRUE(res2.has_value());
  ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
  Unmap(memory2, kVeryLargeMemorySize);
}
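
// The "> 8 MiB" case matters presumably because CountResidentBytes() inspects
// the range in bounded chunks rather than in a single pass; a 15 MiB region
// then exercises the multi-chunk path as well as the single-chunk one.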

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/851760): Counting resident bytes is not supported on Fuchsia.
#define MAYBE_CountResidentBytesInSharedMemory \
  DISABLED_CountResidentBytesInSharedMemory
#else
#define MAYBE_CountResidentBytesInSharedMemory CountResidentBytesInSharedMemory
#endif
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytesInSharedMemory) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // Allocate a few pages of dirty memory and check that they are resident.
  {
    const size_t kDirtyMemorySize = 5 * page_size;
    auto region = base::WritableSharedMemoryRegion::Create(kDirtyMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kDirtyMemorySize);
    absl::optional<size_t> res1 =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(res1.has_value());
    ASSERT_EQ(res1.value(), kDirtyMemorySize);
  }

  // Allocate a shared memory segment but map at a non-page-aligned offset.
  {
    const size_t kDirtyMemorySize = 5 * page_size;
    auto region =
        base::WritableSharedMemoryRegion::Create(kDirtyMemorySize + page_size);
    base::WritableSharedMemoryMapping mapping =
        region.MapAt(page_size / 2, kDirtyMemorySize);
    memset(mapping.memory(), 0, kDirtyMemorySize);
    absl::optional<size_t> res1 =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(res1.has_value());
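    // The mapping starts half a page into the region, so once its start and
    // end are rounded out to page boundaries it covers one extra page.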
    ASSERT_EQ(res1.value(), kDirtyMemorySize + page_size);
  }

  // Allocate a large memory segment (> 8 MiB).
  {
    const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
    auto region =
        base::WritableSharedMemoryRegion::Create(kVeryLargeMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kVeryLargeMemorySize);
    absl::optional<size_t> res2 =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(res2.has_value());
    ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
  }

  // Allocate a large memory segment, but touch only about half of its pages.
  {
    const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
    const size_t kTouchedMemorySize = 7 * 1024 * 1024;
    auto region =
        base::WritableSharedMemoryRegion::Create(kVeryLargeMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    memset(mapping.memory(), 0, kTouchedMemorySize);
    absl::optional<size_t> res3 =
        ProcessMemoryDump::CountResidentBytesInSharedMemory(
            mapping.memory(), mapping.mapped_size());
    ASSERT_TRUE(res3.has_value());
    ASSERT_EQ(res3.value(), kTouchedMemorySize);
  }
}

#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

}  // namespace trace_event
}  // namespace base