activity_tracker.cc

  1. // Copyright 2016 The Chromium Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file.
  4. #include "base/debug/activity_tracker.h"
  5. #include <algorithm>
  6. #include <limits>
  7. #include <utility>
  8. #include "base/atomic_sequence_num.h"
  9. #include "base/bits.h"
  10. #include "base/containers/contains.h"
  11. #include "base/debug/stack_trace.h"
  12. #include "base/files/file.h"
  13. #include "base/files/file_path.h"
  14. #include "base/files/memory_mapped_file.h"
  15. #include "base/logging.h"
  16. #include "base/memory/ptr_util.h"
  17. #include "base/metrics/field_trial.h"
  18. #include "base/metrics/histogram_macros.h"
  19. #include "base/notreached.h"
  20. #include "base/pending_task.h"
  21. #include "base/pickle.h"
  22. #include "base/process/process.h"
  23. #include "base/process/process_handle.h"
  24. #include "base/strings/string_piece.h"
  25. #include "base/strings/string_util.h"
  26. #include "base/strings/utf_string_conversions.h"
  27. #include "base/threading/platform_thread.h"
  28. #include "build/build_config.h"
  29. #if BUILDFLAG(IS_WIN)
  30. #include <windows.h>
  31. #endif
  32. namespace base {
  33. namespace debug {
  34. namespace {
  35. // The minimum depth a stack should support.
  36. const int kMinStackDepth = 2;
  37. // The amount of memory set aside for holding arbitrary user data (key/value
  38. // pairs) globally or associated with ActivityData entries.
  39. const size_t kUserDataSize = 1 << 10; // 1 KiB
  40. const size_t kProcessDataSize = 4 << 10; // 4 KiB
  41. const size_t kMaxUserDataNameLength =
  42. static_cast<size_t>(std::numeric_limits<uint8_t>::max());
  43. // A constant used to indicate that module information is changing.
  44. const uint32_t kModuleInformationChanging = 0x80000000;
  45. // The key used to record process information.
  46. const char kProcessPhaseDataKey[] = "process-phase";
  47. // An atomically incrementing number, used to check for recreations of objects
  48. // in the same memory space.
  49. AtomicSequenceNumber g_next_id;
  50. // Gets the next non-zero identifier. It is only unique within a process.
  51. uint32_t GetNextDataId() {
  52. uint32_t id;
  53. while ((id = static_cast<uint32_t>(g_next_id.GetNext())) == 0) {
  54. }
  55. return id;
  56. }
  57. // Gets the current process-id, either from the GlobalActivityTracker if it
  58. // exists (where the PID can be defined for testing) or from the system
  59. // otherwise.
  60. ProcessId GetProcessId() {
  61. GlobalActivityTracker* global = GlobalActivityTracker::Get();
  62. if (global)
  63. return global->process_id();
  64. return GetCurrentProcId();
  65. }
  66. // Finds and reuses a specific allocation or creates a new one.
  67. PersistentMemoryAllocator::Reference AllocateFrom(
  68. PersistentMemoryAllocator* allocator,
  69. uint32_t from_type,
  70. size_t size,
  71. uint32_t to_type) {
  72. PersistentMemoryAllocator::Iterator iter(allocator);
  73. PersistentMemoryAllocator::Reference ref;
  74. while ((ref = iter.GetNextOfType(from_type)) != 0) {
  75. DCHECK_LE(size, allocator->GetAllocSize(ref));
  76. // This can fail if another thread has just taken it. It is assumed that
  77. // the memory is cleared during the "free" operation.
  78. if (allocator->ChangeType(ref, to_type, from_type, /*clear=*/false))
  79. return ref;
  80. }
  81. return allocator->Allocate(size, to_type);
  82. }
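// Illustrative sketch (not part of the original file): a caller that keeps
// fixed-size records of one persistent type and recycles "freed" records can
// use AllocateFrom() like this. The type constants and MyRecord struct below
// are hypothetical placeholders.
//
//   PersistentMemoryAllocator::Reference ref =
//       AllocateFrom(allocator, kTypeIdMyRecordFree, sizeof(MyRecord),
//                    kTypeIdMyRecord);
//   if (ref) {
//     MyRecord* record = allocator->GetAsObject<MyRecord>(ref);
//     // ... fill in |record| ...
//   }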
  83. // Converts "tick" timing into wall time.
  84. Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) {
  85. return time_start + TimeDelta::FromInternalValue(ticks - ticks_start);
  86. }
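// Worked example (hedged): with ticks_start = 1000, ticks = 1500, and
// time_start = T, the result is T + TimeDelta::FromInternalValue(500), i.e.
// 500 microseconds after T, since TimeTicks and TimeDelta both use
// microseconds as their internal unit.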
  87. } // namespace
  88. union ThreadRef {
  89. int64_t as_id;
  90. #if BUILDFLAG(IS_WIN)
  91. // On Windows, the handle itself is often a pseudo-handle with a common
  92. // value meaning "this thread" and so the thread-id is used. The former
  93. // can be converted to a thread-id with a system call.
  94. PlatformThreadId as_tid;
  95. #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  96. // On Posix and Fuchsia, the handle is always a unique identifier so no
  97. // conversion needs to be done. However, its value is officially opaque so
  98. // there is no one correct way to convert it to a numerical identifier.
  99. PlatformThreadHandle::Handle as_handle;
  100. #endif
  101. };
  102. OwningProcess::OwningProcess() = default;
  103. OwningProcess::~OwningProcess() = default;
  104. void OwningProcess::Release_Initialize(ProcessId pid) {
  105. uint32_t old_id = data_id.load(std::memory_order_acquire);
  106. DCHECK_EQ(0U, old_id);
  107. process_id = static_cast<int64_t>(pid != 0 ? pid : GetProcessId());
  108. create_stamp = Time::Now().ToInternalValue();
  109. data_id.store(GetNextDataId(), std::memory_order_release);
  110. }
  111. void OwningProcess::SetOwningProcessIdForTesting(ProcessId pid, int64_t stamp) {
  112. DCHECK_NE(0U, data_id);
  113. process_id = static_cast<int64_t>(pid);
  114. create_stamp = stamp;
  115. }
  116. // static
  117. bool OwningProcess::GetOwningProcessId(const void* memory,
  118. ProcessId* out_id,
  119. int64_t* out_stamp) {
  120. const OwningProcess* info = reinterpret_cast<const OwningProcess*>(memory);
  121. uint32_t id = info->data_id.load(std::memory_order_acquire);
  122. if (id == 0)
  123. return false;
  124. *out_id = static_cast<ProcessId>(info->process_id);
  125. *out_stamp = info->create_stamp;
  126. return id == info->data_id.load(std::memory_order_seq_cst);
  127. }
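// Illustrative sketch (not in the original file): an analyzer reading shared
// memory can use this id/stamp pair to detect reuse of a block between two
// reads. The local variables here are hypothetical.
//
//   ProcessId pid;
//   int64_t stamp;
//   if (!OwningProcess::GetOwningProcessId(memory, &pid, &stamp))
//     return;  // Block not yet initialized (data_id still zero).
//   // ... copy out the interesting fields ...
//   ProcessId pid2;
//   int64_t stamp2;
//   if (!OwningProcess::GetOwningProcessId(memory, &pid2, &stamp2) ||
//       pid2 != pid || stamp2 != stamp) {
//     // The block was re-initialized mid-read; discard the copy.
//   }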
  128. // It doesn't matter what is contained in this (though it will be all zeros)
  129. // as only the address of it is important.
  130. const ActivityData kNullActivityData = {};
  131. ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
  132. ThreadRef thread_ref;
  133. thread_ref.as_id = 0; // Zero the union in case other is smaller.
  134. #if BUILDFLAG(IS_WIN)
  135. thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
  136. #elif BUILDFLAG(IS_POSIX)
  137. thread_ref.as_handle = handle.platform_handle();
  138. #endif
  139. return ForThread(thread_ref.as_id);
  140. }
  141. ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
  142. PersistentMemoryAllocator* allocator,
  143. uint32_t object_type,
  144. uint32_t object_free_type,
  145. size_t object_size,
  146. size_t cache_size,
  147. bool make_iterable)
  148. : allocator_(allocator),
  149. object_type_(object_type),
  150. object_free_type_(object_free_type),
  151. object_size_(object_size),
  152. cache_size_(cache_size),
  153. make_iterable_(make_iterable),
  154. iterator_(allocator),
  155. cache_values_(new Reference[cache_size]),
  156. cache_used_(0) {
  157. DCHECK(allocator);
  158. }
  159. ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() = default;
  160. ActivityTrackerMemoryAllocator::Reference
  161. ActivityTrackerMemoryAllocator::GetObjectReference() {
  162. // First see if there is a cached value that can be returned. This is much
  163. // faster than searching the memory system for free blocks.
  164. while (cache_used_ > 0) {
  165. Reference cached = cache_values_[--cache_used_];
  166. // Change the type of the cached object to the proper type and return it.
  167. // If the type-change fails that means another thread has taken this from
  168. // under us (via the search below) so ignore it and keep trying. Don't
  169. // clear the memory because that was done when the type was made "free".
  170. if (allocator_->ChangeType(cached, object_type_, object_free_type_, false))
  171. return cached;
  172. }
  173. // Fetch the next "free" object from persistent memory. Rather than restart
  174. // the iterator at the head each time and likely waste time going again
  175. // through objects that aren't relevant, the iterator continues from where
  176. // it last left off and is only reset when the end is reached. If the
  177. // returned reference matches |last|, then it has wrapped without finding
  178. // anything.
  179. const Reference last = iterator_.GetLast();
  180. while (true) {
  181. uint32_t type;
  182. Reference found = iterator_.GetNext(&type);
  183. if (found && type == object_free_type_) {
  184. // Found a free object. Change it to the proper type and return it. If
  185. // the type-change fails that means another thread has taken this from
  186. // under us so ignore it and keep trying.
  187. if (allocator_->ChangeType(found, object_type_, object_free_type_, false))
  188. return found;
  189. }
  190. if (found == last) {
  191. // Wrapped. No desired object was found.
  192. break;
  193. }
  194. if (!found) {
  195. // Reached end; start over at the beginning.
  196. iterator_.Reset();
  197. }
  198. }
  199. // No free block was found so instead allocate a new one.
  200. Reference allocated = allocator_->Allocate(object_size_, object_type_);
  201. if (allocated && make_iterable_)
  202. allocator_->MakeIterable(allocated);
  203. return allocated;
  204. }
  205. void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
  206. // Mark object as free.
  207. bool success = allocator_->ChangeType(ref, object_free_type_, object_type_,
  208. /*clear=*/true);
  209. DCHECK(success);
  210. // Add this reference to our "free" cache if there is space. If not, the type
  211. // has still been changed to indicate that it is free so this (or another)
  212. // thread can find it, albeit more slowly, using the iteration method above.
  213. if (cache_used_ < cache_size_)
  214. cache_values_[cache_used_++] = ref;
  215. }
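// Illustrative usage (not in the original file): a typical acquire/release
// cycle through this allocator, modeled on how the global tracker manages
// user-data records. The type and count constants are placeholders.
//
//   ActivityTrackerMemoryAllocator user_data_allocator(
//       allocator, kTypeIdUserDataRecord, kTypeIdUserDataRecordFree,
//       kUserDataSize, kCachedUserDataMemories, /*make_iterable=*/true);
//   ActivityTrackerMemoryAllocator::Reference ref =
//       user_data_allocator.GetObjectReference();
//   // ... use the memory behind |ref| ...
//   user_data_allocator.ReleaseObjectReference(ref);  // Cleared, typed "free".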
  216. // static
  217. void Activity::FillFrom(Activity* activity,
  218. const void* program_counter,
  219. const void* origin,
  220. Type type,
  221. const ActivityData& data) {
  222. activity->time_internal = base::TimeTicks::Now().ToInternalValue();
  223. activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
  224. activity->origin_address = reinterpret_cast<uintptr_t>(origin);
  225. activity->activity_type = type;
  226. activity->data = data;
  227. #if (!BUILDFLAG(IS_NACL) && DCHECK_IS_ON()) || defined(ADDRESS_SANITIZER)
  228. // Create a stacktrace from the current location and get the addresses for
  229. // improved debuggability.
  230. StackTrace stack_trace;
  231. size_t stack_depth;
  232. const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
  233. // Copy the stack addresses, ignoring the first one (here).
  234. size_t i;
  235. for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
  236. activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
  237. }
  238. activity->call_stack[i - 1] = 0;
  239. #else
  240. activity->call_stack[0] = 0;
  241. #endif
  242. }
  243. ActivityUserData::TypedValue::TypedValue() = default;
  244. ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
  245. ActivityUserData::TypedValue::~TypedValue() = default;
  246. StringPiece ActivityUserData::TypedValue::Get() const {
  247. DCHECK_EQ(RAW_VALUE, type_);
  248. return long_value_;
  249. }
  250. StringPiece ActivityUserData::TypedValue::GetString() const {
  251. DCHECK_EQ(STRING_VALUE, type_);
  252. return long_value_;
  253. }
  254. bool ActivityUserData::TypedValue::GetBool() const {
  255. DCHECK_EQ(BOOL_VALUE, type_);
  256. return short_value_ != 0;
  257. }
  258. char ActivityUserData::TypedValue::GetChar() const {
  259. DCHECK_EQ(CHAR_VALUE, type_);
  260. return static_cast<char>(short_value_);
  261. }
  262. int64_t ActivityUserData::TypedValue::GetInt() const {
  263. DCHECK_EQ(SIGNED_VALUE, type_);
  264. return static_cast<int64_t>(short_value_);
  265. }
  266. uint64_t ActivityUserData::TypedValue::GetUint() const {
  267. DCHECK_EQ(UNSIGNED_VALUE, type_);
  268. return static_cast<uint64_t>(short_value_);
  269. }
  270. StringPiece ActivityUserData::TypedValue::GetReference() const {
  271. DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
  272. return ref_value_;
  273. }
  274. StringPiece ActivityUserData::TypedValue::GetStringReference() const {
  275. DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
  276. return ref_value_;
  277. }
  278. // These are required because std::atomic is (currently) not a POD type and
  279. // thus clang requires explicit out-of-line constructors and destructors even
  280. // when they do nothing.
  281. ActivityUserData::ValueInfo::ValueInfo() = default;
  282. ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
  283. ActivityUserData::ValueInfo::~ValueInfo() = default;
  284. ActivityUserData::MemoryHeader::MemoryHeader() = default;
  285. ActivityUserData::MemoryHeader::~MemoryHeader() = default;
  286. ActivityUserData::FieldHeader::FieldHeader() = default;
  287. ActivityUserData::FieldHeader::~FieldHeader() = default;
  288. ActivityUserData::ActivityUserData()
  289. : ActivityUserData(nullptr, 0, static_cast<ProcessId>(-1)) {}
  290. ActivityUserData::ActivityUserData(void* memory, size_t size, ProcessId pid)
  291. : memory_(reinterpret_cast<char*>(memory)),
  292. available_(bits::AlignDown(size, kMemoryAlignment)),
  293. header_(reinterpret_cast<MemoryHeader*>(memory)),
  294. orig_data_id(0),
  295. orig_process_id(0),
  296. orig_create_stamp(0) {
  297. // It's possible that no user data is being stored.
  298. if (!memory_)
  299. return;
  300. static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
  301. DCHECK_LT(sizeof(MemoryHeader), available_);
  302. if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
  303. header_->owner.Release_Initialize(pid);
  304. memory_ += sizeof(MemoryHeader);
  305. available_ -= sizeof(MemoryHeader);
  306. // Make a copy of identifying information for later comparison.
  307. *const_cast<uint32_t*>(&orig_data_id) =
  308. header_->owner.data_id.load(std::memory_order_acquire);
  309. *const_cast<ProcessId*>(&orig_process_id) =
  310. static_cast<ProcessId>(header_->owner.process_id);
  311. *const_cast<int64_t*>(&orig_create_stamp) = header_->owner.create_stamp;
  312. // If there is already data present, load that. This allows the same class
  313. // to be used for analysis through snapshots.
  314. ImportExistingData();
  315. }
  316. ActivityUserData::~ActivityUserData() = default;
  317. bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
  318. DCHECK(output_snapshot);
  319. DCHECK(output_snapshot->empty());
  320. // Find any new data that may have been added by an active instance of this
  321. // class that is adding records.
  322. ImportExistingData();
  323. // Add all the values to the snapshot.
  324. for (const auto& entry : values_) {
  325. TypedValue value;
  326. const size_t size = entry.second.size_ptr->load(std::memory_order_acquire);
  327. value.type_ = entry.second.type;
  328. DCHECK_GE(entry.second.extent, size);
  329. switch (entry.second.type) {
  330. case RAW_VALUE:
  331. case STRING_VALUE:
  332. value.long_value_ = std::string(
  333. reinterpret_cast<char*>(entry.second.memory.get()), size);
  334. break;
  335. case RAW_VALUE_REFERENCE:
  336. case STRING_VALUE_REFERENCE: {
  337. ReferenceRecord* ref =
  338. reinterpret_cast<ReferenceRecord*>(entry.second.memory.get());
  339. value.ref_value_ = StringPiece(
  340. reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
  341. static_cast<size_t>(ref->size));
  342. } break;
  343. case BOOL_VALUE:
  344. case CHAR_VALUE:
  345. value.short_value_ = static_cast<uint64_t>(
  346. reinterpret_cast<std::atomic<char>*>(entry.second.memory.get())
  347. ->load(std::memory_order_relaxed));
  348. break;
  349. case SIGNED_VALUE:
  350. case UNSIGNED_VALUE:
  351. value.short_value_ =
  352. reinterpret_cast<std::atomic<uint64_t>*>(entry.second.memory.get())
  353. ->load(std::memory_order_relaxed);
  354. break;
  355. case END_OF_VALUES: // Included for completeness purposes.
  356. NOTREACHED();
  357. }
  358. auto inserted = output_snapshot->emplace(std::string(entry.second.name),
  359. std::move(value));
  360. DCHECK(inserted.second); // True if inserted, false if existed.
  361. }
  362. // Another import attempt will validate that the underlying memory has not
  363. // been reused for another purpose. Entries added since the first import
  364. // will be ignored here but will be returned if another snapshot is created.
  365. ImportExistingData();
  366. if (!memory_) {
  367. output_snapshot->clear();
  368. return false;
  369. }
  370. // Successful snapshot.
  371. return true;
  372. }
  373. const void* ActivityUserData::GetBaseAddress() const {
  374. // The |memory_| pointer advances as elements are written but the |header_|
  375. // value is always at the start of the block so just return that.
  376. return header_;
  377. }
  378. void ActivityUserData::SetOwningProcessIdForTesting(ProcessId pid,
  379. int64_t stamp) {
  380. if (!header_)
  381. return;
  382. header_->owner.SetOwningProcessIdForTesting(pid, stamp);
  383. }
  384. // static
  385. bool ActivityUserData::GetOwningProcessId(const void* memory,
  386. ProcessId* out_id,
  387. int64_t* out_stamp) {
  388. const MemoryHeader* header = reinterpret_cast<const MemoryHeader*>(memory);
  389. return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
  390. }
  391. void* ActivityUserData::Set(StringPiece name,
  392. ValueType type,
  393. const void* memory,
  394. size_t size) {
  395. DCHECK_LT(name.length(), kMaxUserDataNameLength);
  396. // It's possible that no user data is being stored.
  397. if (!memory_)
  398. return nullptr;
  399. ValueInfo* info;
  400. auto existing = values_.find(name);
  401. if (existing != values_.end()) {
  402. info = &existing->second;
  403. } else {
  404. // The name size is limited to what can be held in a single byte but
  405. // because there are no alignment constraints on strings, it's set tight
  406. // against the header. Its extent (the reserved space, even if it's not
  407. // all used) is calculated so that, when pressed against the header, the
  408. // following field will be aligned properly.
  409. size_t name_size = name.length();
  410. size_t name_extent =
  411. bits::AlignUp(sizeof(FieldHeader) + name_size, kMemoryAlignment) -
  412. sizeof(FieldHeader);
  413. size_t value_extent = bits::AlignUp(size, kMemoryAlignment);
  414. // The "base size" is the size of the header and (padded) string key. Stop
  415. // now if there's not room enough for even this.
  416. size_t base_size = sizeof(FieldHeader) + name_extent;
  417. if (base_size > available_)
  418. return nullptr;
  419. // The "full size" is the size for storing the entire value. This must fit
  420. // into a uint16_t.
  421. size_t full_size =
  422. std::min({base_size + value_extent, available_,
  423. bits::AlignDown(size_t{std::numeric_limits<uint16_t>::max()},
  424. kMemoryAlignment)});
  425. // If the value is actually a single byte, see if it can be stuffed at the
  426. // end of the name extent rather than wasting kMemoryAlignment bytes.
  427. if (size == 1 && name_extent > name_size) {
  428. // This assignment is safe because `base_size` cannot be much larger than
  429. // UINT8_MAX.
  430. full_size = base_size;
  431. --name_extent;
  432. --base_size;
  433. }
  434. // Truncate the stored size to the amount of available memory. Stop now if
  435. // there's not any room for even part of the value.
  436. if (size != 0) {
  437. size = std::min(full_size - base_size, size);
  438. if (size == 0)
  439. return nullptr;
  440. }
  441. // Allocate a chunk of memory.
  442. FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_.get());
  443. memory_ += full_size;
  444. available_ -= full_size;
  445. // Datafill the header and name records. Memory must be zeroed. The |type|
  446. // is written last, atomically, to release all the other values.
  447. DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
  448. DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
  449. header->name_size = static_cast<uint8_t>(name_size);
  450. header->record_size = static_cast<uint16_t>(full_size);
  451. char* name_memory = reinterpret_cast<char*>(header) + sizeof(FieldHeader);
  452. void* value_memory =
  453. reinterpret_cast<char*>(header) + sizeof(FieldHeader) + name_extent;
  454. memcpy(name_memory, name.data(), name_size);
  455. header->type.store(type, std::memory_order_release);
  456. // Create an entry in |values_| so that this field can be found and changed
  457. // later on without having to allocate new entries.
  458. StringPiece persistent_name(name_memory, name_size);
  459. auto inserted =
  460. values_.insert(std::make_pair(persistent_name, ValueInfo()));
  461. DCHECK(inserted.second); // True if inserted, false if existed.
  462. info = &inserted.first->second;
  463. info->name = persistent_name;
  464. info->memory = value_memory;
  465. info->size_ptr = &header->value_size;
  466. info->extent = full_size - sizeof(FieldHeader) - name_extent;
  467. info->type = type;
  468. }
  469. // Copy the value data to storage. The |size| is written last, atomically, to
  470. // release the copied data. Until then, a parallel reader will just ignore
  471. // records with a zero size.
  472. DCHECK_EQ(type, info->type);
  473. size = std::min(size, info->extent);
  474. info->size_ptr->store(0, std::memory_order_seq_cst);
  475. memcpy(info->memory, memory, size);
  476. // This cast is safe because `size` <= info->extent < `full_size`, and
  477. // `full_size` fits in a uint16_t.
  478. info->size_ptr->store(static_cast<uint16_t>(size), std::memory_order_release);
  479. // The address of the stored value is returned so it can be re-used by the
  480. // caller, so long as it's done in an atomic way.
  481. return info->memory;
  482. }
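// Illustrative sketch (not in the original file): callers normally go through
// the typed setters declared in the header (e.g. SetString/SetInt/SetBool),
// which forward to Set() with the matching ValueType. Assuming those
// declarations:
//
//   ActivityUserData user_data(memory, kUserDataSize, pid);
//   user_data.SetString("url", "https://example.com/");
//   user_data.SetInt("attempt", 3);
//   user_data.SetBool("success", false);
//
// Re-setting an existing key reuses the record allocated here, so the new
// value may be truncated to the extent reserved by the first call.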
  483. void ActivityUserData::SetReference(StringPiece name,
  484. ValueType type,
  485. const void* memory,
  486. size_t size) {
  487. ReferenceRecord rec;
  488. rec.address = reinterpret_cast<uintptr_t>(memory);
  489. rec.size = size;
  490. Set(name, type, &rec, sizeof(rec));
  491. }
  492. void ActivityUserData::ImportExistingData() const {
  493. // It's possible that no user data is being stored.
  494. if (!memory_)
  495. return;
  496. while (available_ > sizeof(FieldHeader)) {
  497. FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_.get());
  498. ValueType type =
  499. static_cast<ValueType>(header->type.load(std::memory_order_acquire));
  500. if (type == END_OF_VALUES)
  501. return;
  502. if (header->record_size > available_)
  503. return;
  504. size_t value_offset = bits::AlignUp(sizeof(FieldHeader) + header->name_size,
  505. kMemoryAlignment);
  506. if (header->record_size == value_offset &&
  507. header->value_size.load(std::memory_order_relaxed) == 1) {
  508. value_offset -= 1;
  509. }
  510. if (value_offset + header->value_size > header->record_size)
  511. return;
  512. ValueInfo info;
  513. info.name = StringPiece(memory_ + sizeof(FieldHeader), header->name_size);
  514. info.type = type;
  515. info.memory = memory_ + value_offset;
  516. info.size_ptr = &header->value_size;
  517. info.extent = header->record_size - value_offset;
  518. StringPiece key(info.name);
  519. values_.insert(std::make_pair(key, std::move(info)));
  520. memory_ += header->record_size;
  521. available_ -= header->record_size;
  522. }
  523. // Check if memory has been completely reused.
  524. if (header_->owner.data_id.load(std::memory_order_acquire) != orig_data_id ||
  525. static_cast<ProcessId>(header_->owner.process_id) != orig_process_id ||
  526. header_->owner.create_stamp != orig_create_stamp) {
  527. memory_ = nullptr;
  528. values_.clear();
  529. }
  530. }
  531. // This information is kept for every thread that is tracked. It is filled
  532. // the very first time the thread is seen. All fields must be of exact sizes
  533. // so there is no issue moving between 32 and 64-bit builds.
  534. struct ThreadActivityTracker::Header {
  535. // Defined in .h for analyzer access. Increment this if structure changes!
  536. static constexpr uint32_t kPersistentTypeId =
  537. GlobalActivityTracker::kTypeIdActivityTracker;
  538. // Expected size for 32/64-bit check.
  539. static constexpr size_t kExpectedInstanceSize =
  540. OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
  541. 72;
  542. // This information uniquely identifies a process.
  543. OwningProcess owner;
  544. // The thread-id (thread_ref.as_id) to which this data belongs. This number
  545. // is not guaranteed to mean anything but, combined with the process-id from
  546. // OwningProcess, is unique among all active trackers.
  547. ThreadRef thread_ref;
  548. // The start-time and start-ticks when the data was created. Each activity
  549. // record has a |time_internal| value that can be converted to a "wall time"
  550. // with these two values.
  551. int64_t start_time;
  552. int64_t start_ticks;
  553. // The number of Activity slots (spaces that can hold an Activity) that
  554. // immediately follow this structure in memory.
  555. uint32_t stack_slots;
  556. // Some padding to keep everything 64-bit aligned.
  557. uint32_t padding;
  558. // The current depth of the stack. This may be greater than the number of
  559. // slots. If the depth exceeds the number of slots, the newest entries
  560. // won't be recorded.
  561. std::atomic<uint32_t> current_depth;
  562. // A memory location used to indicate if changes have been made to the data
  563. // that would invalidate an in-progress read of its contents. The active
  564. // tracker will increment the value whenever something gets popped from the
  565. // stack. A monitoring tracker can check the value before and after access
  566. // to know, if it's still the same, that the contents didn't change while
  567. // being copied.
  568. std::atomic<uint32_t> data_version;
  569. // The last "exception" activity. This can't be stored on the stack because
  570. // that could get popped as things unwind.
  571. Activity last_exception;
  572. // The name of the thread (up to a maximum length). Dynamic-length names
  573. // are not practical since the memory has to come from the same persistent
  574. // allocator that holds this structure and to which this object has no
  575. // reference.
  576. char thread_name[32];
  577. };
  578. ThreadActivityTracker::Snapshot::Snapshot() = default;
  579. ThreadActivityTracker::Snapshot::~Snapshot() = default;
  580. ThreadActivityTracker::ScopedActivity::ScopedActivity(
  581. ThreadActivityTracker* tracker,
  582. const void* program_counter,
  583. const void* origin,
  584. Activity::Type type,
  585. const ActivityData& data)
  586. : tracker_(tracker) {
  587. if (tracker_)
  588. activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
  589. }
  590. ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
  591. if (tracker_)
  592. tracker_->PopActivity(activity_id_);
  593. }
  594. bool ThreadActivityTracker::ScopedActivity::IsRecorded() {
  595. return tracker_ && tracker_->IsRecorded(activity_id_);
  596. }
  597. void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
  598. Activity::Type type,
  599. const ActivityData& data) {
  600. if (tracker_)
  601. tracker_->ChangeActivity(activity_id_, type, data);
  602. }
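// Illustrative sketch (not in the original file): how a thread-local tracker
// and its ScopedActivity RAII helper fit together, assuming the ActivityData
// helpers declared in the header. Production code normally goes through
// GlobalActivityTracker::ScopedThreadActivity instead.
//
//   ThreadActivityTracker* tracker = ...;  // e.g. from the global tracker.
//   {
//     ThreadActivityTracker::ScopedActivity activity(
//         tracker, FROM_HERE.program_counter(), /*origin=*/nullptr,
//         Activity::ACT_GENERIC, ActivityData::ForGeneric(0, 0));
//     // ... work being tracked ...
//   }  // PopActivity() runs automatically on scope exit.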
  603. ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
  604. : header_(static_cast<Header*>(base)),
  605. stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
  606. sizeof(Header))),
  607. #if DCHECK_IS_ON()
  608. thread_id_(PlatformThreadRef()),
  609. #endif
  610. stack_slots_(
  611. static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
  612. // Verify the parameters but fail gracefully if they're not valid so that
  613. // production code based on external inputs will not crash. IsValid() will
  614. // return false in this case.
  615. if (!base ||
  616. // Ensure there is enough space for the header and at least a few records.
  617. size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
  618. // Ensure that the |stack_slots_| calculation didn't overflow.
  619. (size - sizeof(Header)) / sizeof(Activity) >
  620. std::numeric_limits<uint32_t>::max()) {
  621. NOTREACHED();
  622. return;
  623. }
  624. // Ensure that the thread reference doesn't exceed the size of the ID number.
  625. // This won't compile at the global scope because Header is a private struct.
  626. static_assert(
  627. sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
  628. "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
  629. // Ensure that Activity.data is aligned to a 64-bit boundary so there are
  630. // no interoperability issues across CPU architectures.
  632. static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
  633. "ActivityData.data is not 64-bit aligned");
  634. // Provided memory should either be completely initialized or all zeros.
  635. if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
  636. // This is a new file. Double-check other fields and then initialize.
  637. DCHECK_EQ(0, header_->owner.process_id);
  638. DCHECK_EQ(0, header_->owner.create_stamp);
  639. DCHECK_EQ(0, header_->thread_ref.as_id);
  640. DCHECK_EQ(0, header_->start_time);
  641. DCHECK_EQ(0, header_->start_ticks);
  642. DCHECK_EQ(0U, header_->stack_slots);
  643. DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
  644. DCHECK_EQ(0U, header_->data_version.load(std::memory_order_relaxed));
  645. DCHECK_EQ(0, stack_[0].time_internal);
  646. DCHECK_EQ(0U, stack_[0].origin_address);
  647. DCHECK_EQ(0U, stack_[0].call_stack[0]);
  648. DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
  649. #if BUILDFLAG(IS_WIN)
  650. header_->thread_ref.as_tid = PlatformThread::CurrentId();
  651. #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  652. header_->thread_ref.as_handle =
  653. PlatformThread::CurrentHandle().platform_handle();
  654. #endif
  655. header_->start_time = base::Time::Now().ToInternalValue();
  656. header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
  657. header_->stack_slots = stack_slots_;
  658. strlcpy(header_->thread_name, PlatformThread::GetName(),
  659. sizeof(header_->thread_name));
  660. // This is done last so as to guarantee that everything above is "released"
  661. // by the time this value gets written.
  662. header_->owner.Release_Initialize();
  663. valid_ = true;
  664. DCHECK(IsValid());
  665. } else {
  666. // This is a file with existing data. Perform basic consistency checks.
  667. valid_ = true;
  668. valid_ = IsValid();
  669. }
  670. }
  671. ThreadActivityTracker::~ThreadActivityTracker() = default;
  672. ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
  673. const void* program_counter,
  674. const void* origin,
  675. Activity::Type type,
  676. const ActivityData& data) {
  677. // A thread-checker creates a lock to check the thread-id which means
  678. // re-entry into this code if lock acquisitions are being tracked.
  679. DCHECK(type == Activity::ACT_LOCK_ACQUIRE || CalledOnValidThread());
  680. // Get the current depth of the stack. No access to other memory guarded
  681. // by this variable is done here so a "relaxed" load is acceptable.
  682. uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
  683. // Handle the case where the stack depth has exceeded the storage capacity.
  684. // Extra entries will be lost leaving only the base of the stack.
  685. if (depth >= stack_slots_) {
  686. // Since no other threads modify the data, no compare/exchange is needed.
  687. // Since no other memory is being modified, a "relaxed" store is acceptable.
  688. header_->current_depth.store(depth + 1, std::memory_order_relaxed);
  689. return depth;
  690. }
  691. // Get a pointer to the next activity and load it. No atomicity is required
  692. // here because the memory is known only to this thread. It will be made
  693. // known to other threads once the depth is incremented.
  694. Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
  695. // Save the incremented depth. Because this guards |activity| memory filled
  696. // above that may be read by another thread once the recorded depth changes,
  697. // a "release" store is required.
  698. header_->current_depth.store(depth + 1, std::memory_order_release);
  699. // The current depth is used as the activity ID because it simply identifies
  700. // an entry. Once an entry is popped, it's okay to reuse the ID.
  701. return depth;
  702. }
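// For example (hedged): with stack_slots_ == 4 and four activities already
// pushed, a fifth PushActivity() only bumps current_depth to 5 and records
// nothing, so a later snapshot reports activity_stack_depth == 5 while
// holding only the four stored entries.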
  703. void ThreadActivityTracker::ChangeActivity(ActivityId id,
  704. Activity::Type type,
  705. const ActivityData& data) {
  706. DCHECK(CalledOnValidThread());
  707. DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
  708. DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
  709. // Update the information if it is being recorded (i.e. within slot limit).
  710. if (id < stack_slots_) {
  711. Activity* activity = &stack_[id];
  712. if (type != Activity::ACT_NULL) {
  713. DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
  714. type & Activity::ACT_CATEGORY_MASK);
  715. activity->activity_type = type;
  716. }
  717. if (&data != &kNullActivityData)
  718. activity->data = data;
  719. }
  720. }
  721. void ThreadActivityTracker::PopActivity(ActivityId id) {
  722. // Do an atomic decrement of the depth. No changes to stack entries guarded
  723. // by this variable are done here so a "relaxed" operation is acceptable.
  724. // |depth| will receive the value BEFORE it was modified which means the
  725. // return value must also be decremented. The slot will be "free" after
  726. // this call but since only a single thread can access this object, the
  727. // data will remain valid until this method returns or calls outside.
  728. uint32_t depth =
  729. header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
  730. // Validate that everything is running correctly.
  731. DCHECK_EQ(id, depth);
  732. // A thread-checker creates a lock to check the thread-id which means
  733. // re-entry into this code if lock acquisitions are being tracked.
  734. DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
  735. CalledOnValidThread());
  736. // The stack has shrunk meaning that some other thread trying to copy the
  737. // contents for reporting purposes could get bad data. Increment the data
  738. // version so that it can tell that things have changed. This needs to
  739. // happen after the atomic |depth| operation above so a "release" store
  740. // is required.
  741. header_->data_version.fetch_add(1, std::memory_order_release);
  742. }
  743. bool ThreadActivityTracker::IsRecorded(ActivityId id) {
  744. return id < stack_slots_;
  745. }
  746. std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
  747. ActivityId id,
  748. ActivityTrackerMemoryAllocator* allocator) {
  749. // Don't allow user data for lock acquisition as recursion may occur.
  750. if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
  751. NOTREACHED();
  752. return std::make_unique<ActivityUserData>();
  753. }
  754. // User-data is only stored for activities actually held in the stack.
  755. if (id >= stack_slots_)
  756. return std::make_unique<ActivityUserData>();
  757. // Create and return a real UserData object.
  758. return CreateUserDataForActivity(&stack_[id], allocator);
  759. }
  760. bool ThreadActivityTracker::HasUserData(ActivityId id) {
  761. // User-data is only stored for activities actually held in the stack.
  762. return (id < stack_slots_ && stack_[id].user_data_ref);
  763. }
  764. void ThreadActivityTracker::ReleaseUserData(
  765. ActivityId id,
  766. ActivityTrackerMemoryAllocator* allocator) {
  767. // User-data is only stored for activities actually held in the stack.
  768. if (id < stack_slots_ && stack_[id].user_data_ref) {
  769. allocator->ReleaseObjectReference(stack_[id].user_data_ref);
  770. stack_[id].user_data_ref = 0;
  771. }
  772. }
  773. void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter,
  774. const void* origin,
  775. Activity::Type type,
  776. const ActivityData& data) {
  777. // A thread-checker creates a lock to check the thread-id which means
  778. // re-entry into this code if lock acquisitions are being tracked.
  779. DCHECK(CalledOnValidThread());
  780. // Fill the reusable exception activity.
  781. Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
  782. data);
  783. // The data has changed meaning that some other thread trying to copy the
  784. // contents for reporting purposes could get bad data.
  785. header_->data_version.fetch_add(1, std::memory_order_relaxed);
  786. }
  787. bool ThreadActivityTracker::IsValid() const {
  788. if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
  789. header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
  790. header_->start_time == 0 || header_->start_ticks == 0 ||
  791. header_->stack_slots != stack_slots_ ||
  792. header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
  793. return false;
  794. }
  795. return valid_;
  796. }
  797. bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
  798. DCHECK(output_snapshot);
  799. // There is no "called on valid thread" check for this method as it can be
  800. // called from other threads or even other processes. It is also the reason
  801. // why atomic operations must be used in certain places above.
  802. // It's possible for the data to change while reading it in such a way that it
  803. // invalidates the read. Make several attempts but don't try forever.
  804. const int kMaxAttempts = 10;
  805. uint32_t depth;
  806. // Stop here if the data isn't valid.
  807. if (!IsValid())
  808. return false;
  809. // Allocate the maximum size for the stack so it doesn't have to be done
  810. // during the time-sensitive snapshot operation. It is shrunk once the
  811. // actual size is known.
  812. output_snapshot->activity_stack.reserve(stack_slots_);
  813. for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
  814. // Remember the data IDs to ensure nothing is replaced during the snapshot
  815. // operation. Use "acquire" so that all the non-atomic fields of the
  816. // structure are valid (at least at the current moment in time).
  817. const uint32_t starting_id =
  818. header_->owner.data_id.load(std::memory_order_acquire);
  819. const int64_t starting_create_stamp = header_->owner.create_stamp;
  820. const auto starting_process_id =
  821. static_cast<ProcessId>(header_->owner.process_id);
  822. const int64_t starting_thread_id = header_->thread_ref.as_id;
  823. // Note the current |data_version| so it's possible to detect at the end
  824. // that nothing has changed since copying the data began. A "cst" operation
  825. // is required to ensure it occurs before everything else. Using "cst"
  826. // memory ordering is relatively expensive but this is only done during
  827. // analysis so doesn't directly affect the worker threads.
  828. const uint32_t pre_version =
  829. header_->data_version.load(std::memory_order_seq_cst);
  830. // Fetching the current depth also "acquires" the contents of the stack.
  831. depth = header_->current_depth.load(std::memory_order_acquire);
  832. uint32_t count = std::min(depth, stack_slots_);
  833. output_snapshot->activity_stack.resize(count);
  834. if (count > 0) {
  835. // Copy the existing contents. Memcpy is used for speed.
  836. memcpy(&output_snapshot->activity_stack[0], stack_,
  837. count * sizeof(Activity));
  838. }
  839. // Capture the last exception.
  840. memcpy(&output_snapshot->last_exception, &header_->last_exception,
  841. sizeof(Activity));
  842. // Snapshot other things here.
  843. // Retry if something changed during the copy. A "cst" operation ensures
  844. // it must happen after all the above operations.
  845. if (header_->data_version.load(std::memory_order_seq_cst) != pre_version)
  846. continue;
  847. // Stack copied. Record its full depth.
  848. output_snapshot->activity_stack_depth = depth;
  849. // Get the general thread information.
  850. output_snapshot->thread_name =
  851. std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
  852. output_snapshot->create_stamp = header_->owner.create_stamp;
  853. output_snapshot->thread_id = header_->thread_ref.as_id;
  854. output_snapshot->process_id =
  855. static_cast<ProcessId>(header_->owner.process_id);
  856. // All characters of the thread-name buffer were copied so as to not break
  857. // if the trailing NUL were missing. Now limit the length if the actual
  858. // name is shorter.
  859. output_snapshot->thread_name.resize(
  860. strlen(output_snapshot->thread_name.c_str()));
  861. // If the data ID has changed then the tracker has exited and the memory
  862. // reused by a new one. Try again.
  863. if (header_->owner.data_id.load(std::memory_order_seq_cst) != starting_id ||
  864. output_snapshot->create_stamp != starting_create_stamp ||
  865. output_snapshot->process_id != starting_process_id ||
  866. output_snapshot->thread_id != starting_thread_id) {
  867. continue;
  868. }
  869. // Only successful if the data is still valid once everything is done since
  870. // it's possible for the thread to end somewhere in the middle and all its
  871. // values become garbage.
  872. if (!IsValid())
  873. return false;
  874. // Change all the timestamps in the activities from "ticks" to "wall" time.
  875. const Time start_time = Time::FromInternalValue(header_->start_time);
  876. const int64_t start_ticks = header_->start_ticks;
  877. for (Activity& activity : output_snapshot->activity_stack) {
  878. activity.time_internal =
  879. WallTimeFromTickTime(start_ticks, activity.time_internal, start_time)
  880. .ToInternalValue();
  881. }
  882. output_snapshot->last_exception.time_internal =
  883. WallTimeFromTickTime(start_ticks,
  884. output_snapshot->last_exception.time_internal,
  885. start_time)
  886. .ToInternalValue();
  887. // Success!
  888. return true;
  889. }
  890. // Too many attempts.
  891. return false;
  892. }
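// Illustrative sketch (not in the original file): an out-of-process analyzer
// that has mapped the same persistent memory could snapshot a tracker like
// this; |mapped_base| and |mapped_size| are placeholders.
//
//   ThreadActivityTracker tracker(mapped_base, mapped_size);
//   ThreadActivityTracker::Snapshot snapshot;
//   if (tracker.CreateSnapshot(&snapshot)) {
//     // snapshot.activity_stack holds at most stack_slots_ entries;
//     // snapshot.activity_stack_depth may be larger if the stack overflowed.
//   }
//   // A false return means the data was invalid or kept changing for all
//   // kMaxAttempts retries.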
  893. const void* ThreadActivityTracker::GetBaseAddress() {
  894. return header_;
  895. }
  896. uint32_t ThreadActivityTracker::GetDataVersionForTesting() {
  897. return header_->data_version.load(std::memory_order_relaxed);
  898. }
  899. void ThreadActivityTracker::SetOwningProcessIdForTesting(ProcessId pid,
  900. int64_t stamp) {
  901. header_->owner.SetOwningProcessIdForTesting(pid, stamp);
  902. }
  903. // static
  904. bool ThreadActivityTracker::GetOwningProcessId(const void* memory,
  905. ProcessId* out_id,
  906. int64_t* out_stamp) {
  907. const Header* header = reinterpret_cast<const Header*>(memory);
  908. return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
  909. }
  910. // static
  911. size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
  912. return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
  913. }
  914. bool ThreadActivityTracker::CalledOnValidThread() {
  915. #if DCHECK_IS_ON()
  916. return thread_id_ == PlatformThreadRef();
  917. #else
  918. return true;
  919. #endif
  920. }
  921. std::unique_ptr<ActivityUserData>
  922. ThreadActivityTracker::CreateUserDataForActivity(
  923. Activity* activity,
  924. ActivityTrackerMemoryAllocator* allocator) {
  925. DCHECK_EQ(0U, activity->user_data_ref);
  926. PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
  927. void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
  928. if (memory) {
  929. std::unique_ptr<ActivityUserData> user_data =
  930. std::make_unique<ActivityUserData>(memory, kUserDataSize);
  931. activity->user_data_ref = ref;
  932. activity->user_data_id = user_data->id();
  933. return user_data;
  934. }
  935. // Return a dummy object that will still accept (but ignore) Set() calls.
  936. return std::make_unique<ActivityUserData>();
  937. }
  938. // The instantiation of the GlobalActivityTracker object.
  939. // The object held here will obviously not be destructed at process exit
  940. // but that's best since PersistentMemoryAllocator objects (that underlie
  941. // GlobalActivityTracker objects) are explicitly forbidden from doing anything
  942. // essential at exit anyway because they depend on data managed elsewhere
  943. // that could be destructed first.
  944. std::atomic<GlobalActivityTracker*> GlobalActivityTracker::g_tracker_{nullptr};
  945. GlobalActivityTracker::ModuleInfo::ModuleInfo() = default;
  946. GlobalActivityTracker::ModuleInfo::ModuleInfo(ModuleInfo&& rhs) = default;
  947. GlobalActivityTracker::ModuleInfo::ModuleInfo(const ModuleInfo& rhs) = default;
  948. GlobalActivityTracker::ModuleInfo::~ModuleInfo() = default;
  949. GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
  950. ModuleInfo&& rhs) = default;
  951. GlobalActivityTracker::ModuleInfo& GlobalActivityTracker::ModuleInfo::operator=(
  952. const ModuleInfo& rhs) = default;
  953. GlobalActivityTracker::ModuleInfoRecord::ModuleInfoRecord() = default;
  954. GlobalActivityTracker::ModuleInfoRecord::~ModuleInfoRecord() = default;
  955. bool GlobalActivityTracker::ModuleInfoRecord::DecodeTo(
  956. GlobalActivityTracker::ModuleInfo* info,
  957. size_t record_size) const {
  958. // Get the current "changes" indicator, acquiring all the other values.
  959. uint32_t current_changes = changes.load(std::memory_order_acquire);
  960. // Copy out the dynamic information.
  961. info->is_loaded = loaded != 0;
  962. info->address = static_cast<uintptr_t>(address);
  963. info->load_time = load_time;
  964. // Check to make sure no information changed while being read. A "seq-cst"
  965. // operation is expensive but is only done during analysis and it's the only
  966. // way to ensure this occurs after all the accesses above. If changes did
  967. // occur then return a "not loaded" result so that |size| and |address|
  968. // aren't expected to be accurate.
  969. if ((current_changes & kModuleInformationChanging) != 0 ||
  970. changes.load(std::memory_order_seq_cst) != current_changes) {
  971. info->is_loaded = false;
  972. }
  973. // Copy out the static information. These never change so don't have to be
  974. // protected by the atomic |current_changes| operations.
  975. info->size = static_cast<size_t>(size);
  976. info->timestamp = timestamp;
  977. info->age = age;
  978. memcpy(info->identifier, identifier, sizeof(info->identifier));
  979. if (offsetof(ModuleInfoRecord, pickle) + pickle_size > record_size)
  980. return false;
  981. Pickle pickler(pickle, pickle_size);
  982. PickleIterator iter(pickler);
  983. return iter.ReadString(&info->file) && iter.ReadString(&info->debug_file);
  984. }
  985. GlobalActivityTracker::ModuleInfoRecord*
  986. GlobalActivityTracker::ModuleInfoRecord::CreateFrom(
  987. const GlobalActivityTracker::ModuleInfo& info,
  988. PersistentMemoryAllocator* allocator) {
  989. Pickle pickler;
  990. pickler.WriteString(info.file);
  991. pickler.WriteString(info.debug_file);
  992. size_t required_size = offsetof(ModuleInfoRecord, pickle) + pickler.size();
  993. ModuleInfoRecord* record = allocator->New<ModuleInfoRecord>(required_size);
  994. if (!record)
  995. return nullptr;
  996. // These fields never change and are set before the record is made
  997. // iterable so no thread protection is necessary.
  998. record->size = info.size;
  999. record->timestamp = info.timestamp;
  1000. record->age = info.age;
  1001. memcpy(record->identifier, info.identifier, sizeof(identifier));
  1002. memcpy(record->pickle, pickler.data(), pickler.size());
  1003. record->pickle_size = checked_cast<uint16_t>(pickler.size());
  1004. record->changes.store(0, std::memory_order_relaxed);
  1005. // Initialize the owner info.
  1006. record->owner.Release_Initialize();
  1007. // Now set those fields that can change.
  1008. bool success = record->UpdateFrom(info);
  1009. DCHECK(success);
  1010. return record;
  1011. }
  1012. bool GlobalActivityTracker::ModuleInfoRecord::UpdateFrom(
  1013. const GlobalActivityTracker::ModuleInfo& info) {
  1014. // Updates can occur after the record is made visible so make changes atomic.
  1015. // A "strong" exchange ensures no false failures.
  1016. uint32_t old_changes = changes.load(std::memory_order_relaxed);
  1017. uint32_t new_changes = old_changes | kModuleInformationChanging;
  1018. if ((old_changes & kModuleInformationChanging) != 0 ||
  1019. !changes.compare_exchange_strong(old_changes, new_changes,
  1020. std::memory_order_acquire,
  1021. std::memory_order_acquire)) {
  1022. NOTREACHED() << "Multiple sources are updating module information.";
  1023. return false;
  1024. }
  1025. loaded = info.is_loaded ? 1 : 0;
  1026. address = info.address;
  1027. load_time = Time::Now().ToInternalValue();
  1028. bool success = changes.compare_exchange_strong(new_changes, old_changes + 1,
  1029. std::memory_order_release,
  1030. std::memory_order_relaxed);
  1031. DCHECK(success);
  1032. return true;
  1033. }
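// The |changes| word therefore acts as a small seqlock: UpdateFrom() sets the
// kModuleInformationChanging bit, mutates the dynamic fields, then stores an
// incremented counter with the bit cleared, while DecodeTo() treats a set bit
// or a changed counter as a torn read and reports the module as not loaded.
// Hedged restatement of the reader side implemented above:
//
//   uint32_t before = changes.load(std::memory_order_acquire);
//   // ... copy dynamic fields ...
//   bool torn = (before & kModuleInformationChanging) != 0 ||
//               changes.load(std::memory_order_seq_cst) != before;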
  1034. GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
  1035. const void* program_counter,
  1036. const void* origin,
  1037. Activity::Type type,
  1038. const ActivityData& data,
  1039. bool lock_allowed)
  1040. : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
  1041. program_counter,
  1042. origin,
  1043. type,
  1044. data) {}
  1045. GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
  1046. if (tracker_ && tracker_->HasUserData(activity_id_)) {
  1047. GlobalActivityTracker* global = GlobalActivityTracker::Get();
  1048. AutoLock lock(global->user_data_allocator_lock_);
  1049. tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
  1050. }
  1051. }
ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
  if (!user_data_) {
    if (tracker_) {
      GlobalActivityTracker* global = GlobalActivityTracker::Get();
      AutoLock lock(global->user_data_allocator_lock_);
      user_data_ =
          tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
    } else {
      user_data_ = std::make_unique<ActivityUserData>();
    }
  }
  return *user_data_;
}

GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
                                                              size_t size,
                                                              ProcessId pid)
    : ActivityUserData(memory, size, pid) {}

GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() = default;

void* GlobalActivityTracker::ThreadSafeUserData::Set(StringPiece name,
                                                     ValueType type,
                                                     const void* memory,
                                                     size_t size) {
  AutoLock lock(data_lock_);
  return ActivityUserData::Set(name, type, memory, size);
}

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be changed
  // (something that only occurs in tests).
  DCHECK(g_tracker_.load(std::memory_order_relaxed));
  GlobalActivityTracker::Get()->ReturnTrackerMemory(this);
}

// static
void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth,
    ProcessId process_id) {
  // There's no need to do anything with the result. It is self-managing.
  GlobalActivityTracker* global_tracker =
      new GlobalActivityTracker(std::move(allocator), stack_depth, process_id);

  // Create a tracker for this thread since it is known.
  global_tracker->CreateTrackerForCurrentThread();
}

#if !BUILDFLAG(IS_NACL)
// static
bool GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           int stack_depth) {
  DCHECK(!file_path.empty());
  DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);

  // Create and map the file into memory and make it globally available.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  bool success = mapped_file->Initialize(
      File(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
                          File::FLAG_WRITE | File::FLAG_WIN_SHARE_DELETE),
      {0, size}, MemoryMappedFile::READ_WRITE_EXTEND);
  if (!success)
    return false;
  if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mapped_file, false))
    return false;
  CreateWithAllocator(std::make_unique<FilePersistentMemoryAllocator>(
                          std::move(mapped_file), size, id, name, false),
                      stack_depth, 0);
  return true;
}
#endif  // !BUILDFLAG(IS_NACL)

// static
bool GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth,
                                                  ProcessId process_id) {
  CreateWithAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name),
      stack_depth, process_id);
  return true;
}

// static
bool GlobalActivityTracker::CreateWithSharedMemory(
    base::WritableSharedMemoryMapping mapping,
    uint64_t id,
    StringPiece name,
    int stack_depth) {
  if (!mapping.IsValid() ||
      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
          mapping)) {
    return false;
  }
  CreateWithAllocator(std::make_unique<WritableSharedPersistentMemoryAllocator>(
                          std::move(mapping), id, name),
                      stack_depth, 0);
  return true;
}

// static
void GlobalActivityTracker::SetForTesting(
    std::unique_ptr<GlobalActivityTracker> tracker) {
  CHECK(!g_tracker_.load(std::memory_order_relaxed));
  g_tracker_.store(tracker.release(), std::memory_order_release);
}

// static
std::unique_ptr<GlobalActivityTracker>
GlobalActivityTracker::ReleaseForTesting() {
  GlobalActivityTracker* tracker = Get();
  if (!tracker)
    return nullptr;

  // Thread trackers assume that the global tracker is present for some
  // operations so ensure that there aren't any.
  tracker->ReleaseTrackerForCurrentThreadForTesting();
  DCHECK_EQ(0, tracker->thread_tracker_count_.load(std::memory_order_relaxed));

  g_tracker_.store(nullptr, std::memory_order_release);
  return WrapUnique(tracker);
}
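
// Editorial note: CreateTrackerForCurrentThread() draws its backing memory
// from |thread_tracker_allocator_|, which hands out blocks of
// |stack_memory_size_| bytes from the persistent segment and reuses blocks
// released by exited threads (see ReturnTrackerMemory()). The resulting
// tracker is owned by thread-local storage.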
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  // It is not safe to use TLS once TLS has been destroyed.
  if (base::ThreadLocalStorage::HasBeenDestroyed())
    return nullptr;

  DCHECK(!this_thread_tracker_.Get());

  PersistentMemoryAllocator::Reference mem_reference;
  {
    base::AutoLock autolock(thread_tracker_allocator_lock_);
    mem_reference = thread_tracker_allocator_.GetObjectReference();
  }

  if (!mem_reference) {
    // Failure. This shouldn't happen. But be graceful if it does, probably
    // because the underlying allocator wasn't given enough memory to satisfy
    // all possible requests.
    NOTREACHED();

    // Return null, just as if tracking wasn't enabled.
    return nullptr;
  }

  // Convert the memory block found above into an actual memory address.
  // Doing the conversion as a Header object enacts the 32/64-bit size
  // consistency checks which would not otherwise be done.
  DCHECK(mem_reference);
  void* mem_base;
  mem_base =
      allocator_->GetAsObject<ThreadActivityTracker::Header>(mem_reference);
  DCHECK(mem_base);
  DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  auto tracker = std::make_unique<ManagedActivityTracker>(
      mem_reference, mem_base, stack_memory_size_);
  DCHECK(tracker->IsValid());
  auto* tracker_raw = tracker.get();
  this_thread_tracker_.Set(std::move(tracker));
  thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
  return tracker_raw;
}

void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
  if (this_thread_tracker_.Get())
    this_thread_tracker_.Set(nullptr);
}

void GlobalActivityTracker::SetBackgroundTaskRunner(
    const scoped_refptr<SequencedTaskRunner>& runner) {
  AutoLock lock(global_tracker_lock_);
  // |runner| is a const reference so a move would silently degrade to a copy;
  // assign directly instead.
  background_task_runner_ = runner;
}

void GlobalActivityTracker::SetProcessExitCallback(
    ProcessExitCallback callback) {
  AutoLock lock(global_tracker_lock_);
  process_exit_callback_ = callback;
}

void GlobalActivityTracker::RecordProcessLaunch(
    ProcessId process_id,
    const FilePath::StringType& cmd) {
  DCHECK_NE(GetProcessId(), process_id);
  DCHECK_NE(ProcessId{0}, process_id);

  base::AutoLock lock(global_tracker_lock_);
  if (base::Contains(known_processes_, process_id)) {
    NOTREACHED() << "Process #" << process_id
                 << " was previously recorded as \"launched\""
                 << " with no corresponding exit.\n"
                 << known_processes_[process_id];
    known_processes_.erase(process_id);
  }

#if BUILDFLAG(IS_WIN)
  known_processes_.insert(std::make_pair(process_id, WideToUTF8(cmd)));
#else
  known_processes_.insert(std::make_pair(process_id, cmd));
#endif
}

void GlobalActivityTracker::RecordProcessLaunch(
    ProcessId process_id,
    const FilePath::StringType& exe,
    const FilePath::StringType& args) {
  // Quote the executable path if it contains a space so the recorded command
  // line remains unambiguous. (A bare find() result is not a boolean; it must
  // be compared against npos.)
  if (exe.find(FILE_PATH_LITERAL(" ")) != FilePath::StringType::npos) {
    RecordProcessLaunch(process_id,
                        FilePath::StringType(FILE_PATH_LITERAL("\"")) + exe +
                            FILE_PATH_LITERAL("\" ") + args);
  } else {
    RecordProcessLaunch(process_id, exe + FILE_PATH_LITERAL(' ') + args);
  }
}

void GlobalActivityTracker::RecordProcessExit(ProcessId process_id,
                                              int exit_code) {
  DCHECK_NE(GetProcessId(), process_id);
  DCHECK_NE(ProcessId{0}, process_id);

  scoped_refptr<SequencedTaskRunner> task_runner;
  std::string command_line;
  {
    base::AutoLock lock(global_tracker_lock_);
    task_runner = background_task_runner_;
    auto found = known_processes_.find(process_id);
    if (found != known_processes_.end()) {
      command_line = std::move(found->second);
      known_processes_.erase(found);
    } else {
      DLOG(ERROR) << "Recording exit of unknown process #" << process_id;
    }
  }

  // Use the current time to differentiate the process that just exited
  // from any that might be created in the future with the same ID.
  int64_t now_stamp = Time::Now().ToInternalValue();

  // The persistent allocator is thread-safe so run the iteration and
  // adjustments on a worker thread if one was provided.
  if (task_runner && !task_runner->RunsTasksInCurrentSequence()) {
    task_runner->PostTask(
        FROM_HERE,
        BindOnce(&GlobalActivityTracker::CleanupAfterProcess, Unretained(this),
                 process_id, now_stamp, exit_code, std::move(command_line)));
    return;
  }

  CleanupAfterProcess(process_id, now_stamp, exit_code,
                      std::move(command_line));
}

void GlobalActivityTracker::SetProcessPhase(ProcessPhase phase) {
  process_data().SetInt(kProcessPhaseDataKey, phase);
}
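
// Editorial note: cleanup below makes up to two passes over the persistent
// memory segment. When an exit callback is registered, a first pass locates
// the exited process's data record so its final phase can be reported; a
// second pass then marks every allocation owned by that process as free so
// the space can be reused.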
void GlobalActivityTracker::CleanupAfterProcess(ProcessId process_id,
                                                int64_t exit_stamp,
                                                int exit_code,
                                                std::string&& command_line) {
  // The process may not have exited cleanly so it's necessary to go through
  // all the data structures it may have allocated in the persistent memory
  // segment and mark them as "released". This will allow them to be reused
  // later on.
  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  PersistentMemoryAllocator::Reference ref;

  ProcessExitCallback process_exit_callback;
  {
    AutoLock lock(global_tracker_lock_);
    process_exit_callback = process_exit_callback_;
  }
  if (process_exit_callback) {
    // Find the process's user-data record so the process phase can be passed
    // to the callback.
    ActivityUserData::Snapshot process_data_snapshot;
    while ((ref = iter.GetNextOfType(kTypeIdProcessDataRecord)) != 0) {
      const void* memory = allocator_->GetAsArray<char>(
          ref, kTypeIdProcessDataRecord, PersistentMemoryAllocator::kSizeAny);
      if (!memory)
        continue;
      ProcessId found_id;
      int64_t create_stamp;
      if (ActivityUserData::GetOwningProcessId(memory, &found_id,
                                               &create_stamp)) {
        if (found_id == process_id && create_stamp < exit_stamp) {
          const ActivityUserData process_data(const_cast<void*>(memory),
                                              allocator_->GetAllocSize(ref));
          process_data.CreateSnapshot(&process_data_snapshot);
          break;  // No need to look for any others.
        }
      }
    }
    iter.Reset();  // So it starts anew when used below.

    // Record the process's phase at exit so the callback doesn't need to go
    // searching based on a private key value.
    ProcessPhase exit_phase = PROCESS_PHASE_UNKNOWN;
    auto phase = process_data_snapshot.find(kProcessPhaseDataKey);
    if (phase != process_data_snapshot.end())
      exit_phase = static_cast<ProcessPhase>(phase->second.GetInt());

    // Perform the callback.
    process_exit_callback.Run(process_id, exit_stamp, exit_code, exit_phase,
                              std::move(command_line),
                              std::move(process_data_snapshot));
  }

  // Find all allocations associated with the exited process and free them.
  uint32_t type;
  while ((ref = iter.GetNext(&type)) != 0) {
    switch (type) {
      case kTypeIdActivityTracker:
      case kTypeIdUserDataRecord:
      case kTypeIdProcessDataRecord:
      case ModuleInfoRecord::kPersistentTypeId: {
        const void* memory = allocator_->GetAsArray<char>(
            ref, type, PersistentMemoryAllocator::kSizeAny);
        if (!memory)
          continue;
        ProcessId found_id;
        int64_t create_stamp;

        // By convention, the OwningProcess structure is always the first
        // field of the structure so there's no need to handle all the
        // cases separately.
        if (OwningProcess::GetOwningProcessId(memory, &found_id,
                                              &create_stamp)) {
          // Only change the type to be "free" if the process ID matches and
          // the creation time is before the exit time (so PID re-use doesn't
          // cause the erasure of something that is in-use). Memory is cleared
          // here, rather than when it's needed, so as to limit the impact at
          // that critical time.
          if (found_id == process_id && create_stamp < exit_stamp)
            allocator_->ChangeType(ref, ~type, type, /*clear=*/true);
        }
      } break;
    }
  }
}

void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
  // Allocate at least one extra byte so the string is NUL terminated. All
  // memory returned by the allocator is guaranteed to be zeroed.
  PersistentMemoryAllocator::Reference ref =
      allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
  char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
                                              message.size() + 1);
  if (memory) {
    memcpy(memory, message.data(), message.size());
    allocator_->MakeIterable(ref);
  }
}

void GlobalActivityTracker::RecordModuleInfo(const ModuleInfo& info) {
  AutoLock lock(modules_lock_);
  auto found = modules_.find(info.file);
  if (found != modules_.end()) {
    ModuleInfoRecord* record = found->second;
    DCHECK(record);

    // Update the basic state of module information that has already been
    // recorded. It is assumed that the string information (identifier,
    // version, etc.) remains unchanged, which means that there's no need
    // to create a new record to accommodate a possibly longer length.
    record->UpdateFrom(info);
    return;
  }

  ModuleInfoRecord* record =
      ModuleInfoRecord::CreateFrom(info, allocator_.get());
  if (!record)
    return;
  allocator_->MakeIterable(record);
  modules_.emplace(info.file, record);
}

void GlobalActivityTracker::RecordException(const void* pc,
                                            const void* origin,
                                            uint32_t code) {
  RecordExceptionImpl(pc, origin, code);
}

void GlobalActivityTracker::MarkDeleted() {
  allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
}
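
// Editorial note: the constructor below carves all bookkeeping structures out
// of the supplied persistent-memory allocator: pooled blocks for per-thread
// trackers and per-activity user data, plus a process-wide data record that
// is made iterable so external analyzers can find it.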
GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth,
    ProcessId process_id)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      process_id_(process_id == 0 ? GetCurrentProcId() : process_id),
      thread_tracker_count_(0),
      thread_tracker_allocator_(allocator_.get(),
                                kTypeIdActivityTracker,
                                kTypeIdActivityTrackerFree,
                                stack_memory_size_,
                                kCachedThreadMemories,
                                /*make_iterable=*/true),
      user_data_allocator_(allocator_.get(),
                           kTypeIdUserDataRecord,
                           kTypeIdUserDataRecordFree,
                           kUserDataSize,
                           kCachedUserDataMemories,
                           /*make_iterable=*/true),
      process_data_(allocator_->GetAsArray<char>(
                        AllocateFrom(allocator_.get(),
                                     kTypeIdProcessDataRecordFree,
                                     kProcessDataSize,
                                     kTypeIdProcessDataRecord),
                        kTypeIdProcessDataRecord,
                        kProcessDataSize),
                    kProcessDataSize,
                    process_id_) {
  DCHECK_NE(ProcessId{0}, process_id_);

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_.load(std::memory_order_relaxed));
  g_tracker_.store(this, std::memory_order_release);

  // The data records must be iterable in order to be found by an analyzer.
  allocator_->MakeIterable(allocator_->GetAsReference(
      process_data_.GetBaseAddress(), kTypeIdProcessDataRecord));

  // Note that this process has launched.
  SetProcessPhase(PROCESS_LAUNCHED);
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK(Get() == nullptr || Get() == this);
  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
  g_tracker_.store(nullptr, std::memory_order_release);
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker) {
  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
  void* mem_base = tracker->mem_base_;
  DCHECK(mem_reference);
  DCHECK(mem_base);

  // Remove the destructed tracker from the set of known ones.
  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);

  // Release this memory for re-use at a later time.
  base::AutoLock autolock(thread_tracker_allocator_lock_);
  thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}

void GlobalActivityTracker::RecordExceptionImpl(const void* pc,
                                                const void* origin,
                                                uint32_t code) {
  // Get an existing tracker for this thread. It's not possible to create
  // one at this point because such would involve memory allocations and
  // other potentially complex operations that can cause failures if done
  // within an exception handler. In most cases various operations will
  // have already created the tracker so this shouldn't generally be a
  // problem.
  ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
  if (!tracker)
    return;

  tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
                                   ActivityData::ForException(code));
}

ScopedActivity::ScopedActivity(const void* program_counter,
                               uint8_t action,
                               uint32_t id,
                               int32_t info)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
          ActivityData::ForGeneric(id, info),
          /*lock_allowed=*/true),
      id_(id) {
  // The action must not affect the category bits of the activity type.
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
}

void ScopedActivity::ChangeAction(uint8_t action) {
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
                    kNullActivityData);
}

void ScopedActivity::ChangeInfo(int32_t info) {
  ChangeTypeAndData(Activity::ACT_NULL, ActivityData::ForGeneric(id_, info));
}

void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
  DCHECK_EQ(0, action & Activity::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
                    ActivityData::ForGeneric(id_, info));
}

ScopedTaskRunActivity::ScopedTaskRunActivity(const void* program_counter,
                                             const base::PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          task.posted_from.program_counter(),
          Activity::ACT_TASK_RUN,
          ActivityData::ForTask(static_cast<uint64_t>(task.sequence_num)),
          /*lock_allowed=*/true) {}

ScopedLockAcquireActivity::ScopedLockAcquireActivity(
    const void* program_counter,
    const base::internal::LockImpl* lock)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_LOCK_ACQUIRE,
          ActivityData::ForLock(lock),
          /*lock_allowed=*/false) {}

ScopedEventWaitActivity::ScopedEventWaitActivity(
    const void* program_counter,
    const base::WaitableEvent* event)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_EVENT_WAIT,
          ActivityData::ForEvent(event),
          /*lock_allowed=*/true) {}

ScopedThreadJoinActivity::ScopedThreadJoinActivity(
    const void* program_counter,
    const base::PlatformThreadHandle* thread)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_THREAD_JOIN,
          ActivityData::ForThread(*thread),
          /*lock_allowed=*/true) {}

#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
ScopedProcessWaitActivity::ScopedProcessWaitActivity(
    const void* program_counter,
    const base::Process* process)
    : GlobalActivityTracker::ScopedThreadActivity(
          program_counter,
          nullptr,
          Activity::ACT_PROCESS_WAIT,
          ActivityData::ForProcess(process->Pid()),
          /*lock_allowed=*/true) {}
#endif

}  // namespace debug
}  // namespace base