trace_log.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_log.h"

#include <cmath>
#include <limits>
#include <memory>
#include <unordered_set>
#include <utility>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/containers/contains.h"
#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted_memory.h"
#include "base/no_destructor.h"
#include "base/notreached.h"
#include "base/process/process.h"
#include "base/process/process_metrics.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/task/current_thread.h"
#include "base/task/thread_pool.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
#include "base/numerics/safe_conversions.h"
#include "base/run_loop.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/tracing/perfetto_platform.h"
#include "third_party/perfetto/include/perfetto/ext/trace_processor/export_json.h"  // nogncheck
#include "third_party/perfetto/include/perfetto/trace_processor/trace_processor_storage.h"  // nogncheck
#include "third_party/perfetto/protos/perfetto/config/chrome/chrome_config.gen.h"  // nogncheck
#include "third_party/perfetto/protos/perfetto/config/interceptor_config.gen.h"  // nogncheck
#include "third_party/perfetto/protos/perfetto/trace/track_event/process_descriptor.gen.h"  // nogncheck
#include "third_party/perfetto/protos/perfetto/trace/track_event/thread_descriptor.gen.h"  // nogncheck
#endif

#if BUILDFLAG(IS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif

#if BUILDFLAG(IS_ANDROID)
#include "base/debug/elf_reader.h"

// The linker assigns the virtual address of the start of current library to
// this symbol.
extern char __executable_start;
#endif

namespace base {
namespace trace_event {

namespace {

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;

const size_t kTraceEventVectorBigBufferChunks =
    512000000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many big buffer chunks");
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
static_assert(
    kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
    "Too many vector buffer chunks");
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;

const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
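
// Sizing sketch (an assumption, not guaranteed by this file: it takes
// TraceBufferChunk::kTraceBufferChunkSize to be 64 events per chunk, its
// value in trace_buffer.h at the time of writing):
//   kTraceEventVectorBufferChunks    = 256000 / 64    = 4000 chunks
//   kTraceEventVectorBigBufferChunks = 512000000 / 64 = 8000000 chunks
//   kTraceEventRingBufferChunks      = 4000 / 4       = 1000 chunks
// i.e. the default vector buffer would hold roughly 256k events before new
// events start being thrown away.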

#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
const int kThreadFlushTimeoutMs = 3000;
#endif

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
static bool g_perfetto_initialized_by_tracelog;
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

TraceLog* g_trace_log_for_testing = nullptr;

#define MAX_TRACE_EVENT_FILTERS 32

// List of TraceEventFilter objects from the most recent tracing session.
std::vector<std::unique_ptr<TraceEventFilter>>& GetCategoryGroupFilters() {
  static auto* filters = new std::vector<std::unique_ptr<TraceEventFilter>>();
  return *filters;
}

ThreadTicks ThreadNow() {
  return ThreadTicks::IsSupported()
             ? base::subtle::ThreadTicksNowIgnoringOverride()
             : ThreadTicks();
}

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             PlatformThreadId thread_id,
                             const char* metadata_name,
                             const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  TraceArguments args(arg_name, value);
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  ThreadTicks thread_now;
  trace_event->Reset(
      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
      TraceLog::GetInstance()->GetCategoryGroupEnabled("__metadata"),
      metadata_name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,         // id
      trace_event_internal::kNoId,         // bind_id
      &args, TRACE_EVENT_FLAG_NONE);
}

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  AutoThreadLocalBoolean(const AutoThreadLocalBoolean&) = delete;
  AutoThreadLocalBoolean& operator=(const AutoThreadLocalBoolean&) = delete;
  ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }

 private:
  raw_ptr<ThreadLocalBoolean> thread_local_boolean_;
};

// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32_t chunk_seq,
                size_t chunk_index,
                size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
  DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16_t>(chunk_index);
  handle->event_index = static_cast<uint16_t>(event_index);
}
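
// Illustrative round trip (a sketch using types from this file): a handle
// minted by MakeHandle() is only resolvable while the same chunk is still
// live, because ThreadLocalEventBuffer::GetEventByHandle() below re-checks
// chunk_seq and chunk_index before returning the event:
//   TraceEventHandle handle;
//   MakeHandle(chunk->seq(), chunk_index, event_index, &handle);
//   TraceEvent* event = buffer->GetEventByHandle(handle);  // null if stale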

template <typename Function>
void ForEachCategoryFilter(const unsigned char* category_group_enabled,
                           Function filter_fn) {
  const TraceCategory* category =
      CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
  uint32_t filter_bitmap = category->enabled_filters();
  for (size_t index = 0; filter_bitmap != 0; filter_bitmap >>= 1, ++index) {
    if (filter_bitmap & 1 && GetCategoryGroupFilters()[index])
      filter_fn(GetCategoryGroupFilters()[index].get());
  }
}
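
// For example, a filter_bitmap of 0b101 invokes filter_fn on the filters at
// indices 0 and 2 of GetCategoryGroupFilters(); the loop terminates as soon
// as no higher bits remain set.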

// The fallback arguments filtering function will filter away every argument.
bool DefaultIsTraceEventArgsAllowlisted(
    const char* category_group_name,
    const char* event_name,
    base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
  return false;
}

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
class PerfettoProtoAppender
    : public base::trace_event::ConvertableToTraceFormat::ProtoAppender {
 public:
  explicit PerfettoProtoAppender(
      perfetto::protos::pbzero::DebugAnnotation* proto)
      : annotation_proto_(proto) {}
  ~PerfettoProtoAppender() override = default;

  // ProtoAppender implementation
  void AddBuffer(uint8_t* begin, uint8_t* end) override {
    ranges_.emplace_back();
    ranges_.back().begin = begin;
    ranges_.back().end = end;
  }

  size_t Finalize(uint32_t field_id) override {
    return annotation_proto_->AppendScatteredBytes(field_id, ranges_.data(),
                                                   ranges_.size());
  }

 private:
  std::vector<protozero::ContiguousMemoryRange> ranges_;
  perfetto::protos::pbzero::DebugAnnotation* annotation_proto_;
};

void AddConvertableToTraceFormat(
    base::trace_event::ConvertableToTraceFormat* value,
    perfetto::protos::pbzero::DebugAnnotation* annotation) {
  PerfettoProtoAppender proto_appender(annotation);
  if (value->AppendToProto(&proto_appender)) {
    return;
  }

  std::string json;
  value->AppendAsTraceFormat(&json);
  annotation->set_legacy_json_value(json.c_str());
}

void WriteDebugAnnotations(base::trace_event::TraceEvent* trace_event,
                           perfetto::protos::pbzero::TrackEvent* track_event) {
  for (size_t i = 0; i < trace_event->arg_size() && trace_event->arg_name(i);
       ++i) {
    auto type = trace_event->arg_type(i);
    auto* annotation = track_event->add_debug_annotations();

    annotation->set_name(trace_event->arg_name(i));

    if (type == TRACE_VALUE_TYPE_CONVERTABLE) {
      AddConvertableToTraceFormat(trace_event->arg_convertible_value(i),
                                  annotation);
      continue;
    }

    auto& value = trace_event->arg_value(i);
    switch (type) {
      case TRACE_VALUE_TYPE_BOOL:
        annotation->set_bool_value(value.as_bool);
        break;
      case TRACE_VALUE_TYPE_UINT:
        annotation->set_uint_value(value.as_uint);
        break;
      case TRACE_VALUE_TYPE_INT:
        annotation->set_int_value(value.as_int);
        break;
      case TRACE_VALUE_TYPE_DOUBLE:
        annotation->set_double_value(value.as_double);
        break;
      case TRACE_VALUE_TYPE_POINTER:
        annotation->set_pointer_value(static_cast<uint64_t>(
            reinterpret_cast<uintptr_t>(value.as_pointer)));
        break;
      case TRACE_VALUE_TYPE_STRING:
      case TRACE_VALUE_TYPE_COPY_STRING:
        annotation->set_string_value(value.as_string ? value.as_string
                                                     : "NULL");
        break;
      case TRACE_VALUE_TYPE_PROTO: {
        auto data = value.as_proto->SerializeAsArray();
        annotation->AppendRawProtoBytes(data.data(), data.size());
      } break;
      default:
        NOTREACHED() << "Don't know how to serialize this value";
        break;
    }
  }
}
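
// As a hypothetical example of the mapping above, an event recorded as
//   TRACE_EVENT1("cat", "name", "count", 42)
// carries one INT-typed argument and so yields a single DebugAnnotation with
// name "count" and int_value 42 on the resulting TrackEvent proto.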

void OnAddLegacyTraceEvent(TraceEvent* trace_event,
                           bool thread_will_flush,
                           base::trace_event::TraceEventHandle* handle) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(
          trace_event->category_group_enabled()));
  auto write_args = [trace_event](perfetto::EventContext ctx) {
    WriteDebugAnnotations(trace_event, ctx.event());
    uint32_t id_flags =
        trace_event->flags() &
        (TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_HAS_LOCAL_ID |
         TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
    if (!id_flags &&
        perfetto::internal::TrackEventLegacy::PhaseToType(
            trace_event->phase()) !=
            perfetto::protos::pbzero::TrackEvent::TYPE_UNSPECIFIED) {
      return;
    }
    auto* legacy_event = ctx.event()->set_legacy_event();
    legacy_event->set_phase(trace_event->phase());
    switch (id_flags) {
      case TRACE_EVENT_FLAG_HAS_ID:
        legacy_event->set_unscoped_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
        legacy_event->set_local_id(trace_event->id());
        break;
      case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
        legacy_event->set_global_id(trace_event->id());
        break;
      default:
        break;
    }
  };

  auto phase = trace_event->phase();
  auto flags = trace_event->flags();
  base::TimeTicks timestamp = trace_event->timestamp().is_null()
                                  ? TRACE_TIME_TICKS_NOW()
                                  : trace_event->timestamp();
  if (phase == TRACE_EVENT_PHASE_COMPLETE) {
    phase = TRACE_EVENT_PHASE_BEGIN;
  } else if (phase == TRACE_EVENT_PHASE_INSTANT) {
    auto scope = flags & TRACE_EVENT_FLAG_SCOPE_MASK;
    switch (scope) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(), ::perfetto::Track::Global(0),
            timestamp, write_args);
        return;
      case TRACE_EVENT_SCOPE_PROCESS:
        PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
            phase, category, trace_event->name(),
            ::perfetto::ProcessTrack::Current(), timestamp, write_args);
        return;
      default:
      case TRACE_EVENT_SCOPE_THREAD: /* Fallthrough. */
        break;
    }
  }
  if (trace_event->thread_id() &&
      trace_event->thread_id() !=
          static_cast<int>(base::PlatformThread::CurrentId())) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, trace_event->name(),
        perfetto::ThreadTrack::ForThread(trace_event->thread_id()), timestamp,
        write_args);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, trace_event->name(),
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp,
      write_args);
}

void OnUpdateLegacyTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  perfetto::DynamicCategory category(
      TraceLog::GetInstance()->GetCategoryGroupName(category_group_enabled));
  auto phase = TRACE_EVENT_PHASE_END;
  base::TimeTicks timestamp =
      explicit_timestamps ? now : TRACE_TIME_TICKS_NOW();
  if (thread_id && thread_id != base::PlatformThread::CurrentId()) {
    PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
        phase, category, name, perfetto::ThreadTrack::ForThread(thread_id),
        timestamp);
    return;
  }
  PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(
      phase, category, name,
      perfetto::internal::TrackEventInternal::kDefaultTrack, timestamp);
}
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

}  // namespace

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)
namespace {
// Perfetto provides us with a fully formed JSON trace file, while
// TraceResultBuffer wants individual JSON fragments without a containing
// object. We therefore need to strip away the outer object, including the
// metadata fields, from the JSON stream.
static constexpr char kJsonPrefix[] = "{\"traceEvents\":[\n";
static constexpr char kJsonJoiner[] = ",\n";
static constexpr char kJsonSuffix[] = "],\"metadata\":";
}  // namespace
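
// For example (a sketch of the stream shape implied by the constants above),
// Perfetto may hand us
//   {"traceEvents":[\n{...},\n{...}],"metadata":{...}}
// across several AppendString() calls; the writer below drops the kJsonPrefix
// and kJsonSuffix pieces and forwards only the bare event fragments to the
// flush callback.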

class JsonStringOutputWriter
    : public perfetto::trace_processor::json::OutputWriter {
 public:
  JsonStringOutputWriter(scoped_refptr<SequencedTaskRunner> flush_task_runner,
                         TraceLog::OutputCallback flush_callback)
      : flush_task_runner_(flush_task_runner),
        flush_callback_(std::move(flush_callback)) {
    buffer_->data().reserve(kBufferReserveCapacity);
  }

  ~JsonStringOutputWriter() override { Flush(/*has_more=*/false); }

  perfetto::trace_processor::util::Status AppendString(
      const std::string& string) override {
    if (!did_strip_prefix_) {
      DCHECK_EQ(string, kJsonPrefix);
      did_strip_prefix_ = true;
      return perfetto::trace_processor::util::OkStatus();
    } else if (buffer_->data().empty() &&
               !strncmp(string.c_str(), kJsonJoiner, strlen(kJsonJoiner))) {
      // We only remove the leading joiner comma for the first chunk in a
      // buffer, since the consumer is expected to insert commas between the
      // buffers we provide.
      buffer_->data() += string.substr(strlen(kJsonJoiner));
    } else if (!strncmp(string.c_str(), kJsonSuffix, strlen(kJsonSuffix))) {
      return perfetto::trace_processor::util::OkStatus();
    } else {
      buffer_->data() += string;
    }
    if (buffer_->data().size() > kBufferLimitInBytes) {
      Flush(/*has_more=*/true);
      // Reset the buffer_ after moving it above.
      buffer_ = new RefCountedString();
      buffer_->data().reserve(kBufferReserveCapacity);
    }
    return perfetto::trace_processor::util::OkStatus();
  }

 private:
  void Flush(bool has_more) {
    if (flush_task_runner_) {
      flush_task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(flush_callback_, std::move(buffer_), has_more));
    } else {
      flush_callback_.Run(std::move(buffer_), has_more);
    }
  }

  static constexpr size_t kBufferLimitInBytes = 100 * 1024;
  // Since we write each string before checking the limit, we'll always go
  // slightly over and hence we reserve some extra space to avoid most
  // reallocs.
  static constexpr size_t kBufferReserveCapacity = kBufferLimitInBytes * 5 / 4;

  scoped_refptr<SequencedTaskRunner> flush_task_runner_;
  TraceLog::OutputCallback flush_callback_;
  scoped_refptr<RefCountedString> buffer_ = new RefCountedString();
  bool did_strip_prefix_ = false;
};
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)

// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock* lock) : lock_(lock) {}
  OptionalAutoLock(const OptionalAutoLock&) = delete;
  OptionalAutoLock& operator=(const OptionalAutoLock&) = delete;

  ~OptionalAutoLock() {
    if (locked_)
      lock_->Release();
  }

  void EnsureAcquired() EXCLUSIVE_LOCK_FUNCTION(lock_) {
    if (!locked_) {
      lock_->Acquire();
      locked_ = true;
    } else {
      lock_->AssertAcquired();
    }
  }

 private:
  Lock* lock_;
  bool locked_ = false;
};
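
// Usage sketch (a hypothetical caller, not code from this file): the lock is
// only taken on the paths that need it and is released automatically when the
// helper goes out of scope:
//   OptionalAutoLock lock(&lock_);
//   if (must_touch_shared_state)  // hypothetical condition
//     lock.EnsureAcquired();      // idempotent; released when |lock| dies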

class TraceLog::ThreadLocalEventBuffer
    : public CurrentThread::DestructionObserver,
      public MemoryDumpProvider {
 public:
  explicit ThreadLocalEventBuffer(TraceLog* trace_log);
  ThreadLocalEventBuffer(const ThreadLocalEventBuffer&) = delete;
  ThreadLocalEventBuffer& operator=(const ThreadLocalEventBuffer&) = delete;
  ~ThreadLocalEventBuffer() override;

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_) {
      return nullptr;
    }
    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // CurrentThread::DestructionObserver
  void WillDestroyCurrentMessageLoop() override;

  // MemoryDumpProvider implementation.
  bool OnMemoryDump(const MemoryDumpArgs& args,
                    ProcessMemoryDump* pmd) override;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  raw_ptr<TraceLog> trace_log_;
  std::unique_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_ = 0;
  int generation_;
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log), generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so CurrentThread::Get() below won't be null.
  CurrentThread::Get()->AddDestructionObserver(this);

  // This is to report the local memory usage when memory-infra is enabled.
  MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());

  auto thread_id = PlatformThread::CurrentId();
  AutoLock lock(trace_log->lock_);
  trace_log->thread_task_runners_[thread_id] = ThreadTaskRunnerHandle::Get();
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  CurrentThread::Get()->RemoveDestructionObserver(this);
  MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();

    auto thread_id = PlatformThread::CurrentId();
    trace_log_->thread_task_runners_.erase(thread_id);
  }
  trace_log_->thread_local_event_buffer_.Set(nullptr);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                    ProcessMemoryDump* pmd) {
  if (!chunk_)
    return true;
  std::string dump_base_name =
      "tracing/thread_" + NumberToString(PlatformThread::CurrentId());
  TraceEventMemoryOverhead overhead;
  chunk_->EstimateTraceMemoryOverhead(&overhead);
  overhead.DumpInto(dump_base_name.c_str(), pmd);
  return true;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

void TraceLog::SetAddTraceEventOverrides(
    const AddTraceEventOverrideFunction& add_event_override,
    const OnFlushFunction& on_flush_override,
    const UpdateDurationFunction& update_duration_override) {
  add_trace_event_override_.store(add_event_override);
  on_flush_override_.store(on_flush_override);
  update_duration_override_.store(update_duration_override);
}

struct TraceLog::RegisteredAsyncObserver {
  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
  ~RegisteredAsyncObserver() = default;

  WeakPtr<AsyncEnabledStateObserver> observer;
  scoped_refptr<SequencedTaskRunner> task_runner;
};

TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() = default;

// static
TraceLog* TraceLog::GetInstance() {
  static base::NoDestructor<TraceLog> instance(0);
  return instance.get();
}

// static
void TraceLog::ResetForTesting() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  auto* self = GetInstance();
  AutoLock lock(self->observers_lock_);
  self->enabled_state_observers_.clear();
  self->owned_enabled_state_observer_copy_.clear();
  self->async_observers_.clear();
  self->InitializePerfettoIfNeeded();
#else  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (!g_trace_log_for_testing)
    return;
  {
    AutoLock lock(g_trace_log_for_testing->lock_);
    CategoryRegistry::ResetForTesting();
  }
  // Don't reset the generation value back to 0. TraceLog is normally
  // supposed to be a singleton and the value of generation is never
  // supposed to decrease.
  const int generation = g_trace_log_for_testing->generation() + 1;
  g_trace_log_for_testing->~TraceLog();
  new (g_trace_log_for_testing) TraceLog(generation);
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

TraceLog::TraceLog(int generation)
    : enabled_modes_(0),
      num_traces_recorded_(0),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(base::kNullProcessId),
      trace_options_(kInternalRecordUntilFull),
      trace_config_(TraceConfig()),
      thread_shared_chunk_index_(0),
      generation_(generation),
      use_worker_thread_(false) {
  CategoryRegistry::Initialize();

#if BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(GetCurrentProcId());
#endif

  logged_events_.reset(CreateTraceBuffer());

  MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
                                                         nullptr);
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  perfetto::TrackEvent::AddSessionObserver(this);
  // When using the Perfetto client library, TRACE_EVENT macros will bypass
  // TraceLog entirely. However, trace event embedders which haven't been
  // ported to Perfetto yet will still be using
  // TRACE_EVENT_API_ADD_TRACE_EVENT, so we need to route these events to
  // Perfetto using an override here. This override is also used to capture
  // internal metadata events.
  SetAddTraceEventOverrides(&OnAddLegacyTraceEvent, nullptr,
                            &OnUpdateLegacyTraceEventDuration);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  g_trace_log_for_testing = this;
}

TraceLog::~TraceLog() {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  perfetto::TrackEvent::RemoveSessionObserver(this);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
  // A ThreadLocalEventBuffer needs the message loop with a task runner
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop or if the message loop may be blocked,
  // the trace events will be added into the main buffer directly.
  if (thread_blocks_message_loop_.Get() || !CurrentThread::IsSet() ||
      !ThreadTaskRunnerHandle::IsSet()) {
    return;
  }
  HEAP_PROFILER_SCOPED_IGNORE;
  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
  if (thread_local_event_buffer &&
      !CheckGeneration(thread_local_event_buffer->generation())) {
    delete thread_local_event_buffer;
    thread_local_event_buffer = nullptr;
  }
  if (!thread_local_event_buffer) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);
    thread_local_event_buffer_.Set(thread_local_event_buffer);
  }
}

bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
                            ProcessMemoryDump* pmd) {
  // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
  // (crbug.com/499731).
  TraceEventMemoryOverhead overhead;
  overhead.Add(TraceEventMemoryOverhead::kOther, sizeof(*this));
  {
    AutoLock lock(lock_);
    if (logged_events_)
      logged_events_->EstimateTraceMemoryOverhead(&overhead);

    for (auto& metadata_event : metadata_events_)
      metadata_event->EstimateTraceMemoryOverhead(&overhead);
  }
  overhead.AddSelf();
  overhead.DumpInto("tracing/main_trace_log", pmd);
  return true;
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group);
#else  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
    return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
  }
  TraceCategory* category =
      CategoryRegistry::GetCategoryByName(category_group);
  if (!category) {
    // Slow path: in the case of a new category we have to repeat the check
    // holding the lock, as multiple threads might have reached this point
    // at the same time.
    auto category_initializer = [](TraceCategory* category) {
      TraceLog::GetInstance()->UpdateCategoryState(category);
    };
    AutoLock lock(tracelog->lock_);
    CategoryRegistry::GetOrCreateCategoryLocked(
        category_group, category_initializer, &category);
  }
  DCHECK(category->state_ptr());
  return category->state_ptr();
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return TRACE_EVENT_API_GET_CATEGORY_GROUP_NAME(category_group_enabled);
#else  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
      ->name();
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

void TraceLog::UpdateCategoryState(TraceCategory* category) {
  lock_.AssertAcquired();
  DCHECK(category->is_valid());
  unsigned char state_flags = 0;
  if (enabled_modes_ & RECORDING_MODE &&
      trace_config_.IsCategoryGroupEnabled(category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

  // TODO(primiano): this is a temporary workaround for catapult:#2341,
  // to guarantee that metadata events are always added even if the category
  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
  if (enabled_modes_ & RECORDING_MODE &&
      category == CategoryRegistry::kCategoryMetadata) {
    state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
  }

#if BUILDFLAG(IS_WIN)
  if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
          category->name())) {
    state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
  }
#endif

  uint32_t enabled_filters_bitmap = 0;
  size_t index = 0;
  for (const auto& event_filter : enabled_event_filters_) {
    if (event_filter.IsCategoryGroupEnabled(category->name())) {
      state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
      DCHECK(GetCategoryGroupFilters()[index]);
      enabled_filters_bitmap |= 1 << index;
    }
    if (index++ >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED();
      break;
    }
  }
  category->set_enabled_filters(enabled_filters_bitmap);
  category->set_state(state_flags);
}

void TraceLog::UpdateCategoryRegistry() {
  lock_.AssertAcquired();
  CreateFiltersForTraceConfig();
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    UpdateCategoryState(&category);
  }
}

void TraceLog::CreateFiltersForTraceConfig() {
  if (!(enabled_modes_ & FILTERING_MODE))
    return;

  // Filters were already added and tracing could be enabled. Filters list
  // cannot be changed when trace events are using them.
  if (GetCategoryGroupFilters().size())
    return;

  for (auto& filter_config : enabled_event_filters_) {
    if (GetCategoryGroupFilters().size() >= MAX_TRACE_EVENT_FILTERS) {
      NOTREACHED()
          << "Too many trace event filters installed in the current session";
      break;
    }

    std::unique_ptr<TraceEventFilter> new_filter;
    const std::string& predicate_name = filter_config.predicate_name();
    if (predicate_name == EventNameFilter::kName) {
      auto allowlist = std::make_unique<std::unordered_set<std::string>>();
      CHECK(filter_config.GetArgAsSet("event_name_allowlist", &*allowlist));
      new_filter = std::make_unique<EventNameFilter>(std::move(allowlist));
    } else {
      if (filter_factory_for_testing_)
        new_filter = filter_factory_for_testing_(predicate_name);
      CHECK(new_filter) << "Unknown trace filter " << predicate_name;
    }
    GetCategoryGroupFilters().push_back(std::move(new_filter));
  }
}

void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          uint8_t modes_to_enable) {
  DCHECK(trace_config.process_filter_config().IsEnabled(process_id_));

  AutoLock lock(lock_);

  // Perfetto only supports basic wildcard filtering, so check that we're not
  // trying to use more complex filters.
  for (const auto& excluded :
       trace_config.category_filter().excluded_categories()) {
    DCHECK(excluded.find("?") == std::string::npos);
    DCHECK(excluded.find("*") == std::string::npos ||
           excluded.find("*") == excluded.size() - 1);
  }
  for (const auto& included :
       trace_config.category_filter().included_categories()) {
    DCHECK(included.find("?") == std::string::npos);
    DCHECK(included.find("*") == std::string::npos ||
           included.find("*") == included.size() - 1);
  }
  for (const auto& disabled :
       trace_config.category_filter().disabled_categories()) {
    DCHECK(disabled.find("?") == std::string::npos);
    DCHECK(disabled.find("*") == std::string::npos ||
           disabled.find("*") == disabled.size() - 1);
  }
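
  // e.g. "foo*" (a trailing wildcard) passes the checks above, while "*foo"
  // and "f?o" would trip these DCHECKs.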

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  DCHECK(modes_to_enable == RECORDING_MODE);
  DCHECK(!trace_config.IsArgumentFilterEnabled());

  perfetto::TraceConfig perfetto_config;
  size_t size_limit = trace_config.GetTraceBufferSizeInKb();
  if (size_limit == 0)
    size_limit = 200 * 1024;
  auto* buffer_config = perfetto_config.add_buffers();
  buffer_config->set_size_kb(checked_cast<uint32_t>(size_limit));
  switch (trace_config.GetTraceRecordMode()) {
    case base::trace_event::RECORD_UNTIL_FULL:
    case base::trace_event::RECORD_AS_MUCH_AS_POSSIBLE:
      buffer_config->set_fill_policy(
          perfetto::TraceConfig::BufferConfig::DISCARD);
      break;
    case base::trace_event::RECORD_CONTINUOUSLY:
      buffer_config->set_fill_policy(
          perfetto::TraceConfig::BufferConfig::RING_BUFFER);
      break;
    case base::trace_event::ECHO_TO_CONSOLE:
      // Handled below.
      break;
  }

  // Add the track event data source.
  // TODO(skyostil): Configure kTraceClockId as the primary trace clock.
  auto* data_source = perfetto_config.add_data_sources();
  auto* source_config = data_source->mutable_config();
  source_config->set_name("track_event");
  source_config->set_target_buffer(0);
  source_config->mutable_chrome_config()->set_convert_to_legacy_json(true);
  if (trace_config.GetTraceRecordMode() ==
      base::trace_event::ECHO_TO_CONSOLE) {
    perfetto::ConsoleInterceptor::Register();
    source_config->mutable_interceptor_config()->set_name("console");
  }

  // Translate the category filter into included and excluded categories.
  perfetto::protos::gen::TrackEventConfig te_cfg;
  // If no categories are explicitly enabled, enable the default ones.
  // Otherwise only matching categories are enabled.
  if (!trace_config.category_filter().included_categories().empty())
    te_cfg.add_disabled_categories("*");
  // Metadata is always enabled.
  te_cfg.add_enabled_categories("__metadata");
  for (const auto& excluded :
       trace_config.category_filter().excluded_categories()) {
    te_cfg.add_disabled_categories(excluded);
  }
  for (const auto& included :
       trace_config.category_filter().included_categories()) {
    te_cfg.add_enabled_categories(included);
  }
  for (const auto& disabled :
       trace_config.category_filter().disabled_categories()) {
    te_cfg.add_enabled_categories(disabled);
  }
  source_config->set_track_event_config_raw(te_cfg.SerializeAsString());
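
  // For instance, a filter with included={"foo"} and excluded={"bar"}
  // serializes as disabled_categories=["*", "bar"] and
  // enabled_categories=["__metadata", "foo"], per the loops above.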

  // Clear incremental state every 5 seconds, so that we lose at most the
  // first 5 seconds of the trace (if we wrap around Perfetto's central
  // buffer).
  perfetto_config.mutable_incremental_state_config()->set_clear_period_ms(
      5000);

  SetEnabledImpl(trace_config, perfetto_config);
#else  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // Can't enable tracing when Flush() is in progress.
  DCHECK(!flush_task_runner_);

  InternalTraceOptions new_options =
      GetInternalOptionsFromTraceConfig(trace_config);
  InternalTraceOptions old_options = trace_options();

  if (dispatching_to_observers_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  // Clear all filters from previous tracing session. These filters are not
  // cleared at the end of tracing because some threads which hit trace event
  // when disabling, could try to use the filters.
  if (!enabled_modes_)
    GetCategoryGroupFilters().clear();

  // Update trace config for recording.
  const bool already_recording = enabled_modes_ & RECORDING_MODE;
  if (modes_to_enable & RECORDING_MODE) {
    if (already_recording) {
      trace_config_.Merge(trace_config);
    } else {
      trace_config_ = trace_config;
    }
  }

  // Update event filters only if filtering was not enabled.
  if (modes_to_enable & FILTERING_MODE && enabled_event_filters_.empty()) {
    DCHECK(!trace_config.event_filters().empty());
    enabled_event_filters_ = trace_config.event_filters();
  }
  // Keep the |trace_config_| updated with only enabled filters in case anyone
  // tries to read it using |GetCurrentTraceConfig| (even if filters are
  // empty).
  trace_config_.SetEventFilters(enabled_event_filters_);

  enabled_modes_ |= modes_to_enable;
  UpdateCategoryRegistry();

  // Do not notify observers or create trace buffer if only enabled for
  // filtering or if recording was already enabled.
  if (!(modes_to_enable & RECORDING_MODE) || already_recording)
    return;

  // Discard events if new trace options are different. Reducing trace buffer
  // size is not supported while already recording, so only replace trace
  // buffer if we were not already recording.
  if (new_options != old_options ||
      (trace_config_.GetTraceBufferSizeInEvents() && !already_recording)) {
    trace_options_.store(new_options, std::memory_order_relaxed);
    UseNextTraceBuffer();
  }

  num_traces_recorded_++;

  UpdateCategoryRegistry();

  dispatching_to_observers_ = true;
  {
    // Notify observers outside of the thread events lock, so they can trigger
    // trace events.
    AutoUnlock unlock(lock_);
    AutoLock lock2(observers_lock_);
    for (EnabledStateObserver* observer : enabled_state_observers_)
      observer->OnTraceLogEnabled();
    for (const auto& it : async_observers_) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                              it.second.observer));
    }
  }
  dispatching_to_observers_ = false;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
void TraceLog::InitializePerfettoIfNeeded() {
  // When we're using the Perfetto client library, only tests should be
  // recording traces directly through TraceLog. Production code should
  // instead use perfetto::Tracing::NewTrace(). Let's make sure the tracing
  // service didn't already initialize Perfetto in this process, because it's
  // not safe to consume trace data from arbitrary processes through TraceLog
  // as the JSON conversion here isn't sandboxed like with the real tracing
  // service.
  //
  // Note that initializing Perfetto here requires the thread pool to be
  // ready.
  CHECK(!perfetto::Tracing::IsInitialized() ||
        g_perfetto_initialized_by_tracelog)
      << "Don't use TraceLog for recording traces from non-test code. Use "
         "perfetto::Tracing::NewTrace() instead.";

  if (perfetto::Tracing::IsInitialized())
    return;
  g_perfetto_initialized_by_tracelog = true;
  auto* perfetto_platform = GetOrCreatePerfettoPlatform();
  perfetto::TracingInitArgs init_args;
  init_args.backends = perfetto::BackendType::kInProcessBackend;
  init_args.platform = perfetto_platform;
  perfetto::Tracing::Initialize(init_args);
  perfetto::TrackEvent::Register();
}

void TraceLog::SetEnabled(const TraceConfig& trace_config,
                          const perfetto::TraceConfig& perfetto_config) {
  AutoLock lock(lock_);
  SetEnabledImpl(trace_config, perfetto_config);
}

void TraceLog::SetEnabledImpl(const TraceConfig& trace_config,
                              const perfetto::TraceConfig& perfetto_config) {
  DCHECK(!perfetto::TrackEvent::IsEnabled());
  lock_.AssertAcquired();
  InitializePerfettoIfNeeded();

  trace_config_ = trace_config;
  perfetto_config_ = perfetto_config;
  tracing_session_ = perfetto::Tracing::NewTrace();

  AutoUnlock unlock(lock_);
  tracing_session_->Setup(perfetto_config);
  tracing_session_->StartBlocking();
}
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

void TraceLog::SetArgumentFilterPredicate(
    const ArgumentFilterPredicate& argument_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!argument_filter_predicate.is_null());
  // Replace the existing argument filter.
  argument_filter_predicate_ = argument_filter_predicate;
}

ArgumentFilterPredicate TraceLog::GetArgumentFilterPredicate() const {
  AutoLock lock(lock_);
  return argument_filter_predicate_;
}

void TraceLog::SetMetadataFilterPredicate(
    const MetadataFilterPredicate& metadata_filter_predicate) {
  AutoLock lock(lock_);
  DCHECK(!metadata_filter_predicate.is_null());
  // Replace the existing metadata filter.
  metadata_filter_predicate_ = metadata_filter_predicate;
}
  979. MetadataFilterPredicate TraceLog::GetMetadataFilterPredicate() const {
  980. AutoLock lock(lock_);
  981. return metadata_filter_predicate_;
  982. }
  983. void TraceLog::SetRecordHostAppPackageName(bool record_host_app_package_name) {
  984. record_host_app_package_name_ = record_host_app_package_name;
  985. }
  986. bool TraceLog::ShouldRecordHostAppPackageName() const {
  987. return record_host_app_package_name_;
  988. }
  989. TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
  990. const TraceConfig& config) {
  991. InternalTraceOptions ret = config.IsArgumentFilterEnabled()
  992. ? kInternalEnableArgumentFilter
  993. : kInternalNone;
  994. switch (config.GetTraceRecordMode()) {
  995. case RECORD_UNTIL_FULL:
  996. return ret | kInternalRecordUntilFull;
  997. case RECORD_CONTINUOUSLY:
  998. return ret | kInternalRecordContinuously;
  999. case ECHO_TO_CONSOLE:
  1000. return ret | kInternalEchoToConsole;
  1001. case RECORD_AS_MUCH_AS_POSSIBLE:
  1002. return ret | kInternalRecordAsMuchAsPossible;
  1003. }
  1004. NOTREACHED();
  1005. return kInternalNone;
  1006. }
TraceConfig TraceLog::GetCurrentTraceConfig() const {
  AutoLock lock(lock_);
  return trace_config_;
}

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(RECORDING_MODE);
}

void TraceLog::SetDisabled(uint8_t modes_to_disable) {
  AutoLock lock(lock_);
  SetDisabledWhileLocked(modes_to_disable);
}

void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (!tracing_session_)
    return;

  AddMetadataEventsWhileLocked();
  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  perfetto::TrackEvent::Flush();

  // If the current thread has an active task runner, allow nested tasks to run
  // while stopping the session. This is needed by some tests, e.g., to allow
  // data sources to properly flush themselves.
  if (ThreadTaskRunnerHandle::IsSet()) {
    RunLoop stop_loop(RunLoop::Type::kNestableTasksAllowed);
    auto quit_closure = stop_loop.QuitClosure();
    tracing_session_->SetOnStopCallback(
        [&quit_closure] { quit_closure.Run(); });
    tracing_session_->Stop();
    AutoUnlock unlock(lock_);
    stop_loop.Run();
  } else {
    tracing_session_->StopBlocking();
  }
#else   // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (!(enabled_modes_ & modes_to_disable))
    return;

  if (dispatching_to_observers_) {
    // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  bool is_recording_mode_disabled =
      (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
  enabled_modes_ &= ~modes_to_disable;

  if (modes_to_disable & FILTERING_MODE)
    enabled_event_filters_.clear();

  if (modes_to_disable & RECORDING_MODE)
    trace_config_.Clear();

  UpdateCategoryRegistry();

  // Add metadata events and notify observers only if recording mode was just
  // disabled.
  if (!is_recording_mode_disabled)
    return;

  AddMetadataEventsWhileLocked();
  // Remove metadata events so they will not get added to a subsequent trace.
  metadata_events_.clear();

  dispatching_to_observers_ = true;
  {
    // Release the trace events lock, so observers can trigger trace events.
    AutoUnlock unlock(lock_);
    AutoLock lock2(observers_lock_);
    for (auto* it : enabled_state_observers_)
      it->OnTraceLogDisabled();
    for (const auto& it : async_observers_) {
      it.second.task_runner->PostTask(
          FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                              it.second.observer));
    }
  }
  dispatching_to_observers_ = false;
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
}

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  return (enabled_modes_ & RECORDING_MODE) ? num_traces_recorded_ : -1;
}

void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.push_back(listener);
}

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.erase(
      ranges::remove(enabled_state_observers_, listener),
      enabled_state_observers_.end());
}

void TraceLog::AddOwnedEnabledStateObserver(
    std::unique_ptr<EnabledStateObserver> listener) {
  AutoLock lock(observers_lock_);
  enabled_state_observers_.push_back(listener.get());
  owned_enabled_state_observer_copy_.push_back(std::move(listener));
}

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  AutoLock lock(observers_lock_);
  return Contains(enabled_state_observers_, listener);
}
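
// A minimal observer sketch (hypothetical class name; the interface is the
// nested TraceLog::EnabledStateObserver declared in trace_log.h):
//
//   class MyTraceObserver
//       : public base::trace_event::TraceLog::EnabledStateObserver {
//    public:
//     void OnTraceLogEnabled() override { /* tracing just started */ }
//     void OnTraceLogDisabled() override { /* tracing just stopped */ }
//   };
//
//   // The observer must outlive its registration, or be handed over via
//   // AddOwnedEnabledStateObserver().
//   base::trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(
//       &my_observer);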
void TraceLog::AddAsyncEnabledStateObserver(
    WeakPtr<AsyncEnabledStateObserver> listener) {
  AutoLock lock(observers_lock_);
  async_observers_.emplace(listener.get(), RegisteredAsyncObserver(listener));
}

void TraceLog::RemoveAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) {
  AutoLock lock(observers_lock_);
  async_observers_.erase(listener);
}

bool TraceLog::HasAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) const {
  AutoLock lock(observers_lock_);
  return Contains(async_observers_, listener);
}

void TraceLog::AddIncrementalStateObserver(IncrementalStateObserver* listener) {
  AutoLock lock(observers_lock_);
  incremental_state_observers_.push_back(listener);
}

void TraceLog::RemoveIncrementalStateObserver(
    IncrementalStateObserver* listener) {
  AutoLock lock(observers_lock_);
  incremental_state_observers_.erase(
      ranges::remove(incremental_state_observers_, listener),
      incremental_state_observers_.end());
}

void TraceLog::OnIncrementalStateCleared() {
  AutoLock lock(observers_lock_);
  for (IncrementalStateObserver* observer : incremental_state_observers_)
    observer->OnIncrementalStateCleared();
}

TraceLogStatus TraceLog::GetStatus() const {
  AutoLock lock(lock_);
  TraceLogStatus result;
  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
  result.event_count = static_cast<uint32_t>(logged_events_->Size());
  return result;
}

bool TraceLog::BufferIsFull() const {
#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  // TODO(skyostil): Remove this method since there are no non-test usages.
  DCHECK(false);
  return false;
#else
  AutoLock lock(lock_);
  return logged_events_->IsFull();
#endif
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle,
    bool check_buffer_is_full) {
  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                std::move(thread_shared_chunk_));
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ =
        logged_events_->GetChunk(&thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }
  if (!thread_shared_chunk_)
    return nullptr;

  size_t event_index;
  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);
  }
  return trace_event;
}

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  if (logged_events_->IsFull()) {
    if (buffer_limit_reached_timestamp_.is_null()) {
      buffer_limit_reached_timestamp_ = OffsetNow();
    }
    SetDisabledWhileLocked(RECORDING_MODE);
  }
}

// Flush() works as follows:
// 1. Flush() is called on thread A, whose task runner is saved in
//    flush_task_runner_;
// 2. If thread_message_loops_ is not empty, thread A posts a task to each
//    message loop to flush the thread-local buffers; otherwise it finishes
//    the flush;
// 3. FlushCurrentThread() deletes the thread-local event buffer:
//    - The last batch of events of the thread is flushed into the main buffer;
//    - The message loop is removed from thread_message_loops_;
//      if this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb,
                     bool use_worker_thread) {
  FlushInternal(cb, use_worker_thread, false);
}
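
// A usage sketch for Flush() (names local to the example): the callback may
// be invoked several times, with has_more_events == true for every chunk
// except the last.
//
//   void OnTraceChunk(std::string* out,
//                     const scoped_refptr<base::RefCountedString>& chunk,
//                     bool has_more_events) {
//     out->append(chunk->data());
//     if (!has_more_events) { /* |*out| now holds the full event list */ }
//   }
//
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(
//       base::BindRepeating(&OnTraceChunk, &json),
//       /*use_worker_thread=*/false);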
void TraceLog::CancelTracing(const OutputCallback& cb) {
  SetDisabled();
  FlushInternal(cb, false, true);
}

void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
                             bool use_worker_thread,
                             bool discard_events) {
  use_worker_thread_ = use_worker_thread;

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)
  perfetto::TrackEvent::Flush();

  if (discard_events) {
    tracing_session_.reset();
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    cb.Run(empty_result, /*has_more_events=*/false);
    return;
  }

  bool convert_to_json = true;
  for (const auto& data_source : perfetto_config_.data_sources()) {
    if (data_source.config().has_chrome_config() &&
        data_source.config().chrome_config().has_convert_to_legacy_json()) {
      convert_to_json =
          data_source.config().chrome_config().convert_to_legacy_json();
      break;
    }
  }

  if (convert_to_json) {
    perfetto::trace_processor::Config processor_config;
    trace_processor_ =
        perfetto::trace_processor::TraceProcessorStorage::CreateInstance(
            processor_config);
    json_output_writer_.reset(new JsonStringOutputWriter(
        use_worker_thread ? ThreadTaskRunnerHandle::Get() : nullptr, cb));
  } else {
    proto_output_callback_ = std::move(cb);
  }

  if (use_worker_thread) {
    tracing_session_->ReadTrace(
        [this](perfetto::TracingSession::ReadTraceCallbackArgs args) {
          OnTraceData(args.data, args.size, args.has_more);
        });
  } else {
    auto data = tracing_session_->ReadTraceBlocking();
    OnTraceData(data.data(), data.size(), /*has_more=*/false);
  }
#elif BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && BUILDFLAG(IS_NACL)
  // The trace processor isn't built on NaCl, so we can't convert the
  // resulting trace into JSON.
  CHECK(false) << "JSON tracing isn't supported on NaCl";
#else
  if (IsEnabled()) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms, causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
    if (!cb.is_null())
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
    return;
  }

  int gen = generation();
  // Copy of thread_task_runners_ to be used without locking.
  std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
  {
    AutoLock lock(lock_);
    DCHECK(!flush_task_runner_);
    flush_task_runner_ = SequencedTaskRunnerHandle::IsSet()
                             ? SequencedTaskRunnerHandle::Get()
                             : nullptr;
    DCHECK(thread_task_runners_.empty() || flush_task_runner_);
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  std::move(thread_shared_chunk_));
    }

    for (const auto& it : thread_task_runners_)
      task_runners.push_back(it.second);
  }

  if (!task_runners.empty()) {
    for (auto& task_runner : task_runners) {
      task_runner->PostTask(
          FROM_HERE, BindOnce(&TraceLog::FlushCurrentThread, Unretained(this),
                              gen, discard_events));
    }
    flush_task_runner_->PostDelayedTask(
        FROM_HERE,
        BindOnce(&TraceLog::OnFlushTimeout, Unretained(this), gen,
                 discard_events),
        Milliseconds(kThreadFlushTimeoutMs));
    return;
  }

  FinishFlush(gen, discard_events);
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)
}

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)
void TraceLog::OnTraceData(const char* data, size_t size, bool has_more) {
  if (proto_output_callback_) {
    scoped_refptr<RefCountedString> chunk = new RefCountedString();
    if (size)
      chunk->data().assign(data, size);
    proto_output_callback_.Run(std::move(chunk), has_more);
    if (!has_more) {
      proto_output_callback_.Reset();
      tracing_session_.reset();
    }
    return;
  }
  if (size) {
    std::unique_ptr<uint8_t[]> data_copy(new uint8_t[size]);
    memcpy(&data_copy[0], data, size);
    auto status = trace_processor_->Parse(std::move(data_copy), size);
    DCHECK(status.ok()) << status.message();
  }
  if (has_more)
    return;
  trace_processor_->NotifyEndOfFile();

  auto status = perfetto::trace_processor::json::ExportJson(
      trace_processor_.get(), json_output_writer_.get());
  DCHECK(status.ok()) << status.message();
  trace_processor_.reset();
  tracing_session_.reset();
  json_output_writer_.reset();
}
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY) && !BUILDFLAG(IS_NACL)

// This usually runs on a different thread.
void TraceLog::ConvertTraceEventsToTraceFormat(
    std::unique_ptr<TraceBuffer> logged_events,
    const OutputCallback& flush_output_callback,
    const ArgumentFilterPredicate& argument_filter_predicate) {
  if (flush_output_callback.is_null())
    return;

  HEAP_PROFILER_SCOPED_IGNORE;
  // The callback needs to be called at least once even if there are no
  // events, to let the caller know that the flush has completed.
  scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
  const size_t kReserveCapacity = kTraceEventBufferSizeInBytes * 5 / 4;
  json_events_str_ptr->data().reserve(kReserveCapacity);
  while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
    for (size_t j = 0; j < chunk->size(); ++j) {
      size_t size = json_events_str_ptr->size();
      if (size > kTraceEventBufferSizeInBytes) {
        flush_output_callback.Run(json_events_str_ptr, true);
        json_events_str_ptr = new RefCountedString();
        json_events_str_ptr->data().reserve(kReserveCapacity);
      } else if (size) {
        json_events_str_ptr->data().append(",\n");
      }
      chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
                                         argument_filter_predicate);
    }
  }
  flush_output_callback.Run(json_events_str_ptr, false);
}
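
// Consumers of the chunked output above can simply concatenate the chunks:
// each Run() delivers roughly kTraceEventBufferSizeInBytes of comma-separated
// JSON events, with no surrounding brackets. A sketch of the typical wrapping
// done by the caller (assumed convention, not enforced by this file):
//
//   std::string json_array = "[" + concatenated_chunks + "]";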
void TraceLog::FinishFlush(int generation, bool discard_events) {
  std::unique_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;
  ArgumentFilterPredicate argument_filter_predicate;

  if (!CheckGeneration(generation))
    return;

  {
    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_task_runners_.clear();

    flush_task_runner_ = nullptr;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

    if (trace_options() & kInternalEnableArgumentFilter) {
      // If argument filtering is activated and there is no filtering
      // predicate, use the safe default filtering predicate.
      if (argument_filter_predicate_.is_null()) {
        argument_filter_predicate =
            base::BindRepeating(&DefaultIsTraceEventArgsAllowlisted);
      } else {
        argument_filter_predicate = argument_filter_predicate_;
      }
    }
  }

  if (discard_events) {
    if (!flush_output_callback.is_null()) {
      scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      flush_output_callback.Run(empty_result, false);
    }
    return;
  }

  if (use_worker_thread_) {
    base::ThreadPool::PostTask(
        FROM_HERE,
        {MayBlock(), TaskPriority::BEST_EFFORT,
         TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
        BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
                 std::move(previous_logged_events), flush_output_callback,
                 argument_filter_predicate));
    return;
  }

  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
                                  flush_output_callback,
                                  argument_filter_predicate);
}

// Runs in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // This is late. The corresponding flush has finished.
      return;
    }
  }

  // This will flush the thread-local buffer.
  delete thread_local_event_buffer_.Get();

  auto on_flush_override = on_flush_override_.load(std::memory_order_relaxed);
  if (on_flush_override) {
    on_flush_override();
  }

  // The scheduler uses TRACE_EVENT macros when posting a task, which can lead
  // to acquiring a tracing lock. Given that posting a task requires grabbing
  // a scheduler lock, we need to post this task outside the tracing lock to
  // avoid deadlocks.
  scoped_refptr<SequencedTaskRunner> cached_flush_task_runner;
  {
    AutoLock lock(lock_);
    cached_flush_task_runner = flush_task_runner_;
    if (!CheckGeneration(generation) || !flush_task_runner_ ||
        !thread_task_runners_.empty())
      return;
  }
  cached_flush_task_runner->PostTask(
      FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
                          discard_events));
}

void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
  {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_task_runner_) {
      // The flush finished before the timeout.
      return;
    }
    LOG(WARNING)
        << "The following threads haven't finished their flush in time. "
           "If this happens consistently for some thread, please call "
           "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
           "the thread to avoid losing its trace events.";
    for (const auto& it : thread_task_runners_) {
      LOG(WARNING) << "Thread: "
                   << ThreadIdNameManager::GetInstance()->GetName(it.first);
    }
  }
  FinishFlush(generation, discard_events);
}

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  generation_.fetch_add(1, std::memory_order_relaxed);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
}

bool TraceLog::ShouldAddAfterUpdatingState(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    uint64_t id,
    PlatformThreadId thread_id,
    TraceArguments* args) {
  if (!*category_group_enabled)
    return false;

  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return false;

  // Check and update the current thread name only if the event is for the
  // current thread, to avoid locks in most cases.
  if (thread_id == PlatformThread::CurrentId()) {
    const char* new_name =
        ThreadIdNameManager::GetInstance()->GetNameForCurrentThread();
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this
    // will not detect a thread name change within the same char* buffer
    // address: we favor common-case performance over corner-case correctness.
    static auto* current_thread_name = new ThreadLocalPointer<const char>();
    if (new_name != current_thread_name->Get() && new_name && *new_name) {
      current_thread_name->Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      auto existing_name = thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
      } else {
        // This is a thread id that we've seen before, but potentially with a
        // new name.
        std::vector<StringPiece> existing_names = base::SplitStringPiece(
            existing_name->second, ",", base::KEEP_WHITESPACE,
            base::SPLIT_WANT_NONEMPTY);
        if (!Contains(existing_names, new_name)) {
          if (!existing_names.empty())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

#if BUILDFLAG(IS_WIN)
  // This is done sooner rather than later to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already
  // thread-safe.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT) {
    // ETW export expects non-null event names.
    name = name ? name : "";
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  args);
  }
#endif  // BUILDFLAG(IS_WIN)
  return true;
}

TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    TraceArguments* args,
    unsigned int flags) {
  auto thread_id = base::PlatformThread::CurrentId();
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, scope, id,
      trace_event_internal::kNoId,  // bind_id
      thread_id, now, args, flags);
}
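
// Callers normally don't invoke AddTraceEvent() directly; the TRACE_EVENT
// macro family resolves |category_group_enabled| and forwards here, e.g.:
//
//   TRACE_EVENT0("browser", "DoSomething");
//   TRACE_EVENT1("browser", "DoSomething", "value", 42);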
TraceEventHandle TraceLog::AddTraceEventWithBindId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    TraceArguments* args,
    unsigned int flags) {
  auto thread_id = base::PlatformThread::CurrentId();
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, scope, id, bind_id, thread_id, now,
      args, flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
}

TraceEventHandle TraceLog::AddTraceEventWithProcessId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    ProcessId process_id,
    TraceArguments* args,
    unsigned int flags) {
  base::TimeTicks now = TRACE_TIME_TICKS_NOW();
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, scope, id,
      trace_event_internal::kNoId,  // bind_id
      static_cast<PlatformThreadId>(process_id), now, args,
      flags | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
}

// Handles legacy calls to AddTraceEventWithThreadIdAndTimestamp with kNoId
// as the bind_id.
TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    TraceArguments* args,
    unsigned int flags) {
  return AddTraceEventWithThreadIdAndTimestamp(
      phase, category_group_enabled, name, scope, id,
      trace_event_internal::kNoId,  // bind_id
      thread_id, timestamp, args, flags);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    TraceArguments* args,
    unsigned int flags) {
  ThreadTicks thread_now;
  // If the timestamp is provided explicitly, don't record thread time, as it
  // would be for the wrong timestamp. Similarly, if we record an event for
  // another process or thread, we shouldn't report the current thread's
  // thread time.
  if (!(flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ||
        flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID ||
        thread_id != PlatformThread::CurrentId())) {
    thread_now = ThreadNow();
  }
  return AddTraceEventWithThreadIdAndTimestamps(
      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
      timestamp, thread_now, args, flags);
}

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamps(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    PlatformThreadId thread_id,
    const TimeTicks& timestamp,
    const ThreadTicks& thread_timestamp,
    TraceArguments* args,
    unsigned int flags) NO_THREAD_SAFETY_ANALYSIS {
  TraceEventHandle handle = {0, 0, 0};
  if (!ShouldAddAfterUpdatingState(phase, category_group_enabled, name, id,
                                   thread_id, args)) {
    return handle;
  }
  DCHECK(!timestamp.is_null());

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
  // avoid collisions.
  bool has_flow =
      flags & (TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_FLOW_IN);
  if (has_flow && (flags & TRACE_EVENT_FLAG_HAS_LOCAL_ID))
    bind_id = MangleEventId(bind_id);

  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);

  ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
  if (*category_group_enabled & RECORDING_MODE) {
    // |thread_local_event_buffer_| can be null if the current thread doesn't
    // have a message loop or the message loop is blocked.
    InitializeThreadLocalEventBufferIfSupported();
    thread_local_event_buffer = thread_local_event_buffer_.Get();
  }

  if (*category_group_enabled & RECORDING_MODE) {
    auto trace_event_override =
        add_trace_event_override_.load(std::memory_order_relaxed);
    if (trace_event_override) {
      TraceEvent new_trace_event(
          thread_id, offset_event_timestamp, thread_timestamp, phase,
          category_group_enabled, name, scope, id, bind_id, args, flags);
      trace_event_override(
          &new_trace_event,
          /*thread_will_flush=*/thread_local_event_buffer != nullptr, &handle);
      return handle;
    }
  }

  std::string console_message;
  std::unique_ptr<TraceEvent> filtered_trace_event;
  bool disabled_by_filters = false;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
    auto new_trace_event = std::make_unique<TraceEvent>(
        thread_id, offset_event_timestamp, thread_timestamp, phase,
        category_group_enabled, name, scope, id, bind_id, args, flags);

    disabled_by_filters = true;
    ForEachCategoryFilter(
        category_group_enabled, [&new_trace_event, &disabled_by_filters](
                                    TraceEventFilter* trace_event_filter) {
          if (trace_event_filter->FilterTraceEvent(*new_trace_event))
            disabled_by_filters = false;
        });
    if (!disabled_by_filters)
      filtered_trace_event = std::move(new_trace_event);
  }

  // If enabled for recording, the event is added only if one of the filters
  // accepts it, or if the category is not enabled for filtering at all.
  if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
      !disabled_by_filters) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = nullptr;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

    // NO_THREAD_SAFETY_ANALYSIS: Conditional locking above.
    if (trace_event) {
      if (filtered_trace_event) {
        *trace_event = std::move(*filtered_trace_event);
      } else {
        trace_event->Reset(thread_id, offset_event_timestamp, thread_timestamp,
                           phase, category_group_enabled, name, scope, id,
                           bind_id, args, flags);
      }

#if BUILDFLAG(IS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN
                                              : phase,
          timestamp, trace_event);
    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  return handle;
}

void TraceLog::AddMetadataEvent(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceArguments* args,
                                unsigned int flags) {
  HEAP_PROFILER_SCOPED_IGNORE;
  auto thread_id = base::PlatformThread::CurrentId();
  ThreadTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();
  AutoLock lock(lock_);
  auto trace_event = std::make_unique<TraceEvent>(
      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
      category_group_enabled, name,
      trace_event_internal::kGlobalScope,  // scope
      trace_event_internal::kNoId,         // id
      trace_event_internal::kNoId,         // bind_id
      args, flags);
  metadata_events_.push_back(std::move(trace_event));
}

// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  HEAP_PROFILER_SCOPED_IGNORE;
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  auto thread_id =
      trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end()) {
    size_t next_color = (thread_colors_.size() % 6) + 1;
    thread_colors_[thread_name] = next_color;
  }

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%" PRIuS "m", thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  auto it = thread_event_start_times_.find(thread_id);
  if (it != thread_event_start_times_.end())
    depth = it->second.size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
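
// With ECHO_TO_CONSOLE, the message built above renders roughly as (example
// output, colored by the ANSI escapes; one "| " per unfinished nested event
// on the thread):
//
//   CrBrowserMain: | | ScheduleWork (0.123 ms)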
void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
                                const char* name,
                                TraceEventHandle handle) {
  const char* category_name = GetCategoryGroupName(category_group_enabled);
  ForEachCategoryFilter(
      category_group_enabled,
      [name, category_name](TraceEventFilter* trace_event_filter) {
        trace_event_filter->EndEvent(category_name, name);
      });
}

void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  if (!*category_group_enabled)
    return;

  UpdateTraceEventDurationExplicit(
      category_group_enabled, name, handle, base::PlatformThread::CurrentId(),
      /*explicit_timestamps=*/false, OffsetNow(), ThreadNow());
}

void TraceLog::UpdateTraceEventDurationExplicit(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle,
    PlatformThreadId thread_id,
    bool explicit_timestamps,
    const TimeTicks& now,
    const ThreadTicks& thread_now) {
  if (!*category_group_enabled)
    return;

  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;
  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

#if BUILDFLAG(IS_WIN)
  // Generate an ETW event that marks the end of a complete event.
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddCompleteEndEvent(category_group_enabled, name);
#endif  // BUILDFLAG(IS_WIN)

  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    auto update_duration_override =
        update_duration_override_.load(std::memory_order_relaxed);
    if (update_duration_override) {
      update_duration_override(category_group_enabled, name, handle, thread_id,
                               explicit_timestamps, now, thread_now);
      return;
    }
  }

  std::string console_message;
  if (*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);

      trace_event->UpdateDuration(now, thread_now);
#if BUILDFLAG(IS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING)
    EndFilteredEvent(category_group_enabled, name, handle);
}

uint64_t TraceLog::MangleEventId(uint64_t id) {
  return id ^ process_id_hash_;
}

template <typename T>
void TraceLog::AddMetadataEventWhileLocked(PlatformThreadId thread_id,
                                           const char* metadata_name,
                                           const char* arg_name,
                                           const T& value) {
  auto trace_event_override =
      add_trace_event_override_.load(std::memory_order_relaxed);
  if (trace_event_override) {
    TraceEvent trace_event;
    InitializeMetadataEvent(&trace_event, thread_id, metadata_name, arg_name,
                            value);
    trace_event_override(&trace_event, /*thread_will_flush=*/true, nullptr);
  } else {
    InitializeMetadataEvent(
        AddEventToThreadSharedChunkWhileLocked(nullptr, false), thread_id,
        metadata_name, arg_name, value);
  }
}

void TraceLog::AddMetadataEventsWhileLocked() {
  auto trace_event_override =
      add_trace_event_override_.load(std::memory_order_relaxed);

  // Move metadata added by |AddMetadataEvent| into the trace log.
  if (trace_event_override) {
    while (!metadata_events_.empty()) {
      trace_event_override(metadata_events_.back().get(),
                           /*thread_will_flush=*/true, nullptr);
      metadata_events_.pop_back();
    }
  } else {
    while (!metadata_events_.empty()) {
      TraceEvent* event =
          AddEventToThreadSharedChunkWhileLocked(nullptr, false);
      // No chunk may be available if the buffer is full.
      if (event)
        *event = std::move(*metadata_events_.back());
      metadata_events_.pop_back();
    }
  }

#if !BUILDFLAG(IS_NACL)  // NaCl shouldn't expose the process id.
  AddMetadataEventWhileLocked(0, "num_cpus", "number",
                              base::SysInfo::NumberOfProcessors());
#endif

  auto current_thread_id = base::PlatformThread::CurrentId();
  if (process_sort_index_ != 0) {
    AddMetadataEventWhileLocked(current_thread_id, "process_sort_index",
                                "sort_index", process_sort_index_);
  }

#if BUILDFLAG(IS_ANDROID)
  AddMetadataEventWhileLocked(current_thread_id, "chrome_library_address",
                              "start_address",
                              base::StringPrintf("%p", &__executable_start));
  base::debug::ElfBuildIdBuffer build_id;
  size_t build_id_length =
      base::debug::ReadElfBuildId(&__executable_start, true, build_id);
  if (build_id_length > 0) {
    AddMetadataEventWhileLocked(current_thread_id, "chrome_library_module",
                                "id", std::string(build_id));
  }
#endif

  if (!process_labels_.empty()) {
    std::vector<base::StringPiece> labels;
    for (const auto& it : process_labels_)
      labels.push_back(it.second);
    AddMetadataEventWhileLocked(current_thread_id, "process_labels", "labels",
                                base::JoinString(labels, ","));
  }

  // Thread sort indices.
  for (const auto& it : thread_sort_indices_) {
    if (it.second == 0)
      continue;
    AddMetadataEventWhileLocked(it.first, "thread_sort_index", "sort_index",
                                it.second);
  }

  // If the buffer filled up, add a metadata record to report this.
  if (!buffer_limit_reached_timestamp_.is_null()) {
    AddMetadataEventWhileLocked(current_thread_id, "trace_buffer_overflowed",
                                "overflowed_at_ts",
                                buffer_limit_reached_timestamp_);
  }
}

TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, nullptr);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock)
    NO_THREAD_SAFETY_ANALYSIS {
  if (!handle.chunk_seq)
    return nullptr;

  DCHECK(handle.chunk_seq);
  DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
  DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer managed by the thread-local buffer. Try to get it
  // from the main buffer with a lock.
  // NO_THREAD_SAFETY_ANALYSIS: runtime-dependent locking here.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq()
               ? thread_shared_chunk_->GetEventAt(handle.event_index)
               : nullptr;
  }

  return logged_events_->GetEventByHandle(handle);
}

void TraceLog::SetProcessID(ProcessId process_id) {
  process_id_ = process_id;
  // Create an FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  const uint64_t kOffsetBasis = 14695981039346656037ull;
  const uint64_t kFnvPrime = 1099511628211ull;
  const uint64_t pid = static_cast<uint64_t>(process_id_);
  process_id_hash_ = (kOffsetBasis ^ pid) * kFnvPrime;
}
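
// Worked example of the hash above (a single-block, FNV-style mix): for
// process_id == 1234,
//   process_id_hash_ = (14695981039346656037 ^ 1234) * 1099511628211
//                      (mod 2^64).
// MangleEventId() then XORs local ids with this per-process constant, so
// equal local ids emitted by different processes are unlikely to collide.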
void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::set_process_name(const std::string& process_name) {
  {
    AutoLock lock(lock_);
    process_name_ = process_name;
  }

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
  if (perfetto::Tracing::IsInitialized()) {
    auto track = perfetto::ProcessTrack::Current();
    auto desc = track.Serialize();
    desc.mutable_process()->set_process_name(process_name);
    desc.mutable_process()->set_pid(process_id_);
    perfetto::TrackEvent::SetTrackDescriptor(track, std::move(desc));
  }
#endif
}

void TraceLog::UpdateProcessLabel(int label_id,
                                  const std::string& current_label) {
  if (current_label.empty())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  process_labels_.erase(label_id);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[thread_id] = sort_index;
}

#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

size_t TraceLog::GetObserverCountForTest() const {
  AutoLock lock(observers_lock_);
  return enabled_state_observers_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  // This will flush the thread-local buffer.
  delete thread_local_event_buffer_.Get();
}
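
// As the OnFlushTimeout() warning suggests, a thread whose message loop can
// block (and thus may never run the posted flush task) should call this once
// from that thread, e.g.:
//
//   base::trace_event::TraceLog::GetInstance()
//       ->SetCurrentThreadBlocksMessageLoop();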
TraceBuffer* TraceLog::CreateTraceBuffer() {
  HEAP_PROFILER_SCOPED_IGNORE;
  InternalTraceOptions options = trace_options();
  const size_t config_buffer_chunks =
      trace_config_.GetTraceBufferSizeInEvents() / kTraceBufferChunkSize;
  if (options & kInternalRecordContinuously) {
    return TraceBuffer::CreateTraceBufferRingBuffer(
        config_buffer_chunks > 0 ? config_buffer_chunks
                                 : kTraceEventRingBufferChunks);
  }
  if (options & kInternalEchoToConsole) {
    return TraceBuffer::CreateTraceBufferRingBuffer(
        config_buffer_chunks > 0 ? config_buffer_chunks
                                 : kEchoToConsoleTraceEventBufferChunks);
  }
  if (options & kInternalRecordAsMuchAsPossible) {
    return TraceBuffer::CreateTraceBufferVectorOfSize(
        config_buffer_chunks > 0 ? config_buffer_chunks
                                 : kTraceEventVectorBigBufferChunks);
  }
  return TraceBuffer::CreateTraceBufferVectorOfSize(
      config_buffer_chunks > 0 ? config_buffer_chunks
                               : kTraceEventVectorBufferChunks);
}
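
// A sketch of overriding the default chunk counts above via the trace config
// (SetTraceBufferSizeInEvents() is the public TraceConfig setter; the chunk
// math matches config_buffer_chunks above):
//
//   base::trace_event::TraceConfig config("*", "record-continuously");
//   config.SetTraceBufferSizeInEvents(100 * 1000);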
#if BUILDFLAG(IS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
  // Go through each category and set/clear the ETW bit depending on whether
  // the category is enabled.
  for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
    if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
            category.name())) {
      category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    } else {
      category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
    }
  }
}
#endif  // BUILDFLAG(IS_WIN)

void TraceLog::SetTraceBufferForTesting(
    std::unique_ptr<TraceBuffer> trace_buffer) {
  AutoLock lock(lock_);
  logged_events_ = std::move(trace_buffer);
}

#if BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
tracing::PerfettoPlatform* TraceLog::GetOrCreatePerfettoPlatform() {
  if (!perfetto_platform_) {
    perfetto_platform_.reset(new tracing::PerfettoPlatform(
        tracing::PerfettoPlatform::TaskRunnerType::kBuiltin));
  }
  return perfetto_platform_.get();
}

void TraceLog::OnSetup(const perfetto::DataSourceBase::SetupArgs&) {}

void TraceLog::OnStart(const perfetto::DataSourceBase::StartArgs&) {
  AutoLock lock(observers_lock_);
  for (EnabledStateObserver* observer : enabled_state_observers_)
    observer->OnTraceLogEnabled();
  for (const auto& it : async_observers_) {
    it.second.task_runner->PostTask(
        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogEnabled,
                            it.second.observer));
  }
}

void TraceLog::OnStop(const perfetto::DataSourceBase::StopArgs&) {
  AutoLock lock(observers_lock_);
  for (auto* it : enabled_state_observers_)
    it->OnTraceLogDisabled();
  for (const auto& it : async_observers_) {
    it.second.task_runner->PostTask(
        FROM_HERE, BindOnce(&AsyncEnabledStateObserver::OnTraceLogDisabled,
                            it.second.observer));
  }
}
#endif  // BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)

void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
                sizeof(*this));
}

}  // namespace trace_event
}  // namespace base

namespace trace_event_internal {

base::trace_event::TraceEventHandle AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    base::trace_event::TraceArguments* args,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()->AddTraceEvent(
      phase, category_group_enabled, name, scope, id, args, flags);
}

base::trace_event::TraceEventHandle AddTraceEventWithBindId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    base::trace_event::TraceArguments* args,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithBindId(
      phase, category_group_enabled, name, scope, id, bind_id, args, flags);
}

base::trace_event::TraceEventHandle AddTraceEventWithProcessId(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    base::ProcessId process_id,
    base::trace_event::TraceArguments* args,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()->AddTraceEventWithProcessId(
      phase, category_group_enabled, name, scope, id, process_id, args, flags);
}

base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    base::PlatformThreadId thread_id,
    const base::TimeTicks& timestamp,
    base::trace_event::TraceArguments* args,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()
      ->AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                              name, scope, id, thread_id,
                                              timestamp, args, flags);
}

base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    uint64_t bind_id,
    base::PlatformThreadId thread_id,
    const base::TimeTicks& timestamp,
    base::trace_event::TraceArguments* args,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()
      ->AddTraceEventWithThreadIdAndTimestamp(
          phase, category_group_enabled, name, scope, id, bind_id, thread_id,
          timestamp, args, flags);
}

base::trace_event::TraceEventHandle AddTraceEventWithThreadIdAndTimestamps(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    const char* scope,
    uint64_t id,
    base::PlatformThreadId thread_id,
    const base::TimeTicks& timestamp,
    const base::ThreadTicks& thread_timestamp,
    unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()
      ->AddTraceEventWithThreadIdAndTimestamps(
          phase, category_group_enabled, name, scope, id,
          /*bind_id=*/trace_event_internal::kNoId, thread_id, timestamp,
          thread_timestamp, nullptr, flags);
}

void AddMetadataEvent(const unsigned char* category_group_enabled,
                      const char* name,
                      base::trace_event::TraceArguments* args,
                      unsigned int flags) {
  return base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
      category_group_enabled, name, args, flags);
}

int GetNumTracesRecorded() {
  return base::trace_event::TraceLog::GetInstance()->GetNumTracesRecorded();
}

void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
                              const char* name,
                              base::trace_event::TraceEventHandle handle) {
  return base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration(
      category_group_enabled, name, handle);
}

void UpdateTraceEventDurationExplicit(
    const unsigned char* category_group_enabled,
    const char* name,
    base::trace_event::TraceEventHandle handle,
    base::PlatformThreadId thread_id,
    bool explicit_timestamps,
    const base::TimeTicks& now,
    const base::ThreadTicks& thread_now) {
  return base::trace_event::TraceLog::GetInstance()
      ->UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
                                         thread_id, explicit_timestamps, now,
                                         thread_now);
}

#if !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group,
    const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK_EQ(strcmp(category_group, "gpu"), 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kGlobalScope,  // scope
            trace_event_internal::kNoId,         // id
            base::PlatformThread::CurrentId(),   // thread_id
            TRACE_TIME_TICKS_NOW(), nullptr, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}
#endif  // !BUILDFLAG(USE_PERFETTO_CLIENT_LIBRARY)
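
// ScopedTraceBinaryEfficient backs the TRACE_EVENT_BINARY_EFFICIENT0 macro;
// a usage sketch (the category must currently be "gpu", per the DCHECK in
// the constructor):
//
//   TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GpuChannel::OnMessageReceived");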
}  // namespace trace_event_internal