orderfile_call_graph_instrumentation.cc

// Copyright (c) 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/orderfile/orderfile_instrumentation.h"

#include <time.h>
#include <unistd.h>

#include <atomic>
#include <cstdio>
#include <cstring>
#include <string>
#include <thread>
#include <vector>

#include "base/android/library_loader/anchor_functions.h"
#include "base/android/orderfile/orderfile_buildflags.h"
#include "base/files/file.h"
#include "base/format_macros.h"
#include "base/json/json_writer.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "build/build_config.h"

#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
#include <sstream>

#include "base/command_line.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"   // no-presubmit-check
#include "base/trace_event/memory_dump_provider.h"  // no-presubmit-check
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

#if !BUILDFLAG(SUPPORTS_CODE_ORDERING)
#error Only supported on architectures supporting code ordering (arm/arm64).
#endif  // !BUILDFLAG(SUPPORTS_CODE_ORDERING)

// Must be applied to all functions within this file.
#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))
#define INLINE_AND_NO_INSTRUMENT_FUNCTION \
  __attribute__((always_inline, no_instrument_function))

namespace base {
namespace android {
namespace orderfile {
namespace {
#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
// This is defined in content/public/common/content_switches.h, which is not
// accessible in ::base.
constexpr const char kProcessTypeSwitch[] = "type";
#else
// Constant used for StartDelayedDump().
constexpr int kDelayInSeconds = 30;
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

constexpr size_t kMaxTextSizeInBytes = 1 << 27;
constexpr size_t kMaxElements = kMaxTextSizeInBytes / 4;
// Native code currently has ~850k symbols, hence recording up to 1M symbols
// can cover all possible callee symbols.
constexpr size_t kMaxReachedSymbols = 1 << 20;
// 3 callers are recorded per callee.
constexpr size_t kCallerBuckets = 3;
// The last bucket accounts for misses and for callers from outside the
// native code bounds.
constexpr size_t kMissesBucketIndex = 3;
constexpr size_t kTotalBuckets = 4;

std::atomic<uint32_t> callee_map[kMaxElements];
static_assert(sizeof(callee_map) == 128 * (1 << 20), "");
// Contains caller offsets. 4 buckets of callers per callee, where the
// last bucket is for misses.
std::atomic<uint32_t> g_caller_offset[kMaxReachedSymbols * kTotalBuckets];
static_assert(sizeof(g_caller_offset) == 16 * (1 << 20), "");
// Counts corresponding to |g_caller_offset|.
std::atomic<uint32_t> g_caller_count[kMaxReachedSymbols * kTotalBuckets];
static_assert(sizeof(g_caller_count) == 16 * (1 << 20), "");
// Index for |g_caller_offset| and |g_caller_count|.
std::atomic<uint32_t> g_callers_index;
std::atomic<uint32_t> g_calls_count;
std::atomic<bool> g_disabled;
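
// Reading aid on the static footprint, derived from the constants above:
// |callee_map| has one 4-byte slot per 4-byte-aligned offset in a .text
// section of up to kMaxTextSizeInBytes (128 MiB), i.e. (1 << 25) * 4 bytes =
// 128 MiB. |g_caller_offset| and |g_caller_count| each hold
// kMaxReachedSymbols * kTotalBuckets = (1 << 22) slots of 4 bytes = 16 MiB,
// matching the static_asserts above, so roughly 160 MiB of zero-initialized
// global state in total.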
#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
// Dump offsets when a memory dump is requested. Used only if
// switches::kDevtoolsInstrumentationDumping is set.
class OrderfileMemoryDumpHook : public base::trace_event::MemoryDumpProvider {
  NO_INSTRUMENT_FUNCTION bool OnMemoryDump(
      const base::trace_event::MemoryDumpArgs& args,
      base::trace_event::ProcessMemoryDump* pmd) override {
    if (!Disable())
      return true;  // A dump has already been started.
    std::string process_type =
        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            kProcessTypeSwitch);
    if (process_type.empty())
      process_type = "browser";
    Dump(process_type);
    return true;  // If something goes awry, a fatal error will be created
                  // internally.
  }
};
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
// This is not racy. It is guaranteed that any number of threads concurrently
// calling this function in any order will always end up with the same count
// at the end. It returns |element|'s value before the increment.
INLINE_AND_NO_INSTRUMENT_FUNCTION uint32_t
AtomicIncrement(std::atomic<uint32_t>* element) {
  return element->fetch_add(1, std::memory_order_relaxed);
}

// Increments the miss bucket for a callee. |index| is the first bucket of
// callers for this callee.
INLINE_AND_NO_INSTRUMENT_FUNCTION void RecordMiss(size_t index) {
  AtomicIncrement(g_caller_count + index + kMissesBucketIndex);
}
// Increments the caller count if the caller has previously been registered.
// If it hasn't, searches for an empty bucket and registers the caller there.
// Returns false if no bucket is available.
// |index| is the first bucket to register callers for a certain callee.
INLINE_AND_NO_INSTRUMENT_FUNCTION bool RecordCaller(size_t index,
                                                    size_t caller_offset) {
  for (size_t i = index; i < index + kCallerBuckets; i++) {
    auto offset = g_caller_offset[i].load(std::memory_order_relaxed);
    // This check is racy; a write could have happened between the load and
    // the check.
    if (offset == caller_offset) {
      // Caller already recorded, increment the count.
      AtomicIncrement(g_caller_count + i);
      return true;
    }
  }

  for (size_t i = index; i < index + kCallerBuckets; i++) {
    auto offset = g_caller_offset[i].load(std::memory_order_relaxed);
    uint32_t expected = 0;
    if (!offset) {
      // This is not racy as the compare and exchange is done atomically.
      // It is impossible to reset a bucket once it has been set. The value in
      // |g_caller_offset[i]| is exchanged with |caller_offset| if
      // |g_caller_offset[i] == expected|. Otherwise, it returns false and
      // sets |expected = g_caller_offset[i]|.
      if (g_caller_offset[i].compare_exchange_strong(
              expected, caller_offset, std::memory_order_relaxed,
              std::memory_order_relaxed)) {
        AtomicIncrement(g_caller_count + i);
        return true;
      }
    }
    // This decreases the chances that we miss something due to unseen
    // changes made by another thread.
    if (offset == caller_offset || expected == caller_offset) {
      AtomicIncrement(g_caller_count + i);
      return true;
    }
  }
  return false;
}
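
// Illustration of the bucket scheme above (offsets are hypothetical, for
// exposition only): for a callee whose caller buckets start at |index|, the
// first three distinct caller offsets seen, say 0x100, 0x2a4 and 0x61c, each
// claim one of buckets [index, index + kCallerBuckets) and have their counts
// incremented there. Any further distinct caller, or a caller outside the
// native code bounds, ends up in the miss bucket at
// index + kMissesBucketIndex via RecordMiss() (see RecordAddress() below).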
template <bool for_testing>
__attribute__((always_inline, no_instrument_function)) void RecordAddress(
    size_t callee_address,
    size_t caller_address) {
  bool disabled = g_disabled.load(std::memory_order_relaxed);
  if (disabled)
    return;

  const size_t start =
      for_testing ? kStartOfTextForTesting : base::android::kStartOfText;
  const size_t end =
      for_testing ? kEndOfTextForTesting : base::android::kEndOfText;
  if (UNLIKELY(callee_address < start || callee_address > end)) {
    // Only the code in the native library is instrumented. Callees are
    // expected to be within the native library bounds.
    Disable();
    IMMEDIATE_CRASH();
  }

  size_t offset = callee_address - start;
  static_assert(sizeof(int) == 4,
                "Collection and processing code assumes that sizeof(int) == 4");
  size_t offset_index = offset / 4;
  if (UNLIKELY(offset_index >= kMaxElements))
    return;

  std::atomic<uint32_t>* element = callee_map + offset_index;
  uint32_t callers_index = element->load(std::memory_order_relaxed);
  // Racy check.
  if (callers_index == 0) {
    // Fragmentation is possible as we increment |g_callers_index| based on a
    // racy check.
    uint32_t insertion_index = AtomicIncrement(&g_callers_index) + 1;
    if (UNLIKELY(insertion_index >= kMaxReachedSymbols))
      return;
    uint32_t expected = 0;
    // Exchanges the value in |element| with |insertion_index| if the value in
    // |element| == |expected|. Otherwise, sets |expected| to |element|'s
    // value.
    element->compare_exchange_strong(expected, insertion_index,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed);
    // If |expected| is set, then this callee has previously been seen and
    // already has a corresponding index in the callers array.
    callers_index = expected == 0 ? insertion_index : expected;
  }

  AtomicIncrement(&g_calls_count);
  callers_index *= kTotalBuckets;
  if (caller_address <= start || caller_address > end ||
      !RecordCaller(callers_index, caller_address - start)) {
    // Record a miss if the caller is not within the bounds of the native
    // code, or if there are no empty buckets left to record one more caller
    // for this callee.
    RecordMiss(callers_index);
  }
}
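
// Worked example of the mapping above (addresses are made up): with
// |callee_address| = start + 0x1230, |offset_index| is 0x1230 / 4 = 0x48c.
// The first time this callee is seen, callee_map[0x48c] is set to a fresh
// index n taken from |g_callers_index|; its caller buckets then live at
// g_caller_offset[n * kTotalBuckets .. n * kTotalBuckets + 3] and in the
// matching slots of |g_caller_count|.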
NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path) {
  auto file =
      base::File(path, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
  if (!file.IsValid()) {
    PLOG(ERROR) << "Could not open " << path;
    return false;
  }
  if (g_callers_index == 0) {
    LOG(ERROR) << "No entries to dump";
    return false;
  }

  // This can get very large as it constructs the whole data structure in
  // memory before dumping it to the file.
  Value root(Value::Type::DICTIONARY);
  uint32_t total_calls_count = g_calls_count.load(std::memory_order_relaxed);
  root.SetStringKey("total_calls_count",
                    base::StringPrintf("%" PRIu32, total_calls_count));
  Value call_graph(Value::Type::LIST);
  for (size_t i = 0; i < kMaxElements; i++) {
    auto caller_index =
        callee_map[i].load(std::memory_order_relaxed) * kTotalBuckets;
    if (!caller_index)
      // This callee was never called.
      continue;
    Value callee_element(Value::Type::DICTIONARY);
    uint32_t callee_offset = i * 4;
    callee_element.SetStringKey("index",
                                base::StringPrintf("%" PRIuS, caller_index));
    callee_element.SetStringKey("callee_offset",
                                base::StringPrintf("%" PRIu32, callee_offset));
    std::string offset_str;
    Value callers_list(Value::Type::LIST);
    for (size_t j = 0; j < kTotalBuckets; j++) {
      uint32_t caller_offset =
          g_caller_offset[caller_index + j].load(std::memory_order_relaxed);
      // The last bucket is for misses or callers outside the native library;
      // the caller_offset for this bucket is 0.
      if (j != kMissesBucketIndex && !caller_offset)
        continue;
      uint32_t count =
          g_caller_count[caller_index + j].load(std::memory_order_relaxed);
      // The count can only be 0 for the misses bucket. Otherwise,
      // if |caller_offset| is set then the count must be >= 1.
      CHECK_EQ(count || j == kMissesBucketIndex, true);
      if (!count)
        // No misses.
        continue;
      Value caller_count(Value::Type::DICTIONARY);
      caller_count.SetStringKey("caller_offset",
                                base::StringPrintf("%" PRIu32, caller_offset));
      caller_count.SetStringKey("count",
                                base::StringPrintf("%" PRIu32, count));
      callers_list.Append(std::move(caller_count));
    }
    callee_element.SetKey("caller_and_count", std::move(callers_list));
    call_graph.Append(std::move(callee_element));
  }
  root.SetKey("call_graph", std::move(call_graph));

  std::string output_js;
  if (!JSONWriter::Write(root, &output_js)) {
    LOG(FATAL) << "Error getting JSON string";
  }
  if (file.WriteAtCurrentPos(output_js.c_str(),
                             static_cast<int>(output_js.size())) < 0) {
    // If the file could be opened, but writing has failed, it's likely that
    // data was partially written. Producing incomplete profiling data would
    // lead to a poorly performing orderfile, but might not otherwise be
    // noticed. So we crash instead.
    LOG(FATAL) << "Error writing profile data";
  }
  return true;
}
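
// For reference, the JSON written above has the following shape (values here
// are placeholders; all values are emitted as strings):
//   {
//     "total_calls_count": "123",
//     "call_graph": [
//       {
//         "index": "4",
//         "callee_offset": "1024",
//         "caller_and_count": [
//           {"caller_offset": "512", "count": "7"},
//           {"caller_offset": "0", "count": "2"}  // misses bucket
//         ]
//       }
//     ]
//   }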
// Stops recording, and dumps the data to a file under
// /data/local/tmp/chrome/orderfile/.
NO_INSTRUMENT_FUNCTION void StopAndDumpToFile(int pid,
                                              uint64_t start_ns_since_epoch,
                                              const std::string& tag) {
  std::string tag_str;
  if (!tag.empty())
    tag_str = base::StringPrintf("%s-", tag.c_str());
  auto path = base::StringPrintf(
      "/data/local/tmp/chrome/orderfile/profile-hitmap-%s%d-%" PRIu64 ".txt",
      tag_str.c_str(), pid, start_ns_since_epoch);
  if (!DumpToFile(base::FilePath(path))) {
    LOG(ERROR) << "Problem with dump (" << tag << ")";
  }
}
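
// The resulting path looks like, e.g. (tag, pid and timestamp illustrative):
//   /data/local/tmp/chrome/orderfile/profile-hitmap-renderer-12345-1234567890.txt
// with the "renderer-" part omitted when |tag| is empty.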
}  // namespace

// It is safe to call any function after |Disable()| has been called. No risk
// of infinite recursion.
NO_INSTRUMENT_FUNCTION bool Disable() {
  bool disabled = g_disabled.exchange(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return !disabled;
}
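
// Note that the return value is true only for the first call: the exchange
// returns the previous value of |g_disabled|, so later (and concurrent)
// callers get false. This is what makes the "if (Disable()) dump" pattern
// used below dump at most once.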
NO_INSTRUMENT_FUNCTION void StartDelayedDump() {
#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
  static auto* g_orderfile_memory_dump_hook = new OrderfileMemoryDumpHook();
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      g_orderfile_memory_dump_hook, "Orderfile", nullptr);
  // Return, letting devtools tracing handle any dumping.
#else
  // Using std::thread and not base::TimeTicks() in order not to call too many
  // base:: symbols that would pollute the reached symbol dumps.
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    PLOG(FATAL) << "clock_gettime.";
  uint64_t start_ns_since_epoch =
      static_cast<uint64_t>(ts.tv_sec) * 1000 * 1000 * 1000 + ts.tv_nsec;
  int pid = getpid();
  std::thread([pid, start_ns_since_epoch]() {
    sleep(kDelayInSeconds);
    if (Disable())
      StopAndDumpToFile(pid, start_ns_since_epoch, "");
  }).detach();
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
}
NO_INSTRUMENT_FUNCTION void Dump(const std::string& tag) {
  // As profiling has been disabled, none of the uses of ::base symbols below
  // will enter the symbol dump.
  StopAndDumpToFile(
      getpid(), (base::Time::Now() - base::Time::UnixEpoch()).InNanoseconds(),
      tag);
}

NO_INSTRUMENT_FUNCTION void ResetForTesting() {
  Disable();
  memset(reinterpret_cast<uint32_t*>(callee_map), 0,
         sizeof(uint32_t) * kMaxElements);
  memset(reinterpret_cast<uint32_t*>(g_caller_offset), 0,
         sizeof(uint32_t) * kMaxReachedSymbols * kTotalBuckets);
  memset(reinterpret_cast<uint32_t*>(g_caller_count), 0,
         sizeof(uint32_t) * kMaxReachedSymbols * kTotalBuckets);
  g_callers_index = 0;
  g_disabled = false;
}
NO_INSTRUMENT_FUNCTION void RecordAddressForTesting(size_t callee_address,
                                                    size_t caller_address) {
  return RecordAddress<true>(callee_address, caller_address);
}

// Returns a flattened vector where each callee is allocated 9 slots:
// first slot  -> callee offset
// 8 slots     -> [caller offset, count, ...]
NO_INSTRUMENT_FUNCTION std::vector<size_t> GetOrderedOffsetsForTesting() {
  std::vector<size_t> result;
  for (size_t i = 0; i < kMaxElements; i++) {
    auto caller_index =
        callee_map[i].load(std::memory_order_relaxed) * kTotalBuckets;
    if (!caller_index)
      continue;
    result.push_back(i * 4);
    for (size_t j = 0; j < kTotalBuckets; j++) {
      uint32_t count =
          g_caller_count[caller_index + j].load(std::memory_order_relaxed);
      uint32_t caller_offset =
          g_caller_offset[caller_index + j].load(std::memory_order_relaxed);
      result.push_back(caller_offset);
      result.push_back(count);
    }
  }
  return result;
}
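
// For a single recorded callee the vector therefore looks like (values
// illustrative):
//   {callee_offset,
//    caller0_offset, caller0_count,
//    caller1_offset, caller1_count,
//    caller2_offset, caller2_count,
//    0 /* miss bucket offset, never written */, miss_count}
// with unused caller slots left as zeros.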
}  // namespace orderfile
}  // namespace android
}  // namespace base

extern "C" {

NO_INSTRUMENT_FUNCTION void __cyg_profile_func_enter_bare() {
  base::android::orderfile::RecordAddress<false>(
      reinterpret_cast<size_t>(__builtin_return_address(0)),
      reinterpret_cast<size_t>(__builtin_return_address(1)));
}

}  // extern "C"
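
// __cyg_profile_func_enter_bare is the entry hook that the compiler inserts
// into every instrumented function (with Clang, presumably via the
// -finstrument-function-entry-bare flag, assuming that is the build
// configuration used for this instrumentation). From inside the hook,
// __builtin_return_address(0) is an address inside the instrumented callee
// and __builtin_return_address(1) is an address inside its caller, which is
// how RecordAddress<false>() receives its (callee, caller) pair.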