orderfile_instrumentation.cc

// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/orderfile/orderfile_instrumentation.h"

#include <time.h>
#include <unistd.h>

#include <atomic>
#include <cstdio>
#include <cstring>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

#include "base/android/library_loader/anchor_functions.h"
#include "base/android/orderfile/orderfile_buildflags.h"
#include "base/files/file.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"

#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
#include <sstream>

#include "base/command_line.h"
#include "base/time/time.h"
#include "base/trace_event/memory_dump_manager.h"   // no-presubmit-check
#include "base/trace_event/memory_dump_provider.h"  // no-presubmit-check
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

#if !BUILDFLAG(SUPPORTS_CODE_ORDERING)
#error Requires code ordering support (arm/arm64/x86/x86_64).
#endif  // !BUILDFLAG(SUPPORTS_CODE_ORDERING)

// Must be applied to all functions within this file.
#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))

namespace base {
namespace android {
namespace orderfile {
namespace {

// Constants used for StartDelayedDump().
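// With a single phase, the dump happens |kDelayInSeconds| after
// StartDelayedDump(). With several phases, the first phase ends after
// |kInitialDelayInSeconds|, and later phase switches follow (see
// StartDelayedDump() below).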
constexpr int kDelayInSeconds = 30;
constexpr int kInitialDelayInSeconds = kPhases == 1 ? kDelayInSeconds : 5;

#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
// This is defined in content/public/common/content_switches.h, which is not
// accessible in ::base.
constexpr const char kProcessTypeSwitch[] = "type";
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

// These are large overestimates, which is not an issue, as the data is
// allocated in .bss, and on linux doesn't take any actual memory when it's not
// touched.
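// Each bit in |LogData::offsets| below covers 4 bytes of .text (see
// RecordAddress()), so the |kBitfieldSize| uint32_t words can cover
// kBitfieldSize * 32 * 4 bytes, which is where |kMaxTextSizeInBytes| comes
// from.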
constexpr size_t kBitfieldSize = 1 << 22;
constexpr size_t kMaxTextSizeInBytes = kBitfieldSize * (4 * 32);
constexpr size_t kMaxElements = 1 << 20;

// Data required to log reached offsets.
struct LogData {
  std::atomic<uint32_t> offsets[kBitfieldSize];
  std::atomic<size_t> ordered_offsets[kMaxElements];
  std::atomic<size_t> index;
};
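
// One LogData per phase. |g_data_index| is the index of the phase currently
// being recorded; recording stops once it reaches |kPhases| (see Disable()
// and SwitchToNextPhaseOrDump()).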
LogData g_data[kPhases];
std::atomic<int> g_data_index;

// Number of unexpected addresses, that is, addresses that are not within the
// [start, end) bounds of the executable code.
//
// This should be exactly 0, since the start and end of .text should be known
// perfectly by the linker, but it does happen. See crbug.com/1186598.
std::atomic<int> g_unexpected_addresses;

#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
// Dump offsets when a memory dump is requested. Used only if
// switches::kDevtoolsInstrumentationDumping is set.
class OrderfileMemoryDumpHook : public base::trace_event::MemoryDumpProvider {
  NO_INSTRUMENT_FUNCTION bool OnMemoryDump(
      const base::trace_event::MemoryDumpArgs& args,
      base::trace_event::ProcessMemoryDump* pmd) override {
    // Disable instrumentation now to cut down on orderfile pollution.
    if (!Disable()) {
      return true;  // A dump has already been started.
    }
    std::stringstream process_type_str;
    Dump(base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        kProcessTypeSwitch));
    return true;  // If something goes awry, a fatal error will be created
                  // internally.
  }
};
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

// |RecordAddress()| adds an element to a concurrent bitset and to a concurrent
// append-only list of offsets.
//
// Ordering:
// Two consecutive calls to |RecordAddress()| from the same thread will be
// ordered in the same way in the result, as written by
// |StopAndDumpToFile()|. The result will contain exactly one instance of each
// unique offset relative to |kStartOfText| passed to |RecordAddress()|.
//
// Implementation:
// The "set" part is implemented with the |offsets| bitfield of the current
// phase's |LogData|; the insertion order is recorded in its |ordered_offsets|
// array.
// This is not a class, to make sure there isn't a static constructor, as it
// would cause issues with an instrumented static constructor calling this
// code.
//
// Limitations:
// - Only records offsets to addresses between |kStartOfText| and |kEndOfText|.
// - Capacity of the set is limited by |kMaxElements|.
// - Some insertions at the end of collection may be lost.

// Records that |address| has been reached, if recording is enabled.
// To avoid infinite recursion, this *must* *never* call any instrumented
// function, unless |Disable()| is called first.
template <bool for_testing>
__attribute__((always_inline, no_instrument_function)) void RecordAddress(
    size_t address) {
  int index = g_data_index.load(std::memory_order_relaxed);
  if (index >= kPhases)
    return;

  const size_t start =
      for_testing ? kStartOfTextForTesting : base::android::kStartOfText;
  const size_t end =
      for_testing ? kEndOfTextForTesting : base::android::kEndOfText;
  if (UNLIKELY(address < start || address > end)) {
    if (!AreAnchorsSane()) {
      // Something is really wrong with the anchors, and this is likely to be
      // triggered from within a static constructor, where logging is likely to
      // deadlock. By crashing immediately we at least have a chance to get a
      // stack trace from the system to give some clue about the nature of the
      // problem.
      IMMEDIATE_CRASH();
    }
    // We should really crash at the first instance, but it does happen on bots,
    // for a mysterious reason. Give it some leeway. Note that since we don't
    // remember the caller address, if a single function is misplaced but we get
    // many calls to it, then we still crash. If this is the case, add
    // deduplication.
    //
    // Bumped to 100 temporarily as part of crbug.com/1265928 investigation.
    if (g_unexpected_addresses.fetch_add(1, std::memory_order_relaxed) < 100) {
      return;
    }
    Disable();
    LOG(FATAL) << "Too many unexpected addresses! start = " << std::hex << start
               << " end = " << end << " address = " << address;
  }

  size_t offset = address - start;
  static_assert(sizeof(int) == 4,
                "Collection and processing code assumes that sizeof(int) == 4");
  size_t offset_index = offset / 4;

  auto* offsets = g_data[index].offsets;
  // Atomically set the corresponding bit in the array.
  std::atomic<uint32_t>* element = offsets + (offset_index / 32);
  // First, a racy check. This saves a CAS if the bit is already set, and
  // allows the cache line to remain shared across CPUs in this case.
  uint32_t value = element->load(std::memory_order_relaxed);
  uint32_t mask = 1 << (offset_index % 32);
  if (value & mask)
    return;

  auto before = element->fetch_or(mask, std::memory_order_relaxed);
  if (before & mask)
    return;

  // We were the first one to set the element, record it in the ordered
  // elements list.
  // Use relaxed ordering, as the value is not published, or used for
  // synchronization.
  auto* ordered_offsets = g_data[index].ordered_offsets;
  auto& ordered_offsets_index = g_data[index].index;
  size_t insertion_index =
      ordered_offsets_index.fetch_add(1, std::memory_order_relaxed);
  if (UNLIKELY(insertion_index >= kMaxElements)) {
    Disable();
    LOG(FATAL) << "Too many reached offsets";
  }
  ordered_offsets[insertion_index].store(offset, std::memory_order_relaxed);
}

NO_INSTRUMENT_FUNCTION bool DumpToFile(const base::FilePath& path,
                                       const LogData& data) {
  auto file =
      base::File(path, base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_WRITE);
  if (!file.IsValid()) {
    PLOG(ERROR) << "Could not open " << path;
    return false;
  }

  if (data.index == 0) {
    LOG(ERROR) << "No entries to dump";
    return false;
  }

  size_t count = data.index - 1;
  for (size_t i = 0; i < count; i++) {
    // |ordered_offsets| is initialized to 0, so a 0 in the middle of it
    // indicates a case where the index was incremented, but the write is not
    // visible in this thread yet. Safe to skip, also because the function at
    // the start of text is never called.
    auto offset = data.ordered_offsets[i].load(std::memory_order_relaxed);
    if (!offset)
      continue;
    auto offset_str = base::StringPrintf("%" PRIuS "\n", offset);
    if (file.WriteAtCurrentPos(offset_str.c_str(),
                               static_cast<int>(offset_str.size())) < 0) {
      // If the file could be opened, but writing has failed, it's likely that
      // data was partially written. Producing incomplete profiling data would
      // lead to a poorly performing orderfile, but might not be otherwise
      // noticed. So we crash instead.
      LOG(FATAL) << "Error writing profile data";
    }
  }
  return true;
}

// Stops recording, and dumps the data for each phase to a file under
// /data/local/tmp/chrome/orderfile/, named from |tag|, |pid|,
// |start_ns_since_epoch| and the phase number.
NO_INSTRUMENT_FUNCTION void StopAndDumpToFile(int pid,
                                              uint64_t start_ns_since_epoch,
                                              const std::string& tag) {
  Disable();

  for (int phase = 0; phase < kPhases; phase++) {
    std::string tag_str;
    if (!tag.empty())
      tag_str = base::StringPrintf("%s-", tag.c_str());
    auto path = base::StringPrintf(
        "/data/local/tmp/chrome/orderfile/profile-hitmap-%s%d-%" PRIu64
        ".txt_%d",
        tag_str.c_str(), pid, start_ns_since_epoch, phase);
    if (!DumpToFile(base::FilePath(path), g_data[phase])) {
      LOG(ERROR) << "Problem with dump " << phase << " (" << tag << ")";
    }
  }

  int unexpected_addresses =
      g_unexpected_addresses.load(std::memory_order_relaxed);
  if (unexpected_addresses != 0) {
    LOG(WARNING) << "Got " << unexpected_addresses << " unexpected addresses!";
  }
}

}  // namespace

// After a call to Disable(), any function can be called, as reentrancy into
// the instrumentation function will be mitigated.
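// Returns false if instrumentation had already been disabled by an earlier
// call, true otherwise.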
NO_INSTRUMENT_FUNCTION bool Disable() {
  auto old_phase = g_data_index.exchange(kPhases, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return old_phase != kPhases;
}

NO_INSTRUMENT_FUNCTION void SanityChecks() {
  CHECK_LT(base::android::kEndOfText - base::android::kStartOfText,
           kMaxTextSizeInBytes);
  CHECK(base::android::IsOrderingSane());
}
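
// Advances to the next phase. When the last phase ends, dumps all phases via
// StopAndDumpToFile() and returns true; otherwise returns false.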
NO_INSTRUMENT_FUNCTION bool SwitchToNextPhaseOrDump(
    int pid,
    uint64_t start_ns_since_epoch) {
  int before = g_data_index.fetch_add(1, std::memory_order_relaxed);
  if (before + 1 == kPhases) {
    StopAndDumpToFile(pid, start_ns_since_epoch, "");
    return true;
  }
  return false;
}
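
// Spawns a detached background thread that ends the phases (and ultimately
// dumps) on the schedule given by |kInitialDelayInSeconds| and
// |kDelayInSeconds| above.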
NO_INSTRUMENT_FUNCTION void StartDelayedDump() {
  // Using std::thread and not using base::TimeTicks() in order to not call
  // too many base:: symbols that would pollute the reached symbol dumps.
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts))
    PLOG(FATAL) << "clock_gettime.";
  uint64_t start_ns_since_epoch =
      static_cast<uint64_t>(ts.tv_sec) * 1000 * 1000 * 1000 + ts.tv_nsec;
  int pid = getpid();

#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
  static auto* g_orderfile_memory_dump_hook = new OrderfileMemoryDumpHook();
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      g_orderfile_memory_dump_hook, "Orderfile", nullptr);
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)

  std::thread([pid, start_ns_since_epoch]() {
    sleep(kInitialDelayInSeconds);
#if BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
    SwitchToNextPhaseOrDump(pid, start_ns_since_epoch);
    // Return, letting devtools tracing handle any post-startup phases.
#else
    while (!SwitchToNextPhaseOrDump(pid, start_ns_since_epoch))
      sleep(kDelayInSeconds);
#endif  // BUILDFLAG(DEVTOOLS_INSTRUMENTATION_DUMPING)
  })
      .detach();
}

NO_INSTRUMENT_FUNCTION void Dump(const std::string& tag) {
  // As profiling has been disabled, none of the uses of ::base symbols below
  // will enter the symbol dump.
  StopAndDumpToFile(
      getpid(), (base::Time::Now() - base::Time::UnixEpoch()).InNanoseconds(),
      tag);
}

NO_INSTRUMENT_FUNCTION void ResetForTesting() {
  Disable();
  g_data_index = 0;
  for (int i = 0; i < kPhases; i++) {
    memset(reinterpret_cast<uint32_t*>(g_data[i].offsets), 0,
           sizeof(uint32_t) * kBitfieldSize);
    memset(reinterpret_cast<uint32_t*>(g_data[i].ordered_offsets), 0,
           sizeof(uint32_t) * kMaxElements);
    g_data[i].index.store(0);
  }

  g_unexpected_addresses.store(0, std::memory_order_relaxed);
}

NO_INSTRUMENT_FUNCTION void RecordAddressForTesting(size_t address) {
  return RecordAddress<true>(address);
}

NO_INSTRUMENT_FUNCTION std::vector<size_t> GetOrderedOffsetsForTesting() {
  std::vector<size_t> result;
  size_t max_index = g_data[0].index.load(std::memory_order_relaxed);
  for (size_t i = 0; i < max_index; ++i) {
    auto value = g_data[0].ordered_offsets[i].load(std::memory_order_relaxed);
    if (value)
      result.push_back(value);
  }
  return result;
}

}  // namespace orderfile
}  // namespace android
}  // namespace base
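
// Entry hook called at the start of every instrumented function. With clang's
// -finstrument-function-entry-bare (presumably what these builds use), the
// hook takes no arguments, so the entered function is identified via the
// return address.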
extern "C" {

NO_INSTRUMENT_FUNCTION void __cyg_profile_func_enter_bare() {
  base::android::orderfile::RecordAddress<false>(
      reinterpret_cast<size_t>(__builtin_return_address(0)));
}

}  // extern "C"