reached_addresses_bitset.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/android/reached_addresses_bitset.h"

#include "base/android/library_loader/anchor_functions.h"
#include "base/android/library_loader/anchor_functions_buildflags.h"
#include "base/check_op.h"
#include "base/numerics/safe_conversions.h"

namespace base {
namespace android {

namespace {

constexpr size_t kBitsPerElement = sizeof(uint32_t) * 8;

// Below, an array of uint32_t in BSS is introduced and then cast to an array
// of std::atomic<uint32_t>. In C++20, constructing a std::atomic is no longer
// 'trivial'. See https://github.com/microsoft/STL/issues/661 for the
// reasoning behind this change in the standard.
//
// Assert that both types have the same size. The sizes do not have to match
// according to a note in [atomics.types.generic] in C++17. With this
// assertion in place, it is unlikely that the constructor produces a value
// other than (uint32_t)0.
static_assert(sizeof(uint32_t) == sizeof(std::atomic<uint32_t>), "");

// Keep the array in BSS only for non-official builds to avoid potential harm
// to data locality and unspecified behavior from the reinterpret_cast below.
// In order to start new experiments with base::Feature(ReachedCodeProfiler)
// on Canary/Dev, this array will need to be reintroduced to official builds.
// When doing so, don't forget to update `kConfigurationSupported` in
// `reached_code_profiler.cc`.
#if BUILDFLAG(SUPPORTS_CODE_ORDERING) && !defined(OFFICIAL_BUILD)
// Enough for 1 << 29 bytes of code, 512MB.
constexpr size_t kTextBitfieldSize = 1 << 20;
uint32_t g_text_bitfield[kTextBitfieldSize];
#endif

}  // namespace

// static
ReachedAddressesBitset* ReachedAddressesBitset::GetTextBitset() {
#if BUILDFLAG(SUPPORTS_CODE_ORDERING) && !defined(OFFICIAL_BUILD)
  static ReachedAddressesBitset text_bitset(
      kStartOfText, kEndOfText,
      reinterpret_cast<std::atomic<uint32_t>*>(g_text_bitfield),
      kTextBitfieldSize);
  return &text_bitset;
#else
  return nullptr;
#endif
}
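
// Marks the kBytesGranularity-sized slot containing |address| as reached.
// Addresses outside [start_address_, end_address_) are ignored. The bit is
// set with a relaxed atomic fetch_or, so concurrent calls from multiple
// threads are safe.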
void ReachedAddressesBitset::RecordAddress(uintptr_t address) {
  // |address| is outside of the range.
  if (address < start_address_ || address >= end_address_)
    return;

  size_t offset = static_cast<size_t>(address - start_address_);
  uint32_t offset_index = checked_cast<uint32_t>(offset / kBytesGranularity);

  // Atomically set the corresponding bit in the array.
  std::atomic<uint32_t>* element = reached_ + (offset_index / kBitsPerElement);

  // First, a racy check. This saves a CAS if the bit is already set, and
  // allows the cache line to remain shared across CPUs in this case.
  uint32_t value = element->load(std::memory_order_relaxed);
  uint32_t mask = 1 << (offset_index % kBitsPerElement);
  if (value & mask)
    return;

  element->fetch_or(mask, std::memory_order_relaxed);
}
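
// Returns the byte offset from |start_address_| of every reached slot; each
// entry is a multiple of kBytesGranularity.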
std::vector<uint32_t> ReachedAddressesBitset::GetReachedOffsets() const {
  std::vector<uint32_t> offsets;
  const size_t elements = NumberOfReachableElements();
  for (size_t i = 0; i < elements; ++i) {
    uint32_t element = reached_[i].load(std::memory_order_relaxed);
    // No reached addresses at this element.
    if (element == 0)
      continue;
    for (size_t j = 0; j < 32; ++j) {
      if (!((element >> j) & 1))
        continue;
      size_t offset_index = i * 32 + j;
      size_t offset = offset_index * kBytesGranularity;
      offsets.push_back(checked_cast<uint32_t>(offset));
    }
  }
  return offsets;
}
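
// |storage_ptr| is expected to point to |storage_size| zero-initialized
// uint32_t elements that outlive this bitset; for the text bitset, that is
// the BSS-resident g_text_bitfield array above.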
ReachedAddressesBitset::ReachedAddressesBitset(
    uintptr_t start_address,
    uintptr_t end_address,
    std::atomic<uint32_t>* storage_ptr,
    size_t storage_size)
    : start_address_(start_address),
      end_address_(end_address),
      reached_(storage_ptr) {
  DCHECK_LE(start_address_, end_address_);
  DCHECK_LE(NumberOfReachableElements(), storage_size * kBitsPerElement);
}
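
// Number of uint32_t elements needed to hold one bit per kBytesGranularity
// bytes of [start_address_, end_address_), rounding up so that partially
// covered slots and elements are still counted.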
size_t ReachedAddressesBitset::NumberOfReachableElements() const {
  size_t reachable_bits =
      (end_address_ + kBytesGranularity - 1) / kBytesGranularity -
      start_address_ / kBytesGranularity;
  return (reachable_bits + kBitsPerElement - 1) / kBitsPerElement;
}

}  // namespace android
}  // namespace base
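
// Illustrative usage sketch, assuming a build with SUPPORTS_CODE_ORDERING
// enabled and OFFICIAL_BUILD undefined so that GetTextBitset() returns a
// non-null bitset. SomeTextFunction is a hypothetical stand-in for a function
// located in the .text section; in practice the recorded address would
// typically be a program counter sampled by the reached-code profiler.
//
//   using base::android::ReachedAddressesBitset;
//
//   ReachedAddressesBitset* bitset = ReachedAddressesBitset::GetTextBitset();
//   if (bitset) {
//     bitset->RecordAddress(reinterpret_cast<uintptr_t>(&SomeTextFunction));
//     // Each entry is a kBytesGranularity-aligned byte offset from
//     // kStartOfText for a slot that was reached.
//     std::vector<uint32_t> offsets = bitset->GetReachedOffsets();
//   }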