metadata_recorder.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/metadata_recorder.h"

#include "base/metrics/histogram_macros.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

namespace base {

const size_t MetadataRecorder::MAX_METADATA_COUNT;

MetadataRecorder::Item::Item(uint64_t name_hash,
                             absl::optional<int64_t> key,
                             absl::optional<PlatformThreadId> thread_id,
                             int64_t value)
    : name_hash(name_hash), key(key), thread_id(thread_id), value(value) {}

MetadataRecorder::Item::Item() : name_hash(0), value(0) {}

MetadataRecorder::Item::Item(const Item& other) = default;

MetadataRecorder::Item& MetadataRecorder::Item::operator=(const Item& other) =
    default;

MetadataRecorder::ItemInternal::ItemInternal() = default;

MetadataRecorder::ItemInternal::~ItemInternal() = default;

MetadataRecorder::MetadataRecorder() {
  // Ensure that we have necessary atomic support.
  DCHECK(items_[0].is_active.is_lock_free());
  DCHECK(items_[0].value.is_lock_free());
}
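
// Note (not from the original source): these checks presumably matter because
// GetItems() may read |items_| while a writer thread is suspended mid-Set().
// If std::atomic fell back to an internal lock, a suspended writer could hold
// that lock and the reader would deadlock; lock-free atomics avoid this.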

MetadataRecorder::~MetadataRecorder() = default;

void MetadataRecorder::Set(uint64_t name_hash,
                           absl::optional<int64_t> key,
                           absl::optional<PlatformThreadId> thread_id,
                           int64_t value) {
  AutoLock lock(write_lock_);

  // Acquiring the |write_lock_| ensures that:
  //
  //   - We don't try to write into the same new slot at the same time as
  //     another thread
  //   - We see all writes by other threads (acquiring a mutex implies acquire
  //     semantics)
  size_t item_slots_used = item_slots_used_.load(std::memory_order_relaxed);
  for (size_t i = 0; i < item_slots_used; ++i) {
    auto& item = items_[i];
    if (item.name_hash == name_hash && item.key == key &&
        item.thread_id == thread_id) {
      item.value.store(value, std::memory_order_relaxed);

      const bool was_active =
          item.is_active.exchange(true, std::memory_order_release);
      if (!was_active)
        inactive_item_count_--;

      return;
    }
  }

  item_slots_used = TryReclaimInactiveSlots(item_slots_used);

  if (item_slots_used == items_.size()) {
    // The metadata recorder is full, forcing us to drop this metadata. The
    // above UMA histogram counting occupied metadata slots should help us set
    // a max size that avoids this condition during normal Chrome use.
    return;
  }

  // Wait until the item is fully created before setting |is_active| to true
  // and incrementing |item_slots_used_|, which will signal to readers that the
  // item is ready.
  auto& item = items_[item_slots_used];
  item.name_hash = name_hash;
  item.key = key;
  item.thread_id = thread_id;
  item.value.store(value, std::memory_order_relaxed);
  item.is_active.store(true, std::memory_order_release);
  item_slots_used_.fetch_add(1, std::memory_order_release);
}
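
// Illustrative usage (not from the original source): a hypothetical caller
// might record an item that applies to all keys and all threads, then clear
// it later with a matching Remove(). base::HashMetricName() from
// base/metrics/metrics_hashes.h is one way to produce |name_hash|.
//
//   MetadataRecorder recorder;
//   const uint64_t name_hash = base::HashMetricName("MyMetadataName");
//   recorder.Set(name_hash, absl::nullopt, absl::nullopt, /*value=*/1);
//   ...
//   recorder.Remove(name_hash, absl::nullopt, absl::nullopt);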

void MetadataRecorder::Remove(uint64_t name_hash,
                              absl::optional<int64_t> key,
                              absl::optional<PlatformThreadId> thread_id) {
  AutoLock lock(write_lock_);

  size_t item_slots_used = item_slots_used_.load(std::memory_order_relaxed);
  for (size_t i = 0; i < item_slots_used; ++i) {
    auto& item = items_[i];
    if (item.name_hash == name_hash && item.key == key &&
        item.thread_id == thread_id) {
      // A removed item will occupy its slot until that slot is reclaimed.
      const bool was_active =
          item.is_active.exchange(false, std::memory_order_relaxed);
      if (was_active)
        inactive_item_count_++;

      return;
    }
  }
}

MetadataRecorder::MetadataProvider::MetadataProvider(
    MetadataRecorder* metadata_recorder,
    PlatformThreadId thread_id)
    : metadata_recorder_(metadata_recorder),
      thread_id_(thread_id),
      auto_lock_(metadata_recorder->read_lock_) {}

MetadataRecorder::MetadataProvider::~MetadataProvider() = default;

size_t MetadataRecorder::MetadataProvider::GetItems(
    ItemArray* const items) const {
  return metadata_recorder_->GetItems(items, thread_id_);
}
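
// Illustrative usage (not from the original source): a sampling profiler that
// owns |recorder| could snapshot a target thread's metadata while the scoped
// read lock is held; ItemArray is the fixed-size array of Items declared in
// the header.
//
//   MetadataRecorder::ItemArray items;
//   size_t item_count = 0;
//   {
//     MetadataRecorder::MetadataProvider provider(&recorder, thread_id);
//     item_count = provider.GetItems(&items);
//   }
//   // items[0..item_count) now holds the active items visible to thread_id.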

size_t MetadataRecorder::GetItems(ItemArray* const items,
                                  PlatformThreadId thread_id) const {
  // If a writer adds a new item after this load, it will be ignored. We do
  // this instead of calling item_slots_used_.load() explicitly in the for loop
  // bounds checking, which would be expensive.
  //
  // Also note that items are snapshotted sequentially and that items can be
  // modified mid-snapshot by non-suspended threads. This means that there's a
  // small chance that some items, especially those that occur later in the
  // array, may have values slightly "in the future" from when the sample was
  // actually collected. It also means that the array as returned may have
  // never existed in its entirety, although each name/value pair represents a
  // consistent item that existed very shortly after the thread was suspended.
  size_t item_slots_used = item_slots_used_.load(std::memory_order_acquire);
  size_t write_index = 0;

  for (size_t read_index = 0; read_index < item_slots_used; ++read_index) {
    const auto& item = items_[read_index];
    // Because we wait until |is_active| is set to consider an item active and
    // that field is always set last, we ignore half-created items.
    if (item.is_active.load(std::memory_order_acquire) &&
        (!item.thread_id.has_value() || item.thread_id == thread_id)) {
      (*items)[write_index++] =
          Item{item.name_hash, item.key, item.thread_id,
               item.value.load(std::memory_order_relaxed)};
    }
  }

  return write_index;
}
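
// Concrete case of the caveat above (not from the original source): if a
// non-suspended writer updates item 7's value after item 3 has already been
// copied but before item 7 is reached, the snapshot pairs item 3's old value
// with item 7's new value. Each entry is individually consistent, but the
// array as a whole may never have existed at a single instant.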

size_t MetadataRecorder::TryReclaimInactiveSlots(size_t item_slots_used) {
  const size_t remaining_slots = MAX_METADATA_COUNT - item_slots_used;

  if (inactive_item_count_ == 0 || inactive_item_count_ < remaining_slots) {
    // This reclaiming threshold has a few nice properties:
    //
    //   - It avoids reclaiming when no items have been removed
    //   - It makes doing so more likely as free slots become more scarce
    //   - It makes doing so less likely when the benefits are lower
    return item_slots_used;
  }

  if (read_lock_.Try()) {
    // The lock isn't already held by a reader or another thread reclaiming
    // slots.
    item_slots_used = ReclaimInactiveSlots(item_slots_used);
    read_lock_.Release();
  }

  return item_slots_used;
}
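
// Worked example of the threshold above (not from the original source;
// MAX_METADATA_COUNT is defined in the header, and 50 is assumed here purely
// for illustration): with 48 slots used, 2 remain, so reclamation is attempted
// once at least 2 items are inactive. With only 10 slots used, 40 remain and
// at most 10 items can be inactive, so no compaction happens while free slots
// are plentiful.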

size_t MetadataRecorder::ReclaimInactiveSlots(size_t item_slots_used) {
  // From here until the end of the reclamation, we can safely use
  // memory_order_relaxed for all reads and writes. We don't need
  // memory_order_acquire because acquiring the write mutex gives acquire
  // semantics and no other threads can write after we hold that mutex. We
  // don't need memory_order_release because no readers can read until we
  // release the read mutex, which itself has release semantics.
  size_t first_inactive_item_idx = 0;
  size_t last_active_item_idx = item_slots_used - 1;
  while (first_inactive_item_idx < last_active_item_idx) {
    ItemInternal& inactive_item = items_[first_inactive_item_idx];
    ItemInternal& active_item = items_[last_active_item_idx];

    if (inactive_item.is_active.load(std::memory_order_relaxed)) {
      // Keep seeking forward to an inactive item.
      ++first_inactive_item_idx;
      continue;
    }

    if (!active_item.is_active.load(std::memory_order_relaxed)) {
      // Keep seeking backward to an active item. Skipping over this item
      // indicates that we're freeing the slot at this index.
      --last_active_item_idx;
      item_slots_used--;
      continue;
    }

    // Copy the whole item, including |key| and |thread_id|, so the compacted
    // slot still describes the same logical metadata item.
    inactive_item.name_hash = active_item.name_hash;
    inactive_item.key = active_item.key;
    inactive_item.thread_id = active_item.thread_id;
    inactive_item.value.store(active_item.value.load(std::memory_order_relaxed),
                              std::memory_order_relaxed);
    inactive_item.is_active.store(true, std::memory_order_relaxed);

    ++first_inactive_item_idx;
    --last_active_item_idx;
    item_slots_used--;
  }

  item_slots_used_.store(item_slots_used, std::memory_order_relaxed);
  return item_slots_used;
}
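
// Illustration of the compaction above (not from the original source, with
// A = active and i = inactive): given a used prefix of [A i A A], the trailing
// active item is copied over the inactive slot at index 1, yielding [A A A]
// with |item_slots_used_| == 3. Slots at or past the new count are simply
// reused by future Set() calls.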

}  // namespace base