trace_buffer.cc

// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {
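// A TraceBuffer that owns a fixed set of up to |max_chunks_| chunks and hands
// them out in ring-buffer fashion: the index of every returned chunk is pushed
// onto |recyclable_chunks_queue_|, and GetChunk() recycles the oldest entry,
// so once the buffer wraps around new events overwrite the oldest ones.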
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  TraceBufferRingBuffer(const TraceBufferRingBuffer&) = delete;
  TraceBufferRingBuffer& operator=(const TraceBufferRingBuffer&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    // Put nullptr in the slot of an in-flight chunk.
    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = nullptr;
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }
  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }
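  // The ring buffer never reports itself as full: once all chunk slots have
  // been handed out, GetChunk() recycles the least recently returned chunk,
  // so the oldest events are silently overwritten.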
  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
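  // |recyclable_chunks_queue_| is a circular queue of indices into |chunks_|.
  // Entries between |queue_head_| and |queue_tail_| are chunks available to
  // GetChunk(); the spare slot added in queue_capacity() is what distinguishes
  // an empty queue (head == tail) from a full one.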
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra slot to help distinguish the full state from the empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;
};
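// A TraceBuffer that appends chunks to a growing vector and never recycles
// them. IsFull() reports full once |max_chunks_| chunks have been handed out,
// although GetChunk() can still grow the vector past that point so metadata
// events and thread-local buffers can be flushed (see the comment in
// GetChunk()).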
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  TraceBufferVector(const TraceBufferVector&) = delete;
  TraceBufferVector& operator=(const TraceBufferVector&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::make_unique<TraceBufferChunk>(static_cast<uint32_t>(*index) +
                                              1);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }
  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
                  chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
};
}  // namespace

TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() = default;

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}
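// The per-chunk overhead estimate is cached in |cached_overhead_estimate_| and
// grown incrementally: each call measures only the events added since the
// previous call, and once the chunk is full the cached result is reused
// verbatim.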
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_ = std::make_unique<TraceEventMemoryOverhead>();

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount(
          TraceEventMemoryOverhead::kTraceEvent);
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}
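// TraceResultBuffer stitches JSON fragments from the trace buffer into a
// single JSON array via the output callback. A minimal usage sketch (the
// |fragment_json| variable is illustrative, not part of the API):
//
//   TraceResultBuffer result;
//   TraceResultBuffer::SimpleOutput output;
//   result.SetOutputCallback(output.GetCallback());
//   result.Start();                      // Emits "[".
//   result.AddFragment(fragment_json);   // Emits "," between fragments.
//   result.Finish();                     // Emits "]".
//   // output.json_output now holds the complete JSON array.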
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return BindRepeating(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() = default;

void TraceResultBuffer::SetOutputCallback(OutputCallback json_chunk_callback) {
  output_callback_ = std::move(json_chunk_callback);
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base