persistent_histogram_allocator.cc

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <atomic>
#include <limits>
#include <utility>

#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/strcat.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"

namespace base {

namespace {

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
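
// E.g. if the structure of the ranges array ever changed, bumping the version
// to 0xBCEA225A + 2 would cause records stored with the old "+ 1" identifier
// to stop matching during iteration, so they would be skipped rather than
// misinterpreted.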

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway due to the fact that they depend on data
// managed elsewhere and which could be destructed first. An AtomicWord is
// used instead of std::atomic because the latter can create global ctors
// and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}
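
// Boundaries must be strictly increasing: {0, 1, 5, 10} is accepted while
// {0, 5, 5, 10} is rejected, as is any array whose recomputed checksum does
// not match the stored one (e.g. because the data was corrupted or tampered
// with in the persistent segment).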

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
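
// As a worked example, assuming the usual 4-byte AtomicCount: a 50-bucket
// histogram requires 50 * 2 * 4 = 400 bytes, one live count plus one "logged"
// count per bucket. The overflow guard means a |bucket_count| near SIZE_MAX
// yields 0 instead of a wrapped-around allocation size.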

}  // namespace

const Feature kPersistentHistogramsFeature{
    "PersistentHistograms",
#if BUILDFLAG(IS_FUCHSIA)
    // TODO(crbug.com/1295119): Enable once writable mmap() is supported.
    FEATURE_DISABLED_BY_DEFAULT
#else
    FEATURE_ENABLED_BY_DEFAULT
#endif  // BUILDFLAG(IS_FUCHSIA)
};

PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}

PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return false;
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}

// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  std::atomic<PersistentMemoryAllocator::Reference> counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
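
// The "40" in kExpectedInstanceSize above accounts for the eight 4-byte
// fields (histogram_type through counts_ref, 32 bytes) plus the 8-byte name
// array; the two Metadata members contribute the remaining
// 2 * kExpectedInstanceSize bytes.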

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}

PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt())
    return nullptr;

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    last_created_.store(histogram_ref, std::memory_order_relaxed);
    return histogram;
  }

  return nullptr;
}
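
// Allocation and registration are separate steps: a caller (such as the
// FactoryGet() implementations in histogram.cc) is expected to first call
// AllocateHistogram(), then register the result with the StatisticsRecorder,
// and finally report the outcome via FinalizeHistogram() below so the record
// either becomes visible to other processes or is abandoned.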

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::SetRangesManager(
    RangesManager* ranges_manager) {
  ranges_manager_.reset(ranges_manager);
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  last_created_.store(0, std::memory_order_relaxed);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr)
    return nullptr;

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges)
    return nullptr;
  DCHECK_EQ(created_ranges->size(), histogram_bucket_count + 1);
  DCHECK_EQ(created_ranges->range(1), histogram_minimum);
  DCHECK_EQ(created_ranges->range(histogram_bucket_count - 1),
            histogram_maximum);
  const BucketRanges* ranges;
  if (ranges_manager_) {
    ranges = ranges_manager_->RegisterOrDeleteDuplicateRanges(
        created_ranges.release());
  } else {
    ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
        created_ranges.release());
  }

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, false);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
      /*make_iterable=*/false);
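
  // Once materialized, the single shared block is split in half:
  //   counts_ref -> [ active counts ][ logged counts ]
  //                  ^ offset 0       ^ offset counts_bytes / 2
  // so the two DelayedPersistentAllocation objects above resolve to the two
  // halves of the same allocation.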

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram =
          Histogram::PersistentCreate(name, ranges, counts_data, logged_data,
                                      &histogram_data_ptr->samples_metadata,
                                      &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      return nullptr;
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
}

#if !BUILDFLAG(IS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(const FilePath& file_path,
                                              size_t size,
                                              uint64_t id,
                                              StringPiece name,
                                              bool exclusive_write) {
  uint32_t flags = File::FLAG_OPEN_ALWAYS | File::FLAG_WIN_SHARE_DELETE |
                   File::FLAG_READ | File::FLAG_WRITE;
  if (exclusive_write)
    flags |= File::FLAG_WIN_EXCLUSIVE_WRITE;
  File file(file_path, flags);
  if (!file.IsValid())
    return false;

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  bool success = false;
  if (file.created()) {
    success = mmfile->Initialize(std::move(file), {0, size},
                                 MemoryMappedFile::READ_WRITE_EXTEND);
  } else {
    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  }
  if (!success ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    return false;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), 0, id,
                                                      name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path);
  if (base::PathExists(active_path))
    return false;

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty())
    base::ReplaceFile(spare_path, active_path, nullptr);

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path = ConstructFilePath(dir, name);
  FilePath active_path = ConstructFilePathForActiveFile(dir, name);
  FilePath spare_path = ConstructFilePath(dir, std::string(name) + "-spare");
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}
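
// For a name of "BrowserMetrics" in directory |dir|, the paths above work out
// to "BrowserMetrics.pma" (base), "BrowserMetrics-active.pma" (active) and
// "BrowserMetrics-spare.pma" (spare), assuming the usual ".pma" value of
// PersistentMemoryAllocator::kFileExtension.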

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     StringPiece name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForActiveFile(
    const FilePath& dir,
    StringPiece name) {
  return ConstructFilePath(dir, std::string(name) + "-active");
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name) {
  return ConstructFilePathForUploadDir(dir, name, Time::Now(),
                                       GetCurrentProcId());
}

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<base::StringPiece> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = std::string(parts[0]);

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}
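
// ParseFilePath() inverts ConstructFilePathForUploadDir(): a file named
// "MyMetrics-5D3A11F2-3F.pma" splits on "-." into {"MyMetrics", "5D3A11F2",
// "3F", "pma"}, yielding name "MyMetrics", a timestamp of time_t 0x5D3A11F2
// and pid 0x3F (63). Names containing '-' or '.' would therefore not parse.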

bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
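  // The file is created under a temporary name and then renamed into place so
  // that a reader can never observe a partially-created or partially-extended
  // spare file at |spare_path|.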
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    success = spare_file.IsValid();

    if (success) {
      MemoryMappedFile mmfile;
      success = mmfile.Initialize(std::move(spare_file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
    }
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path);

  return success;
}

#endif  // !BUILDFLAG(IS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
    const WritableSharedMemoryRegion& region) {
  base::WritableSharedMemoryMapping mapping = region.Map();
  if (!mapping.IsValid() ||
      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
          mapping)) {
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<WritableSharedPersistentMemoryAllocator>(
          std::move(mapping), 0, StringPiece()))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<intptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}
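
// The Acquire_Load below pairs with the Release_Store in Set() so that a
// thread that sees the non-null pointer also sees the fully-constructed
// allocator it points to.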
// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if BUILDFLAG(IS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if BUILDFLAG(IS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and then ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base