// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <assert.h>

#include <algorithm>

#include "base/bits.h"
#include "base/debug/alias.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/sparse_histogram.h"
#include "base/notreached.h"
#include "base/numerics/checked_math.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_piece.h"
#include "base/system/sys_info.h"
#include "base/threading/scoped_blocking_call.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>
// Must be after <windows.h>
#include <winbase.h>
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include <sys/mman.h>
#endif

namespace {

// Limit of memory segment size. It has to fit in an unsigned 32-bit number
// and should be a power of 2 in order to accommodate almost any page size.
constexpr uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB

// A constant (random) value placed in the shared metadata to identify
// an already initialized memory segment.
constexpr uint32_t kGlobalCookie = 0x408305DC;

// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initialized.
constexpr uint32_t kGlobalVersion = 2;

// Constant values placed in the block headers to indicate its state.
constexpr uint32_t kBlockCookieFree = 0;
constexpr uint32_t kBlockCookieQueue = 1;
constexpr uint32_t kBlockCookieWasted = (uint32_t)-1;
constexpr uint32_t kBlockCookieAllocated = 0xC8799269;

// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
// types rather than combined bitfield.

// Flags stored in the flags_ field of the SharedMetadata structure below.
constexpr uint32_t kFlagCorrupt = 1 << 0;
constexpr uint32_t kFlagFull = 1 << 1;

// Errors that are logged in "errors" histogram.
enum AllocatorError : int {
  kMemoryIsCorrupt = 1,
};

bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
  return (loaded_flags & flag) != 0;
}

void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
  for (;;) {
    uint32_t new_flags = (loaded_flags & ~flag) | flag;
    // In the failure case, the actual "flags" value is stored in
    // loaded_flags. These accesses are "relaxed" because they are completely
    // independent of all other values.
    if (flags->compare_exchange_weak(loaded_flags, new_flags,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed)) {
      break;
    }
  }
}

}  // namespace

namespace base {

// The block-header is placed at the top of every allocation within the
// segment to describe the data that follows it.
struct PersistentMemoryAllocator::BlockHeader {
  uint32_t size;       // Number of bytes in this block, including header.
  uint32_t cookie;     // Constant value indicating completed allocation.
  std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
  std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
};

// The shared metadata exists once at the top of the memory segment to
// describe the state of the allocator to all processes. The size of this
// structure must be a multiple of 64-bits to ensure compatibility between
// architectures.
struct PersistentMemoryAllocator::SharedMetadata {
  uint32_t cookie;     // Some value that indicates complete initialization.
  uint32_t size;       // Total size of memory segment.
  uint32_t page_size;  // Paging size within memory segment.
  uint32_t version;    // Version code so upgrades don't break.
  uint64_t id;         // Arbitrary ID number given by creator.
  uint32_t name;       // Reference to stored name string.
  uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.

  // Above is read-only after first construction. Below may be changed and
  // so must be marked "volatile" to provide correct inter-process behavior.

  // State of the memory, plus some padding to keep alignment.
  volatile std::atomic<uint8_t> memory_state;  // MemoryState enum values.
  uint8_t padding2[3];

  // Bitfield of information flags. Access to this should be done through
  // the CheckFlag() and SetFlag() methods defined above.
  volatile std::atomic<uint32_t> flags;

  // Offset/reference to first free space in segment.
  volatile std::atomic<uint32_t> freeptr;

  // The "iterable" queue is an M&S Queue as described here, append-only:
  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
  // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
  volatile BlockHeader queue;  // Empty block for linked-list head/tail.
};
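
// Illustrative layout of a segment (descriptive comment added for clarity;
// widths are not to scale and the block contents are hypothetical):
//
//   offset 0: [SharedMetadata ......... queue]
//             [BlockHeader][allocation data  ]
//             [BlockHeader][allocation data  ]
//             ...
//             freeptr -> first unallocated byte
//
// A Reference handed out by the allocator is simply the byte offset of a
// BlockHeader from the start of the segment, which is why references remain
// meaningful in every process that maps the same memory.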

// The "queue" block header is used to detect "last node" so that zero/null
// can be used to indicate that it hasn't been added at all. It is part of
// the SharedMetadata structure which itself is always located at offset zero.
const PersistentMemoryAllocator::Reference
    PersistentMemoryAllocator::kReferenceQueue =
        offsetof(SharedMetadata, queue);

const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
    FILE_PATH_LITERAL(".pma");

PersistentMemoryAllocator::Iterator::Iterator(
    const PersistentMemoryAllocator* allocator)
    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}

PersistentMemoryAllocator::Iterator::Iterator(
    const PersistentMemoryAllocator* allocator,
    Reference starting_after)
    : allocator_(allocator), last_record_(0), record_count_(0) {
  Reset(starting_after);
}

PersistentMemoryAllocator::Iterator::~Iterator() = default;

void PersistentMemoryAllocator::Iterator::Reset() {
  last_record_.store(kReferenceQueue, std::memory_order_relaxed);
  record_count_.store(0, std::memory_order_relaxed);
}

void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
  if (starting_after == 0) {
    Reset();
    return;
  }

  last_record_.store(starting_after, std::memory_order_relaxed);
  record_count_.store(0, std::memory_order_relaxed);

  // Ensure that the starting point is a valid, iterable block (meaning it can
  // be read and has a non-zero "next" pointer).
  const volatile BlockHeader* block =
      allocator_->GetBlock(starting_after, 0, 0, false, false);
  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
    NOTREACHED();
    last_record_.store(kReferenceQueue, std::memory_order_release);
  }
}

PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetLast() {
  Reference last = last_record_.load(std::memory_order_relaxed);
  if (last == kReferenceQueue)
    return kReferenceNull;
  return last;
}

PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
  // Make a copy of the existing count of found-records, acquiring all changes
  // made to the allocator, notably "freeptr" (see comment in loop for why
  // the load of that value cannot be moved above here) that occurred during
  // any previous runs of this method, including those by parallel threads
  // that interrupted it. It pairs with the Release at the end of this method.
  //
  // Otherwise, if the compiler were to arrange the two loads such that
  // "count" was fetched _after_ "freeptr" then it would be possible for
  // this thread to be interrupted between them and other threads perform
  // multiple allocations, make-iterables, and iterations (with the included
  // increment of |record_count_|) culminating in the check at the bottom
  // mistakenly determining that a loop exists. Isn't this stuff fun?
  uint32_t count = record_count_.load(std::memory_order_acquire);
  Reference last = last_record_.load(std::memory_order_acquire);
  Reference next;
  while (true) {
    const volatile BlockHeader* block =
        allocator_->GetBlock(last, 0, 0, true, false);
    if (!block)  // Invalid iterator state.
      return kReferenceNull;

    // The compiler and CPU can freely reorder all memory accesses on which
    // there are no dependencies. It could, for example, move the load of
    // "freeptr" to above this point because there are no explicit dependencies
    // between it and "next". If it did, however, then another block could
    // be queued after that but before the following load meaning there is
    // one more queued block than the future "detect loop by having more
    // blocks that could fit before freeptr" will allow.
    //
    // By "acquiring" the "next" value here, it's synchronized to the enqueue
    // of the node which in turn is synchronized to the allocation (which sets
    // freeptr). Thus, the scenario above cannot happen.
    next = block->next.load(std::memory_order_acquire);
    if (next == kReferenceQueue)  // No next allocation in queue.
      return kReferenceNull;
    block = allocator_->GetBlock(next, 0, 0, false, false);
    if (!block) {  // Memory is corrupt.
      allocator_->SetCorrupt();
      return kReferenceNull;
    }

    // Update the "last_record" pointer to be the reference being returned.
    // If it fails then another thread has already iterated past it so loop
    // again. Failing will also load the existing value into "last" so there
    // is no need to do another such load when the while-loop restarts. A
    // "strong" compare-exchange is used because failing unnecessarily would
    // mean repeating some fairly costly validations above.
    if (last_record_.compare_exchange_strong(
            last, next, std::memory_order_acq_rel,
            std::memory_order_acquire)) {
      *type_return = block->type_id.load(std::memory_order_relaxed);
      break;
    }
  }

  // Memory corruption could cause a loop in the list. Such must be detected
  // so as to not cause an infinite loop in the caller. This is done by simply
  // making sure it doesn't iterate more times than the absolute maximum
  // number of allocations that could have been made. Callers are likely
  // to loop multiple times before it is detected but at least it stops.
  const uint32_t freeptr = std::min(
      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
      allocator_->mem_size_);
  const uint32_t max_records =
      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
  if (count > max_records) {
    allocator_->SetCorrupt();
    return kReferenceNull;
  }

  // Increment the count and release the changes made above. It pairs with
  // the Acquire at the top of this method. Note that this operation is not
  // strictly synchronized with fetching of the object to return, which would
  // have to be done inside the loop and is somewhat complicated to achieve.
  // It does not matter if it falls behind temporarily so long as it never
  // gets ahead.
  record_count_.fetch_add(1, std::memory_order_release);
  return next;
}

PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
  Reference ref;
  uint32_t type_found;
  while ((ref = GetNext(&type_found)) != 0) {
    if (type_found == type_match)
      return ref;
  }
  return kReferenceNull;
}
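
// Illustrative use of the iterator (added comment; `kMyDataType` is a
// hypothetical type constant, not part of this file):
//
//   PersistentMemoryAllocator::Iterator iter(allocator);
//   PersistentMemoryAllocator::Reference ref;
//   while ((ref = iter.GetNextOfType(kMyDataType)) != 0) {
//     const char* data = allocator->GetAsArray<char>(
//         ref, kMyDataType, PersistentMemoryAllocator::kSizeAny);
//     // ... read |data|; it stays valid for the life of the allocator.
//   }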

// static
bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
                                                   size_t size,
                                                   size_t page_size,
                                                   bool readonly) {
  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
          (size % kAllocAlignment == 0 || readonly) &&
          (page_size == 0 || size % page_size == 0 || readonly));
}

PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
                                                     size_t size,
                                                     size_t page_size,
                                                     uint64_t id,
                                                     base::StringPiece name,
                                                     bool readonly)
    : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL),
                                size,
                                page_size,
                                id,
                                name,
                                readonly) {}

PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
                                                     size_t size,
                                                     size_t page_size,
                                                     uint64_t id,
                                                     base::StringPiece name,
                                                     bool readonly)
    : mem_base_(static_cast<char*>(memory.base)),
      mem_type_(memory.type),
      mem_size_(checked_cast<uint32_t>(size)),
      mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
#if BUILDFLAG(IS_NACL)
      vm_page_size_(4096U),  // SysInfo is not built for NACL.
#else
      vm_page_size_(SysInfo::VMAllocationGranularity()),
#endif
      readonly_(readonly),
      corrupt_(false),
      allocs_histogram_(nullptr),
      used_histogram_(nullptr),
      errors_histogram_(nullptr) {
  // These asserts ensure that the structures are 32/64-bit agnostic and meet
  // all the requirements of use within the allocator. They access private
  // definitions and so cannot be moved to the global scope.
  static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
                "struct is not portable across different natural word widths");
  static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
                "struct is not portable across different natural word widths");

  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
                "BlockHeader is not a multiple of kAllocAlignment");
  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
                "SharedMetadata is not a multiple of kAllocAlignment");
  static_assert(kReferenceQueue % kAllocAlignment == 0,
                "\"queue\" is not aligned properly; must be at end of struct");

  // Ensure that memory segment is of acceptable size.
  CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly));

  // These atomics operate inter-process and so must be lock-free.
  DCHECK(SharedMetadata().freeptr.is_lock_free());
  DCHECK(SharedMetadata().flags.is_lock_free());
  DCHECK(BlockHeader().next.is_lock_free());
  CHECK(corrupt_.is_lock_free());

  if (shared_meta()->cookie != kGlobalCookie) {
    if (readonly) {
      SetCorrupt();
      return;
    }

    // This block is only executed when a completely new memory segment is
    // being initialized. It's unshared and single-threaded...
    volatile BlockHeader* const first_block =
        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
                                                sizeof(SharedMetadata));
    if (shared_meta()->cookie != 0 ||
        shared_meta()->size != 0 ||
        shared_meta()->version != 0 ||
        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
        shared_meta()->id != 0 ||
        shared_meta()->name != 0 ||
        shared_meta()->tailptr != 0 ||
        shared_meta()->queue.cookie != 0 ||
        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
        first_block->size != 0 ||
        first_block->cookie != 0 ||
        first_block->type_id.load(std::memory_order_relaxed) != 0 ||
        first_block->next != 0) {
      // ...or something malicious has been playing with the metadata.
      SetCorrupt();
    }

    // This is still safe to do even if corruption has been detected.
    shared_meta()->cookie = kGlobalCookie;
    shared_meta()->size = mem_size_;
    shared_meta()->page_size = mem_page_;
    shared_meta()->version = kGlobalVersion;
    shared_meta()->id = id;
    shared_meta()->freeptr.store(sizeof(SharedMetadata),
                                 std::memory_order_release);

    // Set up the queue of iterable allocations.
    shared_meta()->queue.size = sizeof(BlockHeader);
    shared_meta()->queue.cookie = kBlockCookieQueue;
    shared_meta()->queue.next.store(kReferenceQueue,
                                    std::memory_order_release);
    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);

    // Allocate space for the name so other processes can learn it.
    if (!name.empty()) {
      const size_t name_length = name.length() + 1;
      shared_meta()->name = Allocate(name_length, 0);
      char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
      if (name_cstr)
        memcpy(name_cstr, name.data(), name.length());
    }

    shared_meta()->memory_state.store(MEMORY_INITIALIZED,
                                      std::memory_order_release);
  } else {
    if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
        shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
      SetCorrupt();
    }
    if (!readonly) {
      // The allocator is attaching to a previously initialized segment of
      // memory. If the initialization parameters differ, make the best of it
      // by reducing the local construction parameters to match those of
      // the actual memory area. This ensures that the local object never
      // tries to write outside of the original bounds.
      // Because the fields are const to ensure that no code other than the
      // constructor makes changes to them as well as to give optimization
      // hints to the compiler, it's necessary to const-cast them for changes
      // here.
      if (shared_meta()->size < mem_size_)
        *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
      if (shared_meta()->page_size < mem_page_)
        *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;

      // Ensure that settings are still valid after the above adjustments.
      if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly))
        SetCorrupt();
    }
  }
}

PersistentMemoryAllocator::~PersistentMemoryAllocator() {
  // It's strictly forbidden to do any memory access here in case there is
  // some issue with the underlying memory segment. The "Local" allocator
  // makes use of this to allow deletion of the segment on the heap from
  // within its destructor.
}

uint64_t PersistentMemoryAllocator::Id() const {
  return shared_meta()->id;
}

const char* PersistentMemoryAllocator::Name() const {
  Reference name_ref = shared_meta()->name;
  const char* name_cstr =
      GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
  if (!name_cstr)
    return "";

  size_t name_length = GetAllocSize(name_ref);
  if (name_cstr[name_length - 1] != '\0') {
    NOTREACHED();
    SetCorrupt();
    return "";
  }

  return name_cstr;
}

void PersistentMemoryAllocator::CreateTrackingHistograms(
    base::StringPiece name) {
  if (name.empty() || readonly_)
    return;
  std::string name_string(name);

#if 0
  // This histogram wasn't being used so has been disabled. It is left here
  // in case development of a new use of the allocator could benefit from
  // recording (temporarily and locally) the allocation sizes.
  DCHECK(!allocs_histogram_);
  allocs_histogram_ = Histogram::FactoryGet(
      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
      HistogramBase::kUmaTargetedHistogramFlag);
#endif

  DCHECK(!used_histogram_);
  used_histogram_ = LinearHistogram::FactoryGet(
      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
      HistogramBase::kUmaTargetedHistogramFlag);

  DCHECK(!errors_histogram_);
  errors_histogram_ = SparseHistogram::FactoryGet(
      "UMA.PersistentAllocator." + name_string + ".Errors",
      HistogramBase::kUmaTargetedHistogramFlag);
}

void PersistentMemoryAllocator::Flush(bool sync) {
  FlushPartial(used(), sync);
}

void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
  shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
  FlushPartial(sizeof(SharedMetadata), false);
}

uint8_t PersistentMemoryAllocator::GetMemoryState() const {
  return shared_meta()->memory_state.load(std::memory_order_relaxed);
}

size_t PersistentMemoryAllocator::used() const {
  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
                  mem_size_);
}

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
    const void* memory,
    uint32_t type_id) const {
  uintptr_t address = reinterpret_cast<uintptr_t>(memory);
  if (address < reinterpret_cast<uintptr_t>(mem_base_))
    return kReferenceNull;

  uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
  if (offset >= mem_size_ || offset < sizeof(BlockHeader))
    return kReferenceNull;

  Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
  if (!GetBlockData(ref, type_id, kSizeAny))
    return kReferenceNull;

  return ref;
}

size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return 0;
  uint32_t size = block->size;
  // Header was verified by GetBlock() but a malicious actor could change
  // the value between there and here. Check it again.
  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
    SetCorrupt();
    return 0;
  }
  return size - sizeof(BlockHeader);
}

uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return 0;
  return block->type_id.load(std::memory_order_relaxed);
}

bool PersistentMemoryAllocator::ChangeType(Reference ref,
                                           uint32_t to_type_id,
                                           uint32_t from_type_id,
                                           bool clear) {
  DCHECK(!readonly_);
  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return false;

  // "Strong" exchanges are used below because there is no loop that can retry
  // in the wake of spurious failures possible with "weak" exchanges. It is,
  // in aggregate, an "acquire-release" operation so no memory accesses can be
  // reordered either before or after this method (since changes based on type
  // could happen on either side).

  if (clear) {
    // If clearing the memory, first change it to the "transitioning" type so
    // there can be no confusion by other threads. After the memory is cleared,
    // it can be changed to its final type.
    if (!block->type_id.compare_exchange_strong(
            from_type_id, kTypeIdTransitioning, std::memory_order_acquire,
            std::memory_order_acquire)) {
      // Existing type wasn't what was expected: fail (with no changes)
      return false;
    }

    // Clear the memory in an atomic manner. Using "release" stores forces
    // every write to be done after the ones before it. This is better than
    // using memset because (a) it supports "volatile" and (b) it creates a
    // reliable pattern upon which other threads may rely.
    volatile std::atomic<int>* data =
        reinterpret_cast<volatile std::atomic<int>*>(
            reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader));
    const uint32_t words = (block->size - sizeof(BlockHeader)) / sizeof(int);
    DCHECK_EQ(0U, (block->size - sizeof(BlockHeader)) % sizeof(int));
    for (uint32_t i = 0; i < words; ++i) {
      data->store(0, std::memory_order_release);
      ++data;
    }

    // If the destination type is "transitioning" then skip the final exchange.
    if (to_type_id == kTypeIdTransitioning)
      return true;

    // Finish the change to the desired type.
    from_type_id = kTypeIdTransitioning;  // Exchange needs modifiable original.
    bool success = block->type_id.compare_exchange_strong(
        from_type_id, to_type_id, std::memory_order_release,
        std::memory_order_relaxed);
    DCHECK(success);  // Should never fail.
    return success;
  }

  // One step change to the new type. Will return false if the existing value
  // doesn't match what is expected.
  return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
                                                std::memory_order_acq_rel,
                                                std::memory_order_acquire);
}
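
// Illustrative use of ChangeType (added comment; the type constants are
// hypothetical): a block can be retired and reused by zeroing its payload
// during the transition.
//
//   // Atomically verify the block still holds kOldType, clear its payload,
//   // then publish it as kNewType.
//   bool ok = allocator->ChangeType(ref, kNewType, kOldType, /*clear=*/true);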

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
    size_t req_size,
    uint32_t type_id) {
  Reference ref = AllocateImpl(req_size, type_id);
  if (ref) {
    // Success: Record this allocation in usage stats (if active).
    if (allocs_histogram_)
      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
  } else {
    // Failure: Record an allocation of zero for tracking.
    if (allocs_histogram_)
      allocs_histogram_->Add(0);
  }
  return ref;
}
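
// Illustrative allocation flow (added comment; `MyData` and `kMyDataType`
// are hypothetical names): allocate, initialize in place, then publish to
// other threads/processes via MakeIterable().
//
//   Reference ref = allocator->Allocate(sizeof(MyData), kMyDataType);
//   if (ref) {
//     char* data = allocator->GetAsArray<char>(ref, kMyDataType,
//                                              sizeof(MyData));
//     // ... initialize |data| ...
//     allocator->MakeIterable(ref);  // Releases the writes above.
//   }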

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
    size_t req_size,
    uint32_t type_id) {
  DCHECK(!readonly_);

  // Validate req_size to ensure it won't overflow when used as 32-bit value.
  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
    NOTREACHED();
    return kReferenceNull;
  }

  // Round up the requested size, plus header, to the next allocation
  // alignment.
  size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
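  // Worked example for the rounding above (added for clarity, assuming
  // kAllocAlignment == 8 as defined in the header): a request for 20 bytes
  // becomes AlignUp(20 + 16, 8) == 40 bytes, i.e. a 16-byte header plus
  // 24 bytes of usable space.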
  if (size <= sizeof(BlockHeader) || size > mem_page_) {
    NOTREACHED();
    return kReferenceNull;
  }

  // Get the current start of unallocated memory. Other threads may
  // update this at any time and cause us to retry these operations.
  // This value should be treated as "const" to avoid confusion through
  // the code below but recognize that any failed compare-exchange operation
  // involving it will cause it to be loaded with a more recent value. The
  // code should either exit or restart the loop in that case.
  /* const */ uint32_t freeptr =
      shared_meta()->freeptr.load(std::memory_order_acquire);

  // Allocation is lockless so we do all our calculation and then, if saving
  // indicates a change has occurred since we started, scrap everything and
  // start over.
  for (;;) {
    if (IsCorrupt())
      return kReferenceNull;

    if (freeptr + size > mem_size_) {
      SetFlag(&shared_meta()->flags, kFlagFull);
      return kReferenceNull;
    }

    // Get pointer to the "free" block. If something has been allocated since
    // the load of freeptr above, it is still safe as nothing will be written
    // to that location until after the compare-exchange below.
    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
    if (!block) {
      SetCorrupt();
      return kReferenceNull;
    }

    // An allocation cannot cross page boundaries. If it would, create a
    // "wasted" block and begin again at the top of the next page. This
    // area could just be left empty but we fill in the block header just
    // for completeness' sake.
    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
    if (size > page_free) {
      if (page_free <= sizeof(BlockHeader)) {
        SetCorrupt();
        return kReferenceNull;
      }
      const uint32_t new_freeptr = freeptr + page_free;
      if (shared_meta()->freeptr.compare_exchange_strong(
              freeptr, new_freeptr, std::memory_order_acq_rel,
              std::memory_order_acquire)) {
        block->size = page_free;
        block->cookie = kBlockCookieWasted;
      }
      continue;
    }

    // Don't leave a slice at the end of a page too small for anything. This
    // can result in an allocation up to two alignment-sizes greater than the
    // minimum required by requested-size + header + alignment.
    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
      size = page_free;
      if (freeptr + size > mem_size_) {
        SetCorrupt();
        return kReferenceNull;
      }
    }

    // This cast is safe because (freeptr + size) <= mem_size_.
    const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);

    // Save our work. Try again if another thread has completed an allocation
    // while we were processing. A "weak" exchange would be permissible here
    // because the code will just loop and try again but the above processing
    // is significant so make the extra effort of a "strong" exchange.
    if (!shared_meta()->freeptr.compare_exchange_strong(
            freeptr, new_freeptr, std::memory_order_acq_rel,
            std::memory_order_acquire)) {
      continue;
    }

    // Given that all memory was zeroed before ever being given to an instance
    // of this class and given that we only allocate in a monotonic fashion
    // going forward, it must be that the newly allocated block is completely
    // full of zeros. If we find anything in the block header that is NOT a
    // zero then something must have previously run amuck through memory,
    // writing beyond the allocated space and into unallocated space.
    if (block->size != 0 ||
        block->cookie != kBlockCookieFree ||
        block->type_id.load(std::memory_order_relaxed) != 0 ||
        block->next.load(std::memory_order_relaxed) != 0) {
      SetCorrupt();
      return kReferenceNull;
    }

    // Make sure the memory exists by writing to the first byte of every memory
    // page it touches beyond the one containing the block header itself.
    // As the underlying storage is often memory mapped from disk or shared
    // space, sometimes things go wrong and those addresses don't actually
    // exist, leading to a SIGBUS (or Windows equivalent) at some arbitrary
    // location in the code. This should concentrate all those failures into
    // this location for easy tracking and, eventually, proper handling.
    volatile char* mem_end = reinterpret_cast<volatile char*>(block) + size;
    volatile char* mem_begin = reinterpret_cast<volatile char*>(
        (reinterpret_cast<uintptr_t>(block) + sizeof(BlockHeader) +
         (vm_page_size_ - 1)) &
        ~static_cast<uintptr_t>(vm_page_size_ - 1));
    for (volatile char* memory = mem_begin; memory < mem_end;
         memory += vm_page_size_) {
      // It's required that a memory segment start as all zeros and thus the
      // newly allocated block is all zeros at this point. Thus, writing a
      // zero to it allows testing that the memory exists without actually
      // changing its contents. The compiler doesn't know about the requirement
      // and so cannot optimize-away these writes.
      *memory = 0;
    }

    // Load information into the block header. There is no "release" of the
    // data here because this memory can, currently, be seen only by the thread
    // performing the allocation. When it comes time to share this, the thread
    // will call MakeIterable() which does the release operation.
    // `size` is at most kSegmentMaxSize, so this cast is safe.
    block->size = static_cast<uint32_t>(size);
    block->cookie = kBlockCookieAllocated;
    block->type_id.store(type_id, std::memory_order_relaxed);
    return freeptr;
  }
}

void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
  uint32_t remaining = std::max(
      mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
      (uint32_t)sizeof(BlockHeader));
  meminfo->total = mem_size_;
  meminfo->free = remaining - sizeof(BlockHeader);
}

void PersistentMemoryAllocator::MakeIterable(Reference ref) {
  DCHECK(!readonly_);
  if (IsCorrupt())
    return;
  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
  if (!block)  // invalid reference
    return;
  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
    return;
  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.

  // Try to add this block to the tail of the queue. May take multiple tries.
  // If so, tail will be automatically updated with a more recent value during
  // compare-exchange operations.
  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
  for (;;) {
    // Acquire the current tail-pointer released by previous call to this
    // method and validate it.
    block = GetBlock(tail, 0, 0, true, false);
    if (!block) {
      SetCorrupt();
      return;
    }

    // Try to insert the block at the tail of the queue. The tail node always
    // has an existing value of kReferenceQueue; if that is somehow not the
    // existing value then another thread has acted in the meantime. A "strong"
    // exchange is necessary so the "else" block does not get executed when
    // that is not actually the case (which can happen with a "weak" exchange).
    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
    if (block->next.compare_exchange_strong(next, ref,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
      // Update the tail pointer to the new offset. If the "else" clause did
      // not exist, then this could be a simple Release_Store to set the new
      // value but because it does, it's possible that other threads could add
      // one or more nodes at the tail before reaching this point. We don't
      // have to check the return value because it either operates correctly
      // or the exact same operation has already been done (by the "else"
      // clause) on some other thread.
      shared_meta()->tailptr.compare_exchange_strong(
          tail, ref, std::memory_order_release, std::memory_order_relaxed);
      return;
    }

    // In the unlikely case that a thread crashed or was killed between the
    // update of "next" and the update of "tailptr", it is necessary to
    // perform the operation that would have been done. There's no explicit
    // check for crash/kill which means that this operation may also happen
    // even when the other thread is in perfect working order which is what
    // necessitates the CompareAndSwap above.
    shared_meta()->tailptr.compare_exchange_strong(
        tail, next, std::memory_order_acq_rel, std::memory_order_acquire);
  }
}

// The "corrupted" state is held both locally and globally (shared). The
// shared flag can't be trusted since a malicious actor could overwrite it.
// Because corruption can be detected during read-only operations such as
// iteration, this method may be called by other "const" methods. In this
// case, it's safe to discard the constness and modify the local flag and
// maybe even the shared flag if the underlying data isn't actually read-only.
void PersistentMemoryAllocator::SetCorrupt() const {
  if (!corrupt_.load(std::memory_order_relaxed) &&
      !CheckFlag(
          const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
          kFlagCorrupt)) {
    LOG(ERROR) << "Corruption detected in shared-memory segment.";
    RecordError(kMemoryIsCorrupt);
  }

  corrupt_.store(true, std::memory_order_relaxed);
  if (!readonly_) {
    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
            kFlagCorrupt);
  }
}

bool PersistentMemoryAllocator::IsCorrupt() const {
  if (corrupt_.load(std::memory_order_relaxed) ||
      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
    SetCorrupt();  // Make sure all indicators are set.
    return true;
  }
  return false;
}

bool PersistentMemoryAllocator::IsFull() const {
  return CheckFlag(&shared_meta()->flags, kFlagFull);
}

// Dereference a block |ref| and ensure that it's valid for the desired
// |type_id| and |size|. |queue_ok| and |free_ok| indicate that we may try to
// access block headers not available to callers but still accessed by this
// module. By having internal dereferences go through this same function, the
// allocator is hardened against corruption.
const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref,
                                    uint32_t type_id,
                                    size_t size,
                                    bool queue_ok,
                                    bool free_ok) const {
  // Handle special cases.
  if (ref == kReferenceQueue && queue_ok)
    return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);

  // Validation of parameters.
  if (ref < sizeof(SharedMetadata))
    return nullptr;
  if (ref % kAllocAlignment != 0)
    return nullptr;
  size += sizeof(BlockHeader);
  if (ref + size > mem_size_)
    return nullptr;

  // Validation of referenced block-header.
  if (!free_ok) {
    const volatile BlockHeader* const block =
        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
    if (block->cookie != kBlockCookieAllocated)
      return nullptr;
    if (block->size < size)
      return nullptr;
    if (ref + block->size > mem_size_)
      return nullptr;
    if (type_id != 0 &&
        block->type_id.load(std::memory_order_relaxed) != type_id) {
      return nullptr;
    }
  }

  // Return pointer to block data.
  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}

void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
  // Generally there is nothing to do as every write is done through volatile
  // memory with atomic instructions to guarantee consistency. This (virtual)
  // method exists so that derived classes can do special things, such as
  // tell the OS to write changes to disk now rather than when convenient.
}

void PersistentMemoryAllocator::RecordError(int error) const {
  if (errors_histogram_)
    errors_histogram_->Add(error);
}

const volatile void* PersistentMemoryAllocator::GetBlockData(
    Reference ref,
    uint32_t type_id,
    size_t size) const {
  DCHECK(size > 0);
  const volatile BlockHeader* block =
      GetBlock(ref, type_id, size, false, false);
  if (!block)
    return nullptr;
  return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
}

void PersistentMemoryAllocator::UpdateTrackingHistograms() {
  DCHECK(!readonly_);
  if (used_histogram_) {
    MemoryInfo meminfo;
    GetMemoryInfo(&meminfo);
    HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
        ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
    used_histogram_->Add(used_percent);
  }
}

//----- LocalPersistentMemoryAllocator -----------------------------------------

LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
    size_t size,
    uint64_t id,
    base::StringPiece name)
    : PersistentMemoryAllocator(AllocateLocalMemory(size),
                                size, 0, id, name, false) {}

LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
}
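
// Illustrative usage (added comment; the size, id, and name are arbitrary
// example values): a heap-backed allocator that lives only for the life of
// the process, useful in tests or before a shared segment exists.
//
//   LocalPersistentMemoryAllocator allocator(/*size=*/64 << 10,
//                                            /*id=*/0, "ExampleAllocator");
//   auto ref = allocator.Allocate(128, /*type_id=*/0xABCD0001);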

// static
PersistentMemoryAllocator::Memory
LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
  void* address;

#if BUILDFLAG(IS_WIN)
  address =
      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (address)
    return Memory(address, MEM_VIRTUAL);
  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Win",
                     static_cast<int>(::GetLastError()));
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
  address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_ANON | MAP_SHARED, -1, 0);
  if (address != MAP_FAILED)
    return Memory(address, MEM_VIRTUAL);
  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Posix",
                     errno);
#else
#error This architecture is not (yet) supported.
#endif

  // As a last resort, just allocate the memory from the heap. This will
  // achieve the same basic result but the acquired memory has to be
  // explicitly zeroed and thus realized immediately (i.e. all pages are
  // added to the process now instead of only when first accessed).
  address = malloc(size);
  DPCHECK(address);
  memset(address, 0, size);
  return Memory(address, MEM_MALLOC);
}

// static
void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
                                                           size_t size,
                                                           MemoryType type) {
  if (type == MEM_MALLOC) {
    free(memory);
    return;
  }

  DCHECK_EQ(MEM_VIRTUAL, type);
#if BUILDFLAG(IS_WIN)
  BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
  DCHECK(success);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  int result = ::munmap(memory, size);
  DCHECK_EQ(0, result);
#else
#error This architecture is not (yet) supported.
#endif
}

//----- WritableSharedPersistentMemoryAllocator --------------------------------

WritableSharedPersistentMemoryAllocator::
    WritableSharedPersistentMemoryAllocator(
        base::WritableSharedMemoryMapping memory,
        uint64_t id,
        base::StringPiece name)
    : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
                                memory.size(),
                                0,
                                id,
                                name,
                                false),
      shared_memory_(std::move(memory)) {}

WritableSharedPersistentMemoryAllocator::
    ~WritableSharedPersistentMemoryAllocator() = default;

// static
bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
    const base::WritableSharedMemoryMapping& memory) {
  return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
}
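
// Illustrative cross-process setup (added comment; a sketch only, assuming
// base::ReadOnlySharedMemoryRegion::Create() is used to obtain the mapping):
// one process writes through a writable mapping while others attach
// read-only to the same region.
//
//   base::MappedReadOnlyRegion shm =
//       base::ReadOnlySharedMemoryRegion::Create(64 << 10);
//   WritableSharedPersistentMemoryAllocator writer(
//       std::move(shm.mapping), /*id=*/0, "ExampleShared");
//   // shm.region can be handed to another process, mapped there, and used
//   // to construct a ReadOnlySharedPersistentMemoryAllocator.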

//----- ReadOnlySharedPersistentMemoryAllocator --------------------------------

ReadOnlySharedPersistentMemoryAllocator::
    ReadOnlySharedPersistentMemoryAllocator(
        base::ReadOnlySharedMemoryMapping memory,
        uint64_t id,
        base::StringPiece name)
    : PersistentMemoryAllocator(
          Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
          memory.size(),
          0,
          id,
          name,
          true),
      shared_memory_(std::move(memory)) {}

ReadOnlySharedPersistentMemoryAllocator::
    ~ReadOnlySharedPersistentMemoryAllocator() = default;

// static
bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
    const base::ReadOnlySharedMemoryMapping& memory) {
  return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
}

#if !BUILDFLAG(IS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------

FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
    std::unique_ptr<MemoryMappedFile> file,
    size_t max_size,
    uint64_t id,
    base::StringPiece name,
    bool read_only)
    : PersistentMemoryAllocator(
          Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
          max_size != 0 ? max_size : file->length(),
          0,
          id,
          name,
          read_only),
      mapped_file_(std::move(file)) {}

FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() = default;

// static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
    const MemoryMappedFile& file,
    bool read_only) {
  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
}

void FilePersistentMemoryAllocator::Cache() {
  // Since this method is expected to load data from permanent storage
  // into memory, blocking I/O may occur.
  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
                                                base::BlockingType::MAY_BLOCK);

  // Calculate begin/end addresses so that the first byte of every page
  // in that range can be read. Keep within the used space. The |volatile|
  // keyword makes it so the compiler can't make assumptions about what is
  // in a given memory location and thus possibly avoid the read.
  const volatile char* mem_end = mem_base_ + used();
  const volatile char* mem_begin = mem_base_;

  // Iterate over the memory a page at a time, reading the first byte of
  // every page. The values are added to a |total| so that the compiler
  // can't omit the read.
  int total = 0;
  for (const volatile char* memory = mem_begin; memory < mem_end;
       memory += vm_page_size_) {
    total += *memory;
  }

  // Tell the compiler that |total| is used so that it can't optimize away
  // the memory accesses above.
  debug::Alias(&total);
}

void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
  if (IsReadonly())
    return;

  absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
  if (sync)
    scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);

#if BUILDFLAG(IS_WIN)
  // Windows doesn't support asynchronous flush.
  scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
  BOOL success = ::FlushViewOfFile(data(), length);
  DPCHECK(success);
#elif BUILDFLAG(IS_APPLE)
  // On OSX, "invalidate" removes all cached pages, forcing a re-read from
  // disk. That's not applicable to "flush" so omit it.
  int result =
      ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
  DCHECK_NE(EINVAL, result);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  // On POSIX, "invalidate" forces _other_ processes to recognize what has
  // been written to disk and so is applicable to "flush".
  int result = ::msync(const_cast<void*>(data()), length,
                       MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
  DCHECK_NE(EINVAL, result);
#else
#error Unsupported OS.
#endif
}
#endif  // !BUILDFLAG(IS_NACL)

//----- DelayedPersistentAllocation --------------------------------------------

DelayedPersistentAllocation::DelayedPersistentAllocation(
    PersistentMemoryAllocator* allocator,
    std::atomic<Reference>* ref,
    uint32_t type,
    size_t size,
    bool make_iterable)
    : DelayedPersistentAllocation(allocator,
                                  ref,
                                  type,
                                  size,
                                  0,
                                  make_iterable) {}

DelayedPersistentAllocation::DelayedPersistentAllocation(
    PersistentMemoryAllocator* allocator,
    std::atomic<Reference>* ref,
    uint32_t type,
    size_t size,
    size_t offset,
    bool make_iterable)
    : allocator_(allocator),
      type_(type),
      size_(checked_cast<uint32_t>(size)),
      offset_(checked_cast<uint32_t>(offset)),
      make_iterable_(make_iterable),
      reference_(ref) {
  DCHECK(allocator_);
  DCHECK_NE(0U, type_);
  DCHECK_LT(0U, size_);
  DCHECK(reference_);
}

DelayedPersistentAllocation::~DelayedPersistentAllocation() = default;
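
// Illustrative usage (added comment; names are hypothetical): the atomic
// reference typically lives in static storage so the underlying block is
// allocated at most once, on first access, even across racing threads.
//
//   static std::atomic<PersistentMemoryAllocator::Reference> g_ref{0};
//   DelayedPersistentAllocation delayed(allocator, &g_ref, kMyDataType,
//                                       sizeof(MyData),
//                                       /*make_iterable=*/true);
//   void* mem = delayed.Get();  // Allocates on first call; reuses afterward.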

void* DelayedPersistentAllocation::Get() const {
  // Relaxed operations are acceptable here because it's not protecting the
  // contents of the allocation in any way.
  Reference ref = reference_->load(std::memory_order_acquire);
  if (!ref) {
    ref = allocator_->Allocate(size_, type_);
    if (!ref)
      return nullptr;

    // Store the new reference in its proper location using compare-and-swap.
    // Use a "strong" exchange to ensure no false-negatives since the operation
    // cannot be retried.
    Reference existing = 0;  // Must be mutable; receives actual value.
    if (reference_->compare_exchange_strong(existing, ref,
                                            std::memory_order_release,
                                            std::memory_order_relaxed)) {
      if (make_iterable_)
        allocator_->MakeIterable(ref);
    } else {
      // Failure indicates that something else has raced ahead, performed the
      // allocation, and stored its reference. Purge the allocation that was
      // just done and use the other one instead.
      DCHECK_EQ(type_, allocator_->GetType(existing));
      DCHECK_LE(size_, allocator_->GetAllocSize(existing));
      allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
      ref = existing;
    }
  }

  char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
  if (!mem) {
    // This should never happen but be tolerant if it does as corruption from
    // the outside is something to guard against.
    NOTREACHED();
    return nullptr;
  }
  return mem + offset_;
}

}  // namespace base