// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

#include <stdint.h>

#include <atomic>
#include <memory>
#include <type_traits>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/strings/string_piece.h"
#include "build/build_config.h"

namespace base {

class HistogramBase;
class MemoryMappedFile;
// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
// struct MyPersistentObjectType {
//     // SHA1(MyPersistentObjectType): Increment this if structure changes!
//     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
//     // Expected size for 32/64-bit check. Update this if structure changes!
//     static constexpr size_t kExpectedInstanceSize = 20;
//
//     ...
// };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
// identification of these objects in the allocator, including the ability
// to find them via iteration. The number is arbitrary but using the first
// four bytes of the SHA1 hash of the type name means that there shouldn't
// be any conflicts with other types that may also be stored in the memory.
// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
// be used to generate the hash if the type name seems common. Use a command
// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
// If the structure layout changes, ALWAYS increment this number so that
// newer versions of the code don't try to interpret persistent data written
// by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
// what sizeof(T) would return. By providing it explicitly, the allocator can
// verify that the structure is compatible between both 32-bit and 64-bit
// versions of the code.
//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
//
// Though persistent memory segments are transferable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing
// data as corrupt and refuse to access any of it.
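//
// For orientation, a minimal usage sketch (illustrative only, not part of
// this API; the buffer size, id, and name are arbitrary choices and
// MyPersistentObjectType is the example type described above):
//
//   std::vector<char> buffer(64 * 1024, 0);  // New, zeroed backing memory.
//   PersistentMemoryAllocator allocator(buffer.data(), buffer.size(),
//                                       /*page_size=*/0, /*id=*/0x1234,
//                                       "ExampleAllocator",
//                                       /*readonly=*/false);
//   MyPersistentObjectType* object = allocator.New<MyPersistentObjectType>();
//   if (object)
//     allocator.MakeIterable(object);  // Let other users find it.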
class BASE_EXPORT PersistentMemoryAllocator {
 public:
  typedef uint32_t Reference;

  // These states are used to indicate the overall condition of the memory
  // segment irrespective of what is stored within it. Because the data is
  // often persistent and thus needs to be readable by different versions of
  // a program, these values are fixed and can never change.
  enum MemoryState : uint8_t {
    // Persistent memory starts all zeros and so shows "uninitialized".
    MEMORY_UNINITIALIZED = 0,

    // The header has been written and the memory is ready for use.
    MEMORY_INITIALIZED = 1,

    // The data should be considered deleted. This would be set when the
    // allocator is being cleaned up. If file-backed, the file is likely
    // to be deleted but since deletion can fail for a variety of reasons,
    // having this extra status means a future reader can realize what
    // should have happened.
    MEMORY_DELETED = 2,

    // Outside code can create states starting with this number; these too
    // must never change between code versions.
    MEMORY_USER_DEFINED = 100,
  };
  // Iterator for going through all iterable memory records in an allocator.
  // Like the allocator itself, iterators are lock-free and thread-secure.
  // That means that multiple threads can share an iterator and the same
  // reference will not be returned twice.
  //
  // The order of the items returned by an iterator matches the order in which
  // MakeIterable() was called on them. Once an allocation is made iterable,
  // it is always such so the only possible difference between successive
  // iterations is for more to be added to the end.
  //
  // Iteration, in general, is tolerant of corrupted memory. It will return
  // what it can and stop only when corruption forces it to. Bad corruption
  // could cause the same object to be returned many times but it will
  // eventually quit.
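  //
  // A usage sketch (illustrative; assumes |allocator| is a pointer that
  // outlives the loop and MyPersistentObjectType is defined as in the class
  // comment above):
  //
  //   PersistentMemoryAllocator::Iterator iter(allocator);
  //   uint32_t type;
  //   while (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type)) {
  //     if (type == MyPersistentObjectType::kPersistentTypeId) {
  //       const MyPersistentObjectType* object =
  //           iter.GetAsObject<MyPersistentObjectType>(ref);
  //       if (object) {
  //         // Use |object|; always null-check first.
  //       }
  //     }
  //   }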
  class BASE_EXPORT Iterator {
   public:
    // Constructs an iterator on a given |allocator|, starting at the
    // beginning. The allocator must live beyond the lifetime of the iterator.
    // This class has read-only access to the allocator (hence "const") but
    // the returned references can be used on a read/write version, too.
    explicit Iterator(const PersistentMemoryAllocator* allocator);

    // As above but resuming from the |starting_after| reference. The first
    // call to GetNext() will return the next object found after that
    // reference. The reference must be to an "iterable" object; references
    // to non-iterable objects (those that never had MakeIterable() called
    // for them) will cause a run-time error.
    Iterator(const PersistentMemoryAllocator* allocator,
             Reference starting_after);

    Iterator(const Iterator&) = delete;
    Iterator& operator=(const Iterator&) = delete;

    ~Iterator();

    // Resets the iterator back to the beginning.
    void Reset();

    // Resets the iterator, resuming from the |starting_after| reference.
    void Reset(Reference starting_after);

    // Returns the previously retrieved reference, or kReferenceNull if none.
    // If constructed or reset with a starting_after location, this will
    // return that value.
    Reference GetLast();

    // Gets the next iterable, storing that type in |type_return|. The actual
    // return value is a reference to the allocation inside the allocator or
    // zero if there are no more. GetNext() may still be called again at a
    // later time to retrieve any new allocations that have been added.
    Reference GetNext(uint32_t* type_return);

    // Similar to above but gets the next iterable of a specific |type_match|.
    // This should not be mixed with calls to GetNext() because any allocations
    // skipped here due to a type mis-match will never be returned by later
    // calls to GetNext() meaning it's possible to completely miss entries.
    Reference GetNextOfType(uint32_t type_match);

    // As above but works using object type.
    template <typename T>
    Reference GetNextOfType() {
      return GetNextOfType(T::kPersistentTypeId);
    }

    // As above but works using objects and returns null if not found.
    template <typename T>
    const T* GetNextOfObject() {
      return GetAsObject<T>(GetNextOfType<T>());
    }

    // Converts references to objects. This is a convenience method so that
    // users of the iterator don't need to also have their own pointer to the
    // allocator over which the iterator runs in order to retrieve objects.
    // Because the iterator is not read/write, only "const" objects can be
    // fetched. Non-const objects can be fetched using the reference on a
    // non-const (external) pointer to the same allocator (or use const_cast
    // to remove the qualifier).
    template <typename T>
    const T* GetAsObject(Reference ref) const {
      return allocator_->GetAsObject<T>(ref);
    }

    // Similar to GetAsObject() but converts references to arrays of things.
    template <typename T>
    const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
      return allocator_->GetAsArray<T>(ref, type_id, count);
    }

    // Convert a generic pointer back into a reference. A null reference will
    // be returned if |memory| is not inside the persistent segment or does
    // not point to an object of the specified |type_id|.
    Reference GetAsReference(const void* memory, uint32_t type_id) const {
      return allocator_->GetAsReference(memory, type_id);
    }

    // As above but convert an object back into a reference.
    template <typename T>
    Reference GetAsReference(const T* obj) const {
      return allocator_->GetAsReference(obj);
    }

   private:
    // Weak-pointer to memory allocator being iterated over.
    raw_ptr<const PersistentMemoryAllocator> allocator_;

    // The last record that was returned.
    std::atomic<Reference> last_record_;

    // The number of records found; used for detecting loops.
    std::atomic<uint32_t> record_count_;
  };
  // Returned information about the internal state of the heap.
  struct MemoryInfo {
    size_t total;
    size_t free;
  };

  enum : Reference {
    // A common "null" reference value.
    kReferenceNull = 0,
  };

  enum : uint32_t {
    // A value that will match any type when doing lookups.
    kTypeIdAny = 0x00000000,

    // A value indicating that the type is in transition. Work is being done
    // on the contents to prepare it for a new type to come.
    kTypeIdTransitioning = 0xFFFFFFFF,
  };

  enum : size_t {
    kSizeAny = 1  // Constant indicating that any array size is acceptable.
  };

  // This is the standard file extension (suitable for being passed to the
  // AddExtension() method of base::FilePath) for dumps of persistent memory.
  static const base::FilePath::CharType kFileExtension[];
  // The allocator operates on any arbitrary block of memory. Creation and
  // persisting or sharing of that block with another process is the
  // responsibility of the caller. The allocator needs to know only the
  // block's |base| address, the total |size| of the block, and any internal
  // |page| size (zero if not paged) across which allocations should not span.
  // The |id| is an arbitrary value the caller can use to identify a
  // particular memory segment. It will only be loaded during the initial
  // creation of the segment and can be checked by the caller for consistency.
  // The |name|, if provided, is used to distinguish histograms for this
  // allocator. Only the primary owner of the segment should define this
  // value; other processes can learn it from the shared state. If the
  // underlying memory is |readonly| then no changes will be made to it. The
  // resulting object should be stored as a "const" pointer.
  //
  // PersistentMemoryAllocator does NOT take ownership of the memory block.
  // The caller must manage it and ensure it stays available throughout the
  // lifetime of this object.
  //
  // Memory segments for sharing must have had an allocator attached to them
  // before actually being shared. If the memory segment was just created, it
  // should be zeroed before being passed here. If it was an existing segment,
  // the values here will be compared to copies stored in the shared segment
  // as a guard against corruption.
  //
  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
  // method below) before construction if the definition of the segment can
  // vary in any way at run-time. Invalid memory segments will cause a crash.
  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
  PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
      delete;

  virtual ~PersistentMemoryAllocator();
  // Check if memory segment is acceptable for creation of an Allocator. This
  // doesn't do any analysis of the data and so doesn't guarantee that the
  // contents are valid, just that the parameters won't cause the program to
  // abort. The IsCorrupt() method will report detection of data problems
  // found during construction and general operation.
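  //
  // For example (a sketch; |base|, |size|, |id|, and |name| are whatever the
  // caller obtained elsewhere):
  //
  //   if (PersistentMemoryAllocator::IsMemoryAcceptable(base, size,
  //                                                     /*page_size=*/0,
  //                                                     /*readonly=*/false)) {
  //     PersistentMemoryAllocator allocator(base, size, /*page_size=*/0, id,
  //                                         name, /*readonly=*/false);
  //   }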
  static bool IsMemoryAcceptable(const void* data, size_t size,
                                 size_t page_size, bool readonly);
  // Get the internal identifier for this persistent memory segment.
  uint64_t Id() const;

  // Get the internal name of this allocator (possibly an empty string).
  const char* Name() const;

  // Is this segment open only for read?
  bool IsReadonly() const { return readonly_; }

  // Manage the saved state of the memory.
  void SetMemoryState(uint8_t memory_state);
  uint8_t GetMemoryState() const;

  // Create internal histograms for tracking memory use and allocation sizes
  // for allocator of |name| (which can simply be the result of Name()). This
  // is done separately from construction for situations such as when the
  // histograms will be backed by memory provided by this very allocator.
  //
  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
  // be updated with the following histograms for each |name| param:
  //    UMA.PersistentAllocator.name.Errors
  //    UMA.PersistentAllocator.name.UsedPct
  void CreateTrackingHistograms(base::StringPiece name);

  // Flushes the persistent memory to any backing store. This typically does
  // nothing but is used by the FilePersistentMemoryAllocator to inform the
  // OS that all the data should be sent to the disk immediately. This is
  // useful in the rare case where something has just been stored that needs
  // to survive a hard shutdown of the machine like from a power failure.
  // The |sync| parameter indicates if this call should block until the flush
  // is complete but is only advisory and may or may not have an effect
  // depending on the capabilities of the OS. Synchronous flushes are allowed
  // only from threads that are allowed to do I/O but since |sync| is only
  // advisory, all flushes should be done on IO-capable threads.
  void Flush(bool sync);
  // Direct access to underlying memory segment. If the segment is shared
  // across threads or processes, reading data through these values does
  // not guarantee consistency. Use with care. Do not write.
  const void* data() const { return const_cast<const char*>(mem_base_); }
  size_t length() const { return mem_size_; }
  size_t size() const { return mem_size_; }
  size_t used() const;
  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
  // code and size-of(|T|) are compared to ensure the reference is valid
  // and cannot return an object outside of the memory segment. A |type_id| of
  // kTypeIdAny (zero) will match any though the size is still checked. NULL
  // is returned if any problem is detected, such as corrupted storage or
  // incorrect parameters. Callers MUST check that the returned value is
  // not-null EVERY TIME before accessing it or risk crashing! Once
  // dereferenced, the pointer is safe to reuse forever.
  //
  // It is essential that the object be of a fixed size. All fields must be of
  // a defined type that does not change based on the compiler or the CPU
  // natural word size. Acceptable are char, float, double, and (u)intXX_t.
  // Unacceptable are int, bool, and wchar_t which are implementation defined
  // with regards to their size.
  //
  // Alignment must also be consistent. A uint64_t after a uint32_t will pad
  // differently between 32 and 64 bit architectures. Either put the bigger
  // elements first, group smaller elements into blocks the size of larger
  // elements, or manually insert padding fields as appropriate for the
  // largest architecture, including at the end.
  //
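  // For example, this layout is identical on 32- and 64-bit builds (a
  // sketch; the type and its id are illustrative, not part of this file):
  //
  //   struct CrossArchSafe {
  //     static constexpr uint32_t kPersistentTypeId = 0x12345678;
  //     static constexpr size_t kExpectedInstanceSize = 16;
  //     uint64_t big;     // 8-byte members first...
  //     uint32_t medium;  // ...then 4-byte members...
  //     uint16_t small;   // ...then smaller ones, grouped so that...
  //     uint8_t tiny;     // ...no implicit padding is needed and the
  //     uint8_t padding;  // ...total is an explicit multiple of 8.
  //   };
  //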
  // To protect against mistakes, all objects must have the attribute
  // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
  // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
  // instance size is not fixed, at least one build will fail.
  //
  // If the size of a structure changes, the type-ID used to recognize it
  // should also change so later versions of the code don't try to read
  // incompatible structures from earlier versions.
  //
  // NOTE: Though this method will guarantee that an object of the specified
  // type can be accessed without going outside the bounds of the memory
  // segment, it makes no guarantees of the validity of the data within the
  // object itself. If it is expected that the contents of the segment could
  // be compromised with malicious intent, the object must be hardened as
  // well.
  //
  // Though the persistent data may be "volatile" if it is shared with
  // other processes, such is not necessarily the case. The internal
  // "volatile" designation is discarded so as to not propagate the viral
  // nature of that keyword to the caller. It can add it back, if necessary,
  // based on knowledge of how the allocator is being used.
  template <typename T>
  T* GetAsObject(Reference ref) {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }

  template <typename T>
  const T* GetAsObject(Reference ref) const {
    static_assert(std::is_standard_layout<T>::value, "only standard objects");
    static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
  }
  // Like GetAsObject but get an array of simple, fixed-size types.
  //
  // Use a |count| of the required number of array elements, or kSizeAny.
  // GetAllocSize() can be used to calculate the upper bound but isn't reliable
  // because padding can make space for extra elements that were not written.
  //
  // Remember that an array of char is a string but may not be NUL terminated.
  //
  // There are no compile-time or run-time checks to ensure 32/64-bit size
  // compatibility when using these accessors. Only use fixed-size types such
  // as char, float, double, or (u)intXX_t.
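  //
  // A usage sketch (the type id value is illustrative):
  //
  //   constexpr uint32_t kMyFloatsTypeId = 0xABCD1234;
  //   Reference ref = allocator->Allocate(100 * sizeof(float),
  //                                       kMyFloatsTypeId);
  //   float* values = allocator->GetAsArray<float>(ref, kMyFloatsTypeId, 100);
  //   if (values)
  //     values[0] = 3.14f;  // Always null-check before use.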
  template <typename T>
  T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<T*>(reinterpret_cast<volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }

  template <typename T>
  const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    return const_cast<const T*>(reinterpret_cast<const volatile T*>(
        GetBlockData(ref, type_id, count * sizeof(T))));
  }
  // Get the corresponding reference for an object held in persistent memory.
  // If the |memory| is not valid or the type does not match, a kReferenceNull
  // result will be returned.
  Reference GetAsReference(const void* memory, uint32_t type_id) const;

  // Get the number of bytes allocated to a block. This is useful when storing
  // arrays in order to validate the ending boundary. The returned value will
  // include any padding added to achieve the required alignment and so could
  // be larger than given in the original Allocate() request.
  size_t GetAllocSize(Reference ref) const;
  // Access the internal "type" of an object. This generally isn't necessary
  // but can be used to "clear" the type and so effectively mark it as deleted
  // even though the memory stays valid and allocated. Changing the type is
  // an atomic compare/exchange and so requires knowing the existing value.
  // It will return false if the existing type is not what is expected.
  //
  // Changing the type doesn't mean the data is compatible with the new type.
  // Passing true for |clear| will zero the memory after the type has been
  // changed away from |from_type_id| but before it becomes |to_type_id|
  // meaning that it is done in a manner that is thread-safe. Memory is
  // guaranteed to be zeroed atomically by machine-word in a monotonically
  // increasing order.
  //
  // It will likely be necessary to reconstruct the type before it can be
  // used. Changing the type WILL NOT invalidate existing pointers to the
  // data, either in this process or others, so changing the data structure
  // could have unpredictable results. USE WITH CARE!
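  //
  // For example, retiring an object while zeroing its contents (a sketch;
  // the "free" type id of 0 is a convention here, not a requirement):
  //
  //   if (allocator->ChangeType(ref, /*to_type_id=*/0,
  //                             MyPersistentObjectType::kPersistentTypeId,
  //                             /*clear=*/true)) {
  //     // The block still exists and stays allocated; it is now typed 0
  //     // with zeroed contents.
  //   }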
  uint32_t GetType(Reference ref) const;
  bool ChangeType(Reference ref,
                  uint32_t to_type_id,
                  uint32_t from_type_id,
                  bool clear);
  // Allocated objects can be added to an internal list that can then be
  // iterated over by other processes. If an allocated object can be found
  // another way, such as by having its reference within a different object
  // that will be made iterable, then this call is not necessary. This always
  // succeeds unless corruption is detected; check IsCorrupt() to find out.
  // Once an object is made iterable, its position in iteration can never
  // change; new iterable objects will always be added after it in the series.
  // Changing the type does not alter its "iterable" status.
  void MakeIterable(Reference ref);

  // Get the information about the amount of free space in the allocator. The
  // amount of free space should be treated as approximate due to extras from
  // alignment and metadata. Concurrent allocations from other threads will
  // also make the true amount less than what is reported.
  void GetMemoryInfo(MemoryInfo* meminfo) const;
  // If there is some indication that the memory has become corrupted,
  // calling this will attempt to prevent further damage by indicating to
  // all processes that something is not as expected.
  void SetCorrupt() const;

  // This can be called to determine if corruption has been detected in the
  // segment, possibly by a malicious actor. Once detected, future allocations
  // will fail and iteration may not locate all objects.
  bool IsCorrupt() const;

  // Flag set if an allocation has failed because the memory segment was full.
  bool IsFull() const;

  // Update those "tracking" histograms which do not get updates during
  // regular operation, such as how much memory is currently used. This should
  // be called before such information is to be displayed or uploaded.
  void UpdateTrackingHistograms();
  // While the above works much like malloc & free, these next methods provide
  // an "object" interface similar to new and delete.

  // Reserve space in the memory segment of the desired |size| and |type_id|.
  // A return value of zero indicates the allocation failed, otherwise the
  // returned reference can be used by any process to get a real pointer via
  // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
  // larger and will always be a multiple of 8 bytes (64 bits).
  Reference Allocate(size_t size, uint32_t type_id);

  // Allocate and construct an object in persistent memory. The type must have
  // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
  // static constexpr fields that are used to ensure compatibility between
  // software versions. An optional size parameter can be specified to force
  // the allocation to be bigger than the size of the object; this is useful
  // when the last field is actually variable length.
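  //
  // For example, reserving room for trailing variable-length data (a sketch;
  // MyRecord is illustrative and must follow the fixed-size field rules
  // described for GetAsObject above):
  //
  //   struct MyRecord {
  //     static constexpr uint32_t kPersistentTypeId = 0x4D795265;
  //     static constexpr size_t kExpectedInstanceSize = 8;
  //     uint32_t length;
  //     char data[4];  // Conceptually variable length; may extend beyond
  //                    // sizeof(MyRecord) into the extra allocated space.
  //   };
  //   MyRecord* record = allocator->New<MyRecord>(sizeof(MyRecord) + 28);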
  template <typename T>
  T* New(size_t size) {
    if (size < sizeof(T))
      size = sizeof(T);
    Reference ref = Allocate(size, T::kPersistentTypeId);
    void* mem =
        const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    if (!mem)
      return nullptr;
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
    return new (mem) T();
  }

  template <typename T>
  T* New() {
    return New<T>(sizeof(T));
  }
  // Similar to New, above, but construct the object out of an existing memory
  // block and of an expected type. If |clear| is true, memory will be zeroed
  // before construction. Though this is not standard object behavior, it
  // is present to match with new allocations that always come from zeroed
  // memory. Anything previously present simply ceases to exist; no destructor
  // is called for it so explicitly Delete() the old object first if need be.
  // Calling this will not invalidate existing pointers to the object, either
  // in this process or others, so changing the object could have
  // unpredictable results. USE WITH CARE!
  template <typename T>
  T* New(Reference ref, uint32_t from_type_id, bool clear) {
    DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";

    // Make sure the memory is appropriate. This won't be used until after
    // the type is changed but checking first avoids the possibility of having
    // to change the type back.
    void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    if (!mem)
      return nullptr;

    // Ensure the allocator's internal alignment is sufficient for this
    // object. This protects against coding errors in the allocator.
    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));

    // Change the type, clearing the memory if so desired. The new type is
    // "transitioning" so that there is no race condition with the construction
    // of the object should another thread be simultaneously iterating over
    // data. This will "acquire" the memory so no changes get reordered before
    // it.
    if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
      return nullptr;

    // Construct an object of the desired type on this memory, just as if
    // New() had been called to create it.
    T* obj = new (mem) T();

    // Finally change the type to the desired one. This will "release" all of
    // the changes above and so provide a consistent view to other threads.
    bool success =
        ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    DCHECK(success);
    return obj;
  }
  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0).
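  //
  // For example (a sketch; 0 is the conventional "free" type):
  //
  //   MyPersistentObjectType* object =
  //       allocator->GetAsObject<MyPersistentObjectType>(ref);
  //   if (object)
  //     allocator->Delete(object);  // Destructs and re-types the block to 0.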
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);

    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
      return;

    // Destruct the object.
    obj->~T();

    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }

  template <typename T>
  void Delete(T* obj) {
    Delete<T>(obj, 0);
  }
  // As above but works with objects allocated from persistent memory.
  template <typename T>
  Reference GetAsReference(const T* obj) const {
    return GetAsReference(obj, T::kPersistentTypeId);
  }

  // As above but works with an object allocated from persistent memory.
  template <typename T>
  void MakeIterable(const T* obj) {
    MakeIterable(GetAsReference<T>(obj));
  }
 protected:
  enum MemoryType {
    MEM_EXTERNAL,
    MEM_MALLOC,
    MEM_VIRTUAL,
    MEM_SHARED,
    MEM_FILE,
  };

  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    raw_ptr<void> base;
    MemoryType type;
  };

  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address.
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  // Implementation of Flush that accepts how much to flush.
  virtual void FlushPartial(size_t length, bool sync);

  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
  const size_t vm_page_size_;      // The page size used by the OS.
 private:
  struct SharedMetadata;
  struct BlockHeader;

  // All allocations and data-structures must be aligned to this byte boundary.
  // Alignment as large as the physical bus between CPU and RAM is _required_
  // for some architectures, is simply more efficient on other CPUs, and
  // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
  // chance that a type will span cache lines. Alignment mustn't be less
  // than 8 to ensure proper alignment for all types. The rest is a balance
  // between reducing spans across multiple cache lines and wasted space spent
  // padding out allocations. An alignment of 16 would ensure that the block
  // header structure always sits in a single cache line. An average of about
  // 1/2 this value will be wasted with every allocation.
  static constexpr size_t kAllocAlignment = 8;

  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code.
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }

  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference.
  const volatile BlockHeader* GetBlock(Reference ref,
                                       uint32_t type_id,
                                       size_t size,
                                       bool queue_ok,
                                       bool free_ok) const;
  volatile BlockHeader* GetBlock(Reference ref,
                                 uint32_t type_id,
                                 size_t size,
                                 bool queue_ok,
                                 bool free_ok) {
    return const_cast<volatile BlockHeader*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
            ref, type_id, size, queue_ok, free_ok));
  }

  // Get the actual data within a block associated with a specific reference.
  const volatile void* GetBlockData(Reference ref,
                                    uint32_t type_id,
                                    size_t size) const;
  volatile void* GetBlockData(Reference ref, uint32_t type_id, size_t size) {
    return const_cast<volatile void*>(
        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
            ref, type_id, size));
  }

  // Record an error in the internal histogram.
  void RecordError(int error) const;

  const bool readonly_;                // Indicates access to read-only memory.
  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.

  raw_ptr<HistogramBase> allocs_histogram_;  // Histogram recording allocs.
  raw_ptr<HistogramBase> used_histogram_;    // Histogram recording used space.
  raw_ptr<HistogramBase> errors_histogram_;  // Histogram recording errors.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
};
// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
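//
// A minimal sketch for tests (size, id, and name are arbitrary):
//
//   LocalPersistentMemoryAllocator allocator(64 << 10, 0x1234, "TestAlloc");
//   MyPersistentObjectType* object = allocator.New<MyPersistentObjectType>();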
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);

  LocalPersistentMemoryAllocator(const LocalPersistentMemoryAllocator&) =
      delete;
  LocalPersistentMemoryAllocator& operator=(
      const LocalPersistentMemoryAllocator&) = delete;

  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|.
  static void DeallocateLocalMemory(void* memory, size_t size,
                                    MemoryType type);
};
// This allocator takes a writable shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT WritableSharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  WritableSharedPersistentMemoryAllocator(
      base::WritableSharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);

  WritableSharedPersistentMemoryAllocator(
      const WritableSharedPersistentMemoryAllocator&) = delete;
  WritableSharedPersistentMemoryAllocator& operator=(
      const WritableSharedPersistentMemoryAllocator&) = delete;

  ~WritableSharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::WritableSharedMemoryMapping& memory);

 private:
  base::WritableSharedMemoryMapping shared_memory_;
};
// This allocator takes a read-only shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
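//
// A hedged sketch of a cross-process handoff (assumes the shared-memory
// region/mapping APIs declared in base/memory/; details may vary):
//
//   // In the producing process:
//   base::MappedReadOnlyRegion shm =
//       base::ReadOnlySharedMemoryRegion::Create(64 << 10);
//   WritableSharedPersistentMemoryAllocator writer(std::move(shm.mapping),
//                                                  0x1234, "Shared");
//   // ...transfer shm.region to the consuming process, which maps it and
//   // reads the id and name back from the shared state:
//   base::ReadOnlySharedMemoryMapping mapping = region.Map();
//   ReadOnlySharedPersistentMemoryAllocator reader(std::move(mapping), 0, "");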
class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  ReadOnlySharedPersistentMemoryAllocator(
      base::ReadOnlySharedMemoryMapping memory,
      uint64_t id,
      base::StringPiece name);

  ReadOnlySharedPersistentMemoryAllocator(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;
  ReadOnlySharedPersistentMemoryAllocator& operator=(
      const ReadOnlySharedPersistentMemoryAllocator&) = delete;

  ~ReadOnlySharedPersistentMemoryAllocator() override;

  // Ensure that the memory isn't so invalid that it would crash when passing
  // it to the allocator. This doesn't guarantee the data is valid, just that
  // it won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsSharedMemoryAcceptable(
      const base::ReadOnlySharedMemoryMapping& memory);

 private:
  base::ReadOnlySharedMemoryMapping shared_memory_;
};
// NaCl builds don't support any kind of file access.
#if !BUILDFLAG(IS_NACL)

// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
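  //
  // A hedged sketch of typical construction (assumes base::MemoryMappedFile's
  // read/write-extend mode; error handling omitted and all values
  // illustrative):
  //
  //   auto mmfile = std::make_unique<MemoryMappedFile>();
  //   mmfile->Initialize(
  //       base::File(path, base::File::FLAG_OPEN_ALWAYS |
  //                            base::File::FLAG_READ | base::File::FLAG_WRITE),
  //       {0, 64 * 1024}, MemoryMappedFile::READ_WRITE_EXTEND);
  //   if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false)) {
  //     FilePersistentMemoryAllocator allocator(std::move(mmfile),
  //                                             /*max_size=*/0, /*id=*/0x1234,
  //                                             "FileAlloc",
  //                                             /*read_only=*/false);
  //   }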
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);

  FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete;
  FilePersistentMemoryAllocator& operator=(
      const FilePersistentMemoryAllocator&) = delete;

  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will
  // handle the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

  // Load all or a portion of the file into memory for fast access. This can
  // be used to force the disk access to be done on a background thread and
  // then have the data available to be read on the main thread with a greatly
  // reduced risk of blocking due to I/O. The risk isn't eliminated completely
  // because the system could always release the memory when under pressure
  // but this can happen to any block of memory (i.e. swapped out).
  void Cache();

 protected:
  // PersistentMemoryAllocator:
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;
};

#endif  // !BUILDFLAG(IS_NACL)
// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
class BASE_EXPORT DelayedPersistentAllocation {
 public:
  using Reference = PersistentMemoryAllocator::Reference;

  // Creates a delayed allocation using the specified |allocator|. When
  // needed, the memory will be allocated using the specified |type| and
  // |size|. If |offset| is given, the returned pointer will be at that
  // offset into the segment; this allows combining allocations into a
  // single persistent segment to reduce overhead and means an "all or
  // nothing" request. Note that |size| is always the total memory size
  // and |offset| is just indicating the start of a block within it. If
  // |make_iterable| was true, the allocation will be made iterable when it
  // is created; already existing allocations are not changed.
  //
  // Once allocated, a reference to the segment will be stored at |ref|.
  // This shared location must be initialized to zero (0); it is checked
  // with every Get() request to see if the allocation has already been
  // done. If reading |ref| outside of this object, be sure to do an
  // "acquire" load. Don't write to it -- leave that to this object.
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              bool make_iterable);
  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                              std::atomic<Reference>* ref,
                              uint32_t type,
                              size_t size,
                              size_t offset,
                              bool make_iterable);
  ~DelayedPersistentAllocation();

  // Gets a pointer to the defined allocation. This will realize the request
  // and update the reference provided during construction. The memory will
  // be zeroed the first time it is returned, after that it is shared with
  // all other Get() requests and so shows any changes made to it elsewhere.
  //
  // If the allocation fails for any reason, null will be returned. This works
  // even on "const" objects because the allocation is already defined, just
  // delayed.
  void* Get() const;

  // Gets the internal reference value. If this returns a non-zero value then
  // a subsequent call to Get() will do nothing but convert that reference
  // into a memory location -- useful for accessing an existing allocation
  // without creating one unnecessarily.
  Reference reference() const {
    return reference_->load(std::memory_order_relaxed);
  }

 private:
  // The underlying object that does the actual allocation of memory. Its
  // lifetime must exceed that of all DelayedPersistentAllocation objects
  // that use it.
  PersistentMemoryAllocator* const allocator_;

  // The desired type and size of the allocated segment plus the offset
  // within it for the defined request.
  const uint32_t type_;
  const uint32_t size_;
  const uint32_t offset_;

  // Flag indicating if allocation should be made iterable when done.
  const bool make_iterable_;

  // The location at which a reference to the allocated segment is to be
  // stored once the allocation is complete. If multiple delayed allocations
  // share the same pointer then an allocation on one will amount to an
  // allocation for all.
  volatile std::atomic<Reference>* const reference_;

  // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};
}  // namespace base

#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_