// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#include <stdint.h>

#include <algorithm>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/tracing_buildflags.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "base/win/windows_version.h"
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>

#include "base/fuchsia/fuchsia_logging.h"
#endif

#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"  // no-presubmit-check
#include "base/trace_event/process_memory_dump.h"    // no-presubmit-check
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
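
// Overview: a DiscardableSharedMemory segment reserves its first page-aligned
// chunk for a single atomic word of SharedState (a lock bit plus a usage
// timestamp). Lock() and Unlock() maintain that word with compare-and-swap,
// while the platform-specific code below (ashmem pinning, madvise(),
// DiscardVirtualMemory(), VMAR decommit) tells the OS when the remaining
// pages may be discarded.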

namespace base {
namespace {

// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);

// Serialize to Unix time when using 4-byte wire format.
// Note: on 19 January 2038, this will cease to work.
template <>
[[maybe_unused]] Time TimeFromWireFormat<4>(int64_t value) {
  return value ? Time::UnixEpoch() + Seconds(value) : Time();
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}

// Standard serialization format when using 8-byte wire format.
template <>
[[maybe_unused]] Time TimeFromWireFormat<8>(int64_t value) {
  return Time::FromInternalValue(value);
}
template <>
[[maybe_unused]] int64_t TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}
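
// For example, with a 4-byte AtomicType a timestamp survives only at
// whole-second granularity, stored as seconds since the Unix epoch, while
// the 8-byte form round-trips Time's full internal representation unchanged.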

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};
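
// For example, on a 64-bit platform SharedState(LOCKED, Time()) packs to
// value.u == 1, while SharedState(UNLOCKED, t) packs to
// (TimeToWireFormat<8>(t) << 1), so the lock bit and the usage timestamp
// travel together in one word that can be swapped atomically.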

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
    const WritableSharedMemoryMapping& shared_memory) {
  DCHECK(shared_memory.IsValid());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return bits::AlignUp(size, base::GetPageSize());
}

#if BUILDFLAG(IS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
  if (!ashmem_device_is_supported())
    return false;

  // If we are participating in the discardable memory backing trial, only
  // enable ashmem unpinning when we are in the corresponding trial group.
  if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
    return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
           base::DiscardableMemoryTrialGroup::kAshmem;
  }
  return true;
}
#endif  // BUILDFLAG(IS_ANDROID)

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {}

DiscardableSharedMemory::DiscardableSharedMemory(
    UnsafeSharedMemoryRegion shared_memory_region)
    : shared_memory_region_(std::move(shared_memory_region)),
      mapped_size_(0),
      locked_page_count_(0) {}

DiscardableSharedMemory::~DiscardableSharedMemory() = default;
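
// Creates a shared memory region large enough for |size| user bytes plus a
// page-aligned SharedState header, maps it, and starts out with every page
// counted as locked and the shared word set to (LOCKED, null).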
bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  shared_memory_region_ =
      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());
  if (!shared_memory_region_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.Map();
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      new_state.value.i);
  return true;
}
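
// Maps a region that was created elsewhere (e.g. received from another
// process). Unlike CreateAndMap(), this does not write the SharedState word;
// the creating side already initialized it.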
bool DiscardableSharedMemory::Map(size_t size) {
  DCHECK(!shared_memory_mapping_.IsValid());
  if (shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = shared_memory_region_.MapAt(
      0, AlignToPageSize(sizeof(SharedState)) + size);
  if (!shared_memory_mapping_.IsValid())
    return false;

  mapped_size_ = shared_memory_mapping_.mapped_size() -
                 AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_mapping_.IsValid())
    return false;

  shared_memory_mapping_ = WritableSharedMemoryMapping();
  locked_page_count_ = 0;
#if DCHECK_IS_ON()
  locked_pages_.clear();
#endif
  mapped_size_ = 0;
  return true;
}
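
// Locking is two-level: when no pages in this mapping are currently locked,
// a compare-and-swap must first move the shared word from (UNLOCKED,
// |last_known_usage_|) to (LOCKED, null); per-page bookkeeping and platform
// pinning then keep the range resident. FAILED means the lock was not
// acquired (e.g. the CAS lost because the timestamp was stale or the segment
// was purged); PURGED means the lock was taken but the platform had already
// discarded the pages.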
DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_mapping_.IsValid());

  // We need to successfully acquire the platform independent lock before
  // individual pages can be locked.
  if (!locked_page_count_) {
    // Return false when instance has been purged or not initialized properly
    // by checking if |last_known_usage_| is NULL.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
        old_state.value.i, new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Always behave as if memory was purged when trying to lock a 0 byte segment.
  if (!length)
    return PURGED;

#if BUILDFLAG(IS_ANDROID)
  // Ensure that the platform won't discard the required pages.
  return LockPages(shared_memory_region_,
                   AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif BUILDFLAG(IS_APPLE)
  // On macOS, there is no mechanism to lock pages. However, we do need to call
  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
  // footprint via task_info().
  //
  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
  //
  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
  // that's where the memory is actually released, rather than Unlock(), which
  // is a no-op on macOS.
  //
  // For more information, see
  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
  madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
              AlignToPageSize(sizeof(SharedState)),
          AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
  return DiscardableSharedMemory::SUCCESS;
#else
  return DiscardableSharedMemory::SUCCESS;
#endif
}
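
// Unlocking releases the platform pin on the pages first; once the last
// locked page in this mapping is gone, it publishes (UNLOCKED, now) to the
// shared word so a later Purge() can compare against the usage timestamp.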
void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Passing zero for |length| means "everything onward". Note that |length| may
  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_mapping_.IsValid());

  // Allow the pages to be discarded by the platform, if supported.
  UnlockPages(shared_memory_region_,
              AlignToPageSize(sizeof(SharedState)) + offset, length);

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LE(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: timestamp cannot be NULL as that is a unique value used when
  // locked or purged.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}
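
// Returns the start of the caller-usable range, which begins just past the
// page-aligned SharedState header.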
void* DiscardableSharedMemory::memory() const {
  return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}
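
// Purge succeeds only if the shared word still holds (UNLOCKED,
// |last_known_usage_|); any other value means some process has locked or
// used the memory since this process last observed it.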
bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
      old_state.value.i, new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine if purging failed because last known usage
  // was incorrect or memory was locked. In the second case, the caller should
  // most likely wait for some amount of time before attempting to purge the
  // memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

  // The next section releases as much memory as possible from the purging
  // process, until the client process notices the purge and releases its own
  // references.
  // Note: this memory will not be accessed again. The segment will be freed
  // asynchronously at a later time, so just do the best we can immediately.
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
// provide MADV_FREE which has the same result but memory is purged lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState)),
              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#elif BUILDFLAG(IS_WIN)
  // On Windows, discarded pages are not returned to the system immediately and
  // not guaranteed to be zeroed when returned to the application.
  using DiscardVirtualMemoryFunction =
      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
  static DiscardVirtualMemoryFunction discard_virtual_memory =
      reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
          GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));

  char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
                  AlignToPageSize(sizeof(SharedState));
  size_t length = AlignToPageSize(mapped_size_);

  // Use DiscardVirtualMemory when available because it releases faster than
  // MEM_RESET.
  DWORD ret = ERROR_NOT_SUPPORTED;
  if (discard_virtual_memory) {
    ret = discard_virtual_memory(address, length);
  }
  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
  // failure.
  if (ret != ERROR_SUCCESS) {
    void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
    CHECK(ptr);
  }
#elif BUILDFLAG(IS_FUCHSIA)
  // De-commit via our VMAR, rather than relying on the VMO handle, since the
  // handle may have been closed after the memory was mapped into this process.
  uint64_t address_int = reinterpret_cast<uint64_t>(
      static_cast<char*>(shared_memory_mapping_.memory()) +
      AlignToPageSize(sizeof(SharedState)));
  zx_status_t status = zx::vmar::root_self()->op_range(
      ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
      0);
  ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif  // BUILDFLAG(IS_FUCHSIA)

  last_known_usage_ = Time();
  return true;
}

void DiscardableSharedMemory::ReleaseMemoryIfPossible(size_t offset,
                                                      size_t length) {
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
// provide MADV_FREE which has the same result but memory is purged lazily.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else   // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif  // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
        // BUILDFLAG(IS_ANDROID)
  // Advise the kernel to remove resources associated with purged pages.
  // Subsequent accesses of memory pages will succeed, but might result in
  // zero-fill-on-demand pages.
  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) + offset,
              length, MADV_PURGE_ARGUMENT)) {
    DPLOG(ERROR) << "madvise() failed";
  }
#else   // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
  partition_alloc::DiscardSystemPages(
      static_cast<char*>(shared_memory_mapping_.memory()) + offset, length);
#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

bool DiscardableSharedMemory::IsMemoryLocked() const {
  DCHECK(shared_memory_mapping_.IsValid());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));

  return result.GetLockState() == SharedState::LOCKED;
}

void DiscardableSharedMemory::Close() {
  shared_memory_region_ = UnsafeSharedMemoryRegion();
}

void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
    trace_event::MemoryAllocatorDump* local_segment_dump,
    trace_event::ProcessMemoryDump* pmd,
    bool is_owned) const {
// Memory dumps are only supported when tracing support is enabled.
#if BUILDFLAG(ENABLE_BASE_TRACING)
  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
      shared_memory_mapping_, pmd);
  // TODO(ssid): Clean this up with a new API to inherit the size of the parent
  // dump once we send the full PMD and calculate sizes inside chrome,
  // crbug.com/704203.
  uint64_t resident_size = shared_memory_dump->GetSizeInternal();
  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
                                trace_event::MemoryAllocatorDump::kUnitsBytes,
                                resident_size);

  // By creating an edge with a higher |importance| (w.r.t. non-owned dumps)
  // the tracing UI will account the effective size of the segment to the
  // client instead of the manager.
  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
  // values, crbug.com/754793.
  const int kImportance = is_owned ? 2 : 0;
  auto shared_memory_guid = shared_memory_mapping_.guid();
  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());

  // Owned discardable segments, which are allocated by the client process,
  // could have been cleared by the discardable manager. So the segment need
  // not exist in memory, and weak dumps are created to indicate to the UI
  // that the dump should exist only if the manager also created the global
  // dump edge.
  if (is_owned) {
    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                             shared_memory_guid, kImportance);
  } else {
    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
                                         shared_memory_guid, kImportance);
  }
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}
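
// Pins |length| bytes starting at |offset| within |region| so the platform
// will not discard them. Only Android ashmem can report that previously
// unpinned pages were purged in the meantime; everywhere else this reports
// SUCCESS unconditionally.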
// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int pin_result =
          ashmem_pin_region(region.GetPlatformHandle(), offset, length);
      if (pin_result == ASHMEM_WAS_PURGED)
        return PURGED;
      if (pin_result < 0)
        return FAILED;
    }
  }
#endif
  return SUCCESS;
}
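
// Unpins the range so the platform may reclaim it under memory pressure.
// This only has an effect on Android when ashmem unpinning is in use.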
// static
void DiscardableSharedMemory::UnlockPages(
    const UnsafeSharedMemoryRegion& region,
    size_t offset,
    size_t length) {
#if BUILDFLAG(IS_ANDROID)
  if (region.IsValid()) {
    if (UseAshmemUnpinningForDiscardableMemory()) {
      int unpin_result =
          ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
      DCHECK_EQ(0, unpin_result);
    }
  }
#endif
}
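
// Time source for usage timestamps; kept as a separate method so tests can
// override it with a fixed clock.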
Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

#if BUILDFLAG(IS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
  return UseAshmemUnpinningForDiscardableMemory();
}
#endif

}  // namespace base