// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/page_allocator.h"

#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_ANDROID)
#include "base/debug/proc_maps_linux.h"
#endif  // BUILDFLAG(IS_ANDROID)

#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_POSIX)
#include <setjmp.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/time.h>
#endif  // BUILDFLAG(IS_POSIX)

#include "base/allocator/partition_allocator/arm_bti_test_functions.h"

#if defined(PA_HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
#define MTE_KILLED_BY_SIGNAL_AVAILABLE
#endif
#endif
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace partition_alloc::internal {

#if BUILDFLAG(IS_ANDROID)
namespace base::debug {

using ::base::debug::MappedMemoryRegion;
using ::base::debug::ParseProcMaps;
using ::base::debug::ReadProcMaps;

}  // namespace base::debug
#endif

namespace {

// Any number of bytes that can be allocated with no trouble.
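// (One MiB, rounded down to a multiple of the allocation granularity; the
// granularity is a power of two, hence the mask.)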
size_t EasyAllocSize() {
  return (1024 * 1024) & ~(PageAllocationGranularity() - 1);
}

// A huge amount of memory, greater than or equal to the ASLR space.
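// Returns 0 when ASLRMask() is 0, i.e. when ASLR is disabled; callers use
// that to skip tests that depend on exhausting the address space.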
size_t HugeMemoryAmount() {
  return std::max(::partition_alloc::internal::ASLRMask(),
                  std::size_t{2} * ::partition_alloc::internal::ASLRMask());
}

}  // namespace
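
// SystemPageSize() is the CPU page size, while PageAllocationGranularity()
// is the (possibly larger) unit in which address space is reserved, e.g.
// 64 KiB on Windows.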
TEST(PartitionAllocPageAllocatorTest, Rounding) {
  EXPECT_EQ(0u, RoundUpToSystemPage(0u));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundUpToSystemPage(SystemPageSize()));
  EXPECT_EQ(2 * SystemPageSize(), RoundUpToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(0u, RoundDownToSystemPage(0u));
  EXPECT_EQ(0u, RoundDownToSystemPage(SystemPageSize() - 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize()));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(SystemPageSize() + 1));
  EXPECT_EQ(SystemPageSize(), RoundDownToSystemPage(2 * SystemPageSize() - 1));
  EXPECT_EQ(0u, RoundUpToPageAllocationGranularity(0u));
  EXPECT_EQ(PageAllocationGranularity(), RoundUpToPageAllocationGranularity(1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundUpToPageAllocationGranularity(
                PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundUpToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(
      2 * PageAllocationGranularity(),
      RoundUpToPageAllocationGranularity(PageAllocationGranularity() + 1));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(0u));
  EXPECT_EQ(0u, RoundDownToPageAllocationGranularity(
                    PageAllocationGranularity() - 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(PageAllocationGranularity()));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(
                PageAllocationGranularity() + 1));
  EXPECT_EQ(PageAllocationGranularity(),
            RoundDownToPageAllocationGranularity(
                2 * PageAllocationGranularity() - 1));
}
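
// NextAlignedWithOffset(address, alignment, offset) should return the first
// address >= |address| whose remainder modulo |alignment| is |offset|.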
TEST(PartitionAllocPageAllocatorTest, NextAlignedWithOffset) {
  EXPECT_EQ(1024u, NextAlignedWithOffset(1024, 1, 0));
  EXPECT_EQ(2024u, NextAlignedWithOffset(1024, 1024, 1000));
  EXPECT_EQ(2024u, NextAlignedWithOffset(2024, 1024, 1000));
  EXPECT_EQ(3048u, NextAlignedWithOffset(2025, 1024, 1000));
  EXPECT_EQ(2048u, NextAlignedWithOffset(1024, 2048, 0));
  EXPECT_EQ(2148u, NextAlignedWithOffset(1024, 2048, 100));
  EXPECT_EQ(2000u, NextAlignedWithOffset(1024, 2048, 2000));
}
// Test that failed page allocations invoke ReleaseReservation().
// We detect this by making a reservation and ensuring that after failure, we
// can make a new reservation.
TEST(PartitionAllocPageAllocatorTest, AllocFailure) {
  // Release any reservation made by another test.
  ReleaseReservation();

  // We can make a reservation.
  EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));

  // We can't make another reservation until we trigger an allocation failure.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0)
    return;

  uintptr_t result =
      AllocPages(size, PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kInaccessible,
                 PageTag::kChromium);
  if (!result) {
    // We triggered allocation failure. Our reservation should have been
    // released, and we should be able to make a new reservation.
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    ReleaseReservation();
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}
// TODO(crbug.com/765801): Test failed on chromium.win/Win10 Tests x64.
#if BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)
#define MAYBE_ReserveAddressSpace DISABLED_ReserveAddressSpace
#else
#define MAYBE_ReserveAddressSpace ReserveAddressSpace
#endif  // BUILDFLAG(IS_WIN) && defined(ARCH_CPU_64_BITS)

// Test that reserving address space can fail.
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReserveAddressSpace) {
  // Release any reservation made by another test.
  ReleaseReservation();

  size_t size = HugeMemoryAmount();
  // Skip the test for sanitizers and platforms with ASLR turned off.
  if (size == 0)
    return;

  bool success = ReserveAddressSpace(size);
  if (!success) {
    EXPECT_TRUE(ReserveAddressSpace(EasyAllocSize()));
    return;
  }
  // We couldn't fail. Make sure reservation is still there.
  EXPECT_FALSE(ReserveAddressSpace(EasyAllocSize()));
}
TEST(PartitionAllocPageAllocatorTest, AllocAndFreePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadWrite,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}
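
// Exercises AllocPagesWithAlignOffset() with sizes on both sides of the
// requested alignment. The leading 0 argument is the address hint (0 lets
// the allocator choose where to place the mapping).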
TEST(PartitionAllocPageAllocatorTest, AllocPagesAligned) {
  size_t alignment = 8 * PageAllocationGranularity();
  size_t sizes[] = {PageAllocationGranularity(),
                    alignment - PageAllocationGranularity(), alignment,
                    alignment + PageAllocationGranularity(), alignment * 4};
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};
  for (size_t size : sizes) {
    for (size_t offset : offsets) {
      uintptr_t buffer = AllocPagesWithAlignOffset(
          0, size, alignment, offset,
          PageAccessibilityConfiguration::kReadWrite, PageTag::kChromium);
      EXPECT_TRUE(buffer);
      EXPECT_EQ(buffer % alignment, offset);
      FreePages(buffer, size);
    }
  }
}
TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTagged) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged is safe to use on all
  // systems (even those which don't support MTE).
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadWriteTagged,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  *buffer0 = 42;
  EXPECT_EQ(42, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}
TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadExecuteConfirmCFI) {
  // This test checks that indirect branches to anything other than a valid
  // branch target in a PageAccessibilityConfiguration::kReadExecute-mapped
  // page crash on systems which support the Armv8.5 Branch Target
  // Identification extension.
  base::CPU cpu;
  if (!cpu.has_bti()) {
#if BUILDFLAG(IS_IOS)
    // Workaround for iOS tests that incorrectly fail when using GTEST_SKIP();
    // see crbug.com/912138 for details.
    return;
#else
    GTEST_SKIP();
#endif
  }
#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  // Next, map some read-write memory and copy the BTI-enabled function there.
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadWrite,
                 PageTag::kChromium);
  ptrdiff_t function_range =
      reinterpret_cast<char*>(arm_bti_test_function_end) -
      reinterpret_cast<char*>(arm_bti_test_function);
  ptrdiff_t invalid_offset =
      reinterpret_cast<char*>(arm_bti_test_function_invalid_offset) -
      reinterpret_cast<char*>(arm_bti_test_function);
  memcpy(reinterpret_cast<void*>(buffer),
         reinterpret_cast<void*>(arm_bti_test_function), function_range);

  // Next re-protect the page.
  SetSystemPagesAccess(buffer, PageAllocationGranularity(),
                       PageAccessibilityConfiguration::kReadExecuteProtected);

  using BTITestFunction = int64_t (*)(int64_t);

  // Attempt to call the function through the BTI-enabled entrypoint. Confirm
  // that it works.
  BTITestFunction bti_enabled_fn = reinterpret_cast<BTITestFunction>(buffer);
  BTITestFunction bti_invalid_fn =
      reinterpret_cast<BTITestFunction>(buffer + invalid_offset);
  EXPECT_EQ(bti_enabled_fn(15), 18);

  // Next, attempt to call the function without the entrypoint.
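  // EXPECT_EXIT() runs the statement in a forked child process (a gtest
  // death test), so the expected SIGILL kills only the child.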
  EXPECT_EXIT({ bti_invalid_fn(15); }, testing::KilledBySignal(SIGILL),
              "");  // Should crash with SIGILL.

  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}
TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedSynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports the Armv8.5 Memory Tagging
  // Extension.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadWriteTagged,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  // Assign an 0x1 tag to the first granule of buffer.
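  // MTE stores the tag in otherwise-unused upper bits of the pointer, so a
  // retagged pointer compares unequal to the original.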
  int* buffer1 = __arm_mte_increment_tag(buffer0, 0x1);
  EXPECT_NE(buffer0, buffer1);
  __arm_mte_set_tag(buffer1);
  // Retrieve the tag to ensure that it's set.
  buffer1 = __arm_mte_get_tag(buffer0);
  // Prove that the tag is different (if they're the same, the test won't
  // work).
  ASSERT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
  EXPECT_EXIT(
      {
  // Switch to synchronous mode.
#if BUILDFLAG(IS_ANDROID)
        ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kSynchronous);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kSynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kSynchronous);
        // Write to the buffer using its previous tag. A segmentation fault
        // should be delivered.
        *buffer0 = 42;
      },
      testing::KilledBySignal(SIGSEGV), "");
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
  FreePages(buffer, PageAllocationGranularity());
#else
  PA_NOTREACHED();
#endif
}
TEST(PartitionAllocPageAllocatorTest,
     AllocAndFreePagesWithPageReadWriteTaggedAsynchronous) {
  // This test checks that a page allocated with
  // PageAccessibilityConfiguration::kReadWriteTagged generates tag violations
  // if allocated on a system which supports MTE.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // Skip this test if there's no MTE.
#if BUILDFLAG(IS_IOS)
    return;
#else
    GTEST_SKIP();
#endif
  }

#if defined(MTE_KILLED_BY_SIGNAL_AVAILABLE)
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadWriteTagged,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  __arm_mte_set_tag(__arm_mte_increment_tag(buffer0, 0x1));
  int* buffer1 = __arm_mte_get_tag(buffer0);
  EXPECT_NE(buffer0, buffer1);
  TagViolationReportingMode parent_tagging_mode =
      GetMemoryTaggingModeForCurrentThread();
  EXPECT_EXIT(
      {
  // Switch to asynchronous mode.
#if BUILDFLAG(IS_ANDROID)
        ChangeMemoryTaggingModeForAllThreadsPerProcess(
            TagViolationReportingMode::kAsynchronous);
#else
        ChangeMemoryTaggingModeForCurrentThread(
            TagViolationReportingMode::kAsynchronous);
#endif  // BUILDFLAG(IS_ANDROID)
        EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(),
                  TagViolationReportingMode::kAsynchronous);
        // Write to the buffer using its previous tag. A fault should be
        // generated at this point but we may not notice straight away...
        *buffer0 = 42;
        EXPECT_EQ(42, *buffer0);
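        // In asynchronous mode the tag-check fault is only reported once the
        // thread re-enters the kernel, so force a system call by logging.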
        PA_LOG(ERROR) << "=";  // Until we receive control back from the
                               // kernel (e.g. on a system call).
      },
      testing::KilledBySignal(SIGSEGV), "");
  FreePages(buffer, PageAllocationGranularity());
  EXPECT_EQ(GetMemoryTaggingModeForCurrentThread(), parent_tagging_mode);
#else
  PA_NOTREACHED();
#endif
}
// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

namespace {

sigjmp_buf g_continuation;

void SignalHandler(int signal, siginfo_t* info, void*) {
  siglongjmp(g_continuation, 1);
}

}  // namespace
// On Mac, sometimes we get SIGBUS instead of SIGSEGV, so handle that too.
#if BUILDFLAG(IS_APPLE)
#define EXTRA_FAULT_BEGIN_ACTION() \
  struct sigaction old_bus_action; \
  sigaction(SIGBUS, &action, &old_bus_action);
#define EXTRA_FAULT_END_ACTION() sigaction(SIGBUS, &old_bus_action, nullptr);
#else
#define EXTRA_FAULT_BEGIN_ACTION()
#define EXTRA_FAULT_END_ACTION()
#endif

// Install a signal handler so we can catch the fault we're about to trigger.
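// On the first pass sigsetjmp() returns 0 and the fault-generating code runs;
// the handler then siglongjmp()s back, sigsetjmp() returns 1, and control
// lands in the else-branch of FAULT_TEST_END(), which restores the handlers.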
#define FAULT_TEST_BEGIN()                  \
  struct sigaction action = {};             \
  struct sigaction old_action = {};         \
  action.sa_sigaction = SignalHandler;      \
  sigemptyset(&action.sa_mask);             \
  action.sa_flags = SA_SIGINFO;             \
  sigaction(SIGSEGV, &action, &old_action); \
  EXTRA_FAULT_BEGIN_ACTION();               \
  int const save_sigs = 1;                  \
  if (!sigsetjmp(g_continuation, save_sigs)) {
// Fault generating code goes here...

// Handle when sigsetjmp returns nonzero (we are returning from our handler).
#define FAULT_TEST_END()                      \
  }                                           \
  else {                                      \
    sigaction(SIGSEGV, &old_action, nullptr); \
    EXTRA_FAULT_END_ACTION();                 \
  }
TEST(PartitionAllocPageAllocatorTest, InaccessiblePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kInaccessible,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);

  FAULT_TEST_BEGIN()

  // Reading from buffer should fault.
  int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  FreePages(buffer, PageAllocationGranularity());
}
// TODO(crbug.com/1291888): Understand why we can't read from Read-Execute
// pages on iOS.
#if BUILDFLAG(IS_IOS)
#define MAYBE_ReadExecutePages DISABLED_ReadExecutePages
#else
#define MAYBE_ReadExecutePages ReadExecutePages
#endif  // BUILDFLAG(IS_IOS)
TEST(PartitionAllocPageAllocatorTest, MAYBE_ReadExecutePages) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kReadExecute,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);
  int* buffer0 = reinterpret_cast<int*>(buffer);
  // Reading from buffer should succeed.
  int buffer0_contents = *buffer0;

  FAULT_TEST_BEGIN()

  // Writing to buffer should fault.
  *buffer0 = ~buffer0_contents;
  EXPECT_TRUE(false);

  FAULT_TEST_END()

  // Make sure no write occurred.
  EXPECT_EQ(buffer0_contents, *buffer0);
  FreePages(buffer, PageAllocationGranularity());
}
#endif  // BUILDFLAG(IS_POSIX)

#if BUILDFLAG(IS_ANDROID)
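// PageTag::kChromium should surface in /proc/self/maps as "[anon:chromium]"
// (Android lets callers name anonymous mappings, presumably via
// prctl(PR_SET_VMA)).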
TEST(PartitionAllocPageAllocatorTest, PageTagging) {
  uintptr_t buffer =
      AllocPages(PageAllocationGranularity(), PageAllocationGranularity(),
                 PageAccessibilityConfiguration::kInaccessible,
                 PageTag::kChromium);
  EXPECT_TRUE(buffer);

  std::string proc_maps;
  EXPECT_TRUE(base::debug::ReadProcMaps(&proc_maps));
  std::vector<base::debug::MappedMemoryRegion> regions;
  EXPECT_TRUE(base::debug::ParseProcMaps(proc_maps, &regions));

  bool found = false;
  for (const auto& region : regions) {
    if (region.start == buffer) {
      found = true;
      EXPECT_EQ("[anon:chromium]", region.path);
      break;
    }
  }

  FreePages(buffer, PageAllocationGranularity());
  EXPECT_TRUE(found);
}
#endif  // BUILDFLAG(IS_ANDROID)
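
// Decommitting with kAllowKeepForPerf lets the OS either discard or keep the
// page contents; on platforms where decommitted memory is guaranteed to be
// zeroed, recommitted pages must read back as all zeros.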
TEST(PartitionAllocPageAllocatorTest, DecommitErasesMemory) {
  if (!DecommittedMemoryIsAlwaysZeroed())
    return;

  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration::kReadWrite,
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitSystemPages(buffer, size,
                      PageAccessibilityDisposition::kAllowKeepForPerf);
  RecommitSystemPages(buffer, size, PageAccessibilityConfiguration::kReadWrite,
                      PageAccessibilityDisposition::kAllowKeepForPerf);

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}
TEST(PartitionAllocPageAllocatorTest, DecommitAndZero) {
  size_t size = PageAllocationGranularity();
  uintptr_t buffer = AllocPages(size, PageAllocationGranularity(),
                                PageAccessibilityConfiguration::kReadWrite,
                                PageTag::kChromium);
  ASSERT_TRUE(buffer);

  memset(reinterpret_cast<void*>(buffer), 42, size);

  DecommitAndZeroSystemPages(buffer, size);

// Test permission setting on POSIX, where we can set a trap handler.
#if BUILDFLAG(IS_POSIX)

  FAULT_TEST_BEGIN()

  // Reading from buffer should now fault.
  int* buffer0 = reinterpret_cast<int*>(buffer);
  int buffer0_contents = *buffer0;
  EXPECT_EQ(buffer0_contents, *buffer0);
  EXPECT_TRUE(false);

  FAULT_TEST_END()

#endif

  // Clients of the DecommitAndZero API (in particular, V8) currently just
  // call SetSystemPagesAccess to mark the region as accessible again, so we
  // use that here as well.
  SetSystemPagesAccess(buffer, size,
                       PageAccessibilityConfiguration::kReadWrite);

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  FreePages(buffer, size);
}
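
// GetTotalMappedSize() counts address space that is mapped, whether or not
// it is currently committed, so decommitting must not change the total and
// only FreePages() should lower it.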
TEST(PartitionAllocPageAllocatorTest, MappedPagesAccounting) {
  size_t size = PageAllocationGranularity();
  // Ask for a large alignment to make sure that trimming doesn't change the
  // accounting.
  size_t alignment = 128 * PageAllocationGranularity();
  size_t offsets[] = {0, PageAllocationGranularity(), alignment / 2,
                      alignment - PageAllocationGranularity()};

  size_t mapped_size_before = GetTotalMappedSize();

  for (size_t offset : offsets) {
    uintptr_t data = AllocPagesWithAlignOffset(
        0, size, alignment, offset,
        PageAccessibilityConfiguration::kInaccessible, PageTag::kChromium);
    ASSERT_TRUE(data);

    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    DecommitSystemPages(data, size,
                        PageAccessibilityDisposition::kAllowKeepForPerf);
    EXPECT_EQ(mapped_size_before + size, GetTotalMappedSize());

    FreePages(data, size);
    EXPECT_EQ(mapped_size_before, GetTotalMappedSize());
  }
}

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)