tagging.cc

// Copyright (c) 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/tagging.h"

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"

#if defined(PA_HAS_MEMORY_TAGGING)
#include <arm_acle.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
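// prctl() request codes and flags for the Linux tagged address ABI. They are
// also defined here so the file still builds against older libc headers that
// may not provide them.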
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)

#if BUILDFLAG(IS_LINUX)
#include <linux/version.h>

// Linux headers already provide these since v5.10.
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
#define HAS_PR_MTE_MACROS
#endif
#endif

#ifndef HAS_PR_MTE_MACROS
#define PR_MTE_TCF_SHIFT 1
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#define PR_MTE_TAG_SHIFT 3
#define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#endif

#if BUILDFLAG(IS_ANDROID)
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
#endif  // BUILDFLAG(IS_ANDROID)

namespace partition_alloc {

#if defined(PA_HAS_MEMORY_TAGGING)
namespace {
void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
  if (internal::base::CPU::GetInstanceNoAllocation().has_mte()) {
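    // PR_SET_TAGGED_ADDR_CTRL enables the tagged address ABI and selects the
    // MTE tag-check-fault mode for the calling thread; prctl() returns 0 on
    // success.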
    int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
    PA_CHECK(status == 0);
  }
}
}  // namespace
#endif  // defined(PA_HAS_MEMORY_TAGGING)

void ChangeMemoryTaggingModeForCurrentThread(TagViolationReportingMode m) {
#if defined(PA_HAS_MEMORY_TAGGING)
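  // The tag mask (0xfffe << PR_MTE_TAG_SHIFT) lets the hardware tag generator
  // pick any tag except 0, the default tag of untagged memory.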
  if (m == TagViolationReportingMode::kSynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC |
                                    (0xfffe << PR_MTE_TAG_SHIFT));
  } else {
    ChangeMemoryTaggingModeInternal(PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_NONE);
  }
#endif  // defined(PA_HAS_MEMORY_TAGGING)
}

namespace internal {

#if BUILDFLAG(IS_ANDROID)
void ChangeMemoryTaggingModeForAllThreadsPerProcess(
    TagViolationReportingMode m) {
#if defined(PA_HAS_MEMORY_TAGGING)
  // In order to support Android NDK API levels below 26, mallopt must be
  // looked up and called via the dynamic linker.
  //   int mallopt(int param, int value);
  using MalloptSignature = int (*)(int, int);

  static MalloptSignature mallopt_fnptr = []() {
    base::FilePath module_path;
    base::NativeLibraryLoadError load_error;
    base::FilePath library_path = module_path.Append("libc.so");
    base::NativeLibrary library =
        base::LoadNativeLibrary(library_path, &load_error);
    PA_CHECK(library);
    void* func_ptr =
        base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
    PA_CHECK(func_ptr);
    return reinterpret_cast<MalloptSignature>(func_ptr);
  }();

  int status = 0;
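  // M_BIONIC_SET_HEAP_TAGGING_LEVEL and the M_HEAP_TAGGING_LEVEL_* values are
  // Bionic extensions (see Bionic's <malloc.h>) that select how the native
  // heap reports MTE tag-check faults.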
  if (m == TagViolationReportingMode::kSynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_SYNC);
  } else if (m == TagViolationReportingMode::kAsynchronous) {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_ASYNC);
  } else {
    status = mallopt_fnptr(M_BIONIC_SET_HEAP_TAGGING_LEVEL,
                           M_HEAP_TAGGING_LEVEL_NONE);
  }
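  // Bionic's mallopt() returns a non-zero value on success, so a zero status
  // means the heap tagging level could not be changed.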
  PA_CHECK(status);
#endif  // defined(PA_HAS_MEMORY_TAGGING)
}
#endif  // BUILDFLAG(IS_ANDROID)

namespace {
[[maybe_unused]] static bool CheckTagRegionParameters(void* ptr, size_t sz) {
  // Check that ptr and size are correct for MTE.
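  // Each MTE allocation tag covers one kMemTagGranuleSize-byte (16-byte)
  // granule, so both the pointer and the size must be granule-aligned and the
  // size must be non-zero.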
  uintptr_t ptr_as_uint = reinterpret_cast<uintptr_t>(ptr);
  bool ret = (ptr_as_uint % kMemTagGranuleSize == 0) &&
             (sz % kMemTagGranuleSize == 0) && sz;
  return ret;
}

#if defined(PA_HAS_MEMORY_TAGGING)
static bool HasCPUMemoryTaggingExtension() {
  return base::CPU::GetInstanceNoAllocation().has_mte();
}
#endif  // defined(PA_HAS_MEMORY_TAGGING)

#if defined(PA_HAS_MEMORY_TAGGING)
void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
  // Randomly tag a region (MTE-enabled systems only). The first 16-byte
  // granule is randomly tagged, all other granules in the region are
  // then assigned that initial tag via __arm_mte_set_tag.
  if (!CheckTagRegionParameters(ptr, sz))
    return nullptr;
  // __arm_mte_create_random_tag generates a randomly tagged pointer via the
  // hardware's random number generator, but does not apply it to the memory.
  char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Next, tag the first and all subsequent granules with the random tag.
    __arm_mte_set_tag(nptr +
                      i);  // Tag is taken from the top bits of the argument.
  }
  return nptr;
}

void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
  // Increment a region's tag (MTE-enabled systems only), using the tag of the
  // first granule.
  if (!CheckTagRegionParameters(ptr, sz))
    return nullptr;
  // Increment ptr's tag.
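  // __arm_mte_increment_tag only computes a pointer whose logical tag is
  // incremented; the loop below writes that tag into each granule's
  // allocation tag.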
  char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    // Apply the tag to the first granule, and all subsequent granules.
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
}

void* RemaskVoidPtrForMTE(void* ptr) {
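  // __arm_mte_get_tag loads the allocation tag stored for the address and
  // returns a copy of ptr whose logical tag matches it, without modifying
  // memory.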
  if (PA_LIKELY(ptr)) {
    // Can't look up the tag for a null ptr (segfaults).
    return __arm_mte_get_tag(ptr);
  }
  return nullptr;
}
#endif

void* TagRegionIncrementNoOp(void* ptr, size_t sz) {
  // Region parameters are checked even on non-MTE systems, to verify that the
  // tagging intrinsics are used correctly.
  return ptr;
}

void* TagRegionRandomlyNoOp(void* ptr, size_t sz, uint64_t mask) {
  // Verifies that ptr is aligned to a 16-byte tagging granule and that sz is
  // a multiple of the granule size (all architectures).
  return ptr;
}

void* RemaskVoidPtrNoOp(void* ptr) {
  return ptr;
}

}  // namespace

void InitializeMTESupportIfNeeded() {
#if defined(PA_HAS_MEMORY_TAGGING)
  if (HasCPUMemoryTaggingExtension()) {
    global_remask_void_ptr_fn = RemaskVoidPtrForMTE;
    global_tag_memory_range_increment_fn = TagRegionIncrementForMTE;
    global_tag_memory_range_randomly_fn = TagRegionRandomlyForMTE;
  }
#endif
}
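
// The tagging hooks default to the no-op implementations above;
// InitializeMTESupportIfNeeded() swaps in the MTE-backed versions when the
// CPU supports memory tagging.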
RemaskPtrInternalFn* global_remask_void_ptr_fn = RemaskVoidPtrNoOp;
TagMemoryRangeIncrementInternalFn* global_tag_memory_range_increment_fn =
    TagRegionIncrementNoOp;
TagMemoryRangeRandomlyInternalFn* global_tag_memory_range_randomly_fn =
    TagRegionRandomlyNoOp;

TagViolationReportingMode GetMemoryTaggingModeForCurrentThread() {
#if defined(PA_HAS_MEMORY_TAGGING)
  base::CPU cpu;
  if (!cpu.has_mte()) {
    return TagViolationReportingMode::kUndefined;
  }
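  // PR_GET_TAGGED_ADDR_CTRL returns the current thread's tagged address ABI
  // configuration, including the MTE tag-check-fault mode bits tested below.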
  int status = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  PA_CHECK(status >= 0);
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_SYNC)) {
    return TagViolationReportingMode::kSynchronous;
  }
  if ((status & PR_TAGGED_ADDR_ENABLE) && (status & PR_MTE_TCF_ASYNC)) {
    return TagViolationReportingMode::kAsynchronous;
  }
#endif  // defined(PA_HAS_MEMORY_TAGGING)
  return TagViolationReportingMode::kUndefined;
}

}  // namespace internal
}  // namespace partition_alloc