mte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static bool report_fault_once = true;

static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
EXPORT_SYMBOL_GPL(mte_async_mode);
#endif

static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
{
        pte_t old_pte = READ_ONCE(*ptep);

        if (check_swap && is_swap_pte(old_pte)) {
                swp_entry_t entry = pte_to_swp_entry(old_pte);

                if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
                        return;
        }

        page_kasan_tag_reset(page);

        /*
         * We need smp_wmb() in between setting the flags and clearing the
         * tags because if another thread reads page->flags and builds a
         * tagged address out of it, there is an actual dependency to the
         * memory access, but on the current thread we do not guarantee that
         * the new page->flags are visible before the tags were updated.
         */
        smp_wmb();
        mte_clear_page_tags(page_address(page));
}

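/*
 * Initialise the tags for every page mapped by a tagged user PTE (typically
 * reached from set_pte_at()). For a single page the tags may be restored
 * from swap; pages already marked PG_mte_tagged are left untouched.
 */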
void mte_sync_tags(pte_t *ptep, pte_t pte)
{
        struct page *page = pte_page(pte);
        long i, nr_pages = compound_nr(page);
        bool check_swap = nr_pages == 1;

        /* if PG_mte_tagged is set, tags have already been initialised */
        for (i = 0; i < nr_pages; i++, page++) {
                if (!test_and_set_bit(PG_mte_tagged, &page->flags))
                        mte_sync_page_tags(page, ptep, check_swap);
        }
}

int memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = page_address(page1);
        addr2 = page_address(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);

        if (!system_supports_mte() || ret)
                return ret;

        /*
         * If the page content is identical but at least one of the pages is
         * tagged, return non-zero to avoid KSM merging. If only one of the
         * pages is tagged, set_pte_at() may zero or change the tags of the
         * other page via mte_sync_tags().
         */
        if (test_bit(PG_mte_tagged, &page1->flags) ||
            test_bit(PG_mte_tagged, &page2->flags))
                return addr1 != addr2;

        return ret;
}

static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
        /* Enable MTE Sync Mode for EL1. */
        sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
        isb();

        pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
        /*
         * Make sure we enter this function when no PE has set
         * async mode previously.
         */
        WARN_ONCE(system_uses_mte_async_mode(),
                  "MTE async mode enabled system wide!");

        __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
        __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);

        /*
         * MTE async mode is set system wide by the first PE that
         * executes this function.
         *
         * Note: If in future KASAN acquires a runtime switching
         * mode in between sync and async, this strategy needs
         * to be reviewed.
         */
        if (!system_uses_mte_async_mode())
                static_branch_enable(&mte_async_mode);
}
#endif

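/*
 * Control whether asynchronous tag check faults are reported only once
 * (report_fault_once defaults to true) or on every occurrence; consulted
 * by the tag check fault reporting code.
 */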
void mte_set_report_once(bool state)
{
        WRITE_ONCE(report_fault_once, state);
}

bool mte_report_once(void)
{
        return READ_ONCE(report_fault_once);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
        u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
                /*
                 * Note: isb() is not required after this direct write
                 * because there is no indirect read subsequent to it
                 * (per ARM DDI 0487F.c table D13-1).
                 */
                write_sysreg_s(0, SYS_TFSR_EL1);

                kasan_report_async();
        }
}
#endif

static void mte_update_sctlr_user(struct task_struct *task)
{
        /*
         * This must be called with preemption disabled and can only be called
         * on the current or next task since the CPU must match where the
         * thread is going to run. The caller is responsible for calling
         * update_sctlr_el1() later in the same preemption disabled block.
         */
        unsigned long sctlr = task->thread.sctlr_user;
        unsigned long mte_ctrl = task->thread.mte_ctrl;
        unsigned long pref, resolved_mte_tcf;

        pref = __this_cpu_read(mte_tcf_preferred);
        resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
        sctlr &= ~SCTLR_EL1_TCF0_MASK;
        if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
                sctlr |= SCTLR_EL1_TCF0_ASYNC;
        else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
                sctlr |= SCTLR_EL1_TCF0_SYNC;
        task->thread.sctlr_user = sctlr;
}

static void mte_update_gcr_excl(struct task_struct *task)
{
        /*
         * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
         * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
         */
        if (kasan_hw_tags_enabled())
                return;

        write_sysreg_s(
                ((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
                 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
                SYS_GCR_EL1);
}

void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
                                 __le32 *updptr, int nr_inst)
{
        BUG_ON(nr_inst != 1); /* Branch -> NOP */

        if (kasan_hw_tags_enabled())
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void mte_thread_init_user(void)
{
        if (!system_supports_mte())
                return;

        /* clear any pending asynchronous tag fault */
        dsb(ish);
        write_sysreg_s(0, SYS_TFSRE0_EL1);
        clear_thread_flag(TIF_MTE_ASYNC_FAULT);
        /* disable tag checking and reset tag generation mask */
        set_mte_ctrl(current, 0);
}

void mte_thread_switch(struct task_struct *next)
{
        if (!system_supports_mte())
                return;

        mte_update_sctlr_user(next);
        mte_update_gcr_excl(next);

        /*
         * Check if an async tag exception occurred at EL1.
         *
         * Note: On the context switch path we rely on the dsb() present
         * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
         * are synchronized before this point.
         */
        isb();
        mte_check_tfsr_el1();
}

void mte_suspend_enter(void)
{
        if (!system_supports_mte())
                return;

        /*
         * The barriers are required to guarantee that the indirect writes
         * to TFSR_EL1 are synchronized before we report the state.
         */
        dsb(nsh);
        isb();

        /* Report SYS_TFSR_EL1 before suspend entry */
        mte_check_tfsr_el1();
}

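/*
 * Update the per-thread MTE control word from a PR_SET_TAGGED_ADDR_CTRL
 * prctl() argument: the tag check fault mode (PR_MTE_TCF_*) and the tag
 * generation exclusion mask derived from PR_MTE_TAG_MASK. A hedged
 * userspace sketch (not part of this file), enabling synchronous checking
 * with tag 0 excluded from generation:
 *
 *      prctl(PR_SET_TAGGED_ADDR_CTRL,
 *            PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *            (0xfffe << PR_MTE_TAG_SHIFT), 0, 0, 0);
 */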
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
                        SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;

        if (!system_supports_mte())
                return 0;

        if (arg & PR_MTE_TCF_ASYNC)
                mte_ctrl |= MTE_CTRL_TCF_ASYNC;
        if (arg & PR_MTE_TCF_SYNC)
                mte_ctrl |= MTE_CTRL_TCF_SYNC;

        task->thread.mte_ctrl = mte_ctrl;
        if (task == current) {
                preempt_disable();
                mte_update_sctlr_user(task);
                mte_update_gcr_excl(task);
                update_sctlr_el1(task->thread.sctlr_user);
                preempt_enable();
        }

        return 0;
}

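/*
 * Build the PR_GET_TAGGED_ADDR_CTRL view of the MTE state: the stored GCR
 * exclusion mask is converted back to the prctl() include mask and combined
 * with the configured tag check fault mode.
 */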
long get_mte_ctrl(struct task_struct *task)
{
        unsigned long ret;
        u64 mte_ctrl = task->thread.mte_ctrl;
        u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
                   SYS_GCR_EL1_EXCL_MASK;

        if (!system_supports_mte())
                return 0;

        ret = incl << PR_MTE_TAG_SHIFT;
        if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
                ret |= PR_MTE_TCF_ASYNC;
        if (mte_ctrl & MTE_CTRL_TCF_SYNC)
                ret |= PR_MTE_TCF_SYNC;

        return ret;
}

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
                                struct iovec *kiov, unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        void __user *buf = kiov->iov_base;
        size_t len = kiov->iov_len;
        int ret;
        int write = gup_flags & FOLL_WRITE;

        if (!access_ok(buf, len))
                return -EFAULT;

        if (mmap_read_lock_killable(mm))
                return -EIO;

        while (len) {
                unsigned long tags, offset;
                void *maddr;
                struct page *page = NULL;

                ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
                                            &vma, NULL);
                if (ret <= 0)
                        break;

                /*
                 * Only copy tags if the page has been mapped as PROT_MTE
                 * (PG_mte_tagged set). Otherwise the tags are not valid and
                 * not accessible to user. Moreover, an mprotect(PROT_MTE)
                 * would cause the existing tags to be cleared if the page
                 * was never mapped with PROT_MTE.
                 */
                if (!(vma->vm_flags & VM_MTE)) {
                        ret = -EOPNOTSUPP;
                        put_page(page);
                        break;
                }
                WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));

                /* limit access to the end of the page */
                offset = offset_in_page(addr);
                tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

                maddr = page_address(page);
                if (write) {
                        tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
                        set_page_dirty_lock(page);
                } else {
                        tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
                }
                put_page(page);

                /* error accessing the tracer's buffer */
                if (!tags)
                        break;

                len -= tags;
                buf += tags;
                addr += tags * MTE_GRANULE_SIZE;
        }
        mmap_read_unlock(mm);

        /* return an error if no tags copied */
        kiov->iov_len = buf - kiov->iov_base;
        if (!kiov->iov_len) {
                /* check for error accessing the tracee's address space */
                if (ret <= 0)
                        return -EIO;
                else
                        return -EFAULT;
        }

        return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
                              struct iovec *kiov, unsigned int gup_flags)
{
        struct mm_struct *mm;
        int ret;

        mm = get_task_mm(tsk);
        if (!mm)
                return -EPERM;

        if (!tsk->ptrace || (current != tsk->parent) ||
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
             !ptracer_capable(tsk, mm->user_ns))) {
                mmput(mm);
                return -EPERM;
        }

        ret = __access_remote_tags(mm, addr, kiov, gup_flags);
        mmput(mm);

        return ret;
}

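/*
 * Back-end for the PTRACE_PEEKMTETAGS/PTRACE_POKEMTETAGS requests: copy the
 * tracee's MTE tags at 'addr' to/from the tracer-supplied iovec at 'data',
 * updating iov_len with the number of tags actually transferred.
 */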
int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data)
{
        int ret;
        struct iovec kiov;
        struct iovec __user *uiov = (void __user *)data;
        unsigned int gup_flags = FOLL_FORCE;

        if (!system_supports_mte())
                return -EIO;

        if (get_user(kiov.iov_base, &uiov->iov_base) ||
            get_user(kiov.iov_len, &uiov->iov_len))
                return -EFAULT;

        if (request == PTRACE_POKEMTETAGS)
                gup_flags |= FOLL_WRITE;

        /* align addr to the MTE tag granule */
        addr &= MTE_GRANULE_MASK;

        ret = access_remote_tags(child, addr, &kiov, gup_flags);
        if (!ret)
                ret = put_user(kiov.iov_len, &uiov->iov_len);

        return ret;
}

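/*
 * Per-CPU "mte_tcf_preferred" sysfs attribute (typically exposed as
 * /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred): reports and sets the
 * tag check fault mode a CPU prefers when a task permits both sync and
 * async checking (see mte_update_sctlr_user()).
 */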
static ssize_t mte_tcf_preferred_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        switch (per_cpu(mte_tcf_preferred, dev->id)) {
        case MTE_CTRL_TCF_ASYNC:
                return sysfs_emit(buf, "async\n");
        case MTE_CTRL_TCF_SYNC:
                return sysfs_emit(buf, "sync\n");
        default:
                return sysfs_emit(buf, "???\n");
        }
}

static ssize_t mte_tcf_preferred_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t count)
{
        u64 tcf;

        if (sysfs_streq(buf, "async"))
                tcf = MTE_CTRL_TCF_ASYNC;
        else if (sysfs_streq(buf, "sync"))
                tcf = MTE_CTRL_TCF_SYNC;
        else
                return -EINVAL;

        device_lock(dev);
        per_cpu(mte_tcf_preferred, dev->id) = tcf;
        device_unlock(dev);

        return count;
}
static DEVICE_ATTR_RW(mte_tcf_preferred);

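/*
 * Initialise every CPU's preference to asynchronous checking and register
 * the mte_tcf_preferred attribute for each possible CPU. Despite the
 * "sysctl" in the name, this registers sysfs device attributes.
 */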
static int register_mte_tcf_preferred_sysctl(void)
{
        unsigned int cpu;

        if (!system_supports_mte())
                return 0;

        for_each_possible_cpu(cpu) {
                per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
                device_create_file(get_cpu_device(cpu),
                                   &dev_attr_mte_tcf_preferred);
        }

        return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);