scs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Shadow Call Stack support.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/cpuhotplug.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>

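/* Account or unaccount a shadow stack in NR_KERNEL_SCS_KB for its node. */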
static void __scs_account(void *s, int account)
{
	struct page *scs_page = vmalloc_to_page(s);

	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
			    account * (SCS_SIZE / SZ_1K));
}

/* Matches NR_CACHED_STACKS for VMAP_STACK */
#define NR_CACHED_SCS 2
static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);

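/* Take a stack from the per-CPU cache, or fall back to vmalloc. */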
static void *__scs_alloc(int node)
{
	int i;
	void *s;

	for (i = 0; i < NR_CACHED_SCS; i++) {
		s = this_cpu_xchg(scs_cache[i], NULL);
		if (s) {
			kasan_unpoison_vmalloc(s, SCS_SIZE);
			memset(s, 0, SCS_SIZE);
			return s;
		}
	}

	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_SCS, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));
}

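/* Allocate a shadow stack and write the end magic for corruption checks. */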
void *scs_alloc(int node)
{
	void *s;

	s = __scs_alloc(node);
	if (!s)
		return NULL;

	*__scs_magic(s) = SCS_END_MAGIC;

	/*
	 * Poison the allocation to catch unintentional accesses to
	 * the shadow stack when KASAN is enabled.
	 */
	kasan_poison_vmalloc(s, SCS_SIZE);
	__scs_account(s, 1);
	return s;
}

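/* Return the stack to the per-CPU cache, or free it if the cache is full. */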
void scs_free(void *s)
{
	int i;

	__scs_account(s, -1);

	/*
	 * We cannot sleep as this can be called in interrupt context,
	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
	 * to free the stack.
	 */

	for (i = 0; i < NR_CACHED_SCS; i++)
		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
			return;

	kasan_unpoison_vmalloc(s, SCS_SIZE);
	vfree_atomic(s);
}

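/* CPU hotplug teardown: free stacks cached by a CPU that goes offline. */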
static int scs_cleanup(unsigned int cpu)
{
	int i;
	void **cache = per_cpu_ptr(scs_cache, cpu);

	for (i = 0; i < NR_CACHED_SCS; i++) {
		vfree(cache[i]);
		cache[i] = NULL;
	}

	return 0;
}

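/* Register the hotplug callback that drains the per-CPU stack cache. */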
void __init scs_init(void)
{
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
			  scs_cleanup);
}

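/* Give a new task its shadow stack; both base and current sp start at it. */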
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s = scs_alloc(node);

	if (!s)
		return -ENOMEM;

	task_scs(tsk) = task_scs_sp(tsk) = s;
	return 0;
}

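/* With CONFIG_DEBUG_STACK_USAGE, log new highest shadow stack usage. */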
static void scs_check_usage(struct task_struct *tsk)
{
	static unsigned long highest;

	unsigned long *p, prev, curr = highest, used = 0;

	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
		return;

	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
		used += sizeof(*p);
	}

	while (used > curr) {
		prev = cmpxchg_relaxed(&highest, curr, used);

		if (prev == curr) {
			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
				tsk->comm, task_pid_nr(tsk), used);
			break;
		}

		curr = prev;
	}
}

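/* Free the task's shadow stack, warning if the end magic was overwritten. */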
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!s)
		return;

	WARN(task_scs_end_corrupted(tsk),
	     "corrupted shadow stack detected when freeing task\n");
	scs_check_usage(tsk);
	scs_free(s);
}