// SPDX-License-Identifier: GPL-2.0
/*
 * Clang Control Flow Integrity (CFI) error and slowpath handling.
 *
 * Copyright (C) 2019 Google LLC
 */

#include <linux/hardirq.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>

/* Compiler-defined handler names */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail
#define cfi_slowpath_handler	__cfi_slowpath_diag
#else /* enforcing */
#define cfi_failure_handler	__ubsan_handle_cfi_check_fail_abort
#define cfi_slowpath_handler	__cfi_slowpath
#endif /* CONFIG_CFI_PERMISSIVE */

static inline void handle_cfi_failure(void *ptr)
{
        if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
                WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
        else
                panic("CFI failure (target: %pS)\n", ptr);
}
#ifdef CONFIG_MODULES

#ifdef CONFIG_CFI_CLANG_SHADOW
/*
 * Index type. A 16-bit index can address at most (2^16)-2 pages (taking
 * into account SHADOW_INVALID), i.e. ~256M with 4k pages.
 */
typedef u16 shadow_t;
#define SHADOW_INVALID		((shadow_t)~0UL)

struct cfi_shadow {
        /* Page index for the beginning of the shadow */
        unsigned long base;
        /* An array of __cfi_check locations (as indices to the shadow) */
        shadow_t shadow[1];
} __packed;

/*
 * The shadow covers ~128M from the beginning of the module region. If
 * the region is larger, we fall back to __module_address for the rest.
 */
#define __SHADOW_RANGE		(_UL(SZ_128M) >> PAGE_SHIFT)

/* The in-memory size of struct cfi_shadow, always at least one page */
#define __SHADOW_PAGES		((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
#define SHADOW_PAGES		max(1UL, __SHADOW_PAGES)
#define SHADOW_SIZE		(SHADOW_PAGES << PAGE_SHIFT)

/* The actual size of the shadow array, minus metadata */
#define SHADOW_ARR_SIZE		(SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
#define SHADOW_ARR_SLOTS	(SHADOW_ARR_SIZE / sizeof(shadow_t))
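
/*
 * Illustrative sizing only, assuming 4 KiB pages (PAGE_SHIFT == 12) and a
 * 64-bit unsigned long; the actual numbers vary with the page size:
 *
 *   __SHADOW_RANGE   = SZ_128M >> 12                 = 32768 pages covered
 *   __SHADOW_PAGES   = (32768 * 2 bytes) >> 12       = 16 pages
 *   SHADOW_SIZE      = 16 << 12                      = 64 KiB
 *   SHADOW_ARR_SIZE  = 64 KiB - offsetof(..., shadow) = 65536 - 8 = 65528 bytes
 *   SHADOW_ARR_SLOTS = 65528 / 2                     = 32764 addressable pages
 *
 * i.e. slightly less than the nominal 128M range, because the first bytes
 * of the allocation hold the 'base' field.
 */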

static DEFINE_MUTEX(shadow_update_lock);
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;

/* Returns the index in the shadow for the given address */
static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
{
        unsigned long index;
        unsigned long page = ptr >> PAGE_SHIFT;

        if (unlikely(page < s->base))
                return -1; /* Outside of module area */

        index = page - s->base;
        if (index >= SHADOW_ARR_SLOTS)
                return -1; /* Cannot be addressed with shadow */

        return (int)index;
}

/* Returns the page address for an index in the shadow */
static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
        int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        return (s->base + index) << PAGE_SHIFT;
}

/* Returns the __cfi_check function address for the given shadow location */
static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
        int index)
{
        if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
                return 0;

        if (unlikely(s->shadow[index] == SHADOW_INVALID))
                return 0;

        /* __cfi_check is always page aligned */
        return (s->base + s->shadow[index]) << PAGE_SHIFT;
}
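
/*
 * Example of the mapping above (illustrative page indices only): with
 * s->base == 0x1000, a call target on page 0x1010 maps to shadow index
 * 0x10. If s->shadow[0x10] == 0x3, the owning module's __cfi_check lives
 * on page 0x1003, i.e. at address (0x1000 + 0x3) << PAGE_SHIFT. Both the
 * key and the stored value are page indices relative to s->base.
 */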

static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
        struct cfi_shadow *next)
{
        int i, index, check;

        /* Mark everything invalid */
        memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

        if (!prev)
                return; /* No previous shadow */

        /* If the base address didn't change, an update is not needed */
        if (prev->base == next->base) {
                memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
                return;
        }

        /* Convert the previous shadow to the new address range */
        for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
                if (prev->shadow[i] == SHADOW_INVALID)
                        continue;

                index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
                if (index < 0)
                        continue;

                check = ptr_to_shadow(next,
                                shadow_to_check_fn(prev, prev->shadow[i]));
                if (check < 0)
                        continue;

                next->shadow[index] = (shadow_t)check;
        }
}

static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
        unsigned long min_addr, unsigned long max_addr)
{
        int check_index;
        unsigned long check = (unsigned long)mod->cfi_check;
        unsigned long ptr;

        if (unlikely(!PAGE_ALIGNED(check))) {
                pr_warn("cfi: not using shadow for module %s\n", mod->name);
                return;
        }

        check_index = ptr_to_shadow(s, check);
        if (check_index < 0)
                return; /* Module not addressable with shadow */

        /* For each page, store the check function index in the shadow */
        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0) {
                        /* Each page must only contain one module */
                        WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
                        s->shadow[index] = (shadow_t)check_index;
                }
        }
}

static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
        unsigned long min_addr, unsigned long max_addr)
{
        unsigned long ptr;

        for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
                int index = ptr_to_shadow(s, ptr);

                if (index >= 0)
                        s->shadow[index] = SHADOW_INVALID;
        }
}

typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
        unsigned long min_addr, unsigned long max_addr);

static void update_shadow(struct module *mod, unsigned long base_addr,
        update_shadow_fn fn)
{
        struct cfi_shadow *prev;
        struct cfi_shadow *next;
        unsigned long min_addr, max_addr;

        next = (struct cfi_shadow *)vmalloc(SHADOW_SIZE);
        WARN_ON(!next);

        mutex_lock(&shadow_update_lock);
        prev = rcu_dereference_protected(cfi_shadow,
                                         mutex_is_locked(&shadow_update_lock));

        if (next) {
                next->base = base_addr >> PAGE_SHIFT;
                prepare_next_shadow(prev, next);

                min_addr = (unsigned long)mod->core_layout.base;
                max_addr = min_addr + mod->core_layout.text_size;
                fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);

                set_memory_ro((unsigned long)next, SHADOW_PAGES);
        }

        rcu_assign_pointer(cfi_shadow, next);
        mutex_unlock(&shadow_update_lock);
        synchronize_rcu_expedited();

        if (prev) {
                set_memory_rw((unsigned long)prev, SHADOW_PAGES);
                vfree(prev);
        }
}
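
/*
 * cfi_module_add/remove publish an updated shadow when a module is loaded
 * into or removed from the module region (the callers live in the module
 * loader). The replacement shadow is prepared off to the side, made
 * read-only, and swapped in under RCU, so lookups never observe a
 * partially updated table.
 */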
void cfi_module_add(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, add_module_to_shadow);
}

void cfi_module_remove(struct module *mod, unsigned long base_addr)
{
        update_shadow(mod, base_addr, remove_module_from_shadow);
}

static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
        unsigned long ptr)
{
        int index;

        if (unlikely(!s))
                return NULL; /* No shadow available */

        index = ptr_to_shadow(s, ptr);
        if (index < 0)
                return NULL; /* Cannot be addressed with shadow */

        return (cfi_check_fn)shadow_to_check_fn(s, index);
}

static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
        cfi_check_fn fn;

        rcu_read_lock_sched_notrace();
        fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
        rcu_read_unlock_sched_notrace();

        return fn;
}

#else /* !CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn __find_shadow_check_fn(unsigned long ptr)
{
        return NULL;
}

#endif /* CONFIG_CFI_CLANG_SHADOW */

static inline cfi_check_fn __find_module_check_fn(unsigned long ptr)
{
        cfi_check_fn fn = NULL;
        struct module *mod;

        rcu_read_lock_sched_notrace();
        mod = __module_address(ptr);
        if (mod)
                fn = mod->cfi_check;
        rcu_read_unlock_sched_notrace();

        return fn;
}

static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
        bool rcu;
        cfi_check_fn fn = NULL;

        /*
         * Indirect call checks can happen when RCU is not watching. Both
         * the shadow and __module_address use RCU, so we need to wake it
         * up before proceeding. Use rcu_nmi_enter/exit() as these calls
         * can happen anywhere.
         */
        rcu = rcu_is_watching();
        if (!rcu)
                rcu_nmi_enter();

        if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW)) {
                fn = __find_shadow_check_fn(ptr);
                if (fn)
                        goto out;
        }

        if (is_kernel_text(ptr)) {
                fn = __cfi_check;
                goto out;
        }

        fn = __find_module_check_fn(ptr);

out:
        if (!rcu)
                rcu_nmi_exit();

        return fn;
}
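
/*
 * Slowpath entry point, reached from the compiler-emitted check at indirect
 * call sites whose target lies outside the caller's own translation unit.
 * find_check_fn() resolves the target address to the owning __cfi_check
 * (shadow first, then kernel text, then __module_address); a target that
 * cannot be resolved is treated as a CFI failure.
 */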
void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
        cfi_check_fn fn = find_check_fn((unsigned long)ptr);

        if (!IS_ENABLED(CONFIG_CFI_PERMISSIVE))
                diag = NULL;

        if (likely(fn))
                fn(id, ptr, diag);
        else /* Don't allow unchecked modules */
                handle_cfi_failure(ptr);
}

#else /* !CONFIG_MODULES */

void cfi_slowpath_handler(uint64_t id, void *ptr, void *diag)
{
        handle_cfi_failure(ptr); /* No modules */
}

#endif /* CONFIG_MODULES */

EXPORT_SYMBOL(cfi_slowpath_handler);

void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
        handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);

void __cfi_check_fail(void *data, void *ptr)
{
        handle_cfi_failure(ptr);
}