// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>
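
/*
 * Whether to return to firmware with an HVC or an SMC; set from the SMCCC
 * conduit in sdei_arch_get_entry_point().
 */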
unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as a scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
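/*
 * Declared unconditionally so the on_sdei_*_stack() helpers below compile
 * even when the corresponding feature is disabled; storage is only defined
 * when CONFIG_VMAP_STACK/CONFIG_SHADOW_CALL_STACK is built in.
 */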
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}
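
/*
 * Stack-walking helpers: report whether @sp lies on this CPU's SDEI stacks
 * and, if so, fill in @info for the unwinder.
 */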
static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}

bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
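		/*
		 * With KPTI the event may be taken while the kernel is
		 * unmapped, so register the trampoline alias of the entry
		 * point; the trampoline maps the kernel before branching to
		 * the real handler.
		 */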
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}

/*
 * __sdei_handler() returns one of:
 * SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED - failure, return this error code to firmware.
 * virtual-address - success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);
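
	/*
	 * Firmware enters the handler with the event number, argument,
	 * interrupted PC and PSTATE in x0-x3, so those registers of the
	 * interrupted context have to be read back with SDEI_EVENT_CONTEXT.
	 * The KPTI entry trampoline clobbers one register more.
	 */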
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so the HW hasn't set PAN;
	 * set it now. UAO will be cleared by sdei_event_handler()'s
	 * force_uaccess_begin() call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
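	/*
	 * Per that table, +0x280 is the IRQ vector for "current EL with SPx",
	 * +0x480 for "lower EL using AArch64" and +0x680 for "lower EL using
	 * AArch32".
	 */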
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}
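
/*
 * SDEI events can fire while interrupts are masked, so the wrapper brackets
 * the handler with NMI entry/exit accounting before doing any real work.
 */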
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	arm64_exit_nmi(regs);

	return ret;
}