entry.S

  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Low-level exception handling code
  4. *
  5. * Copyright (C) 2012 ARM Ltd.
  6. * Authors: Catalin Marinas <catalin.marinas@arm.com>
  7. * Will Deacon <will.deacon@arm.com>
  8. */
  9. #include <linux/arm-smccc.h>
  10. #include <linux/init.h>
  11. #include <linux/linkage.h>
  12. #include <asm/alternative.h>
  13. #include <asm/assembler.h>
  14. #include <asm/asm-offsets.h>
  15. #include <asm/asm_pointer_auth.h>
  16. #include <asm/bug.h>
  17. #include <asm/cpufeature.h>
  18. #include <asm/errno.h>
  19. #include <asm/esr.h>
  20. #include <asm/irq.h>
  21. #include <asm/memory.h>
  22. #include <asm/mmu.h>
  23. #include <asm/processor.h>
  24. #include <asm/ptrace.h>
  25. #include <asm/scs.h>
  26. #include <asm/thread_info.h>
  27. #include <asm/asm-uaccess.h>
  28. #include <asm/unistd.h>
  29. /*
  30. * Context tracking and irqflag tracing need to instrument transitions between
  31. * user and kernel mode.
  32. */
  33. .macro user_exit_irqoff
  34. #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
  35. bl enter_from_user_mode
  36. #endif
  37. .endm
  38. .macro user_enter_irqoff
  39. #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
  40. bl exit_to_user_mode
  41. #endif
  42. .endm
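/*
 * Zero x0-x29 so that no stale, user-controlled register values remain
 * live across an exception taken from EL0 (used by kernel_entry below).
 */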
  43. .macro clear_gp_regs
  44. .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
  45. mov x\n, xzr
  46. .endr
  47. .endm
  48. /*
  49. * Bad Abort numbers
  50. *-----------------
  51. */
  52. #define BAD_SYNC 0
  53. #define BAD_IRQ 1
  54. #define BAD_FIQ 2
  55. #define BAD_ERROR 3
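/* These values are passed to bad_mode() as the 'reason' argument by inv_entry. */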
  56. .macro kernel_ventry, el, label, regsize = 64
  57. .align 7
  58. .Lventry_start\@:
  59. .if \el == 0
  60. /*
  61. * This must be the first instruction of the EL0 vector entries. It is
  62. * skipped by the trampoline vectors, to trigger the cleanup.
  63. */
  64. b .Lskip_tramp_vectors_cleanup\@
  65. .if \regsize == 64
  66. mrs x30, tpidrro_el0
  67. msr tpidrro_el0, xzr
  68. .else
  69. mov x30, xzr
  70. .endif
  71. .Lskip_tramp_vectors_cleanup\@:
  72. .endif
  73. sub sp, sp, #S_FRAME_SIZE
  74. #ifdef CONFIG_VMAP_STACK
  75. /*
  76. * Test whether the SP has overflowed, without corrupting a GPR.
  77. * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
  78. * should always be zero.
  79. */
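/*
 * The add/sub juggling below recovers the decremented SP into x0 without
 * needing a scratch register: sp temporarily becomes sp + x0 and x0 then
 * holds the SP value to test. If bit THREAD_SHIFT is set we have run off
 * the stack, so branch to the overflow path at 0f; otherwise the second
 * sub pair restores x0 and sp and we continue on the fast path.
 */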
  80. add sp, sp, x0 // sp' = sp + x0
  81. sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
  82. tbnz x0, #THREAD_SHIFT, 0f
  83. sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
  84. sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
  85. b el\()\el\()_\label
  86. 0:
  87. /*
  88. * Either we've just detected an overflow, or we've taken an exception
  89. * while on the overflow stack. Either way, we won't return to
  90. * userspace, and can clobber EL0 registers to free up GPRs.
  91. */
  92. /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
  93. msr tpidr_el0, x0
  94. /* Recover the original x0 value and stash it in tpidrro_el0 */
  95. sub x0, sp, x0
  96. msr tpidrro_el0, x0
  97. /* Switch to the overflow stack */
  98. adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
  99. /*
  100. * Check whether we were already on the overflow stack. This may happen
  101. * after panic() re-enables interrupts.
  102. */
  103. mrs x0, tpidr_el0 // sp of interrupted context
  104. sub x0, sp, x0 // delta with top of overflow stack
  105. tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
  106. b.ne __bad_stack // no? -> bad stack pointer
  107. /* We were already on the overflow stack. Restore sp/x0 and carry on. */
  108. sub sp, sp, x0
  109. mrs x0, tpidrro_el0
  110. #endif
  111. b el\()\el\()_\label
  112. .org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
  113. .endm
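/*
 * Compute the .entry.tramp.text alias of \sym:
 *   \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text)
 * \tmp is clobbered.
 */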
  114. .macro tramp_alias, dst, sym, tmp
  115. mov_q \dst, TRAMP_VALIAS
  116. adr_l \tmp, \sym
  117. add \dst, \dst, \tmp
  118. adr_l \tmp, .entry.tramp.text
  119. sub \dst, \dst, \tmp
  120. .endm
  121. /*
  122. * This macro corrupts x0-x3. It is the caller's duty to save/restore
  123. * them if required.
  124. */
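/*
 * \state is forwarded to the ARM_SMCCC_ARCH_WORKAROUND_2 firmware call:
 * 1 enables the SSBD mitigation on kernel entry, 0 disables it again on
 * the return to userspace (see the two apply_ssbd call sites).
 */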
  125. .macro apply_ssbd, state, tmp1, tmp2
  126. alternative_cb spectre_v4_patch_fw_mitigation_enable
  127. b .L__asm_ssbd_skip\@ // Patched to NOP
  128. alternative_cb_end
  129. ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
  130. cbz \tmp2, .L__asm_ssbd_skip\@
  131. ldr \tmp2, [tsk, #TSK_TI_FLAGS]
  132. tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
  133. mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
  134. mov w1, #\state
  135. alternative_cb smccc_patch_fw_mitigation_conduit
  136. nop // Patched to SMC/HVC #0
  137. alternative_cb_end
  138. .L__asm_ssbd_skip\@:
  139. .endm
  140. /* Check for MTE asynchronous tag check faults */
  141. .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
  142. #ifdef CONFIG_ARM64_MTE
  143. .arch_extension lse
  144. alternative_if_not ARM64_MTE
  145. b 1f
  146. alternative_else_nop_endif
  147. /*
  148. * Asynchronous tag check faults are only possible in ASYNC (2) or
  149. * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
  150. * set, so skip the check if it is unset.
  151. */
  152. tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
  153. mrs_s \tmp, SYS_TFSRE0_EL1
  154. tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
  155. /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
  156. mov \tmp, #_TIF_MTE_ASYNC_FAULT
  157. add \ti_flags, tsk, #TSK_TI_FLAGS
  158. stset \tmp, [\ti_flags]
  159. 1:
  160. #endif
  161. .endm
  162. /* Clear the MTE asynchronous tag check faults */
  163. .macro clear_mte_async_tcf thread_sctlr
  164. #ifdef CONFIG_ARM64_MTE
  165. alternative_if ARM64_MTE
  166. /* See comment in check_mte_async_tcf above. */
  167. tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
  168. dsb ish
  169. msr_s SYS_TFSRE0_EL1, xzr
  170. 1:
  171. alternative_else_nop_endif
  172. #endif
  173. .endm
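/*
 * Program GCR_EL1 from \mte_ctrl: the 16-bit user tag exclude mask is
 * extracted starting at MTE_CTRL_GCR_USER_EXCL_SHIFT, and RRND is always
 * set.
 */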
  174. .macro mte_set_gcr, mte_ctrl, tmp
  175. #ifdef CONFIG_ARM64_MTE
  176. ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
  177. orr \tmp, \tmp, #SYS_GCR_EL1_RRND
  178. msr_s SYS_GCR_EL1, \tmp
  179. #endif
  180. .endm
  181. .macro mte_set_kernel_gcr, tmp, tmp2
  182. #ifdef CONFIG_KASAN_HW_TAGS
  183. alternative_cb kasan_hw_tags_enable
  184. b 1f
  185. alternative_cb_end
  186. mov \tmp, KERNEL_GCR_EL1
  187. msr_s SYS_GCR_EL1, \tmp
  188. 1:
  189. #endif
  190. .endm
  191. .macro mte_set_user_gcr, tsk, tmp, tmp2
  192. #ifdef CONFIG_KASAN_HW_TAGS
  193. alternative_cb kasan_hw_tags_enable
  194. b 1f
  195. alternative_cb_end
  196. ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
  197. mte_set_gcr \tmp, \tmp2
  198. 1:
  199. #endif
  200. .endm
  201. .macro kernel_entry, el, regsize = 64
  202. .if \regsize == 32
  203. mov w0, w0 // zero upper 32 bits of x0
  204. .endif
  205. stp x0, x1, [sp, #16 * 0]
  206. stp x2, x3, [sp, #16 * 1]
  207. stp x4, x5, [sp, #16 * 2]
  208. stp x6, x7, [sp, #16 * 3]
  209. stp x8, x9, [sp, #16 * 4]
  210. stp x10, x11, [sp, #16 * 5]
  211. stp x12, x13, [sp, #16 * 6]
  212. stp x14, x15, [sp, #16 * 7]
  213. stp x16, x17, [sp, #16 * 8]
  214. stp x18, x19, [sp, #16 * 9]
  215. stp x20, x21, [sp, #16 * 10]
  216. stp x22, x23, [sp, #16 * 11]
  217. stp x24, x25, [sp, #16 * 12]
  218. stp x26, x27, [sp, #16 * 13]
  219. stp x28, x29, [sp, #16 * 14]
  220. .if \el == 0
  221. clear_gp_regs
  222. mrs x21, sp_el0
  223. ldr_this_cpu tsk, __entry_task, x20
  224. msr sp_el0, tsk
  225. /*
  226. * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
  227. * when scheduling.
  228. */
  229. ldr x19, [tsk, #TSK_TI_FLAGS]
  230. disable_step_tsk x19, x20
  231. /* Check for asynchronous tag check faults in user space */
  232. ldr x0, [tsk, THREAD_SCTLR_USER]
  233. check_mte_async_tcf x22, x23, x0
  234. #ifdef CONFIG_ARM64_PTR_AUTH
  235. alternative_if ARM64_HAS_ADDRESS_AUTH
  236. /*
  237. * Enable IA for in-kernel PAC if the task had it disabled. Although
  238. * this could be implemented with an unconditional MRS which would avoid
  239. * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
  240. *
  241. * Install the kernel IA key only if IA was enabled in the task. If IA
  242. * was disabled on kernel exit then we would have left the kernel IA
  243. * installed so there is no need to install it again.
  244. */
  245. tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
  246. __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
  247. b 2f
  248. 1:
  249. mrs x0, sctlr_el1
  250. orr x0, x0, SCTLR_ELx_ENIA
  251. msr sctlr_el1, x0
  252. 2:
  253. alternative_else_nop_endif
  254. #endif
  255. apply_ssbd 1, x22, x23
  256. mte_set_kernel_gcr x22, x23
  257. /*
  258. * Any non-self-synchronizing system register updates required for
  259. * kernel entry should be placed before this point.
  260. */
  261. alternative_if ARM64_MTE
  262. isb
  263. b 1f
  264. alternative_else_nop_endif
  265. alternative_if ARM64_HAS_ADDRESS_AUTH
  266. isb
  267. alternative_else_nop_endif
  268. 1:
  269. scs_load tsk, x20
  270. .else
  271. add x21, sp, #S_FRAME_SIZE
  272. get_current_task tsk
  273. /* Save the task's original addr_limit and set USER_DS */
  274. ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
  275. str x20, [sp, #S_ORIG_ADDR_LIMIT]
  276. mov x20, #USER_DS
  277. str x20, [tsk, #TSK_TI_ADDR_LIMIT]
  278. /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
  279. .endif /* \el == 0 */
  280. mrs x22, elr_el1
  281. mrs x23, spsr_el1
  282. stp lr, x21, [sp, #S_LR]
  283. /*
  284. * In order to be able to dump the contents of struct pt_regs at the
  285. * time the exception was taken (in case we attempt to walk the call
  286. * stack later), chain it together with the stack frames.
  287. */
  288. .if \el == 0
  289. stp xzr, xzr, [sp, #S_STACKFRAME]
  290. .else
  291. stp x29, x22, [sp, #S_STACKFRAME]
  292. .endif
  293. add x29, sp, #S_STACKFRAME
  294. #ifdef CONFIG_ARM64_SW_TTBR0_PAN
  295. alternative_if_not ARM64_HAS_PAN
  296. bl __swpan_entry_el\el
  297. alternative_else_nop_endif
  298. #endif
  299. stp x22, x23, [sp, #S_PC]
  300. /* Not in a syscall by default (el0_svc overwrites for real syscall) */
  301. .if \el == 0
  302. mov w21, #NO_SYSCALL
  303. str w21, [sp, #S_SYSCALLNO]
  304. .endif
  305. /* Save pmr */
  306. alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  307. mrs_s x20, SYS_ICC_PMR_EL1
  308. str x20, [sp, #S_PMR_SAVE]
  309. mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
  310. msr_s SYS_ICC_PMR_EL1, x20
  311. alternative_else_nop_endif
  312. /* Re-enable tag checking (TCO set on exception entry) */
  313. #ifdef CONFIG_ARM64_MTE
  314. alternative_if ARM64_MTE
  315. SET_PSTATE_TCO(0)
  316. alternative_else_nop_endif
  317. #endif
  318. /*
  319. * Registers that may be useful after this macro is invoked:
  320. *
  321. * x20 - ICC_PMR_EL1
  322. * x21 - aborted SP
  323. * x22 - aborted PC
  324. * x23 - aborted PSTATE
  325. */
  326. .endm
  327. .macro kernel_exit, el
  328. .if \el != 0
  329. disable_daif
  330. /* Restore the task's original addr_limit. */
  331. ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
  332. str x20, [tsk, #TSK_TI_ADDR_LIMIT]
  333. /* No need to restore UAO, it will be restored from SPSR_EL1 */
  334. .endif
  335. /* Restore pmr */
  336. alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  337. ldr x20, [sp, #S_PMR_SAVE]
  338. msr_s SYS_ICC_PMR_EL1, x20
  339. mrs_s x21, SYS_ICC_CTLR_EL1
  340. tbz x21, #6, .L__skip_pmr_sync\@ // Check for ICC_CTLR_EL1.PMHE
  341. dsb sy // Ensure priority change is seen by redistributor
  342. .L__skip_pmr_sync\@:
  343. alternative_else_nop_endif
  344. ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
  345. #ifdef CONFIG_ARM64_SW_TTBR0_PAN
  346. alternative_if_not ARM64_HAS_PAN
  347. bl __swpan_exit_el\el
  348. alternative_else_nop_endif
  349. #endif
  350. .if \el == 0
  351. ldr x23, [sp, #S_SP] // load return stack pointer
  352. msr sp_el0, x23
  353. tst x22, #PSR_MODE32_BIT // native task?
  354. b.eq 3f
  355. #ifdef CONFIG_ARM64_ERRATUM_845719
  356. alternative_if ARM64_WORKAROUND_845719
  357. #ifdef CONFIG_PID_IN_CONTEXTIDR
  358. mrs x29, contextidr_el1
  359. msr contextidr_el1, x29
  360. #else
  361. msr contextidr_el1, xzr
  362. #endif
  363. alternative_else_nop_endif
  364. #endif
  365. 3:
  366. scs_save tsk, x0
  367. /* Ignore asynchronous tag check faults in the uaccess routines */
  368. ldr x0, [tsk, THREAD_SCTLR_USER]
  369. clear_mte_async_tcf x0
  370. #ifdef CONFIG_ARM64_PTR_AUTH
  371. alternative_if ARM64_HAS_ADDRESS_AUTH
  372. /*
  373. * IA was enabled for in-kernel PAC. Disable it now if needed, or
  374. * alternatively install the user's IA. All other per-task keys and
  375. * SCTLR bits were updated on task switch.
  376. *
  377. * No kernel C function calls after this.
  378. */
  379. tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
  380. __ptrauth_keys_install_user tsk, x0, x1, x2
  381. b 2f
  382. 1:
  383. mrs x0, sctlr_el1
  384. bic x0, x0, SCTLR_ELx_ENIA
  385. msr sctlr_el1, x0
  386. 2:
  387. alternative_else_nop_endif
  388. #endif
  389. mte_set_user_gcr tsk, x0, x1
  390. apply_ssbd 0, x0, x1
  391. .endif
  392. msr elr_el1, x21 // set up the return data
  393. msr spsr_el1, x22
  394. ldp x0, x1, [sp, #16 * 0]
  395. ldp x2, x3, [sp, #16 * 1]
  396. ldp x4, x5, [sp, #16 * 2]
  397. ldp x6, x7, [sp, #16 * 3]
  398. ldp x8, x9, [sp, #16 * 4]
  399. ldp x10, x11, [sp, #16 * 5]
  400. ldp x12, x13, [sp, #16 * 6]
  401. ldp x14, x15, [sp, #16 * 7]
  402. ldp x16, x17, [sp, #16 * 8]
  403. ldp x18, x19, [sp, #16 * 9]
  404. ldp x20, x21, [sp, #16 * 10]
  405. ldp x22, x23, [sp, #16 * 11]
  406. ldp x24, x25, [sp, #16 * 12]
  407. ldp x26, x27, [sp, #16 * 13]
  408. ldp x28, x29, [sp, #16 * 14]
  409. .if \el == 0
  410. alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
  411. ldr lr, [sp, #S_LR]
  412. add sp, sp, #S_FRAME_SIZE // restore sp
  413. eret
  414. alternative_else_nop_endif
  415. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  416. bne 4f
  417. msr far_el1, x29
  418. tramp_alias x30, tramp_exit_native, x29
  419. br x30
  420. 4:
  421. tramp_alias x30, tramp_exit_compat, x29
  422. br x30
  423. #endif
  424. .else
  425. ldr lr, [sp, #S_LR]
  426. add sp, sp, #S_FRAME_SIZE // restore sp
  427. /* Ensure any device/NC reads complete */
  428. alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
  429. eret
  430. .endif
  431. sb
  432. .endm
  433. #ifdef CONFIG_ARM64_SW_TTBR0_PAN
  434. /*
  435. * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
  436. * EL0, there is no need to check the state of TTBR0_EL1 since
  437. * accesses are always enabled.
  438. * Note that the meaning of this bit differs from the ARMv8.1 PAN
  439. * feature as all TTBR0_EL1 accesses are disabled, not just those to
  440. * user mappings.
  441. */
  442. SYM_CODE_START_LOCAL(__swpan_entry_el1)
  443. mrs x21, ttbr0_el1
  444. tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
  445. orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
  446. b.eq 1f // TTBR0 access already disabled
  447. and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
  448. SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
  449. __uaccess_ttbr0_disable x21
  450. 1: ret
  451. SYM_CODE_END(__swpan_entry_el1)
  452. /*
  453. * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
  454. * PAN bit checking.
  455. */
  456. SYM_CODE_START_LOCAL(__swpan_exit_el1)
  457. tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
  458. __uaccess_ttbr0_enable x0, x1
  459. 1: and x22, x22, #~PSR_PAN_BIT // ARMv8.0 CPUs do not understand this bit
  460. ret
  461. SYM_CODE_END(__swpan_exit_el1)
  462. SYM_CODE_START_LOCAL(__swpan_exit_el0)
  463. __uaccess_ttbr0_enable x0, x1
  464. /*
  465. * Enable errata workarounds only if returning to user. The only
  466. * workaround currently required for TTBR0_EL1 changes is for the
  467. * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
  468. * corruption).
  469. */
  470. b post_ttbr_update_workaround
  471. SYM_CODE_END(__swpan_exit_el0)
  472. #endif
  473. .macro irq_stack_entry
  474. mov x19, sp // preserve the original sp
  475. #ifdef CONFIG_SHADOW_CALL_STACK
  476. mov x24, scs_sp // preserve the original shadow stack
  477. #endif
  478. /*
  479. * Compare sp with the base of the task stack.
  480. * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
  481. * and should switch to the irq stack.
  482. */
  483. ldr x25, [tsk, TSK_STACK]
  484. eor x25, x25, x19
  485. and x25, x25, #~(THREAD_SIZE - 1)
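/* Mismatch: not on the task stack (e.g. already on the IRQ or overflow stack), so keep the current sp. */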
  486. cbnz x25, 9998f
  487. ldr_this_cpu x25, irq_stack_ptr, x26
  488. mov x26, #IRQ_STACK_SIZE
  489. add x26, x25, x26
  490. /* switch to the irq stack */
  491. mov sp, x26
  492. #ifdef CONFIG_SHADOW_CALL_STACK
  493. /* also switch to the irq shadow stack */
  494. ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
  495. #endif
  496. 9998:
  497. .endm
  498. /*
  499. * The callee-saved regs (x19-x29) should be preserved between
  500. * irq_stack_entry and irq_stack_exit, but note that kernel_entry
  501. * uses x20-x23 to store data for later use.
  502. */
  503. .macro irq_stack_exit
  504. mov sp, x19
  505. #ifdef CONFIG_SHADOW_CALL_STACK
  506. mov scs_sp, x24
  507. #endif
  508. .endm
  509. /* GPRs used by entry code */
  510. tsk .req x28 // current thread_info
  511. /*
  512. * Interrupt handling.
  513. */
  514. .macro irq_handler, handler:req
  515. ldr_l x1, \handler
  516. mov x0, sp
  517. irq_stack_entry
  518. blr x1
  519. irq_stack_exit
  520. .endm
  521. #ifdef CONFIG_ARM64_PSEUDO_NMI
  522. /*
  523. * Set res to 0 if irqs were unmasked in interrupted context.
  524. * Otherwise set res to non-0 value.
  525. */
  526. .macro test_irqs_unmasked res:req, pmr:req
  527. alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  528. sub \res, \pmr, #GIC_PRIO_IRQON
  529. alternative_else
  530. mov \res, xzr
  531. alternative_endif
  532. .endm
  533. #endif
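/*
 * With pseudo-NMI priority masking, set PMR to reflect that interrupts
 * are currently disabled by PSTATE.I (GIC_PRIO_PSR_I_SET) so the irqflags
 * code sees a consistent state; a NOP otherwise.
 */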
  534. .macro gic_prio_kentry_setup, tmp:req
  535. #ifdef CONFIG_ARM64_PSEUDO_NMI
  536. alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  537. mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
  538. msr_s SYS_ICC_PMR_EL1, \tmp
  539. alternative_else_nop_endif
  540. #endif
  541. .endm
  542. .macro el1_interrupt_handler, handler:req
  543. enable_da_f
  544. mov x0, sp
  545. bl enter_el1_irq_or_nmi
  546. irq_handler \handler
  547. #ifdef CONFIG_PREEMPTION
  548. ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
  549. alternative_if ARM64_HAS_IRQ_PRIO_MASKING
  550. /*
  551. * DA_F were cleared at the start of handling. If anything is set in DAIF,
  552. * we have just handled an NMI, so skip preemption.
  553. */
  554. mrs x0, daif
  555. orr x24, x24, x0
  556. alternative_else_nop_endif
  557. cbnz x24, 1f // preempt count != 0 || NMI return path
  558. bl arm64_preempt_schedule_irq // irq en/disable is done inside
  559. 1:
  560. #endif
  561. mov x0, sp
  562. bl exit_el1_irq_or_nmi
  563. .endm
  564. .macro el0_interrupt_handler, handler:req
  565. user_exit_irqoff
  566. enable_da_f
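/*
 * x22 holds the interrupted PC (see kernel_entry). If bit 55 is set it
 * looks like a TTBR1 (kernel) address, which EL0 should never be running
 * from, so invoke the branch-predictor hardening callback.
 */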
  567. tbz x22, #55, 1f
  568. bl do_el0_irq_bp_hardening
  569. 1:
  570. irq_handler \handler
  571. .endm
  572. .text
  573. /*
  574. * Exception vectors.
  575. */
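/*
 * 16 slots of 128 bytes each (2KB in total, hence .align 11): four groups
 * (EL1t, EL1h, 64-bit EL0, 32-bit EL0) of Sync/IRQ/FIQ/SError entries.
 */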
  576. .pushsection ".entry.text", "ax"
  577. .align 11
  578. SYM_CODE_START(vectors)
  579. kernel_ventry 1, sync_invalid // Synchronous EL1t
  580. kernel_ventry 1, irq_invalid // IRQ EL1t
  581. kernel_ventry 1, fiq_invalid // FIQ EL1t
  582. kernel_ventry 1, error_invalid // Error EL1t
  583. kernel_ventry 1, sync // Synchronous EL1h
  584. kernel_ventry 1, irq // IRQ EL1h
  585. kernel_ventry 1, fiq_invalid // FIQ EL1h
  586. kernel_ventry 1, error // Error EL1h
  587. kernel_ventry 0, sync // Synchronous 64-bit EL0
  588. kernel_ventry 0, irq // IRQ 64-bit EL0
  589. kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
  590. kernel_ventry 0, error // Error 64-bit EL0
  591. #ifdef CONFIG_COMPAT
  592. kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
  593. kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
  594. kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
  595. kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
  596. #else
  597. kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
  598. kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
  599. kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
  600. kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
  601. #endif
  602. SYM_CODE_END(vectors)
  603. #ifdef CONFIG_VMAP_STACK
  604. /*
  605. * We detected an overflow in kernel_ventry, which switched to the
  606. * overflow stack. Stash the exception regs, and head to our overflow
  607. * handler.
  608. */
  609. __bad_stack:
  610. /* Restore the original x0 value */
  611. mrs x0, tpidrro_el0
  612. /*
  613. * Store the original GPRs to the new stack. The original SP (minus
  614. * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
  615. */
  616. sub sp, sp, #S_FRAME_SIZE
  617. kernel_entry 1
  618. mrs x0, tpidr_el0
  619. add x0, x0, #S_FRAME_SIZE
  620. str x0, [sp, #S_SP]
  621. /* Stash the regs for handle_bad_stack */
  622. mov x0, sp
  623. /* Time to die */
  624. bl handle_bad_stack
  625. ASM_BUG()
  626. #endif /* CONFIG_VMAP_STACK */
  627. /*
  628. * Invalid mode handlers
  629. */
  630. .macro inv_entry, el, reason, regsize = 64
  631. kernel_entry \el, \regsize
  632. mov x0, sp
  633. mov x1, #\reason
  634. mrs x2, esr_el1
  635. bl bad_mode
  636. ASM_BUG()
  637. .endm
  638. SYM_CODE_START_LOCAL(el0_sync_invalid)
  639. inv_entry 0, BAD_SYNC
  640. SYM_CODE_END(el0_sync_invalid)
  641. SYM_CODE_START_LOCAL(el0_irq_invalid)
  642. inv_entry 0, BAD_IRQ
  643. SYM_CODE_END(el0_irq_invalid)
  644. SYM_CODE_START_LOCAL(el0_fiq_invalid)
  645. inv_entry 0, BAD_FIQ
  646. SYM_CODE_END(el0_fiq_invalid)
  647. SYM_CODE_START_LOCAL(el0_error_invalid)
  648. inv_entry 0, BAD_ERROR
  649. SYM_CODE_END(el0_error_invalid)
  650. #ifdef CONFIG_COMPAT
  651. SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
  652. inv_entry 0, BAD_FIQ, 32
  653. SYM_CODE_END(el0_fiq_invalid_compat)
  654. #endif
  655. SYM_CODE_START_LOCAL(el1_sync_invalid)
  656. inv_entry 1, BAD_SYNC
  657. SYM_CODE_END(el1_sync_invalid)
  658. SYM_CODE_START_LOCAL(el1_irq_invalid)
  659. inv_entry 1, BAD_IRQ
  660. SYM_CODE_END(el1_irq_invalid)
  661. SYM_CODE_START_LOCAL(el1_fiq_invalid)
  662. inv_entry 1, BAD_FIQ
  663. SYM_CODE_END(el1_fiq_invalid)
  664. SYM_CODE_START_LOCAL(el1_error_invalid)
  665. inv_entry 1, BAD_ERROR
  666. SYM_CODE_END(el1_error_invalid)
  667. /*
  668. * EL1 mode handlers.
  669. */
  670. .align 6
  671. SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
  672. kernel_entry 1
  673. mov x0, sp
  674. bl el1_sync_handler
  675. kernel_exit 1
  676. SYM_CODE_END(el1_sync)
  677. .align 6
  678. SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
  679. kernel_entry 1
  680. el1_interrupt_handler handle_arch_irq
  681. kernel_exit 1
  682. SYM_CODE_END(el1_irq)
  683. /*
  684. * EL0 mode handlers.
  685. */
  686. .align 6
  687. SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
  688. kernel_entry 0
  689. mov x0, sp
  690. bl el0_sync_handler
  691. b ret_to_user
  692. SYM_CODE_END(el0_sync)
  693. #ifdef CONFIG_COMPAT
  694. .align 6
  695. SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
  696. kernel_entry 0, 32
  697. mov x0, sp
  698. bl el0_sync_compat_handler
  699. b ret_to_user
  700. SYM_CODE_END(el0_sync_compat)
  701. .align 6
  702. SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
  703. kernel_entry 0, 32
  704. b el0_irq_naked
  705. SYM_CODE_END(el0_irq_compat)
  706. SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
  707. kernel_entry 0, 32
  708. b el0_error_naked
  709. SYM_CODE_END(el0_error_compat)
  710. #endif
  711. .align 6
  712. SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
  713. kernel_entry 0
  714. el0_irq_naked:
  715. el0_interrupt_handler handle_arch_irq
  716. b ret_to_user
  717. SYM_CODE_END(el0_irq)
  718. SYM_CODE_START_LOCAL(el1_error)
  719. kernel_entry 1
  720. mrs x1, esr_el1
  721. enable_dbg
  722. mov x0, sp
  723. bl do_serror
  724. kernel_exit 1
  725. SYM_CODE_END(el1_error)
  726. SYM_CODE_START_LOCAL(el0_error)
  727. kernel_entry 0
  728. el0_error_naked:
  729. mrs x25, esr_el1
  730. user_exit_irqoff
  731. enable_dbg
  732. mov x0, sp
  733. mov x1, x25
  734. bl do_serror
  735. enable_da_f
  736. b ret_to_user
  737. SYM_CODE_END(el0_error)
  738. /*
  739. * "slow" syscall return path.
  740. */
  741. SYM_CODE_START_LOCAL(ret_to_user)
  742. disable_daif
  743. gic_prio_kentry_setup tmp=x3
  744. #ifdef CONFIG_TRACE_IRQFLAGS
  745. bl trace_hardirqs_off
  746. #endif
  747. ldr x19, [tsk, #TSK_TI_FLAGS]
  748. and x2, x19, #_TIF_WORK_MASK
  749. cbnz x2, work_pending
  750. finish_ret_to_user:
  751. user_enter_irqoff
  752. enable_step_tsk x19, x2
  753. #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
  754. bl stackleak_erase
  755. #endif
  756. kernel_exit 0
  757. /*
  758. * Ok, we need to do extra processing, enter the slow path.
  759. */
  760. work_pending:
  761. mov x0, sp // 'regs'
  762. mov x1, x19
  763. bl do_notify_resume
  764. ldr x19, [tsk, #TSK_TI_FLAGS] // re-check for single-step
  765. b finish_ret_to_user
  766. SYM_CODE_END(ret_to_user)
  767. .popsection // .entry.text
  768. // Move from tramp_pg_dir to swapper_pg_dir
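// tramp_pg_dir is placed a fixed 2 pages below swapper_pg_dir in the kernel
// image, so the switch is a constant offset applied to the TTBR1 baddr and
// needs no memory access; clearing USER_ASID_FLAG selects the kernel ASID.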
  769. .macro tramp_map_kernel, tmp
  770. mrs \tmp, ttbr1_el1
  771. add \tmp, \tmp, #(2 * PAGE_SIZE)
  772. bic \tmp, \tmp, #USER_ASID_FLAG
  773. msr ttbr1_el1, \tmp
  774. #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
  775. alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
  776. /* ASID already in \tmp[63:48] */
  777. movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
  778. movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
  779. /* 2MB boundary containing the vectors, so we nobble the walk cache */
  780. movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
  781. isb
  782. tlbi vae1, \tmp
  783. dsb nsh
  784. alternative_else_nop_endif
  785. #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
  786. .endm
  787. // Move from swapper_pg_dir to tramp_pg_dir
  788. .macro tramp_unmap_kernel, tmp
  789. mrs \tmp, ttbr1_el1
  790. sub \tmp, \tmp, #(2 * PAGE_SIZE)
  791. orr \tmp, \tmp, #USER_ASID_FLAG
  792. msr ttbr1_el1, \tmp
  793. /*
  794. * We avoid running the post_ttbr_update_workaround here because
  795. * it's only needed by Cavium ThunderX, which requires KPTI to be
  796. * disabled.
  797. */
  798. .endm
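/*
 * The trampoline data page is mapped one page below .entry.tramp.text; it
 * holds the __entry_tramp_data_* variables defined near the end of this
 * file (CONFIG_RANDOMIZE_BASE only).
 */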
  799. .macro tramp_data_page dst
  800. adr_l \dst, .entry.tramp.text
  801. sub \dst, \dst, PAGE_SIZE
  802. .endm
  803. .macro tramp_data_read_var dst, var
  804. #ifdef CONFIG_RANDOMIZE_BASE
  805. tramp_data_page \dst
  806. add \dst, \dst, #:lo12:__entry_tramp_data_\var
  807. ldr \dst, [\dst]
  808. #else
  809. ldr \dst, =\var
  810. #endif
  811. .endm
  812. #define BHB_MITIGATION_NONE 0
  813. #define BHB_MITIGATION_LOOP 1
  814. #define BHB_MITIGATION_FW 2
  815. #define BHB_MITIGATION_INSN 3
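/*
 * Selects which Spectre-BHB mitigation a generated trampoline vector uses:
 * none, the loop workaround, a firmware call, or the CLRBHB instruction.
 */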
  816. .macro tramp_ventry, vector_start, regsize, kpti, bhb
  817. .align 7
  818. 1:
  819. .if \regsize == 64
  820. msr tpidrro_el0, x30 // Restored in kernel_ventry
  821. .endif
  822. .if \bhb == BHB_MITIGATION_LOOP
  823. /*
  824. * This sequence must appear before the first indirect branch, i.e. the
  825. * ret out of tramp_ventry. It appears here because x30 is free.
  826. */
  827. __mitigate_spectre_bhb_loop x30
  828. .endif // \bhb == BHB_MITIGATION_LOOP
  829. .if \bhb == BHB_MITIGATION_INSN
  830. clearbhb
  831. isb
  832. .endif // \bhb == BHB_MITIGATION_INSN
  833. .if \kpti == 1
  834. /*
  835. * Defend against branch aliasing attacks by pushing a dummy
  836. * entry onto the return stack and using a RET instruction to
  837. * enter the full-fat kernel vectors.
  838. */
  839. bl 2f
  840. b .
  841. 2:
  842. tramp_map_kernel x30
  843. alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
  844. tramp_data_read_var x30, vectors
  845. alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
  846. prfm plil1strm, [x30, #(1b - \vector_start)]
  847. alternative_else_nop_endif
  848. msr vbar_el1, x30
  849. isb
  850. .else
  851. ldr x30, =vectors
  852. .endif // \kpti == 1
  853. .if \bhb == BHB_MITIGATION_FW
  854. /*
  855. * The firmware sequence must appear before the first indirect branch,
  856. * i.e. the ret out of tramp_ventry, but it also needs the stack to be
  857. * mapped to save/restore the registers the SMC clobbers.
  858. */
  859. __mitigate_spectre_bhb_fw
  860. .endif // \bhb == BHB_MITIGATION_FW
  861. add x30, x30, #(1b - \vector_start + 4)
  862. ret
  863. .org 1b + 128 // Did we overflow the ventry slot?
  864. .endm
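/*
 * Return to EL0 via the trampoline: restore VBAR_EL1 to this CPU's vector
 * base, reload lr, switch ttbr1 back to tramp_pg_dir, recover x29 from
 * far_el1 (64-bit tasks; kernel_exit stashed it there) and eret.
 */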
  865. .macro tramp_exit, regsize = 64
  866. tramp_data_read_var x30, this_cpu_vector
  867. this_cpu_offset x29
  868. ldr x30, [x30, x29]
  869. msr vbar_el1, x30
  870. ldr lr, [sp, #S_LR]
  871. tramp_unmap_kernel x29
  872. .if \regsize == 64
  873. mrs x29, far_el1
  874. .endif
  875. add sp, sp, #S_FRAME_SIZE // restore sp
  876. eret
  877. sb
  878. .endm
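/*
 * Lay out one 2KB trampoline vector table: the first 1KB (the eight
 * current-EL slots, which never go via the trampoline) is left empty,
 * followed by the eight 64-bit and 32-bit EL0 tramp_ventry slots.
 */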
  879. .macro generate_tramp_vector, kpti, bhb
  880. .Lvector_start\@:
  881. .space 0x400
  882. .rept 4
  883. tramp_ventry .Lvector_start\@, 64, \kpti, \bhb
  884. .endr
  885. .rept 4
  886. tramp_ventry .Lvector_start\@, 32, \kpti, \bhb
  887. .endr
  888. .endm
  889. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  890. /*
  891. * Exception vectors trampoline.
  892. * The order must match __bp_harden_el1_vectors and the
  893. * arm64_bp_harden_el1_vectors enum.
  894. */
  895. .pushsection ".entry.tramp.text", "ax"
  896. .align 11
  897. SYM_CODE_START_NOALIGN(tramp_vectors)
  898. #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
  899. generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
  900. generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
  901. generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN
  902. #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
  903. generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
  904. SYM_CODE_END(tramp_vectors)
  905. SYM_CODE_START(tramp_exit_native)
  906. tramp_exit
  907. SYM_CODE_END(tramp_exit_native)
  908. SYM_CODE_START(tramp_exit_compat)
  909. tramp_exit 32
  910. SYM_CODE_END(tramp_exit_compat)
  911. .ltorg
  912. .popsection // .entry.tramp.text
  913. #ifdef CONFIG_RANDOMIZE_BASE
  914. .pushsection ".rodata", "a"
  915. .align PAGE_SHIFT
  916. SYM_DATA_START(__entry_tramp_data_start)
  917. __entry_tramp_data_vectors:
  918. .quad vectors
  919. #ifdef CONFIG_ARM_SDE_INTERFACE
  920. __entry_tramp_data___sdei_asm_handler:
  921. .quad __sdei_asm_handler
  922. #endif /* CONFIG_ARM_SDE_INTERFACE */
  923. __entry_tramp_data_this_cpu_vector:
  924. .quad this_cpu_vector
  925. SYM_DATA_END(__entry_tramp_data_start)
  926. .popsection // .rodata
  927. #endif /* CONFIG_RANDOMIZE_BASE */
  928. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  929. /*
  930. * Exception vectors for spectre mitigations on entry from EL1 when
  931. * kpti is not in use.
  932. */
  933. .macro generate_el1_vector, bhb
  934. .Lvector_start\@:
  935. kernel_ventry 1, sync_invalid // Synchronous EL1t
  936. kernel_ventry 1, irq_invalid // IRQ EL1t
  937. kernel_ventry 1, fiq_invalid // FIQ EL1t
  938. kernel_ventry 1, error_invalid // Error EL1t
  939. kernel_ventry 1, sync // Synchronous EL1h
  940. kernel_ventry 1, irq // IRQ EL1h
  941. kernel_ventry 1, fiq_invalid // FIQ EL1h
  942. kernel_ventry 1, error // Error EL1h
  943. .rept 4
  944. tramp_ventry .Lvector_start\@, 64, 0, \bhb
  945. .endr
  946. .rept 4
  947. tramp_ventry .Lvector_start\@, 32, 0, \bhb
  948. .endr
  949. .endm
  950. /* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
  951. .pushsection ".entry.text", "ax"
  952. .align 11
  953. SYM_CODE_START(__bp_harden_el1_vectors)
  954. #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
  955. generate_el1_vector bhb=BHB_MITIGATION_LOOP
  956. generate_el1_vector bhb=BHB_MITIGATION_FW
  957. generate_el1_vector bhb=BHB_MITIGATION_INSN
  958. #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
  959. SYM_CODE_END(__bp_harden_el1_vectors)
  960. .popsection
  961. /*
  962. * Register switch for AArch64. The callee-saved registers need to be saved
  963. * and restored. On entry:
  964. * x0 = previous task_struct (must be preserved across the switch)
  965. * x1 = next task_struct
  966. * Previous and next are guaranteed not to be the same.
  967. *
  968. */
  969. SYM_FUNC_START(cpu_switch_to)
  970. mov x10, #THREAD_CPU_CONTEXT
  971. add x8, x0, x10
  972. mov x9, sp
  973. stp x19, x20, [x8], #16 // store callee-saved registers
  974. stp x21, x22, [x8], #16
  975. stp x23, x24, [x8], #16
  976. stp x25, x26, [x8], #16
  977. stp x27, x28, [x8], #16
  978. stp x29, x9, [x8], #16
  979. str lr, [x8]
  980. add x8, x1, x10
  981. ldp x19, x20, [x8], #16 // restore callee-saved registers
  982. ldp x21, x22, [x8], #16
  983. ldp x23, x24, [x8], #16
  984. ldp x25, x26, [x8], #16
  985. ldp x27, x28, [x8], #16
  986. ldp x29, x9, [x8], #16
  987. ldr lr, [x8]
  988. mov sp, x9
  989. msr sp_el0, x1
  990. ptrauth_keys_install_kernel x1, x8, x9, x10
  991. scs_save x0, x8
  992. scs_load x1, x8
  993. ret
  994. SYM_FUNC_END(cpu_switch_to)
  995. NOKPROBE(cpu_switch_to)
  996. /*
  997. * This is how we return from a fork.
  998. */
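/*
 * x19/x20 are set up by copy_thread(): for a kernel thread x19 is the
 * thread function and x20 its argument; for a user task x19 is zero.
 */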
  999. SYM_CODE_START(ret_from_fork)
  1000. bl schedule_tail
  1001. cbz x19, 1f // not a kernel thread
  1002. mov x0, x20
  1003. blr x19
  1004. 1: get_current_task tsk
  1005. b ret_to_user
  1006. SYM_CODE_END(ret_from_fork)
  1007. NOKPROBE(ret_from_fork)
  1008. #ifdef CONFIG_ARM_SDE_INTERFACE
  1009. #include <asm/sdei.h>
  1010. #include <uapi/linux/arm_sdei.h>
  1011. .macro sdei_handler_exit exit_mode
  1012. /* On success, this call never returns... */
  1013. cmp \exit_mode, #SDEI_EXIT_SMC
  1014. b.ne 99f
  1015. smc #0
  1016. b .
  1017. 99: hvc #0
  1018. b .
  1019. .endm
  1020. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  1021. /*
  1022. * The regular SDEI entry point may have been unmapped along with the rest of
  1023. * the kernel. This trampoline restores the kernel mapping to make the x1 memory
  1024. * argument accessible.
  1025. *
  1026. * This clobbers x4, __sdei_handler() will restore this from firmware's
  1027. * copy.
  1028. */
  1029. .ltorg
  1030. .pushsection ".entry.tramp.text", "ax"
  1031. SYM_CODE_START(__sdei_asm_entry_trampoline)
  1032. mrs x4, ttbr1_el1
  1033. tbz x4, #USER_ASID_BIT, 1f
  1034. tramp_map_kernel tmp=x4
  1035. isb
  1036. mov x4, xzr
  1037. /*
  1038. * Use reg->interrupted_regs.addr_limit to remember whether to unmap
  1039. * the kernel on exit.
  1040. */
  1041. 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
  1042. tramp_data_read_var x4, __sdei_asm_handler
  1043. br x4
  1044. SYM_CODE_END(__sdei_asm_entry_trampoline)
  1045. NOKPROBE(__sdei_asm_entry_trampoline)
  1046. /*
  1047. * Make the exit call and restore the original ttbr1_el1
  1048. *
  1049. * x0 & x1: setup for the exit API call
  1050. * x2: exit_mode
  1051. * x4: struct sdei_registered_event argument from registration time.
  1052. */
  1053. SYM_CODE_START(__sdei_asm_exit_trampoline)
  1054. ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
  1055. cbnz x4, 1f
  1056. tramp_unmap_kernel tmp=x4
  1057. 1: sdei_handler_exit exit_mode=x2
  1058. SYM_CODE_END(__sdei_asm_exit_trampoline)
  1059. NOKPROBE(__sdei_asm_exit_trampoline)
  1060. .ltorg
  1061. .popsection // .entry.tramp.text
  1062. #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
  1063. /*
  1064. * Software Delegated Exception entry point.
  1065. *
  1066. * x0: Event number
  1067. * x1: struct sdei_registered_event argument from registration time.
  1068. * x2: interrupted PC
  1069. * x3: interrupted PSTATE
  1070. * x4: maybe clobbered by the trampoline
  1071. *
  1072. * Firmware has preserved x0->x17 for us, we must save/restore the rest to
  1073. * follow SMC-CC. We save (or retrieve) all the registers as the handler may
  1074. * want them.
  1075. */
  1076. SYM_CODE_START(__sdei_asm_handler)
  1077. stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
  1078. stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
  1079. stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
  1080. stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
  1081. stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
  1082. stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
  1083. stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
  1084. stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
  1085. stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
  1086. stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
  1087. stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
  1088. stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
  1089. stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
  1090. stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
  1091. mov x4, sp
  1092. stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
  1093. mov x19, x1
  1094. #if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
  1095. ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
  1096. #endif
  1097. #ifdef CONFIG_VMAP_STACK
  1098. /*
  1099. * entry.S may have been using sp as a scratch register, find whether
  1100. * this is a normal or critical event and switch to the appropriate
  1101. * stack for this CPU.
  1102. */
  1103. cbnz w4, 1f
  1104. ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
  1105. b 2f
  1106. 1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
  1107. 2: mov x6, #SDEI_STACK_SIZE
  1108. add x5, x5, x6
  1109. mov sp, x5
  1110. #endif
  1111. #ifdef CONFIG_SHADOW_CALL_STACK
  1112. /* Use a separate shadow call stack for normal and critical events */
  1113. cbnz w4, 3f
  1114. ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
  1115. b 4f
  1116. 3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
  1117. 4:
  1118. #endif
  1119. /*
  1120. * We may have interrupted userspace, or a guest, or exit-from or
  1121. * return-to either of these. We can't trust sp_el0, restore it.
  1122. */
  1123. mrs x28, sp_el0
  1124. ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
  1125. msr sp_el0, x0
  1126. /* If we interrupted the kernel point to the previous stack/frame. */
  1127. and x0, x3, #0xc
  1128. mrs x1, CurrentEL
  1129. cmp x0, x1
  1130. csel x29, x29, xzr, eq // fp, or zero
  1131. csel x4, x2, xzr, eq // elr, or zero
  1132. stp x29, x4, [sp, #-16]!
  1133. mov x29, sp
  1134. add x0, x19, #SDEI_EVENT_INTREGS
  1135. mov x1, x19
  1136. bl __sdei_handler
  1137. msr sp_el0, x28
  1138. /* restore regs >x17 that we clobbered */
  1139. mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline
  1140. ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
  1141. ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
  1142. ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
  1143. mov sp, x1
  1144. mov x1, x0 // address to complete_and_resume
  1145. /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
  1146. cmp x0, #1
  1147. mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
  1148. mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
  1149. csel x0, x2, x3, ls
  1150. ldr_l x2, sdei_exit_mode
  1151. alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
  1152. sdei_handler_exit exit_mode=x2
  1153. alternative_else_nop_endif
  1154. #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  1155. tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
  1156. br x5
  1157. #endif
  1158. SYM_CODE_END(__sdei_asm_handler)
  1159. NOKPROBE(__sdei_asm_handler)
  1160. #endif /* CONFIG_ARM_SDE_INTERFACE */