entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
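/*
 * Without kernel preemption, a return to kernel mode never needs to
 * reschedule, so resume_kernel is simply an alias for restore_all.
 */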
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
	 * register will contain 0, and we should continue on the current TP.
	 */
	csrrw tp, CSR_SCRATCH, tp
	bnez tp, _save_context

_restore_kernel_tpsp:
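	/*
	 * The csrrw above swapped tp with CSR_SCRATCH, so the kernel tp we
	 * arrived with is now sitting in the scratch CSR; read it back.
	 */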
	csrr tp, CSR_SCRATCH
	REG_S sp, TASK_TI_KERNEL_SP(tp)

_save_context:
	REG_S sp, TASK_TI_USER_SP(tp)
	REG_L sp, TASK_TI_KERNEL_SP(tp)
	addi sp, sp, -(PT_SIZE_ON_STACK)
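	/*
	 * sp now points at a struct pt_regs frame on the kernel stack; spill
	 * the GPRs into it. x2 (sp) and x4 (tp) are stored later from the
	 * copies held in thread_info and CSR_SCRATCH.
	 */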
	REG_S x1, PT_RA(sp)
	REG_S x3, PT_GP(sp)
	REG_S x5, PT_T0(sp)
	REG_S x6, PT_T1(sp)
	REG_S x7, PT_T2(sp)
	REG_S x8, PT_S0(sp)
	REG_S x9, PT_S1(sp)
	REG_S x10, PT_A0(sp)
	REG_S x11, PT_A1(sp)
	REG_S x12, PT_A2(sp)
	REG_S x13, PT_A3(sp)
	REG_S x14, PT_A4(sp)
	REG_S x15, PT_A5(sp)
	REG_S x16, PT_A6(sp)
	REG_S x17, PT_A7(sp)
	REG_S x18, PT_S2(sp)
	REG_S x19, PT_S3(sp)
	REG_S x20, PT_S4(sp)
	REG_S x21, PT_S5(sp)
	REG_S x22, PT_S6(sp)
	REG_S x23, PT_S7(sp)
	REG_S x24, PT_S8(sp)
	REG_S x25, PT_S9(sp)
	REG_S x26, PT_S10(sp)
	REG_S x27, PT_S11(sp)
	REG_S x28, PT_T3(sp)
	REG_S x29, PT_T4(sp)
	REG_S x30, PT_T5(sp)
	REG_S x31, PT_T6(sp)
	/*
	 * Disable user-mode memory access as it should only be set in the
	 * actual user copy routines.
	 *
	 * Disable the FPU to detect illegal usage of floating point in kernel
	 * space.
	 */
	li t0, SR_SUM | SR_FS | SR_VS

	REG_L s0, TASK_TI_USER_SP(tp)
	csrrc s1, CSR_STATUS, t0
	csrr s2, CSR_EPC
	csrr s3, CSR_TVAL
	csrr s4, CSR_CAUSE
	csrr s5, CSR_SCRATCH
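	/*
	 * s5 now holds the tp value that was live when the trap was taken:
	 * the user tp parked in CSR_SCRATCH by the csrrw at entry, or the
	 * kernel tp left there by that same swap on the in-kernel path.
	 */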
	REG_S s0, PT_SP(sp)
	REG_S s1, PT_STATUS(sp)
	REG_S s2, PT_EPC(sp)
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)

	/*
	 * Set the scratch register to 0, so that if a recursive exception
	 * occurs, the exception vector knows it came from the kernel
	 */
	csrw CSR_SCRATCH, x0

	/* Load the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif

#ifdef CONFIG_CONTEXT_TRACKING
	/* If previous state is in user mode, call context_tracking_user_exit. */
	li a0, SR_PP
	and a0, s1, a0
	bnez a0, skip_context_tracking
	call context_tracking_user_exit
skip_context_tracking:
#endif

	/*
	 * MSB of cause differentiates between
	 * interrupts and exceptions
	 */
	bge s4, zero, 1f

	la ra, ret_from_exception

	/* Handle interrupts */
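	/*
	 * handle_arch_irq is the function pointer installed by the irqchip
	 * driver; ra was set to ret_from_exception above, so the handler
	 * returns straight to the exit path.
	 */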
	move a0, sp /* pt_regs */
	la a1, handle_arch_irq
	REG_L a1, (a1)
	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_on
	REG_L s1, PT_STATUS(sp)
#endif
	csrs CSR_STATUS, SR_IE

1:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	la ra, ret_from_exception

	/* Handle syscalls */
	li t0, EXC_SYSCALL
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s4, PT_CAUSE(sp)
#endif
	beq s4, t0, handle_syscall

	/* Handle other exceptions */
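	/*
	 * Shift the exception code left by RISCV_LGPTR (log2 of the pointer
	 * size) to turn it into a byte offset into the table of handler
	 * pointers below.
	 */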
	slli t0, s4, RISCV_LGPTR
	la t1, excp_vect_table
	la t2, excp_vect_table_end
	move a0, sp /* pt_regs */
	add t0, t1, t0
	/* Check if exception code lies within bounds */
	bgeu t0, t2, 1f
	REG_L t0, 0(t0)
	jr t0
1:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force-enable interrupts here because
	 * handle_exception did not set SR_IE, as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
#endif
	/* save the initial A0 value (needed in signal handlers) */
	REG_S a0, PT_ORIG_A0(sp)

	/*
	 * Advance SEPC to avoid executing the original
	 * scall instruction on sret
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s2, PT_EPC(sp)
#endif
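	/*
	 * ecall is always a 4-byte instruction, even with the C extension
	 * enabled, so a fixed increment of the saved EPC is safe here.
	 */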
	addi s2, s2, 0x4
	REG_S s2, PT_EPC(sp)

	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_enter

check_syscall_nr:
	/* Check to make sure we don't jump to a bogus syscall number. */
	li t0, __NR_syscalls
	la s0, sys_ni_syscall
	/*
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bgeu a7, t0, 3f
#ifdef CONFIG_COMPAT
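	/*
	 * Extract the UXL field from the saved status value; UXL_32 marks a
	 * 32-bit (compat) user task, which must be dispatched through
	 * compat_sys_call_table instead.
	 */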
	REG_L s0, PT_STATUS(sp)
	srli s0, s0, SR_UXL_SHIFT
	andi s0, s0, (SR_UXL >> SR_UXL_SHIFT)
	li t0, (SR_UXL_32 >> SR_UXL_SHIFT)
	sub t0, s0, t0
	bnez t0, 1f

	/* Call compat_syscall */
	la s0, compat_sys_call_table
	j 2f
1:
#endif
	/* Call syscall */
	la s0, sys_call_table
2:
	slli t0, a7, RISCV_LGPTR
	add s0, s0, t0
	REG_L s0, 0(s0)
3:
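	/*
	 * s0 holds the handler to call: the table entry selected above, or
	 * sys_ni_syscall for out-of-range syscall numbers.
	 */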
	jalr s0

ret_from_syscall:
	/* Set user a0 to kernel a0 */
	REG_S a0, PT_A0(sp)
	/*
	 * We didn't execute the actual syscall: seccomp has already set the
	 * return value in the current task's pt_regs (if it was configured
	 * with SECCOMP_RET_ERRNO/TRACE).
	 */
ret_from_syscall_rejected:
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_WORK
	bnez t0, handle_syscall_trace_exit

ENTRY(ret_from_exception)
	REG_L s0, PT_STATUS(sp)
	csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
	call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
	/* the MPP value is too large to be used as an immediate arg for addi */
	li t0, SR_MPP
	and s0, s0, t0
#else
	andi s0, s0, SR_SPP
#endif
	bnez s0, resume_kernel

resume_userspace:
	/* Interrupts must be disabled here so flags are checked atomically */
	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
	andi s1, s0, _TIF_WORK_MASK
	bnez s1, work_pending

#ifdef CONFIG_CONTEXT_TRACKING
	call context_tracking_user_enter
#endif

	/* Save unwound kernel stack pointer in thread_info */
	addi s0, sp, PT_SIZE_ON_STACK
	REG_S s0, TASK_TI_KERNEL_SP(tp)
	/*
	 * Save TP into the scratch register, so we can find the kernel data
	 * structures again.
	 */
	csrw CSR_SCRATCH, tp

restore_all:
#ifdef CONFIG_TRACE_IRQFLAGS
	REG_L s1, PT_STATUS(sp)
	andi t0, s1, SR_PIE
	beqz t0, 1f
	call __trace_hardirqs_on
	j 2f
1:
	call __trace_hardirqs_off
2:
#endif
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)
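	/*
	 * The store-conditional above writes back the value just loaded and
	 * discards the success flag (rd = x0), so its only lasting effect is
	 * to clear any dangling reservation, as described above.
	 */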
	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)
	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
	sret
#endif

#if IS_ENABLED(CONFIG_PREEMPTION)
resume_kernel:
	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
	bnez s0, restore_all
	REG_L s0, TASK_TI_FLAGS(tp)
	andi s0, s0, _TIF_NEED_RESCHED
	beqz s0, restore_all
	call preempt_schedule_irq
	j restore_all
#endif
work_pending:
	/* Enter slow path for supplementary processing */
	la ra, ret_from_exception
	andi s1, s0, _TIF_NEED_RESCHED
	bnez s1, work_resched

work_notifysig:
	/* Handle pending signals and notify-resume requests */
	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume

work_resched:
	tail schedule

/* Slow paths for ptrace. */
handle_syscall_trace_enter:
	move a0, sp
	call do_syscall_trace_enter
	move t0, a0
	REG_L a0, PT_A0(sp)
	REG_L a1, PT_A1(sp)
	REG_L a2, PT_A2(sp)
	REG_L a3, PT_A3(sp)
	REG_L a4, PT_A4(sp)
	REG_L a5, PT_A5(sp)
	REG_L a6, PT_A6(sp)
	REG_L a7, PT_A7(sp)
	bnez t0, ret_from_syscall_rejected
	j check_syscall_nr

handle_syscall_trace_exit:
	move a0, sp
	call do_syscall_trace_exit
	j ret_from_exception
END(handle_exception)

ENTRY(ret_from_fork)
	la ra, ret_from_exception
	tail schedule_tail
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	call schedule_tail
	/* Call fn(arg) */
	la ra, ret_from_exception
	move a0, s1
	jr s0
ENDPROC(ret_from_kernel_thread)
/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *   a0: previous task_struct (must be preserved across the switch)
 *   a1: next task_struct
 *
 * The values of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
ENTRY(__switch_to)
	/* Save context into prev->thread */
	li a4, TASK_THREAD_RA
	add a3, a0, a4
	add a4, a1, a4
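	/*
	 * a3 and a4 now point at prev->thread.ra and next->thread.ra; the
	 * TASK_THREAD_*_RA offsets below are all relative to thread.ra.
	 */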
	REG_S ra, TASK_THREAD_RA_RA(a3)
	REG_S sp, TASK_THREAD_SP_RA(a3)
	REG_S s0, TASK_THREAD_S0_RA(a3)
	REG_S s1, TASK_THREAD_S1_RA(a3)
	REG_S s2, TASK_THREAD_S2_RA(a3)
	REG_S s3, TASK_THREAD_S3_RA(a3)
	REG_S s4, TASK_THREAD_S4_RA(a3)
	REG_S s5, TASK_THREAD_S5_RA(a3)
	REG_S s6, TASK_THREAD_S6_RA(a3)
	REG_S s7, TASK_THREAD_S7_RA(a3)
	REG_S s8, TASK_THREAD_S8_RA(a3)
	REG_S s9, TASK_THREAD_S9_RA(a3)
	REG_S s10, TASK_THREAD_S10_RA(a3)
	REG_S s11, TASK_THREAD_S11_RA(a3)
	/* Restore context from next->thread */
	REG_L ra, TASK_THREAD_RA_RA(a4)
	REG_L sp, TASK_THREAD_SP_RA(a4)
	REG_L s0, TASK_THREAD_S0_RA(a4)
	REG_L s1, TASK_THREAD_S1_RA(a4)
	REG_L s2, TASK_THREAD_S2_RA(a4)
	REG_L s3, TASK_THREAD_S3_RA(a4)
	REG_L s4, TASK_THREAD_S4_RA(a4)
	REG_L s5, TASK_THREAD_S5_RA(a4)
	REG_L s6, TASK_THREAD_S6_RA(a4)
	REG_L s7, TASK_THREAD_S7_RA(a4)
	REG_L s8, TASK_THREAD_S8_RA(a4)
	REG_L s9, TASK_THREAD_S9_RA(a4)
	REG_L s10, TASK_THREAD_S10_RA(a4)
	REG_L s11, TASK_THREAD_S11_RA(a4)
	/* Swap the CPU entry around. */
	lw a3, TASK_TI_CPU(a0)
	lw a4, TASK_TI_CPU(a1)
	sw a3, TASK_TI_CPU(a1)
	sw a4, TASK_TI_CPU(a0)
	/* The offset of thread_info in task_struct is zero. */
	move tp, a1
	ret
ENDPROC(__switch_to)

#ifndef CONFIG_MMU
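/*
 * Without an MMU there are no page faults to service, so route those
 * cause codes to the unknown-trap handler.
 */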
#define do_page_fault do_trap_unknown
#endif

	.section ".rodata"
	.align LGREG
/* Exception vector table */
ENTRY(excp_vect_table)
	RISCV_PTR do_trap_insn_misaligned
	RISCV_PTR do_trap_insn_fault
	RISCV_PTR do_trap_insn_illegal
	RISCV_PTR do_trap_break
	RISCV_PTR do_trap_load_misaligned
	RISCV_PTR do_trap_load_fault
	RISCV_PTR do_trap_store_misaligned
	RISCV_PTR do_trap_store_fault
	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
	RISCV_PTR do_trap_ecall_s
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_trap_ecall_m
	RISCV_PTR do_page_fault   /* instruction page fault */
	RISCV_PTR do_page_fault   /* load page fault */
	RISCV_PTR do_trap_unknown
	RISCV_PTR do_page_fault   /* store page fault */
excp_vect_table_end:
END(excp_vect_table)

#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
	li a7, __NR_rt_sigreturn
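	/* scall is the legacy assembler mnemonic for ecall */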
	scall
END(__user_rt_sigreturn)
#endif