fw_base.S

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_encoding.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_trap.h>

#define BOOT_STATUS_RELOCATE_DONE	1
#define BOOT_STATUS_BOOT_HART_DONE	2

.macro	MOV_3R __d0, __s0, __d1, __s1, __d2, __s2
	add	\__d0, \__s0, zero
	add	\__d1, \__s1, zero
	add	\__d2, \__s2, zero
.endm

.macro	MOV_5R __d0, __s0, __d1, __s1, __d2, __s2, __d3, __s3, __d4, __s4
	add	\__d0, \__s0, zero
	add	\__d1, \__s1, zero
	add	\__d2, \__s2, zero
	add	\__d3, \__s3, zero
	add	\__d4, \__s4, zero
.endm

/*
 * If __start_reg <= __check_reg and __check_reg < __end_reg then
 * jump to __jump_lable
 */
.macro	BRANGE __start_reg, __end_reg, __check_reg, __jump_lable
	blt	\__check_reg, \__start_reg, 999f
	bge	\__check_reg, \__end_reg, 999f
	j	\__jump_lable
999:
.endm
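
/*
 * Example (taken from the relocation code below):
 * "BRANGE t2, t1, t3, _start_hang" branches to _start_hang when
 * t2 <= t3 < t1, and falls through otherwise.
 */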

	.section .entry, "ax", %progbits
	.align 3
	.globl _start
	.globl _start_warm
_start:
	/* Find preferred boot HART id */
	MOV_3R	s0, a0, s1, a1, s2, a2
	call	fw_boot_hart
	add	a6, a0, zero
	MOV_3R	a0, s0, a1, s1, a2, s2
	li	a7, -1
	beq	a6, a7, _try_lottery
	/* Jump to relocation wait loop if we are not boot hart */
	bne	a0, a6, _wait_relocate_copy_done
_try_lottery:
	/* Jump to relocation wait loop if we don't get relocation lottery */
	la	a6, _relocate_lottery
	li	a7, 1
	amoadd.w a6, a7, (a6)
	bnez	a6, _wait_relocate_copy_done

	/* Save load address */
	la	t0, _load_start
	la	t1, _start
	REG_S	t1, 0(t0)

	/* Relocate if load address != link address */
_relocate:
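	/*
	 * Register usage in the relocation code below:
	 *   t0 -> link start address
	 *   t1 -> link end address
	 *   t2 -> load start address
	 *   t3 -> load end address (link size added to load start)
	 *   t4 -> _relocate_done at the link address
	 */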
	la	t0, _link_start
	REG_L	t0, 0(t0)
	la	t1, _link_end
	REG_L	t1, 0(t1)
	la	t2, _load_start
	REG_L	t2, 0(t2)
	sub	t3, t1, t0
	add	t3, t3, t2
	beq	t0, t2, _relocate_done
	la	t4, _relocate_done
	sub	t4, t4, t2
	add	t4, t4, t0
	blt	t2, t0, _relocate_copy_to_upper
_relocate_copy_to_lower:
	ble	t1, t2, _relocate_copy_to_lower_loop
	la	t3, _relocate_lottery
	BRANGE	t2, t1, t3, _start_hang
	la	t3, _boot_status
	BRANGE	t2, t1, t3, _start_hang
	la	t3, _relocate
	la	t5, _relocate_done
	BRANGE	t2, t1, t3, _start_hang
	BRANGE	t2, t1, t5, _start_hang
	BRANGE	t3, t5, t2, _start_hang
_relocate_copy_to_lower_loop:
	REG_L	t3, 0(t2)
	REG_S	t3, 0(t0)
	add	t0, t0, __SIZEOF_POINTER__
	add	t2, t2, __SIZEOF_POINTER__
	blt	t0, t1, _relocate_copy_to_lower_loop
	jr	t4
_relocate_copy_to_upper:
	ble	t3, t0, _relocate_copy_to_upper_loop
	la	t2, _relocate_lottery
	BRANGE	t0, t3, t2, _start_hang
	la	t2, _boot_status
	BRANGE	t0, t3, t2, _start_hang
	la	t2, _relocate
	la	t5, _relocate_done
	BRANGE	t0, t3, t2, _start_hang
	BRANGE	t0, t3, t5, _start_hang
	BRANGE	t2, t5, t0, _start_hang
_relocate_copy_to_upper_loop:
	add	t3, t3, -__SIZEOF_POINTER__
	add	t1, t1, -__SIZEOF_POINTER__
	REG_L	t2, 0(t3)
	REG_S	t2, 0(t1)
	blt	t0, t1, _relocate_copy_to_upper_loop
	jr	t4
_wait_relocate_copy_done:
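	/*
	 * HARTs that are not the boot HART, or that lost the relocation
	 * lottery, wait here.  If the firmware already runs at its link
	 * address we go straight to _wait_for_boot_hart; otherwise we
	 * poll the load-address copy of _boot_status until the boot HART
	 * finishes the relocation copy, then jump to the link-address
	 * copy of _wait_for_boot_hart.
	 */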
	la	t0, _start
	la	t1, _link_start
	REG_L	t1, 0(t1)
	beq	t0, t1, _wait_for_boot_hart
	la	t2, _boot_status
	la	t3, _wait_for_boot_hart
	sub	t3, t3, t0
	add	t3, t3, t1
1:
	/* Wait for the relocation copy to be done (_boot_status == 1) */
	li	t4, BOOT_STATUS_RELOCATE_DONE
	REG_L	t5, 0(t2)
	/* Reduce the bus traffic so that boot hart may proceed faster */
	nop
	nop
	nop
	bgt	t4, t5, 1b
	jr	t3
_relocate_done:

	/*
	 * Mark relocate copy done
	 * Use _boot_status copy relative to the load address
	 */
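	/*
	 * We run at the link address here, but waiting HARTs poll the
	 * copy of _boot_status at the load address:
	 *   load-address _boot_status = _boot_status - _link_start + _load_start
	 */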
	la	t0, _boot_status
	la	t1, _link_start
	REG_L	t1, 0(t1)
	la	t2, _load_start
	REG_L	t2, 0(t2)
	sub	t0, t0, t1
	add	t0, t0, t2
	li	t1, BOOT_STATUS_RELOCATE_DONE
	REG_S	t1, 0(t0)
	fence	rw, rw

	/* At this point we are running from link address */

	/* Reset all registers for boot HART */
	li	ra, 0
	call	_reset_regs

	/* Zero-out BSS */
	la	s4, _bss_start
	la	s5, _bss_end
_bss_zero:
	REG_S	zero, (s4)
	add	s4, s4, __SIZEOF_POINTER__
	blt	s4, s5, _bss_zero

	/* Setup temporary trap handler */
	la	s4, _start_hang
	csrw	CSR_MTVEC, s4

	/* Setup temporary stack */
	la	s4, _fw_end
	li	s5, (SBI_SCRATCH_SIZE * 2)
	add	sp, s4, s5

	/* Allow main firmware to save info */
	MOV_5R	s0, a0, s1, a1, s2, a2, s3, a3, s4, a4
	call	fw_save_info
	MOV_5R	a0, s0, a1, s1, a2, s2, a3, s3, a4, s4

#ifdef FW_FDT_PATH
	/* Override previous arg1 */
	la	a1, fw_fdt_bin
#endif

	/*
	 * Initialize platform
	 * Note: The a0 to a4 registers passed to the
	 * firmware are parameters to this function.
	 */
	MOV_5R	s0, a0, s1, a1, s2, a2, s3, a3, s4, a4
	call	fw_platform_init
	add	t0, a0, zero
	MOV_5R	a0, s0, a1, s1, a2, s2, a3, s3, a4, s4
	add	a1, t0, zero

	/* Preload HART details
	 * s7 -> HART Count
	 * s8 -> HART Stack Size
	 */
	la	a4, platform
#if __riscv_xlen == 64
	lwu	s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4)
	lwu	s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4)
#else
	lw	s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4)
	lw	s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4)
#endif

	/* Setup scratch space for all the HARTs */
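	/*
	 * The per-HART region ends at _fw_end + (HART count * stack size).
	 * HART index i owns the slice ending at
	 * _fw_end + (HART count - i) * stack size, and its scratch space
	 * occupies the topmost SBI_SCRATCH_SIZE bytes of that slice.
	 */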
	la	tp, _fw_end
	mul	a5, s7, s8
	add	tp, tp, a5
	/* Keep a copy of tp */
	add	t3, tp, zero
	/* Counter */
	li	t2, 1
	/* hartid 0 is mandated by ISA */
	li	t1, 0
_scratch_init:
	add	tp, t3, zero
	mul	a5, s8, t1
	sub	tp, tp, a5
	li	a5, SBI_SCRATCH_SIZE
	sub	tp, tp, a5

	/* Initialize scratch space */
	/* Store fw_start and fw_size in scratch space */
	la	a4, _fw_start
	la	a5, _fw_end
	mul	t0, s7, s8
	add	a5, a5, t0
	sub	a5, a5, a4
	REG_S	a4, SBI_SCRATCH_FW_START_OFFSET(tp)
	REG_S	a5, SBI_SCRATCH_FW_SIZE_OFFSET(tp)
	/* Store next arg1 in scratch space */
	MOV_3R	s0, a0, s1, a1, s2, a2
	call	fw_next_arg1
	REG_S	a0, SBI_SCRATCH_NEXT_ARG1_OFFSET(tp)
	MOV_3R	a0, s0, a1, s1, a2, s2
	/* Store next address in scratch space */
	MOV_3R	s0, a0, s1, a1, s2, a2
	call	fw_next_addr
	REG_S	a0, SBI_SCRATCH_NEXT_ADDR_OFFSET(tp)
	MOV_3R	a0, s0, a1, s1, a2, s2
	/* Store next mode in scratch space */
	MOV_3R	s0, a0, s1, a1, s2, a2
	call	fw_next_mode
	REG_S	a0, SBI_SCRATCH_NEXT_MODE_OFFSET(tp)
	MOV_3R	a0, s0, a1, s1, a2, s2
	/* Store warm_boot address in scratch space */
	la	a4, _start_warm
	REG_S	a4, SBI_SCRATCH_WARMBOOT_ADDR_OFFSET(tp)
	/* Store platform address in scratch space */
	la	a4, platform
	REG_S	a4, SBI_SCRATCH_PLATFORM_ADDR_OFFSET(tp)
	/* Store hartid-to-scratch function address in scratch space */
	la	a4, _hartid_to_scratch
	REG_S	a4, SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET(tp)
	/* Clear tmp0 in scratch space */
	REG_S	zero, SBI_SCRATCH_TMP0_OFFSET(tp)
	/* Store firmware options in scratch space */
	MOV_3R	s0, a0, s1, a1, s2, a2
#ifdef FW_OPTIONS
	li	a0, FW_OPTIONS
#else
	call	fw_options
#endif
	REG_S	a0, SBI_SCRATCH_OPTIONS_OFFSET(tp)
	MOV_3R	a0, s0, a1, s1, a2, s2
	/* Move to next scratch space */
	add	t1, t1, t2
	blt	t1, s7, _scratch_init

	/*
	 * Relocate Flattened Device Tree (FDT)
	 * source FDT address = previous arg1
	 * destination FDT address = next arg1
	 *
	 * Note: We will preserve a0 and a1 passed by
	 * the previous booting stage.
	 */
	beqz	a1, _fdt_reloc_done
	/* Mask values in a3 and a4 */
	li	a3, ~(__SIZEOF_POINTER__ - 1)
	li	a4, 0xff
	/* t1 = destination FDT start address */
	MOV_3R	s0, a0, s1, a1, s2, a2
	call	fw_next_arg1
	add	t1, a0, zero
	MOV_3R	a0, s0, a1, s1, a2, s2
	beqz	t1, _fdt_reloc_done
	beq	t1, a1, _fdt_reloc_done
	and	t1, t1, a3
	/* t0 = source FDT start address */
	add	t0, a1, zero
	and	t0, t0, a3
	/* t2 = source FDT size in big-endian */
#if __riscv_xlen == 64
	lwu	t2, 4(t0)
#else
	lw	t2, 4(t0)
#endif
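	/*
	 * The FDT header stores totalsize as a big-endian value, so the
	 * little-endian load above reads its bytes reversed.  The
	 * shift/mask sequence below swaps them back, e.g. header bytes
	 * 00 01 23 28 are loaded as 0x28230100 and become 0x00012328.
	 */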
	/* t3 = bit[15:8] of FDT size */
	add	t3, t2, zero
	srli	t3, t3, 16
	and	t3, t3, a4
	slli	t3, t3, 8
	/* t4 = bit[23:16] of FDT size */
	add	t4, t2, zero
	srli	t4, t4, 8
	and	t4, t4, a4
	slli	t4, t4, 16
	/* t5 = bit[31:24] of FDT size */
	add	t5, t2, zero
	and	t5, t5, a4
	slli	t5, t5, 24
	/* t2 = bit[7:0] of FDT size */
	srli	t2, t2, 24
	and	t2, t2, a4
	/* t2 = FDT size in little-endian */
	or	t2, t2, t3
	or	t2, t2, t4
	or	t2, t2, t5
	/* t2 = destination FDT end address */
	add	t2, t1, t2
	/* FDT copy loop */
	ble	t2, t1, _fdt_reloc_done
_fdt_reloc_again:
	REG_L	t3, 0(t0)
	REG_S	t3, 0(t1)
	add	t0, t0, __SIZEOF_POINTER__
	add	t1, t1, __SIZEOF_POINTER__
	blt	t1, t2, _fdt_reloc_again
_fdt_reloc_done:

	/* mark boot hart done */
	li	t0, BOOT_STATUS_BOOT_HART_DONE
	la	t1, _boot_status
	REG_S	t0, 0(t1)
	fence	rw, rw
	j	_start_warm

	/* waiting for boot hart to be done (_boot_status == 2) */
_wait_for_boot_hart:
	li	t0, BOOT_STATUS_BOOT_HART_DONE
	la	t1, _boot_status
	REG_L	t1, 0(t1)
	/* Reduce the bus traffic so that boot hart may proceed faster */
	nop
	nop
	nop
	bne	t0, t1, _wait_for_boot_hart

_start_warm:
	/* Reset all registers for non-boot HARTs */
	li	ra, 0
	call	_reset_regs

	/* Disable and clear all interrupts */
	csrw	CSR_MIE, zero
	csrw	CSR_MIP, zero

	/* Find HART count and HART stack size */
	la	a4, platform
#if __riscv_xlen == 64
	lwu	s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4)
	lwu	s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4)
#else
	lw	s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4)
	lw	s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4)
#endif
	REG_L	s9, SBI_PLATFORM_HART_INDEX2ID_OFFSET(a4)

	/* Find HART id */
	csrr	s6, CSR_MHARTID

	/* Find HART index */
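	/*
	 * If the platform provides a hart_index2id table (s9 != 0), the
	 * loop below searches it for our mhartid and uses the matching
	 * table position as the HART index; without a table the hartid
	 * itself is the index.  An index beyond the HART count leads to
	 * _start_hang.
	 */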
	beqz	s9, 3f
	li	a4, 0
1:
#if __riscv_xlen == 64
	lwu	a5, (s9)
#else
	lw	a5, (s9)
#endif
	beq	a5, s6, 2f
	add	s9, s9, 4
	add	a4, a4, 1
	blt	a4, s7, 1b
	li	a4, -1
2:	add	s6, a4, zero
3:	bge	s6, s7, _start_hang

	/* Find the scratch space based on HART index */
	la	tp, _fw_end
	mul	a5, s7, s8
	add	tp, tp, a5
	mul	a5, s8, s6
	sub	tp, tp, a5
	li	a5, SBI_SCRATCH_SIZE
	sub	tp, tp, a5

	/* update the mscratch */
	csrw	CSR_MSCRATCH, tp

	/* Setup stack */
	add	sp, tp, zero

	/* Setup trap handler */
	la	a4, _trap_handler
	csrw	CSR_MTVEC, a4

	/* Initialize SBI runtime */
	csrr	a0, CSR_MSCRATCH
	call	sbi_init

	/* We don't expect to reach here hence just hang */
	j	_start_hang

	.align 3
_relocate_lottery:
	RISCV_PTR	0
_boot_status:
	RISCV_PTR	0
_load_start:
	RISCV_PTR	_fw_start
_link_start:
	RISCV_PTR	_fw_start
_link_end:
	RISCV_PTR	_fw_reloc_end

	.section .entry, "ax", %progbits
	.align 3
	.globl _hartid_to_scratch
_hartid_to_scratch:
	/*
	 * a0 -> HART ID (passed by caller)
	 * a1 -> HART Index (passed by caller)
	 * t0 -> HART Stack Size
	 * t1 -> HART Stack End
	 * t2 -> Temporary
	 */
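	/*
	 * The scratch space for a given HART index occupies the topmost
	 * SBI_SCRATCH_SIZE bytes of that HART's stack slice:
	 *   scratch = _fw_end + (HART count - HART index) * stack size
	 *             - SBI_SCRATCH_SIZE
	 */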
	la	t2, platform
#if __riscv_xlen == 64
	lwu	t0, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(t2)
	lwu	t2, SBI_PLATFORM_HART_COUNT_OFFSET(t2)
#else
	lw	t0, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(t2)
	lw	t2, SBI_PLATFORM_HART_COUNT_OFFSET(t2)
#endif
	sub	t2, t2, a1
	mul	t2, t2, t0
	la	t1, _fw_end
	add	t1, t1, t2
	li	t2, SBI_SCRATCH_SIZE
	sub	a0, t1, t2
	ret

	.section .entry, "ax", %progbits
	.align 3
	.globl _start_hang
_start_hang:
	wfi
	j	_start_hang

	.section .entry, "ax", %progbits
	.align 3
	.globl fw_platform_init
	.weak fw_platform_init
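/*
 * Default (weak) fw_platform_init: return arg1 (typically the FDT
 * address from the previous booting stage) unchanged.  The caller
 * uses the return value as the new arg1.
 */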
fw_platform_init:
	add	a0, a1, zero
	ret

	.section .entry, "ax", %progbits
	.align 3
	.globl _trap_handler
_trap_handler:
	/* Swap TP and MSCRATCH */
	csrrw	tp, CSR_MSCRATCH, tp

	/* Save T0 in scratch space */
	REG_S	t0, SBI_SCRATCH_TMP0_OFFSET(tp)

	/*
	 * Set T0 to appropriate exception stack
	 *
	 * Came_From_M_Mode = ((MSTATUS.MPP < PRV_M) ? 1 : 0) - 1;
	 * Exception_Stack = TP ^ (Came_From_M_Mode & (SP ^ TP))
	 *
	 * Came_From_M_Mode = 0    ==>    Exception_Stack = TP
	 * Came_From_M_Mode = -1   ==>    Exception_Stack = SP
	 */
	csrr	t0, CSR_MSTATUS
	srl	t0, t0, MSTATUS_MPP_SHIFT
	and	t0, t0, PRV_M
	slti	t0, t0, PRV_M
	add	t0, t0, -1
	xor	sp, sp, tp
	and	t0, t0, sp
	xor	sp, sp, tp
	xor	t0, tp, t0

	/* Save original SP on exception stack */
	REG_S	sp, (SBI_TRAP_REGS_OFFSET(sp) - SBI_TRAP_REGS_SIZE)(t0)

	/* Set SP to exception stack and make room for trap registers */
	add	sp, t0, -(SBI_TRAP_REGS_SIZE)

	/* Restore T0 from scratch space */
	REG_L	t0, SBI_SCRATCH_TMP0_OFFSET(tp)

	/* Save T0 on stack */
	REG_S	t0, SBI_TRAP_REGS_OFFSET(t0)(sp)

	/* Swap TP and MSCRATCH */
	csrrw	tp, CSR_MSCRATCH, tp

	/* Save MEPC and MSTATUS CSRs */
	csrr	t0, CSR_MEPC
	REG_S	t0, SBI_TRAP_REGS_OFFSET(mepc)(sp)
	csrr	t0, CSR_MSTATUS
	REG_S	t0, SBI_TRAP_REGS_OFFSET(mstatus)(sp)
	REG_S	zero, SBI_TRAP_REGS_OFFSET(mstatusH)(sp)
#if __riscv_xlen == 32
	csrr	t0, CSR_MISA
	srli	t0, t0, ('H' - 'A')
	andi	t0, t0, 0x1
	beq	t0, zero, _skip_mstatush_save
	csrr	t0, CSR_MSTATUSH
	REG_S	t0, SBI_TRAP_REGS_OFFSET(mstatusH)(sp)
_skip_mstatush_save:
#endif
	/* Save all general registers except SP and T0 */
	REG_S	zero, SBI_TRAP_REGS_OFFSET(zero)(sp)
	REG_S	ra, SBI_TRAP_REGS_OFFSET(ra)(sp)
	REG_S	gp, SBI_TRAP_REGS_OFFSET(gp)(sp)
	REG_S	tp, SBI_TRAP_REGS_OFFSET(tp)(sp)
	REG_S	t1, SBI_TRAP_REGS_OFFSET(t1)(sp)
	REG_S	t2, SBI_TRAP_REGS_OFFSET(t2)(sp)
	REG_S	s0, SBI_TRAP_REGS_OFFSET(s0)(sp)
	REG_S	s1, SBI_TRAP_REGS_OFFSET(s1)(sp)
	REG_S	a0, SBI_TRAP_REGS_OFFSET(a0)(sp)
	REG_S	a1, SBI_TRAP_REGS_OFFSET(a1)(sp)
	REG_S	a2, SBI_TRAP_REGS_OFFSET(a2)(sp)
	REG_S	a3, SBI_TRAP_REGS_OFFSET(a3)(sp)
	REG_S	a4, SBI_TRAP_REGS_OFFSET(a4)(sp)
	REG_S	a5, SBI_TRAP_REGS_OFFSET(a5)(sp)
	REG_S	a6, SBI_TRAP_REGS_OFFSET(a6)(sp)
	REG_S	a7, SBI_TRAP_REGS_OFFSET(a7)(sp)
	REG_S	s2, SBI_TRAP_REGS_OFFSET(s2)(sp)
	REG_S	s3, SBI_TRAP_REGS_OFFSET(s3)(sp)
	REG_S	s4, SBI_TRAP_REGS_OFFSET(s4)(sp)
	REG_S	s5, SBI_TRAP_REGS_OFFSET(s5)(sp)
	REG_S	s6, SBI_TRAP_REGS_OFFSET(s6)(sp)
	REG_S	s7, SBI_TRAP_REGS_OFFSET(s7)(sp)
	REG_S	s8, SBI_TRAP_REGS_OFFSET(s8)(sp)
	REG_S	s9, SBI_TRAP_REGS_OFFSET(s9)(sp)
	REG_S	s10, SBI_TRAP_REGS_OFFSET(s10)(sp)
	REG_S	s11, SBI_TRAP_REGS_OFFSET(s11)(sp)
	REG_S	t3, SBI_TRAP_REGS_OFFSET(t3)(sp)
	REG_S	t4, SBI_TRAP_REGS_OFFSET(t4)(sp)
	REG_S	t5, SBI_TRAP_REGS_OFFSET(t5)(sp)
	REG_S	t6, SBI_TRAP_REGS_OFFSET(t6)(sp)

	/* Call C routine */
	add	a0, sp, zero
	call	sbi_trap_handler
	/* Restore all general registers except SP and T0 */
	REG_L	ra, SBI_TRAP_REGS_OFFSET(ra)(sp)
	REG_L	gp, SBI_TRAP_REGS_OFFSET(gp)(sp)
	REG_L	tp, SBI_TRAP_REGS_OFFSET(tp)(sp)
	REG_L	t1, SBI_TRAP_REGS_OFFSET(t1)(sp)
	REG_L	t2, SBI_TRAP_REGS_OFFSET(t2)(sp)
	REG_L	s0, SBI_TRAP_REGS_OFFSET(s0)(sp)
	REG_L	s1, SBI_TRAP_REGS_OFFSET(s1)(sp)
	REG_L	a0, SBI_TRAP_REGS_OFFSET(a0)(sp)
	REG_L	a1, SBI_TRAP_REGS_OFFSET(a1)(sp)
	REG_L	a2, SBI_TRAP_REGS_OFFSET(a2)(sp)
	REG_L	a3, SBI_TRAP_REGS_OFFSET(a3)(sp)
	REG_L	a4, SBI_TRAP_REGS_OFFSET(a4)(sp)
	REG_L	a5, SBI_TRAP_REGS_OFFSET(a5)(sp)
	REG_L	a6, SBI_TRAP_REGS_OFFSET(a6)(sp)
	REG_L	a7, SBI_TRAP_REGS_OFFSET(a7)(sp)
	REG_L	s2, SBI_TRAP_REGS_OFFSET(s2)(sp)
	REG_L	s3, SBI_TRAP_REGS_OFFSET(s3)(sp)
	REG_L	s4, SBI_TRAP_REGS_OFFSET(s4)(sp)
	REG_L	s5, SBI_TRAP_REGS_OFFSET(s5)(sp)
	REG_L	s6, SBI_TRAP_REGS_OFFSET(s6)(sp)
	REG_L	s7, SBI_TRAP_REGS_OFFSET(s7)(sp)
	REG_L	s8, SBI_TRAP_REGS_OFFSET(s8)(sp)
	REG_L	s9, SBI_TRAP_REGS_OFFSET(s9)(sp)
	REG_L	s10, SBI_TRAP_REGS_OFFSET(s10)(sp)
	REG_L	s11, SBI_TRAP_REGS_OFFSET(s11)(sp)
	REG_L	t3, SBI_TRAP_REGS_OFFSET(t3)(sp)
	REG_L	t4, SBI_TRAP_REGS_OFFSET(t4)(sp)
	REG_L	t5, SBI_TRAP_REGS_OFFSET(t5)(sp)
	REG_L	t6, SBI_TRAP_REGS_OFFSET(t6)(sp)

	/* Restore MEPC and MSTATUS CSRs */
	REG_L	t0, SBI_TRAP_REGS_OFFSET(mepc)(sp)
	csrw	CSR_MEPC, t0
	REG_L	t0, SBI_TRAP_REGS_OFFSET(mstatus)(sp)
	csrw	CSR_MSTATUS, t0
#if __riscv_xlen == 32
	csrr	t0, CSR_MISA
	srli	t0, t0, ('H' - 'A')
	andi	t0, t0, 0x1
	beq	t0, zero, _skip_mstatush_restore
	REG_L	t0, SBI_TRAP_REGS_OFFSET(mstatusH)(sp)
	csrw	CSR_MSTATUSH, t0
_skip_mstatush_restore:
#endif

	/* Restore T0 */
	REG_L	t0, SBI_TRAP_REGS_OFFSET(t0)(sp)

	/* Restore SP */
	REG_L	sp, SBI_TRAP_REGS_OFFSET(sp)(sp)

	mret

	.section .entry, "ax", %progbits
	.align 3
	.globl _reset_regs
_reset_regs:
	/* flush the instruction cache */
	fence.i
	/* Reset all registers except ra, a0, a1 and a2 */
	li	sp, 0
	li	gp, 0
	li	tp, 0
	li	t0, 0
	li	t1, 0
	li	t2, 0
	li	s0, 0
	li	s1, 0
	li	a3, 0
	li	a4, 0
	li	a5, 0
	li	a6, 0
	li	a7, 0
	li	s2, 0
	li	s3, 0
	li	s4, 0
	li	s5, 0
	li	s6, 0
	li	s7, 0
	li	s8, 0
	li	s9, 0
	li	s10, 0
	li	s11, 0
	li	t3, 0
	li	t4, 0
	li	t5, 0
	li	t6, 0
	csrw	CSR_MSCRATCH, 0
	ret

#ifdef FW_FDT_PATH
	.section .rodata
	.align 4
	.globl fw_fdt_bin
fw_fdt_bin:
	.incbin FW_FDT_PATH
#ifdef FW_FDT_PADDING
	.fill FW_FDT_PADDING, 1, 0
#endif
#endif