start.S

/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#if defined(CONFIG_LINUX_KERNEL_IMAGE_HEADER)
#include <asm/boot0-linux-kernel-header.h>
#elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK)
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT
	/* Verify that we're 4K aligned. */
	adr	x0, _start
	ands	x0, x0, #0xfff
	b.eq	1f
0:
	/*
	 * FATAL, can't continue.
	 * U-Boot needs to be loaded at a 4K aligned address.
	 *
	 * We use ADRP and ADD to load some symbol addresses during startup.
	 * The ADD uses an absolute (non pc-relative) lo12 relocation
	 * thus requiring 4K alignment.
	 */
	wfi
	b	0b
1:

	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
	adrp	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	add	x2, x2, #:lo12:__rel_dyn_start
	adrp	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
	add	x3, x3, #:lo12:__rel_dyn_end
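	/*
	 * Each .rela.dyn entry is an Elf64_Rela: 8-byte offset, 8-byte info,
	 * 8-byte addend. Relocation type 1027 is R_AARCH64_RELATIVE.
	 */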
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif

#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
.macro	set_vbar, regname, reg
	msr	\regname, \reg
.endm
	adr	x0, vectors
#else
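	/* SPL build without exception vectors: make set_vbar a no-op */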
.macro	set_vbar, regname, reg
.endm
#endif

	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	switch_el x1, 3f, 2f, 1f
3:	set_vbar vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr		/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
#endif
	b	0f
2:	set_vbar vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	set_vbar vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:
	isb

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
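	/* SMPEN is bit 6 (0x40) of CPUECTLR_EL1 on these cores */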
	orr	x0, x0, #0x40
	msr	S3_1_c15_c2_1, x0
	isb
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
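	/* Wait until a non-zero entry address is published at CPU_RELEASE_ADDR, then branch to it */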
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1
0:
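	/* Clear SCTLR.M (MMU enable), SCTLR.C (D-cache enable) and SCTLR.EE (big-endian data) */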
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0
7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53, Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
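	/* The MIDR checks below apply the workaround only on Cortex-A53 revisions r0p3 and later */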
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)
	b.ne	0b

	mrs	x0, midr_el1
	and	x0, x0, #0xf
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b	0b

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b	0b
ENDPROC(apply_core_errata)
/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table. This sync prevents slaves from observing a stale spin-table
	 * value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
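	/* x4 = address to continue at after the EL switch, x5 = target execution state (ES_TO_AARCH64) */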
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with an SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
	/* Point VBAR at the relocated exception vectors */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:
#endif

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)