head.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	KERNEL_START

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
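	/*
	 * (For reference: "add x13, x18, #0x16" assembles to 0x91005a4d, so
	 * the first two bytes of the image are 0x4d 0x5a, i.e. ASCII "MZ".)
	 */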
	add	x13, x18, #0x16
	b	primary_entry
#else
	b	primary_entry			// branch to kernel start, magic
	.long	0				// reserved
#endif
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                    Purpose
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 *  x23        primary_entry() .. start_kernel()        physical misalignment/KASLR offset
	 *  x28        __create_page_tables()                   callee preserved temp register
	 *  x19/x20    __primary_switch()                       callee preserved temp registers
	 *  x24        __primary_switch() .. relocate_kernel()  current RELR displacement
	 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	init_kernel_el			// w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
SYM_CODE_END(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
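
/*
 * Illustration (values are examples only): with ptrs == 512, the macro writes
 * a table descriptor for 'virt' at index ((virt >> shift) & 511) of 'tbl',
 * pointing at the page that immediately follows 'tbl', and then advances
 * 'tbl' to that next page.
 */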

/*
 * Macro to populate page table entries; these entries can be pointers to the
 * next level or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for page table entry to OR in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm
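
/*
 * Illustration (values are examples only): with index == 0, eindex == 2 and
 * inc == #PAGE_SIZE, three descriptors are written at slots 0..2, pointing at
 * rtbl, rtbl + PAGE_SIZE and rtbl + 2 * PAGE_SIZE (each OR'd with flags), and
 * rtbl is returned advanced by 3 * PAGE_SIZE.
 */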

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend]
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level,
 *		scales our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart	// iend += count * ptrs
					// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
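
/*
 * Worked example (illustrative values, assuming 4K pages): with shift == 21,
 * ptrs == 512 and count == 0 on entry, a 2 MiB aligned, 16 MiB range
 * [vstart, vend] that does not cross a 1 GiB boundary gives
 * iend - istart == 7, so count == 7 is returned; a following level would
 * therefore be assumed to span 8 pages.
 */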

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend - 1]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, flags
 * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	sub \vend, \vend, #1
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
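
/*
 * Illustration (assuming 4K pages, i.e. 2 MiB SWAPPER_BLOCK_SIZE blocks): for
 * a kernel image of a few tens of MiB that does not cross a higher-level
 * boundary, each upper level needs a single entry and a single table page,
 * and the last level receives one 2 MiB block descriptor per 2 MiB of the
 * range.
 */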

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
SYM_FUNC_START_LOCAL(__create_page_tables)
	mov	x28, lr

	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_actual
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
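	/*
	 * (Example: if the highest bit set in the physical address of
	 * __idmap_text_end is bit 39, clz yields 24, and T0SZ == 24
	 * corresponds to a 40-bit (1 TiB) ID map VA range. The extension
	 * below is only applied when this required T0SZ is smaller than
	 * the default T0SZ for VA_BITS_MIN, i.e. when the ID map needs
	 * more VA bits than VA_BITS_MIN provides.)
	 */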
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS_MIN)	// default T0SZ small enough?
	b.ge	1f				// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
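
	/*
	 * (For example, with 4K pages and VA_BITS == 39: PGDIR_SHIFT == 30,
	 * so EXTRA_SHIFT == 39, and with a 48-bit PHYS_MASK_SHIFT,
	 * EXTRA_PTRS == 1 << (48 - 39) == 512, i.e. one extra 512-entry
	 * level covering the full 48-bit PA range.)
	 */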
	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	dmb	sy

	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	ret	x28
SYM_FUNC_END(__create_page_tables)

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

#ifdef CONFIG_SHADOW_CALL_STACK
	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
#endif

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x21				// pass FDT address in x0
	bl	early_fdt_map			// Try mapping the FDT early
	bl	init_feature_override		// Parse cpu feature overrides
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	bl	switch_to_vhe			// Prefer VHE if possible
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
SYM_FUNC_END(__primary_switched)

	.pushsection ".rodata", "a"
SYM_DATA_START(kimage_vaddr)
	.quad	_text
SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
	.popsection

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
SYM_FUNC_START(init_kernel_el)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	msr	sctlr_el1, x0

	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2
	eret
SYM_FUNC_END(init_kernel_el)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// record this CPU's boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
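
/*
 * (Note: CPUs entering at EL1 update the first __boot_cpu_mode slot and CPUs
 * entering at EL2 update the second, so helpers such as
 * is_hyp_mode_available() and is_hyp_mode_mismatched() in <asm/virt.h> can
 * later tell whether all, some or none of the CPUs booted at EL2.)
 */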

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
SYM_DATA_START(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
SYM_DATA_END(__boot_cpu_mode)
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
SYM_DATA_START(__early_cpu_boot_status)
	.quad	0
SYM_DATA_END(__early_cpu_boot_status)

	.popsection

/*
 * This provides a "holding pen" for platforms, where all secondary cores
 * are held until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	bl	init_kernel_el			// w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
SYM_FUNC_START(secondary_entry)
	bl	init_kernel_el			// w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	switch_to_vhe
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

SYM_FUNC_START_LOCAL(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow
	msr	sp_el0, x2
	scs_load x2, x3
	mov	x29, #0
	mov	x30, #0

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status tmp, status
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */
	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	offset_ttbr1 x1, x3
	msr	ttbr1_el1, x1			// load TTBR1
	isb

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
SYM_FUNC_START_LOCAL(__relocate_kernel)
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset

	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
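	/*
	 * (Worked example with made-up entries: the pair { A, 0x7 } relocates
	 * the word at address A, and then, since bits 1 and 2 of the bitmap
	 * are set, the two words that immediately follow it at A + 8 and
	 * A + 16.)
	 */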
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table

	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x15
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x15
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

SYM_FUNC_END(__relocate_kernel)
#endif

SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	adrp	x1, init_pg_dir
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
	mov	x24, #0				// no RELR displacement yet
#endif
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh
	isb

	set_sctlr_el1	x19			// re-enable the MMU

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
SYM_FUNC_END(__primary_switch)