// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2020 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <hang.h>
#include <log.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
};
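
/*
 * Size in bytes reserved for the early MMU translation tables created by
 * early_mmu_setup() below.
 */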
#define EARLY_PGTABLE_SIZE 0x5000

static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;
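
/*
 * Fill @name with a printable CPU name derived from the SVR: the part
 * number from cpu_type_list plus "C"/"E" personality suffixes and the
 * revision, e.g. "LS2088AE Rev1.0", or "unknown" if the SVR is not listed.
 */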
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40-bit physical addresses and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
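
/*
 * Fixed space reserved for the final MMU tables; both the normal and the
 * emergency tables created in final_mmu_setup() must fit in here.
 */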
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed
	 * up the running system. It is not necessary to run it if performance
	 * is not critical. Skip it if the MMU is already enabled by SPL or
	 * other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif

	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);
	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
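/* Pick the MMC environment device based on the detected boot source */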
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif
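
/* Map the detected boot source to the environment storage location */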
enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
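
/*
 * Return the topology initiator type register value for initiator
 * @init_id of @cluster, or 0 if the initiator is not available
 * (TP_ITYP_AV clear).
 */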
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
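
/* Mask of available ARM cores, indexed by their physical cluster position */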
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
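
/* Mask of available ARM cores, numbered sequentially across all clusters */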
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
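
/* Return the cluster index containing the given logical core, or -1 */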
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
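
/* Return the initiator type of the given logical core, or -1 if not found */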
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core's type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
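
/* Return 1 if the secure firmware reports no PSCI support, 0 otherwise */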
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);
	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif

	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some dpmacs in ARMv8-based Freescale Layerscape SoCs can be
	 * configured both via SerDes (SGMII, XFI, XLAUI etc.) bits and via
	 * EC*_PMUX (RGMII) bits in the RCW.
	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
	 * SerDes bits and as RGMII via EC1_PMUX/EC2_PMUX bits.
	 * If a dpmac is enabled as RGMII through ECx_PMUX, it takes
	 * precedence over the SerDes protocol, i.e. in LX2160A if we select
	 * a SerDes protocol that configures dpmac17 as SGMII and set
	 * EC1_PMUX as RGMII, then the dpmac is RGMII and not SGMII.
	 *
	 * Therefore, even though fsl_rgmii_init() runs after the SoC's
	 * fsl_serdes_init(), the dpmac will be enabled as RGMII even if it
	 * was also enabled before as SGMII. If ECx_PMUX is not configured
	 * for RGMII, the DPMAC will remain configured as SGMII from
	 * fsl_serdes_init().
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core has an
	 * enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the reserved memory within the given memory bank.
 * Return the aligned reserved memory size on success,
 * or (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB of space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e.
	 * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
	 * the first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif

	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
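/*
 * Query DRAM bank information from TF-A via the SMC_DRAM_BANK_INFO service;
 * per the call below, passing -1 in x1 returns the total DRAM size in x1.
 */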
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s)
	 * presuming no hole is created with the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, checking gd->arch.secure_ram
	 * should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);
		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				gd->ram_size,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				CONFIG_SYS_DDR_BLOCK1_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	serdes_misc_init();

	return 0;
}
#endif