cpu.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2019 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <hang.h>
#include <log.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};
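
/*
 * For reference, a sketch of how cpu_type and CPU_TYPE_ENTRY are commonly
 * defined in the Layerscape headers (the authoritative definitions live in
 * asm/arch/cpu.h; treat the exact field names here as assumptions):
 *
 *	struct cpu_type {
 *		char name[15];
 *		u32 soc_ver;
 *		u32 num_cores;
 *	};
 *	#define CPU_TYPE_ENTRY(n, v, nc) \
 *		{ .name = #n, .soc_ver = SVR_##v, .num_cores = (nc) }
 *
 * cpu_name() below matches SVR_SOC_VER(svr) against .soc_ver (with the E
 * bit masked off) to find the entry for the running part.
 */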

#define EARLY_PGTABLE_SIZE 0x5000

static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE },
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
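
/*
 * Each mm_region entry above is { virt, phys, size, attrs } (see struct
 * mm_region in asm/armv8/mmu.h); an all-zero entry terminates the list.
 * final_map below follows the same layout.
 */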

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For the QBMan portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40-bit physical addresses and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
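
/*
 * A rough budget for EARLY_PGTABLE_SIZE, assuming the usual three-level
 * layout (the exact allocation is made by setup_pgtables() at run time):
 * with a 4KB granule and T0SZ=24, the VA space is 2^(64-24) = 2^40. Each
 * 4KB table holds 512 eight-byte entries, so each level resolves 9 bits:
 *
 *	level 0: VA bit  39    -> one table, only 2 entries used
 *	level 1: VA bits 38-30 -> 1GB blocks or pointers to level 2
 *	level 2: VA bits 29-21 -> 2MB blocks
 *
 * Five 4KB pages (0x5000) therefore fit one level-0 table plus a handful
 * of level-1/level-2 tables, enough to cover the early_map entries.
 */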

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix the PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM is not used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update the mapping for DDR to its actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for the DDR mapping and update the address
		 * and size. Zero-sized mappings are skipped when the MMU
		 * table is created.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	/* Fixed 64KB reservation for the final MMU tables */
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed
	 * up the boot. It is not strictly necessary to run if performance
	 * is not critical. Skip if the MMU is already enabled by SPL or
	 * other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */

u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display the platform clock as the Bus frequency. */
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
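
/*
 * Usage sketch: core_prefetch is consumed from the "hwconfig" environment
 * variable in the usual key:subkey=value form. For example, to request
 * that prefetch be disabled on cores 1 and 2 (mask 0x6), one would run,
 * from the U-Boot shell:
 *
 *	=> setenv hwconfig "core_prefetch:disable=0x6"
 *	=> saveenv; reset
 *
 * Bit 0 (core 0) is rejected above; the remaining mask is handed to
 * secure firmware via the SIP_PREFETCH_DISABLE_64 SMC on the next boot.
 */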

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 applies only to the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some DPMACs in ARMv8-based Freescale Layerscape SoCs can be
	 * configured both via the SerDes (SGMII, XFI, XLAUI etc.) bits and
	 * via the EC*_PMUX (RGMII) bits in the RCW. For example, DPMACs 17
	 * and 18 in the LX2160A can be configured as SGMII from the SerDes
	 * bits or as RGMII via the EC1_PMUX/EC2_PMUX bits.
	 * If a DPMAC is enabled by the SerDes bits, that takes precedence
	 * over the EC*_PMUX bits: on the LX2160A, selecting a SerDes
	 * protocol that configures dpmac17 as SGMII while setting EC1_PMUX
	 * to RGMII leaves the DPMAC in SGMII mode, not RGMII.
	 *
	 * Therefore fsl_rgmii_init() is called after fsl_serdes_init(); the
	 * SoC's fsl_rgmii_init() checks whether fsl_serdes_init() has
	 * already enabled a given DPMAC and, if so, does not enable it
	 * again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the time base for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, each core's clock has an enable bit
	 * in the PMU Physical Core Time Base Enable Register (PCTBENR),
	 * which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the memory reserved from the top of the given memory bank.
 * Returns the aligned reserved size on success.
 * Returns (ram_size + needed size) on failure, so the caller can detect it.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
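
/*
 * Worked example with illustrative numbers: for ram_size = 0x80000000
 * (2GB), an MC block size of 0x20000000 (512MB) and
 * CONFIG_SYS_MC_RSV_MEM_ALIGN = 0x20000000, ram_top becomes 0x60000000
 * after alignment, so the function reports 0x20000000 reserved at the
 * top of the bank.
 */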

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is a 2GB space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), the Management
	 * Complex (MC) memory should be put into the highest region, i.e.
	 * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
	 * the first region so U-Boot doesn't relocate itself into higher
	 * addresses. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in the high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
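
/*
 * Worked example with illustrative numbers: with gd->ram_size = 4GB and
 * CONFIG_MAX_MEM_MAPPED = 2GB, ea_size starts at 2GB and rem = 2GB. With
 * CONFIG_SYS_MEM_RESERVE_SECURE = 64MB, ea_size drops to 2GB - 64MB. The
 * MC reservation is then taken from the high region (rem) when it fits
 * there, leaving ea_size untouched.
 */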

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved
	 * secure memory. The DDR extends from the low region to the high
	 * region(s) presuming no hole is created with the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, check gd->arch.secure_ram
	 * to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);
		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make the MMU mapping for DDR */
	update_early_mmu_table();
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	serdes_misc_init();

	return 0;
}
#endif