// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2019 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <vsprintf.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};
#define EARLY_PGTABLE_SIZE 0x5000

static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create the MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, so T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
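/*
 * Illustrative arithmetic for the settings above (a sketch based on the
 * generic ARMv8 translation scheme, not taken from this file): T0SZ = 24
 * gives a 2^(64 - 24) = 2^40 = 1TB virtual address space. With a 4KB
 * granule, each level 1 entry maps 1GB and each level 2 block entry maps
 * 2MB, so block entries at these levels can describe the whole 40-bit
 * space within the five 4KB tables reserved by EARLY_PGTABLE_SIZE (0x5000).
 */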
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}
/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
u64 get_page_table_size(void)
{
	return 0x10000;
}
int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * the running system. It is not necessary to run if performance is
	 * not critical. Skip it if the MMU has already been enabled by SPL
	 * or by other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}
void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif

	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif
enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}

	/* Display platform clock as Bus frequency. */
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
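/*
 * Example usage (illustrative, assuming the standard U-Boot hwconfig
 * "key:subkey=value" syntax): to request disabling hardware prefetch on
 * cores 1 and 2, set bits 1 and 2 of the mask before booting:
 *
 *   => setenv hwconfig "core_prefetch:disable=0x6"
 *
 * Bit 0 (core 0) must stay clear; config_core_prefetch() above rejects
 * any mask that tries to disable prefetch on the boot core.
 */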
#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif

	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* Some dpmacs in armv8a-based Freescale Layerscape SoCs can be
	 * configured via both serdes bits (SGMII, XFI, XLAUI, etc.) and via
	 * the EC*_PMUX (RGMII) bits in the RCW.
	 * E.g. dpmacs 17 and 18 in LX2160A can be configured as SGMII from
	 * the serdes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
	 * If a dpmac is enabled by the serdes bits, that takes precedence
	 * over the EC*_PMUX bits; i.e. on LX2160A, if we select a serdes
	 * protocol that configures dpmac17 as SGMII and also set EC1_PMUX
	 * to RGMII, the dpmac is SGMII, not RGMII.
	 *
	 * Therefore, fsl_rgmii_init must run after fsl_serdes_init. In the
	 * SoC's fsl_rgmii_init function we check whether the dpmac has
	 * already been enabled (by fsl_serdes_init); if it has, we do not
	 * enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, each core's clock has an enable bit
	 * in the PMU Physical Core Time Base Enable Register (PCTBENR),
	 * which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable the clock for the timer. This is a global setting. */
	out_le32(cntcr, 0x1);

	return 0;
}
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
/*
 * Calculate the reserved memory for the given memory bank.
 * Return the aligned reserved memory size on success.
 * Return (ram_size + needed size) on failure.
 */
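/*
 * Worked example with hypothetical numbers: for ram_size = 0x80000000
 * (2GB), an MC block size of 0x20000000 (512MB) and
 * CONFIG_SYS_MC_RSV_MEM_ALIGN = 0x20000000, ram_top becomes
 * 0x80000000 - 0x20000000 = 0x60000000 (already aligned), and the
 * function returns 0x80000000 - 0x60000000 = 0x20000000, the size
 * reserved for the MC at the top of the bank.
 */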
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is a 2GB space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e. the
	 * end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of the
	 * first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
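/*
 * Worked example with hypothetical numbers: for gd->ram_size = 4GB and
 * CONFIG_MAX_MEM_MAPPED = 2GB, ea_size starts at 2GB and rem = 2GB.
 * With CONFIG_SYS_MEM_RESERVE_SECURE = 64MB, ea_size drops to 2GB - 64MB.
 * The MC reservation is then taken from rem (the high region) when it
 * fits there, and is only carved out of ea_size when the high region is
 * too small.
 */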
#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
			    gd->bd->bi_dram[1].size -
			    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
			    gd->bd->bi_dram[0].size -
			    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, checking gd->arch.secure_ram
	 * should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);
		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
			    gd->bd->bi_dram[1].size -
			    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
			    gd->bd->bi_dram[0].size -
			    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif
/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}
__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}