// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2019 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <hang.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

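/*
 * SVR-to-name table: cpu_name() below matches the SoC version field of
 * the SVR against these entries (ignoring the E bit); the last column
 * is the core count.
 */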
static struct cpu_type cpu_type_list[] = {
        CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
        CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
        CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
        CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
        CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
        CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
        CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
        CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
        CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
        CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
        CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
        CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
        CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
        CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
        CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
        CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
        CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
        CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
        CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
        CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
        CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
        CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
        CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
        CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
        CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
        CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
        CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
        CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000

static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
        { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
          CONFIG_SYS_FSL_CCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
          SYS_FSL_OCRAM_SPACE_SIZE,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
          CONFIG_SYS_FSL_QSPI_SIZE1,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
        /* For IFC Region #1, only the first 4MB is cache-enabled */
        { CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
          CONFIG_SYS_FSL_IFC_SIZE1_1,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
          CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
          CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
          CONFIG_SYS_FSL_IFC_SIZE1,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
#endif
        { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
          CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
        (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else   /* Start with nGnRnE and PXN and UXN to prevent speculative access */
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#ifdef CONFIG_FSL_IFC
        /* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
        { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
          CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
#endif
        { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
          CONFIG_SYS_FSL_DCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
          CONFIG_SYS_FSL_DRAM_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
        { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
          CONFIG_SYS_FSL_DRAM_SIZE3,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#endif
#elif defined(CONFIG_FSL_LSCH2)
        { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
          CONFIG_SYS_FSL_CCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
          SYS_FSL_OCRAM_SPACE_SIZE,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
          CONFIG_SYS_FSL_DCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
          CONFIG_SYS_FSL_QSPI_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
#ifdef CONFIG_FSL_IFC
        { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
          CONFIG_SYS_FSL_IFC_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
#endif
        { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
          CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
        (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else   /* Start with nGnRnE and PXN and UXN to prevent speculative access */
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
        { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
          CONFIG_SYS_FSL_DRAM_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#endif
        {},     /* list terminator */
};

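/*
 * Final MMU table, used once U-Boot runs from DRAM; final_mmu_setup()
 * patches the DRAM entries to the actual bank addresses and sizes
 * before installing it.
 */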
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
        { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
          CONFIG_SYS_FSL_CCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
          SYS_FSL_OCRAM_SPACE_SIZE,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
          CONFIG_SYS_FSL_DRAM_SIZE1,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
        { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
          CONFIG_SYS_FSL_QSPI_SIZE1,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
          CONFIG_SYS_FSL_QSPI_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#ifdef CONFIG_FSL_IFC
        { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
          CONFIG_SYS_FSL_IFC_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
        { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
          CONFIG_SYS_FSL_DCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
          CONFIG_SYS_FSL_MC_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
          CONFIG_SYS_FSL_NI_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        /* For QBMAN portal, only the first 64MB is cache-enabled */
        { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
          CONFIG_SYS_FSL_QBMAN_SIZE_1,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
        },
        { CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
          CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
          CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
          CONFIG_SYS_PCIE1_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
          CONFIG_SYS_PCIE2_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
        { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
          CONFIG_SYS_PCIE3_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
        { CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
          CONFIG_SYS_PCIE4_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
        { SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
          SYS_PCIE5_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
        { SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
          SYS_PCIE6_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
        { CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
          CONFIG_SYS_FSL_WRIOP1_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
          CONFIG_SYS_FSL_AIOP1_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
          CONFIG_SYS_FSL_PEBUF_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
          CONFIG_SYS_FSL_DRAM_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
        { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
          CONFIG_SYS_FSL_DRAM_SIZE3,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#endif
#elif defined(CONFIG_FSL_LSCH2)
        { CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
          CONFIG_SYS_FSL_BOOTROM_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
          CONFIG_SYS_FSL_CCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
          SYS_FSL_OCRAM_SPACE_SIZE,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
        },
        { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
          CONFIG_SYS_FSL_DCSR_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
          CONFIG_SYS_FSL_QSPI_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#ifdef CONFIG_FSL_IFC
        { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
          CONFIG_SYS_FSL_IFC_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
        },
#endif
        { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
          CONFIG_SYS_FSL_DRAM_SIZE1,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
        { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
          CONFIG_SYS_FSL_QBMAN_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
          CONFIG_SYS_FSL_DRAM_SIZE2,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
        { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
          CONFIG_SYS_PCIE1_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
        { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
          CONFIG_SYS_PCIE2_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
        { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
          CONFIG_SYS_PCIE3_PHYS_SIZE,
          PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
          PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
        },
#endif
        { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
          CONFIG_SYS_FSL_DRAM_SIZE3,
          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
          PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
        },
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        {},     /* space holder for secure mem */
#endif
        {},
};

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        unsigned int i, svr, ver;

        svr = gur_in32(&gur->svr);
        ver = SVR_SOC_VER(svr);

        for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
                if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
                        strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
                        if (IS_C_PROCESSOR(svr))
                                strcat(name, "C");
#endif
                        if (IS_E_PROCESSOR(svr))
                                strcat(name, "E");

                        sprintf(name + strlen(name), " Rev%d.%d",
                                SVR_MAJ(svr), SVR_MIN(svr));
                        break;
                }

        if (i == ARRAY_SIZE(cpu_type_list))
                strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
        unsigned int el = current_el();

        /* global data is already setup, no allocation yet */
        if (el == 3)
                gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
        else
                gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
        gd->arch.tlb_fillptr = gd->arch.tlb_addr;
        gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

        /* Create early page tables */
        setup_pgtables();

        /* point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                          get_tcr(el, NULL, NULL) &
                          ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
                          MEMORY_ATTRIBUTES);

        set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
        unsigned int i;
        u32 svr, ver;
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

        svr = gur_in32(&gur->svr);
        ver = SVR_SOC_VER(svr);

        /* Fix PCIE base and size for LS2088A */
        if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
            (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
            (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
                for (i = 0; i < ARRAY_SIZE(final_map); i++) {
                        switch (final_map[i].phys) {
                        case CONFIG_SYS_PCIE1_PHYS_ADDR:
                                final_map[i].phys = 0x2000000000ULL;
                                final_map[i].virt = 0x2000000000ULL;
                                final_map[i].size = 0x800000000ULL;
                                break;
                        case CONFIG_SYS_PCIE2_PHYS_ADDR:
                                final_map[i].phys = 0x2800000000ULL;
                                final_map[i].virt = 0x2800000000ULL;
                                final_map[i].size = 0x800000000ULL;
                                break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
                        case CONFIG_SYS_PCIE3_PHYS_ADDR:
                                final_map[i].phys = 0x3000000000ULL;
                                final_map[i].virt = 0x3000000000ULL;
                                final_map[i].size = 0x800000000ULL;
                                break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
                        case CONFIG_SYS_PCIE4_PHYS_ADDR:
                                final_map[i].phys = 0x3800000000ULL;
                                final_map[i].virt = 0x3800000000ULL;
                                final_map[i].size = 0x800000000ULL;
                                break;
#endif
                        default:
                                break;
                        }
                }
        }
#endif
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM is not used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
        u64 tlb_addr_save = gd->arch.tlb_addr;
        unsigned int el = current_el();
        int index;

        /* fix the final_map before filling in the block entries */
        fix_pcie_mmu_map();

        mem_map = final_map;

        /* Update mapping for DDR to actual size */
        for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
                /*
                 * Find the entry for DDR mapping and update the address and
                 * size. Zero-sized mappings will be skipped when creating the
                 * MMU table.
                 */
                switch (final_map[index].virt) {
                case CONFIG_SYS_FSL_DRAM_BASE1:
                        final_map[index].virt = gd->bd->bi_dram[0].start;
                        final_map[index].phys = gd->bd->bi_dram[0].start;
                        final_map[index].size = gd->bd->bi_dram[0].size;
                        break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
                case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
                        final_map[index].virt = gd->bd->bi_dram[1].start;
                        final_map[index].phys = gd->bd->bi_dram[1].start;
                        final_map[index].size = gd->bd->bi_dram[1].size;
#else
                        final_map[index].size = 0;
#endif
                        break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
                case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
                        final_map[index].virt = gd->bd->bi_dram[2].start;
                        final_map[index].phys = gd->bd->bi_dram[2].start;
                        final_map[index].size = gd->bd->bi_dram[2].size;
#else
                        final_map[index].size = 0;
#endif
                        break;
#endif
                default:
                        break;
                }
        }

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
                if (el == 3) {
                        /*
                         * Only use gd->arch.secure_ram if the address is
                         * recalculated. Align to 4KB for the MMU table.
                         */
                        /* put page tables in secure ram */
                        index = ARRAY_SIZE(final_map) - 2;
                        gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
                        final_map[index].virt = gd->arch.secure_ram & ~0x3;
                        final_map[index].phys = final_map[index].virt;
                        final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
                        final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
                        gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
                        tlb_addr_save = gd->arch.tlb_addr;
                } else {
                        /* Use allocated (board_f.c) memory for TLB */
                        tlb_addr_save = gd->arch.tlb_allocated;
                        gd->arch.tlb_addr = tlb_addr_save;
                }
        }
#endif

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr_save;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        setup_pgtables();
        gd->arch.tlb_addr = tlb_addr_save;

        /* Disable cache and MMU */
        dcache_disable();       /* TLBs are invalidated */
        invalidate_icache_all();

        /* point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);

        set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
        return 0x10000;
}

int arch_cpu_init(void)
{
        /*
         * This function is called before U-Boot relocates itself, to speed
         * up booting. It is not necessary to run if performance is not
         * critical. Skip if the MMU is already enabled by SPL or other means.
         */
        if (get_sctlr() & CR_M)
                return 0;

        icache_enable();
        __asm_invalidate_dcache_all();
        __asm_invalidate_tlb_all();
        early_mmu_setup();
        set_sctlr(get_sctlr() | CR_C);
        return 0;
}

void mmu_setup(void)
{
        final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
        mmu_setup();
        __asm_invalidate_tlb_all();
        icache_enable();
        dcache_enable();
}
#endif  /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
        enum boot_src src = BOOT_SOURCE_RESERVED;
        u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
        u32 val;
#endif
        debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
        switch (rcw_src) {
        case RCW_SRC_SDHC1_VAL:
                src = BOOT_SOURCE_SD_MMC;
                break;
        case RCW_SRC_SDHC2_VAL:
                src = BOOT_SOURCE_SD_MMC2;
                break;
        case RCW_SRC_I2C1_VAL:
                src = BOOT_SOURCE_I2C1_EXTENDED;
                break;
        case RCW_SRC_FLEXSPI_NAND2K_VAL:
                src = BOOT_SOURCE_XSPI_NAND;
                break;
        case RCW_SRC_FLEXSPI_NAND4K_VAL:
                src = BOOT_SOURCE_XSPI_NAND;
                break;
        case RCW_SRC_RESERVED_1_VAL:
                src = BOOT_SOURCE_RESERVED;
                break;
        case RCW_SRC_FLEXSPI_NOR_24B:
                src = BOOT_SOURCE_XSPI_NOR;
                break;
        default:
                src = BOOT_SOURCE_RESERVED;
        }
#else
        val = rcw_src & RCW_SRC_TYPE_MASK;
        if (val == RCW_SRC_NOR_VAL) {
                val = rcw_src & NOR_TYPE_MASK;

                switch (val) {
                case NOR_16B_VAL:
                case NOR_32B_VAL:
                        src = BOOT_SOURCE_IFC_NOR;
                        break;
                default:
                        src = BOOT_SOURCE_RESERVED;
                }
        } else {
                /* RCW SRC Serial Flash */
                val = rcw_src & RCW_SRC_SERIAL_MASK;
                switch (val) {
                case RCW_SRC_QSPI_VAL:
                        /* RCW SRC Serial NOR (QSPI) */
                        src = BOOT_SOURCE_QSPI_NOR;
                        break;
                case RCW_SRC_SD_CARD_VAL:
                        /* RCW SRC SD Card */
                        src = BOOT_SOURCE_SD_MMC;
                        break;
                case RCW_SRC_EMMC_VAL:
                        /* RCW SRC EMMC */
                        src = BOOT_SOURCE_SD_MMC;
                        break;
                case RCW_SRC_I2C1_VAL:
                        /* RCW SRC I2C1 Extended */
                        src = BOOT_SOURCE_I2C1_EXTENDED;
                        break;
                default:
                        src = BOOT_SOURCE_RESERVED;
                }
        }
#endif
#elif defined(CONFIG_FSL_LSCH2)
        /* RCW SRC NAND */
        val = rcw_src & RCW_SRC_NAND_MASK;
        if (val == RCW_SRC_NAND_VAL) {
                val = rcw_src & NAND_RESERVED_MASK;
                if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
                        src = BOOT_SOURCE_IFC_NAND;
        } else {
                /* RCW SRC NOR */
                val = rcw_src & RCW_SRC_NOR_MASK;
                if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
                        src = BOOT_SOURCE_IFC_NOR;
                } else {
                        switch (rcw_src) {
                        case QSPI_VAL1:
                        case QSPI_VAL2:
                                src = BOOT_SOURCE_QSPI_NOR;
                                break;
                        case SD_VAL:
                                src = BOOT_SOURCE_SD_MMC;
                                break;
                        default:
                                src = BOOT_SOURCE_RESERVED;
                        }
                }
        }
#endif

        if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
                src = BOOT_SOURCE_QSPI_NOR;

        debug("%s: src 0x%x\n", __func__, src);
        return src;
}

enum boot_src get_boot_src(void)
{
        struct pt_regs regs;
        u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
        u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

        if (current_el() == 2) {
                regs.regs[0] = SIP_SVC_RCW;

                smc_call(&regs);
                if (!regs.regs[0])
                        porsr1 = regs.regs[1];
        }

        if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
                porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
                porsr1 = in_be32(&gur->porsr1);
#endif
        }

        debug("%s: porsr1 0x%x\n", __func__, porsr1);
        return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
        enum boot_src src = get_boot_src();
        int dev = CONFIG_SYS_MMC_ENV_DEV;

        switch (src) {
        case BOOT_SOURCE_SD_MMC:
                dev = 0;
                break;
        case BOOT_SOURCE_SD_MMC2:
                dev = 1;
                break;
        default:
                break;
        }

        return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
        enum boot_src src = get_boot_src();
        enum env_location env_loc = ENVL_NOWHERE;

        if (prio)
                return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
        return env_loc;
#endif

        switch (src) {
        case BOOT_SOURCE_IFC_NOR:
                env_loc = ENVL_FLASH;
                break;
        case BOOT_SOURCE_QSPI_NOR:
                /* FALLTHROUGH */
        case BOOT_SOURCE_XSPI_NOR:
                env_loc = ENVL_SPI_FLASH;
                break;
        case BOOT_SOURCE_IFC_NAND:
                /* FALLTHROUGH */
        case BOOT_SOURCE_QSPI_NAND:
                /* FALLTHROUGH */
        case BOOT_SOURCE_XSPI_NAND:
                env_loc = ENVL_NAND;
                break;
        case BOOT_SOURCE_SD_MMC:
                /* FALLTHROUGH */
        case BOOT_SOURCE_SD_MMC2:
                env_loc = ENVL_MMC;
                break;
        case BOOT_SOURCE_I2C1_EXTENDED:
                /* FALLTHROUGH */
        default:
                break;
        }

        return env_loc;
}
#endif  /* CONFIG_TFABOOT */

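/*
 * Return the initiator type for entry init_id of a cluster as read from
 * the GUR topology registers, or 0 if the entry is marked not available.
 */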
u32 initiator_type(u32 cluster, int init_id)
{
        struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
        u32 type = gur_in32(&gur->tp_ityp[idx]);

        if (type & TP_ITYP_AV)
                return type;

        return 0;
}

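/*
 * Build a bitmask of ARM cores indexed by their physical position in
 * the cluster topology; positions of non-ARM or absent initiators are
 * left as gaps.
 */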
u32 cpu_pos_mask(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0;
        u32 cluster, type, mask = 0;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
                                mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return mask;
}

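/*
 * Build a bitmask of ARM cores numbered sequentially across all
 * available initiators, i.e. without positional gaps.
 */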
u32 cpu_mask(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type, mask = 0;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                        mask |= 1 << count;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
        return hweight32(cpu_mask());
}

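/* Map a logical core number to its cluster index, or -1 if not found. */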
int fsl_qoriq_core_to_cluster(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        if (initiator_type(cluster, j)) {
                                if (count == core)
                                        return i;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the cluster */
}

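/* Return the initiator type of a logical core, or (u32)-1 if not found. */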
u32 fsl_qoriq_core_to_type(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (count == core)
                                        return type;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

        return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        struct sys_info sysinfo;
        char buf[32];
        unsigned int i, core;
        u32 type, rcw, svr = gur_in32(&gur->svr);

        puts("SoC: ");

        cpu_name(buf);
        printf(" %s (0x%x)\n", buf, svr);
        memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
        get_sys_info(&sysinfo);
        puts("Clock Configuration:");
        for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
                if (!(i % 3))
                        puts("\n ");
                type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
                printf("CPU%d(%s):%-4s MHz ", core,
                       type == TY_ITYP_VER_A7 ? "A7 " :
                       (type == TY_ITYP_VER_A53 ? "A53" :
                       (type == TY_ITYP_VER_A57 ? "A57" :
                       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
                       strmhz(buf, sysinfo.freq_processor[core]));
        }
        /* Display platform clock as Bus frequency. */
        printf("\n Bus: %-4s MHz ",
               strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
        printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
        printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
        if (soc_has_dp_ddr()) {
                printf(" DP-DDR: %-4s MT/s",
                       strmhz(buf, sysinfo.freq_ddrbus2));
        }
#endif
        puts("\n");

        /*
         * Display the RCW, so that no one gets confused as to what RCW
         * we're actually using for this boot.
         */
        puts("Reset Configuration Word (RCW):");
        for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
                rcw = gur_in32(&gur->rcwsr[i]);

                if ((i % 4) == 0)
                        printf("\n %08x:", i * 4);
                printf(" %08x", rcw);
        }
        puts("\n");

        return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
        return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
        int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
        error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
        fm_standard_init(bis);
#endif
        return error;
}

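/* Return 0 if secure firmware reports a valid PSCI version, 1 otherwise. */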
static inline int check_psci(void)
{
        unsigned int psci_ver;

        psci_ver = sec_firmware_support_psci_version();
        if (psci_ver == PSCI_INVALID_VER)
                return 1;

        return 0;
}

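/*
 * Disable core prefetch for the cores selected by the "core_prefetch"
 * hwconfig option; e.g. hwconfig=core_prefetch:disable=0x0e would
 * request prefetch disable on cores 1-3 (the value is illustrative).
 * The mask is handed to secure firmware via an SMC call; core 0 must
 * stay enabled.
 */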
static void config_core_prefetch(void)
{
        char *buf = NULL;
        char buffer[HWCONFIG_BUFFER_SIZE];
        const char *prefetch_arg = NULL;
        size_t arglen;
        unsigned int mask;
        struct pt_regs regs;

        if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
                buf = buffer;
        else
                return;

        prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
                                         &arglen, buf);

        if (prefetch_arg) {
                mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
                if (mask & 0x1) {
                        printf("Core0 prefetch can't be disabled\n");
                        return;
                }

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
                regs.regs[0] = SIP_PREFETCH_DISABLE_64;
                regs.regs[1] = mask;

                smc_call(&regs);
                if (regs.regs[0])
                        printf("Prefetch disable config failed for mask ");
                else
                        printf("Prefetch disable config passed for mask ");
                printf("0x%x\n", mask);
        }
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
        u32 svr_dev_id;
        /*
         * erratum A009635 is valid only for the LS2080A SoC and
         * its personalities
         */
        svr_dev_id = get_svr();
        if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
                erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
        erratum_a009942_check_cpo();
#endif
        if (check_psci()) {
                debug("PSCI: PSCI does not exist.\n");

                /* if PSCI does not exist, boot secondary cores here */
                if (fsl_layerscape_wake_seconday_cores())
                        printf("Did not wake secondary cores\n");
        }

        config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
        fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
        /*
         * Some dpmacs in armv8a-based Freescale Layerscape SoCs can be
         * configured both via serdes bits (sgmii, xfi, xlaui etc) and via
         * EC*_PMUX (rgmii) bits in the RCW.
         * e.g. dpmacs 17 and 18 in LX2160A can be configured as SGMII from
         * serdes bits and as RGMII via the EC1_PMUX/EC2_PMUX bits.
         * If a dpmac is enabled by serdes bits, that takes precedence over
         * the EC*_PMUX bits, i.e. in LX2160A if we select a serdes protocol
         * that configures dpmac17 as SGMII and also set EC1_PMUX to RGMII,
         * the dpmac is SGMII, not RGMII.
         *
         * Therefore, fsl_rgmii_init is called after fsl_serdes_init. The
         * SoC's fsl_rgmii_init checks whether a dpmac has already been
         * enabled by fsl_serdes_init and, if so, does not enable it again.
         */
        fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
        fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
        setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
        set_ecam_icids();
#endif
        return 0;
}

int timer_init(void)
{
        u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
        u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
        defined(CONFIG_ARCH_LS1028A)
        u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
        u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
        unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

        /* Update with accurate clock frequency */
        if (current_el() == 3)
                asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
        /*
         * Enable timebase for all clusters.
         * It is safe to do so even if some clusters are not enabled.
         */
        out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
        defined(CONFIG_ARCH_LS1028A)
        /*
         * In certain Layerscape SoCs, the clock for each core has an
         * enable bit in the PMU Physical Core Time Base Enable
         * Register (PCTBENR), which allows the watchdog to operate.
         */
        setbits_le32(pctbenr, 0xff);

        /*
         * For the LS2080A SoC and its personalities, the timer controller
         * offset is different.
         */
        svr_dev_id = get_svr();
        if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
                cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

        /*
         * Enable clock for the timer.
         * This is a global setting.
         */
        out_le32(cntcr, 0x1);

        return 0;
}

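/*
 * Reset control register; when CONFIG_EFI_LOADER is enabled it is
 * registered below as runtime MMIO so the EFI reset service can keep
 * using it after ExitBootServices.
 */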
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
        u32 val;

#ifdef CONFIG_ARCH_LX2160A
        val = in_le32(rstcr);
        val |= 0x01;
        out_le32(rstcr, val);
#else
        /* Raise RESET_REQ_B */
        val = scfg_in32(rstcr);
        val |= 0x02;
        scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
                       enum efi_reset_type reset_type,
                       efi_status_t reset_status,
                       unsigned long data_size, void *reset_data)
{
        switch (reset_type) {
        case EFI_RESET_COLD:
        case EFI_RESET_WARM:
        case EFI_RESET_PLATFORM_SPECIFIC:
                reset_cpu(0);
                break;
        case EFI_RESET_SHUTDOWN:
                /* Nothing we can do */
                break;
        }

        while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
        return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory for the given memory bank.
 * Return the aligned memory size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
        phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
        ram_top = mc_get_dram_block_size();
        if (ram_top > ram_size)
                return ram_size + ram_top;

        ram_top = ram_size - ram_top;
        /* The start address of MC reserved memory needs to be aligned. */
        ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

        return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
        phys_size_t ea_size, rem = 0;

        /*
         * For ARMv8 SoCs, DDR memory is split into two or three regions. The
         * first region is the 2GB space at 0x8000_0000. Secure memory needs
         * to be allocated from the first region. If the memory extends to
         * the second region (or the third region if applicable), Management
         * Complex (MC) memory should be put into the highest region, i.e.
         * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
         * the first region so U-Boot doesn't relocate itself into a higher
         * address. Should DDR be configured to skip the first region, this
         * function needs to be adjusted.
         */
        if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
                ea_size = CONFIG_MAX_MEM_MAPPED;
                rem = gd->ram_size - ea_size;
        } else {
                ea_size = gd->ram_size;
        }

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        /* Check if we have enough space for secure memory */
        if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
                ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
        else
                printf("Error: Not enough space for secure memory.\n");
#endif
        /* Check if we have enough memory for MC */
        if (rem < board_reserve_ram_top(rem)) {
                /* Not enough memory in high region to reserve */
                if (ea_size > board_reserve_ram_top(ea_size))
                        ea_size -= board_reserve_ram_top(ea_size);
                else
                        printf("Error: Not enough space for reserved memory.\n");
        }

        return ea_size;
}

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
        struct pt_regs regs;
        phys_size_t dram_size = 0;

        regs.regs[0] = SMC_DRAM_BANK_INFO;
        regs.regs[1] = -1;

        smc_call(&regs);
        if (regs.regs[0])
                return 0;

        dram_size = regs.regs[1];
        return dram_size;
}

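/*
 * Query the DRAM bank layout from TF-A: each SMC_DRAM_BANK_INFO call
 * with a bank index returns that bank's start and size, until the total
 * reported by tfa_get_dram_size() has been accounted for.
 */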
static int tfa_dram_init_banksize(void)
{
        int i = 0, ret = 0;
        struct pt_regs regs;
        phys_size_t dram_size = tfa_get_dram_size();

        debug("dram_size %llx\n", dram_size);

        if (!dram_size)
                return -EINVAL;

        do {
                regs.regs[0] = SMC_DRAM_BANK_INFO;
                regs.regs[1] = i;

                smc_call(&regs);
                if (regs.regs[0]) {
                        ret = -EINVAL;
                        break;
                }

                debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
                      regs.regs[2]);
                gd->bd->bi_dram[i].start = regs.regs[1];
                gd->bd->bi_dram[i].size = regs.regs[2];

                dram_size -= gd->bd->bi_dram[i].size;

                i++;
        } while (dram_size);

        if (i > 0)
                ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
        /* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
        if (gd->bd->bi_dram[2].size >=
            board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
                gd->arch.resv_ram = gd->bd->bi_dram[2].start +
                            gd->bd->bi_dram[2].size -
                            board_reserve_ram_top(gd->bd->bi_dram[2].size);
        } else
#endif
        {
                if (gd->bd->bi_dram[1].size >=
                    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
                        gd->arch.resv_ram = gd->bd->bi_dram[1].start +
                            gd->bd->bi_dram[1].size -
                            board_reserve_ram_top(gd->bd->bi_dram[1].size);
                } else if (gd->bd->bi_dram[0].size >
                           board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
                        gd->arch.resv_ram = gd->bd->bi_dram[0].start +
                            gd->bd->bi_dram[0].size -
                            board_reserve_ram_top(gd->bd->bi_dram[0].size);
                }
        }
#endif  /* CONFIG_RESV_RAM */

        return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
        phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
        if (!tfa_dram_init_banksize())
                return 0;
#endif
        /*
         * gd->ram_size has the total size of DDR memory, less reserved secure
         * memory. The DDR extends from the low region to the high region(s)
         * presuming no hole is created with the DDR configuration.
         * gd->arch.secure_ram tracks the location of secure memory.
         * gd->arch.resv_ram tracks the location of reserved memory for
         * Management Complex (MC). Because gd->ram_size is reduced by this
         * function if secure memory is reserved, checking gd->arch.secure_ram
         * should be done to avoid running it repeatedly.
         */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
                debug("No need to run again, skip %s\n", __func__);

                return 0;
        }
#endif

        gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
        if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
                gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
                gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
                gd->bd->bi_dram[1].size = gd->ram_size -
                                          CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
                if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
                        gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
                        gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
                                                  CONFIG_SYS_DDR_BLOCK2_SIZE;
                        gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
                }
#endif
        } else {
                gd->bd->bi_dram[0].size = gd->ram_size;
        }

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        if (gd->bd->bi_dram[0].size >
            CONFIG_SYS_MEM_RESERVE_SECURE) {
                gd->bd->bi_dram[0].size -=
                                CONFIG_SYS_MEM_RESERVE_SECURE;
                gd->arch.secure_ram = gd->bd->bi_dram[0].start +
                                      gd->bd->bi_dram[0].size;
                gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
                gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
        }
#endif  /* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
        /* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
        if (gd->bd->bi_dram[2].size >=
            board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
                gd->arch.resv_ram = gd->bd->bi_dram[2].start +
                            gd->bd->bi_dram[2].size -
                            board_reserve_ram_top(gd->bd->bi_dram[2].size);
        } else
#endif
        {
                if (gd->bd->bi_dram[1].size >=
                    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
                        gd->arch.resv_ram = gd->bd->bi_dram[1].start +
                            gd->bd->bi_dram[1].size -
                            board_reserve_ram_top(gd->bd->bi_dram[1].size);
                } else if (gd->bd->bi_dram[0].size >
                           board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
                        gd->arch.resv_ram = gd->bd->bi_dram[0].start +
                            gd->bd->bi_dram[0].size -
                            board_reserve_ram_top(gd->bd->bi_dram[0].size);
                }
        }
#endif  /* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
        if (soc_has_dp_ddr()) {
                /* initialize DP-DDR here */
                puts("DP-DDR: ");
                /*
                 * The DDR controller uses 0 as the base address for binding.
                 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
                 * access.
                 */
                dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
                                          CONFIG_DP_DDR_CTRL,
                                          CONFIG_DP_DDR_NUM_CTRLS,
                                          CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
                                          NULL, NULL, NULL);
                if (dp_ddr_size) {
                        gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
                        gd->bd->bi_dram[2].size = dp_ddr_size;
                } else {
                        puts("Not detected");
                }
        }
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        debug("%s is called. gd->ram_size is reduced to %lu\n",
              __func__, (ulong)gd->ram_size);
#endif

        return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
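/*
 * Report the DRAM banks to the EFI memory map, skipping the DP-DDR bank
 * and trimming the region reserved for the Management Complex (MC).
 */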
void efi_add_known_memory(void)
{
        int i;
        phys_addr_t ram_start;
        phys_size_t ram_size;

        /* Add RAM */
        for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
                if (i == 2)
                        continue;       /* skip DP-DDR */
#endif
                ram_start = gd->bd->bi_dram[i].start;
                ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
                if (gd->arch.resv_ram >= ram_start &&
                    gd->arch.resv_ram < ram_start + ram_size)
                        ram_size = gd->arch.resv_ram - ram_start;
#endif
                efi_add_memory_map(ram_start, ram_size,
                                   EFI_CONVENTIONAL_MEMORY);
        }
}
#endif

/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
        if (!gd->arch.tlb_addr)
                return;

        if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
                mmu_change_region_attr(
                                CONFIG_SYS_SDRAM_BASE,
                                gd->ram_size,
                                PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                                PTE_BLOCK_OUTER_SHARE |
                                PTE_BLOCK_NS |
                                PTE_TYPE_VALID);
        } else {
                mmu_change_region_attr(
                                CONFIG_SYS_SDRAM_BASE,
                                CONFIG_SYS_DDR_BLOCK1_SIZE,
                                PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                                PTE_BLOCK_OUTER_SHARE |
                                PTE_BLOCK_NS |
                                PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
                if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
                    CONFIG_SYS_DDR_BLOCK2_SIZE) {
                        mmu_change_region_attr(
                                        CONFIG_SYS_DDR_BLOCK2_BASE,
                                        CONFIG_SYS_DDR_BLOCK2_SIZE,
                                        PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                                        PTE_BLOCK_OUTER_SHARE |
                                        PTE_BLOCK_NS |
                                        PTE_TYPE_VALID);
                        mmu_change_region_attr(
                                        CONFIG_SYS_DDR_BLOCK3_BASE,
                                        gd->ram_size -
                                        CONFIG_SYS_DDR_BLOCK1_SIZE -
                                        CONFIG_SYS_DDR_BLOCK2_SIZE,
                                        PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                                        PTE_BLOCK_OUTER_SHARE |
                                        PTE_BLOCK_NS |
                                        PTE_TYPE_VALID);
                } else
#endif
                {
                        mmu_change_region_attr(
                                        CONFIG_SYS_DDR_BLOCK2_BASE,
                                        gd->ram_size -
                                        CONFIG_SYS_DDR_BLOCK1_SIZE,
                                        PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                                        PTE_BLOCK_OUTER_SHARE |
                                        PTE_BLOCK_NS |
                                        PTE_TYPE_VALID);
                }
        }
}

__weak int dram_init(void)
{
        fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
        defined(CONFIG_SPL_BUILD)
        /* This will break-before-make MMU for DDR */
        update_early_mmu_table();
#endif

        return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
        return 0;
}

int arch_misc_init(void)
{
        serdes_misc_init();

        return 0;
}
#endif