cpu.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2020 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <hang.h>
#include <log.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;
static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
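
/*
 * Early MMU map: identity mappings used while U-Boot still runs from
 * OCRAM, before DDR is initialized. Unless TF-A or SPL has already
 * initialized DDR, the DRAM regions are mapped as device memory with
 * PXN/UXN so that speculative accesses cannot reach uninitialized DDR
 * (see the #else branches below).
 */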
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
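
/*
 * Final MMU map, used once U-Boot runs from DRAM. The DDR entries hold
 * placeholder base/size values; final_mmu_setup() rewrites them to the
 * actual bank layout recorded in gd->bd->bi_dram.
 */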
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;
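
/*
 * Build the SoC name string from the SVR: look up the base name, then
 * append the "C"/"E" personality suffixes when the SVR indicates them,
 * followed by the silicon revision.
 */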
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create the MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}
/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM is not used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for the DDR mapping and update the address
		 * and size. Zero-sized mappings are skipped when creating the
		 * MMU table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
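
/*
 * Size reserved for the final MMU tables; the 64KB budget must cover
 * both the normal and the emergency tables built by final_mmu_setup()
 * (the memory itself is set aside before relocation, in board_f.c).
 */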
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed
	 * up the running system. It is not necessary to run if performance
	 * is not critical. Skip it if the MMU is already enabled by SPL or
	 * other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif

	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
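
/*
 * Return the type of initiator init_id in the given cluster, read from
 * the RCW topology (tp_ityp) registers, or 0 if the initiator is not
 * available (TP_ITYP_AV not set).
 */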
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
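
/*
 * Bitmask of ARM cores by physical position: bit (i * TP_INIT_PER_CLUSTER
 * + j) is set for initiator j of cluster i, so positions of unavailable
 * or non-ARM initiators are left as gaps.
 */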
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
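
/*
 * Bitmask of ARM cores, numbered sequentially across all available
 * initiators (no positional gaps, unlike cpu_pos_mask()).
 */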
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the type */
}
#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(struct bd_info *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(struct bd_info *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
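
/* Return non-zero if PSCI support is not available from secure firmware. */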
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
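
/*
 * Parse the hwconfig option "core_prefetch:disable=<mask>" and ask the
 * secure firmware, via an SMC SIP call, to disable hardware prefetch on
 * the cores named in <mask>. Core 0 cannot be disabled.
 */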
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some DPMACs in ARMv8-based Freescale Layerscape SoCs can be
	 * configured both via SerDes (SGMII, XFI, XLAUI etc.) bits and via
	 * EC*_PMUX (RGMII) bits in the RCW.
	 * For example, DPMACs 17 and 18 in the LX2160A can be configured as
	 * SGMII from the SerDes bits or as RGMII via the EC1_PMUX/EC2_PMUX
	 * bits.
	 * If a DPMAC is enabled by the SerDes bits, that takes precedence
	 * over the EC*_PMUX bits: on the LX2160A, selecting a SerDes
	 * protocol that configures DPMAC17 as SGMII while setting EC1_PMUX
	 * to RGMII leaves the DPMAC as SGMII, not RGMII.
	 *
	 * Therefore fsl_rgmii_init() runs after fsl_serdes_init(); the SoC's
	 * fsl_rgmii_init() checks whether a DPMAC has already been enabled
	 * by fsl_serdes_init() and, if so, does not enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's time base
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
#ifdef CONFIG_ARCH_LX2160A
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
/*
 * Calculate the reserved memory within the given memory bank.
 * Return the aligned memory size on success; return (ram_size + needed
 * size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB of space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e. the
	 * end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of the
	 * first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
#ifdef CONFIG_TFABOOT
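/*
 * Query the total DRAM size from TF-A via the SMC_DRAM_BANK_INFO service;
 * a bank index of -1 requests the total size. Returns 0 on failure.
 */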
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, gd->arch.secure_ram should
	 * be checked to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);
		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}
#if CONFIG_IS_ENABLED(EFI_LOADER)
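/*
 * Add the DRAM banks to the EFI memory map, trimming any tail reserved
 * for the Management Complex (gd->arch.resv_ram) and skipping DP-DDR.
 */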
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif
/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				gd->ram_size,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
				CONFIG_SYS_SDRAM_BASE,
				CONFIG_SYS_DDR_BLOCK1_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				PTE_BLOCK_OUTER_SHARE |
				PTE_BLOCK_NS |
				PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	serdes_misc_init();

	return 0;
}
#endif