// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2019 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <fsl_ddr_sdram.h>
#include <init.h>
#include <hang.h>
#include <log.h>
#include <net.h>
#include <vsprintf.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>
#ifdef CONFIG_TFABOOT
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
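
/*
 * Early MMU map, used while U-Boot still runs from OCRAM/flash. Unless DDR
 * was already initialized by an earlier stage (TFABOOT, or U-Boot proper
 * after SPL), the DRAM entries start out as Device-nGnRnE with PXN/UXN so
 * that no speculative accesses hit uninitialized DRAM; they are relaxed to
 * normal memory later (see update_early_mmu_table()).
 */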
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
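
/*
 * Final MMU map, used once U-Boot runs from DRAM. The DRAM entries below
 * are placeholders: final_mmu_setup() patches them with the actual bank
 * addresses and sizes from gd->bd->bi_dram[].
 */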
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};
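
/*
 * mem_map is consumed by the generic ARMv8 page-table code (setup_pgtables()
 * in cache_v8.c). It starts out pointing at early_map and is switched over
 * to final_map in final_mmu_setup().
 */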
struct mm_region *mem_map = early_map;
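
/*
 * Build a human-readable CPU string from the SVR, e.g. "LS2088AE Rev1.0"
 * (the exact output depends on part and revision): the base name from
 * cpu_type_list, an optional "C"/"E" personality suffix, and the
 * major.minor revision. Falls back to "unknown" for an unrecognized SVR.
 */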
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three levels
 * of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging these
 * early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for the DDR mapping and update its address
		 * and size. Zero-sized mappings are skipped when the MMU
		 * table is created.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself, to speed
	 * up system boot. It is not necessary to run if performance is not
	 * critical. Skip it if the MMU is already enabled by SPL or other
	 * means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#ifdef CONFIG_TFABOOT
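/*
 * Decode the RCW boot-source field of PORSR1 into a boot_src value. The
 * encoding differs per chassis generation (LSCH3.2 vs. LSCH3 vs. LSCH2),
 * hence the conditionally compiled decoders below.
 */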
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
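
/*
 * Read PORSR1 and return the decoded boot source. At EL2 (running below
 * TF-A), PORSR1 is obtained through the SIP_SVC_RCW SMC; at EL3, or if the
 * SMC fails, it is read directly from the DCFG register block.
 */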
enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
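/*
 * Select the MMC device holding the environment from the boot source:
 * BOOT_SOURCE_SD_MMC selects device 0, BOOT_SOURCE_SD_MMC2 selects
 * device 1, and anything else falls back to CONFIG_SYS_MMC_ENV_DEV.
 */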
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif
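
/*
 * Map the boot source to the environment's storage location, so that the
 * environment is loaded from the medium we booted from. Only priority 0 is
 * handled; higher priorities return ENVL_UNKNOWN.
 */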
enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
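
/*
 * Each topology cluster word packs one byte per initiator: the byte at
 * bits [init_id * 8 + 7 : init_id * 8], masked with TP_CLUSTER_INIT_MASK,
 * indexes the initiator-type table gur->tp_ityp[]. Return that type word
 * if its "available" flag (TP_ITYP_AV) is set, otherwise 0.
 */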
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
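
/*
 * Return a bitmask of ARM cores by physical position: bit
 * (cluster * TP_INIT_PER_CLUSTER + slot) is set for every ARM core found,
 * so the mask may contain gaps for disabled or non-ARM initiators.
 * Compare cpu_mask(), which numbers cores sequentially.
 */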
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
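
/*
 * Return a bitmask of ARM cores numbered sequentially across all clusters
 * (bit n represents the n-th initiator found), walking the cluster list
 * until the end-of-cluster flag (TP_CLUSTER_EOC) is seen.
 */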
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
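
/*
 * Translate a sequential core number into its cluster index by walking the
 * topology registers; returns -1 if the core cannot be found.
 */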
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
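
/*
 * Return 1 if the secure firmware does not report a valid PSCI version,
 * i.e. PSCI is unavailable and U-Boot must wake secondary cores itself;
 * return 0 otherwise.
 */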
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
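
/*
 * Optionally disable the core prefetcher on selected cores via a SIP SMC
 * to the secure firmware. The core mask comes from the "hwconfig"
 * environment variable; e.g. hwconfig=core_prefetch:disable=0x3e would
 * request disabling prefetch on cores 1-5. Bit 0 (core 0) must stay
 * enabled and is rejected below.
 */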
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some DPMACs in ARMv8-based Freescale Layerscape SoCs can be
	 * configured both via SerDes bits (SGMII, XFI, XLAUI, etc.) and via
	 * the EC*_PMUX (RGMII) bits in the RCW. For example, DPMACs 17 and
	 * 18 in the LX2160A can be configured as SGMII from SerDes bits and
	 * as RGMII via the EC1_PMUX/EC2_PMUX bits. If a DPMAC is enabled by
	 * the SerDes bits, that takes precedence over the EC*_PMUX bits;
	 * i.e. if we select a SerDes protocol that configures DPMAC17 as
	 * SGMII and also set EC1_PMUX to RGMII, the DPMAC is SGMII, not
	 * RGMII.
	 *
	 * Therefore fsl_rgmii_init() must run after fsl_serdes_init(): the
	 * SoC's fsl_rgmii_init() checks whether a DPMAC has already been
	 * enabled by fsl_serdes_init() and, if so, does not enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the memory to reserve at the top of the given memory bank.
 * Return the aligned reservation size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is a 2GB space at 0x8000_0000. Secure memory needs to
	 * be allocated from the first region. If the memory extends to the
	 * second region (or the third region if applicable), the Management
	 * Complex (MC) memory should be put into the highest region, i.e.
	 * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
	 * the first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
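/*
 * Query TF-A for the DRAM size via the SMC_DRAM_BANK_INFO service; a bank
 * index of -1 appears to request the total size. Returns 0 if the SMC
 * fails.
 */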
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, check gd->arch.secure_ram
	 * to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
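/*
 * Register the DRAM banks with the EFI memory map as conventional memory.
 * Memory reserved for the MC at the top of a bank (gd->arch.resv_ram) is
 * excluded, and the DP-DDR bank (index 2, where applicable) is skipped.
 */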
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * these mappings need to be changed to "normal memory".
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. That is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
				       CONFIG_SYS_SDRAM_BASE,
				       gd->ram_size,
				       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				       PTE_BLOCK_OUTER_SHARE |
				       PTE_BLOCK_NS |
				       PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
				       CONFIG_SYS_SDRAM_BASE,
				       CONFIG_SYS_DDR_BLOCK1_SIZE,
				       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				       PTE_BLOCK_OUTER_SHARE |
				       PTE_BLOCK_NS |
				       PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					       CONFIG_SYS_DDR_BLOCK2_BASE,
					       CONFIG_SYS_DDR_BLOCK2_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
			mmu_change_region_attr(
					       CONFIG_SYS_DDR_BLOCK3_BASE,
					       gd->ram_size -
					       CONFIG_SYS_DDR_BLOCK1_SIZE -
					       CONFIG_SYS_DDR_BLOCK2_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					       CONFIG_SYS_DDR_BLOCK2_BASE,
					       gd->ram_size -
					       CONFIG_SYS_DDR_BLOCK1_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	serdes_misc_init();

	return 0;
}
#endif