cpu.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <thermal.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F

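/*
 * Return the pass-over info block at PASS_OVER_INFO_ADDR, or NULL when the
 * barker tag or length check fails.
 */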
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

int arch_cpu_init(void)
{
#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * When ap_mu is 0, U-Boot was booted from the
			 * first container.
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
	if (ret) {
		printf("could not get scu %d\n", ret);
		return ret;
	}

	if (is_imx8qm()) {
		ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
						    SC_PM_PW_MODE_ON);
		if (ret)
			return ret;
	}

	return 0;
}

int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot: ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

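/*
 * Translate the boot resource reported by sc_misc_get_boot_dev() into the
 * corresponding boot_device value; default to SD1_BOOT for unknown resources.
 */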
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;
	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

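/*
 * Select the MMC device holding the environment from the SDHC controller the
 * SoC booted from; boards may remap it via board_mmc_get_env_dev().
 */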
int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* Not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT	SZ_2M /* Align the memory start address to 2 MB */

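/*
 * Query memory region 'mr' through the sc_rm API; if it is owned by this
 * partition, return its start and end addresses, otherwise return -EINVAL.
 */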
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

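/*
 * Return the effective size of the first DRAM bank: find the owned memory
 * region that contains the running U-Boot image (CONFIG_SYS_TEXT_BASE) and
 * report the span from PHYS_SDRAM_1 to the end of that region, capped at
 * PHYS_SDRAM_1_SIZE.
 */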
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			     end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

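/*
 * Sum up gd->ram_size from all owned memory regions that fall inside
 * PHYS_SDRAM_1/PHYS_SDRAM_2; fall back to the full bank sizes if none are
 * found.
 */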
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If no owned region was found, fall back to the default values */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

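/*
 * Swap the newly added bank backwards through gd->bd->bi_dram so that the
 * bank entries stay ordered by ascending start address.
 */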
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}

		current_bank--;
	}
}

int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Region too small, skip it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If no owned region was found, fall back to the default values */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

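/*
 * Return MMU block attributes for an address: normal, outer-shareable memory
 * when it falls inside one of the DRAM banks, non-shareable device (nGnRnE)
 * memory with execute-never otherwise.
 */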
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

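/*
 * Clamp the size of a memory region so that a single map entry never crosses
 * the end of the DRAM bank its start address belongs to.
 */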
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES		512
#define MAX_MEM_MAP_REGIONS	16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

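/*
 * Build the MMU memory map at runtime: a fixed device window plus the memory
 * regions owned by this partition, then enable the I-cache and (when the map
 * fits) the D-cache.
 */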
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#if defined(CONFIG_IMX8QM)
#define FUSE_MAC0_WORD0 452
#define FUSE_MAC0_WORD1 453
#define FUSE_MAC1_WORD0 454
#define FUSE_MAC1_WORD1 455
#elif defined(CONFIG_IMX8QXP)
#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711
#endif

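/*
 * Read the two OTP fuse words holding MAC address 'dev_id' (0 or 1) through
 * sc_misc_otp_fuse_read() and unpack them into the six-byte 'mac' buffer.
 */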
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

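/*
 * Compose the CPU revision word from the chip ID reported by the SCU:
 * the SoC ID (offset by MXC_SOC_IMX8) shifted into the upper bits, with the
 * silicon revision in the low bits.
 */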
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */

	return (id << 12) | rev;
}