// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <cpu_func.h>
#include <dm.h>
#include <init.h>
#include <log.h>
#include <asm/cache.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <spl.h>
#include <thermal.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/setup.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG		0x504F

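/*
 * Return the pass-over info block at PASS_OVER_INFO_ADDR, or NULL when the
 * barker tag or the structure length does not match (i.e. no valid info was
 * left there by the earlier boot stage).
 */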
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

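/*
 * Early architecture init: in SPL, optionally restore the saved data section
 * and, on rev A silicon booted from the first container (g_ap_mu == 0),
 * report boot success to the SCU.
 */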
int arch_cpu_init(void)
{
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_RECOVER_DATA_SECTION)
	spl_save_restore_data();
#endif

#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * When ap_mu is 0, it means U-Boot was booted
			 * from the first container.
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = uclass_get_device_by_of_offset(UCLASS_MISC, node, &devp);
	if (ret) {
		printf("could not get scu %d\n", ret);
		return ret;
	}

	if (is_imx8qm()) {
		ret = sc_pm_set_resource_power_mode(-1, SC_R_SMMU,
						    SC_PM_PW_MODE_ON);
		if (ret)
			return ret;
	}

	return 0;
}

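/* Print the boot device reported by the SCU in human-readable form. */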
int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot: ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

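/*
 * Map the boot resource reported by sc_misc_get_boot_dev() to a
 * boot_device enum value.
 */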
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;
	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_SERIAL_TAG
#define FUSE_UNIQUE_ID_WORD0 16
#define FUSE_UNIQUE_ID_WORD1 17

void get_board_serial(struct tag_serialnr *serialnr)
{
	sc_err_t err;
	u32 val1 = 0, val2 = 0;
	u32 word1, word2;

	if (!serialnr)
		return;

	word1 = FUSE_UNIQUE_ID_WORD0;
	word2 = FUSE_UNIQUE_ID_WORD1;

	err = sc_misc_otp_fuse_read(-1, word1, &val1);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word1, err);
		return;
	}

	err = sc_misc_otp_fuse_read(-1, word2, &val2);
	if (err != SC_ERR_NONE) {
		printf("%s fuse %d read error: %d\n", __func__, word2, err);
		return;
	}

	serialnr->low = val1;
	serialnr->high = val2;
}
#endif /* CONFIG_SERIAL_TAG */

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* Not booted from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT	SZ_2M /* Align the memory start to 2 MB */

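/*
 * Query the SCU resource manager for memory region @mr: if this partition
 * owns it, return its start and end addresses in @addr_start/@addr_end,
 * otherwise return -EINVAL.
 */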
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

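/*
 * Return the usable size of the first DRAM bank: walk the owned memory
 * regions, find the one that contains CONFIG_SYS_TEXT_BASE (where U-Boot
 * runs) and clamp it to the PHYS_SDRAM_1 window.
 */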
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, start_aligned;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start_aligned = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, do not use it */
			if (start_aligned > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			     end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

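/*
 * Size DRAM by summing the owned memory regions that fall inside the two
 * fixed DDR windows (PHYS_SDRAM_1/2); fall back to the full window sizes
 * when no owned region is found.
 */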
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, do not use it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If nothing was found, fall back to the default values */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

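/*
 * Bubble the bank at @current_bank down so that gd->bd->bi_dram[] stays
 * sorted by ascending start address.
 */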
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

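/*
 * Fill gd->bd->bi_dram[] with one entry per owned memory region inside the
 * DDR windows, clamped to the window ends and kept sorted by start address.
 */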
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Memory region too small, skip it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If nothing was found, fall back to the default values */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

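/*
 * MMU attributes for a block starting at @addr_start: normal outer-shareable
 * memory for the DDR windows, non-shareable device memory otherwise.
 */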
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

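/*
 * Size of the block [addr_start, addr_end], clamped so it does not extend
 * past the end of the DDR window it starts in.
 */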
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES		512
#define MAX_MEM_MAP_REGIONS	16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

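/*
 * Build the MMU memory map at runtime from the owned memory regions (plus a
 * fixed device-register window), then enable the I- and D-caches.
 */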
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] virt = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size is:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#if defined(CONFIG_IMX8QM)
#define FUSE_MAC0_WORD0 452
#define FUSE_MAC0_WORD1 453
#define FUSE_MAC1_WORD0 454
#define FUSE_MAC1_WORD1 455
#elif defined(CONFIG_IMX8QXP)
#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711
#endif

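/*
 * Read the two OTP fuse words holding the MAC address for interface @dev_id
 * (0 or 1) via the SCU and assemble the 6-byte MAC in @mac.
 */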
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

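/*
 * Read the SoC ID and revision from the SCU (SC_C_ID control) and pack them
 * as (id << 12) | rev, with the ID offset by MXC_SOC_IMX8; returns 0 on error.
 */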
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */

	return (id << 12) | rev;
}

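/*
 * SPL boot order: use the detected boot device, but fall back to NOR when
 * booting from FlexSPI and this partition does not own FlexSPI0.
 */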
void board_boot_order(u32 *spl_boot_list)
{
	spl_boot_list[0] = spl_boot_device();

	if (spl_boot_list[0] == BOOT_DEVICE_SPI) {
		/* Check whether we own FlexSPI0; if not, use NOR boot */
		if (!sc_rm_is_resource_owned(-1, SC_R_FSPI_0))
			spl_boot_list[0] = BOOT_DEVICE_NOR;
	}
}

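/*
 * Return true if the partition that owns a Cortex-M4 core (M4_0, and also
 * M4_1 on i.MX8QM) has already been started by the SCU.
 */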
bool m4_parts_booted(void)
{
	sc_rm_pt_t m4_parts[2];
	int err;

	err = sc_rm_get_resource_owner(-1, SC_R_M4_0_PID0, &m4_parts[0]);
	if (err) {
		printf("%s get resource [%d] owner error: %d\n", __func__,
		       SC_R_M4_0_PID0, err);
		return false;
	}

	if (sc_pm_is_partition_started(-1, m4_parts[0]))
		return true;

	if (is_imx8qm()) {
		err = sc_rm_get_resource_owner(-1, SC_R_M4_1_PID0,
					       &m4_parts[1]);
		if (err) {
			printf("%s get resource [%d] owner error: %d\n",
			       __func__, SC_R_M4_1_PID0, err);
			return false;
		}

		if (sc_pm_is_partition_started(-1, m4_parts[1]))
			return true;
	}

	return false;
}