cpu.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 - 2015 Xilinx, Inc.
 * Michal Simek <michal.simek@xilinx.com>
 */

#include <common.h>
#include <time.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <zynqmp_firmware.h>
#include <asm/cache.h>

#define ZYNQ_SILICON_VER_MASK	0xF000
#define ZYNQ_SILICON_VER_SHIFT	12

DECLARE_GLOBAL_DATA_PTR;
/*
 * Number of filled static entries and also the first empty
 * slot in zynqmp_mem_map.
 */
#define ZYNQMP_MEM_MAP_USED	4

#if !defined(CONFIG_ZYNQMP_NO_DDR)
#define DRAM_BANKS CONFIG_NR_DRAM_BANKS
#else
#define DRAM_BANKS 0
#endif

#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
#define TCM_MAP 1
#else
#define TCM_MAP 0
#endif

/* +1 is end of list which needs to be empty */
#define ZYNQMP_MEM_MAP_MAX (ZYNQMP_MEM_MAP_USED + DRAM_BANKS + TCM_MAP + 1)
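
/*
 * Statically defined regions: device/peripheral address ranges mapped as
 * Device-nGnRnE (non-cacheable), non-shareable and non-executable. DDR
 * banks and, optionally, the TCM window are appended at runtime by
 * mem_map_fill() below.
 */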
static struct mm_region zynqmp_mem_map[ZYNQMP_MEM_MAP_MAX] = {
	{
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x70000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0xf8000000UL,
		.phys = 0xf8000000UL,
		.size = 0x07e00000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0x400000000UL,
		.phys = 0x400000000UL,
		.size = 0x400000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		.virt = 0x1000000000UL,
		.phys = 0x1000000000UL,
		.size = 0xf000000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}
};
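
/*
 * Append the runtime-discovered regions to zynqmp_mem_map, starting at the
 * first free slot (ZYNQMP_MEM_MAP_USED): the TCM window when
 * CONFIG_DEFINE_TCM_OCM_MMAP is enabled, and every non-empty DRAM bank from
 * gd->bd->bi_dram[], all mapped as normal, inner-shareable memory.
 */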
void mem_map_fill(void)
{
	int banks = ZYNQMP_MEM_MAP_USED;

#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
	zynqmp_mem_map[banks].virt = 0xffe00000UL;
	zynqmp_mem_map[banks].phys = 0xffe00000UL;
	zynqmp_mem_map[banks].size = 0x00200000UL;
	zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				      PTE_BLOCK_INNER_SHARE;
	banks = banks + 1;
#endif

#if !defined(CONFIG_ZYNQMP_NO_DDR)
	for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* A zero-sized bank marks the end of the DDR list */
		if (!gd->bd->bi_dram[i].size)
			break;

		zynqmp_mem_map[banks].virt = gd->bd->bi_dram[i].start;
		zynqmp_mem_map[banks].phys = gd->bd->bi_dram[i].start;
		zynqmp_mem_map[banks].size = gd->bd->bi_dram[i].size;
		zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					      PTE_BLOCK_INNER_SHARE;
		banks = banks + 1;
	}
#endif
}

struct mm_region *mem_map = zynqmp_mem_map;
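
/*
 * Report how much memory the ARMv8 MMU code should reserve for the page
 * tables describing the map above; a fixed 0x14000 bytes (80 KiB) is used.
 */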
u64 get_page_table_size(void)
{
	return 0x14000;
}

#if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU) || defined(CONFIG_DEFINE_TCM_OCM_MMAP)
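/*
 * Initialize the tightly coupled memory in the requested mode via
 * initialize_tcm() and clear the whole TCM range; the warning is printed
 * because any existing TCM content is lost.
 */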
void tcm_init(u8 mode)
{
	puts("WARNING: Initializing TCM overwrites TCM content\n");

	initialize_tcm(mode);
	memset((void *)ZYNQMP_TCM_BASE_ADDR, 0, ZYNQMP_TCM_SIZE);
}
#endif

#ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
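/*
 * Place the MMU page tables in TCM: lock the TCM, then point
 * gd->arch.tlb_addr/tlb_size at it.
 */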
int arm_reserve_mmu(void)
{
	tcm_init(TCM_LOCK);
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = ZYNQMP_TCM_BASE_ADDR;

	return 0;
}
#endif
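
/*
 * Read the silicon version straight from the CSU version register; used by
 * zynqmp_get_silicon_version() when U-Boot runs at EL3.
 */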
static unsigned int zynqmp_get_silicon_version_secure(void)
{
	u32 ver;

	ver = readl(&csu_base->version);
	ver &= ZYNQMP_SILICON_VER_MASK;
	ver >>= ZYNQMP_SILICON_VER_SHIFT;

	return ver;
}
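
/*
 * At EL3 the version comes from the CSU register; otherwise the platform is
 * inferred from the timebase clock, treating a 50 MHz counter as QEMU and
 * anything else as real silicon.
 */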
unsigned int zynqmp_get_silicon_version(void)
{
	if (current_el() == 3)
		return zynqmp_get_silicon_version_secure();

	gd->cpu_clk = get_tbclk();

	switch (gd->cpu_clk) {
	case 50000000:
		return ZYNQMP_CSU_VERSION_QEMU;
	}

	return ZYNQMP_CSU_VERSION_SILICON;
}
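
/*
 * Read-modify-write of a 32-bit register: only the bits set in @mask are
 * updated from @value, all other bits are preserved.
 */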
static int zynqmp_mmio_rawwrite(const u32 address,
				const u32 mask,
				const u32 value)
{
	u32 data;
	u32 value_local = value;
	int ret;

	ret = zynqmp_mmio_read(address, &data);
	if (ret)
		return ret;

	data &= ~mask;
	value_local &= mask;
	value_local |= data;
	writel(value_local, (ulong)address);

	return 0;
}

static int zynqmp_mmio_rawread(const u32 address, u32 *value)
{
	*value = readl((ulong)address);

	return 0;
}
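
/*
 * In SPL or at EL3 the register is written directly; at lower exception
 * levels the masked write is forwarded to the platform management firmware
 * via xilinx_pm_request(PM_MMIO_WRITE, ...) when CONFIG_ZYNQMP_FIRMWARE is
 * enabled.
 */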
int zynqmp_mmio_write(const u32 address,
		      const u32 mask,
		      const u32 value)
{
	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3)
		return zynqmp_mmio_rawwrite(address, mask, value);
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else
		return xilinx_pm_request(PM_MMIO_WRITE, address, mask,
					 value, 0, NULL);
#endif

	return -EINVAL;
}
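
/*
 * Same split as zynqmp_mmio_write(): direct readl() in SPL or at EL3,
 * otherwise a PM_MMIO_READ firmware call whose result is returned in
 * ret_payload[1].
 */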
int zynqmp_mmio_read(const u32 address, u32 *value)
{
	u32 ret = -EINVAL;

	if (!value)
		return ret;

	if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3) {
		ret = zynqmp_mmio_rawread(address, value);
	}
#if defined(CONFIG_ZYNQMP_FIRMWARE)
	else {
		u32 ret_payload[PAYLOAD_ARG_CNT];

		ret = xilinx_pm_request(PM_MMIO_READ, address, 0, 0,
					0, ret_payload);
		*value = ret_payload[1];
	}
#endif

	return ret;
}