sbi_domain.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX] = { 0 };

static u32 domain_count = 0;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_FW_REGION	0
#define ROOT_ALL_REGION	1
#define ROOT_END_REGION	2
static struct sbi_domain_memregion root_memregs[ROOT_END_REGION + 1] = { 0 };

static struct sbi_domain root = {
	.name = "root",
	.possible_harts = &root_hmask,
	.regions = root_memregs,
	.system_reset_allowed = TRUE,
};
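
/** Check whether HART @hartid is currently assigned to domain @dom */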
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
	if (dom)
		return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

	return FALSE;
}
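
/**
 * Get one word of the assigned-HART bitmask of @dom, starting at HART id
 * @hbase. The result packs up to BITS_PER_LONG HARTs, with bit 0 of the
 * return value corresponding to HART @hbase.
 */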
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
				       ulong hbase)
{
	ulong ret, bword, boff;

	if (!dom)
		return 0;

	bword = BIT_WORD(hbase);
	boff = BIT_WORD_OFFSET(hbase);

	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
	}

	return ret;
}
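
/** Copy the root firmware memory region into @reg */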
void sbi_domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
	if (!reg)
		return;

	sbi_memcpy(reg, &root_memregs[ROOT_FW_REGION], sizeof(*reg));
}
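
/**
 * Check whether @addr is accessible under the memory regions of @dom for
 * the given privilege mode and access flags. As an illustration, a caller
 * validating an S-mode buffer might look like the following (hypothetical
 * caller, not part of this file):
 *
 *   if (!sbi_domain_check_addr(dom, addr, PRV_S,
 *				SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
 *	return SBI_EINVALID_ADDR;
 */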
bool sbi_domain_check_addr(const struct sbi_domain *dom,
			   unsigned long addr, unsigned long mode,
			   unsigned long access_flags)
{
	bool mmio = FALSE;
	struct sbi_domain_memregion *reg;
	unsigned long rstart, rend, rflags, rwx = 0;

	if (!dom)
		return FALSE;

	if (access_flags & SBI_DOMAIN_READ)
		rwx |= SBI_DOMAIN_MEMREGION_READABLE;
	if (access_flags & SBI_DOMAIN_WRITE)
		rwx |= SBI_DOMAIN_MEMREGION_WRITEABLE;
	if (access_flags & SBI_DOMAIN_EXECUTE)
		rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
	if (access_flags & SBI_DOMAIN_MMIO)
		mmio = TRUE;

	sbi_domain_for_each_memregion(dom, reg) {
		rflags = reg->flags;
		if (mode == PRV_M && !(rflags & SBI_DOMAIN_MEMREGION_MMODE))
			continue;

		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend) {
			if ((mmio && !(rflags & SBI_DOMAIN_MEMREGION_MMIO)) ||
			    (!mmio && (rflags & SBI_DOMAIN_MEMREGION_MMIO)))
				return FALSE;
			return ((rflags & rwx) == rwx) ? TRUE : FALSE;
		}
	}

	return (mode == PRV_M) ? TRUE : FALSE;
}

/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
	if (reg->order < 3 || __riscv_xlen < reg->order)
		return FALSE;

	if (reg->base & (BIT(reg->order) - 1))
		return FALSE;

	return TRUE;
}

/** Check if regionA is sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	ulong regA_start = regA->base;
	ulong regA_end = regA->base + (BIT(regA->order) - 1);
	ulong regB_start = regB->base;
	ulong regB_end = regB->base + (BIT(regB->order) - 1);

	if ((regB_start <= regA_start) &&
	    (regA_start < regB_end) &&
	    (regB_start < regA_end) &&
	    (regA_end <= regB_end))
		return TRUE;

	return FALSE;
}

/** Check if regionA conflicts with regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
			       const struct sbi_domain_memregion *regB)
{
	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
	    regA->flags == regB->flags)
		return TRUE;

	return FALSE;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	if (regA->order < regB->order)
		return TRUE;

	if ((regA->order == regB->order) &&
	    (regA->base < regB->base))
		return TRUE;

	return FALSE;
}
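
/**
 * Validate and canonicalize a domain before it is registered: check the
 * possible HARTs, validate and sort the memory regions (ensuring the root
 * firmware region is present), and check the next-stage mode and address.
 */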
static int sanitize_domain(const struct sbi_platform *plat,
			   struct sbi_domain *dom)
{
	u32 i, j, count;
	bool have_fw_reg;
	struct sbi_domain_memregion treg, *reg, *reg1;

	/* Check possible HARTs */
	if (!dom->possible_harts)
		return SBI_EINVAL;
	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
		if (sbi_platform_hart_invalid(plat, i))
			return SBI_EINVAL;
	}

	/* Check memory regions */
	if (!dom->regions)
		return SBI_EINVAL;
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_region_valid(reg))
			return SBI_EINVAL;
	}

	/* Count memory regions and check presence of firmware region */
	count = 0;
	have_fw_reg = FALSE;
	sbi_domain_for_each_memregion(dom, reg) {
		if (reg->order == root_memregs[ROOT_FW_REGION].order &&
		    reg->base == root_memregs[ROOT_FW_REGION].base &&
		    reg->flags == root_memregs[ROOT_FW_REGION].flags)
			have_fw_reg = TRUE;
		count++;
	}
	if (!have_fw_reg)
		return SBI_EINVAL;

	/* Sort the memory regions */
	for (i = 0; i < (count - 1); i++) {
		reg = &dom->regions[i];
		for (j = i + 1; j < count; j++) {
			reg1 = &dom->regions[j];

			if (is_region_conflict(reg1, reg))
				return SBI_EINVAL;

			if (!is_region_before(reg1, reg))
				continue;

			sbi_memcpy(&treg, reg1, sizeof(treg));
			sbi_memcpy(reg1, reg, sizeof(treg));
			sbi_memcpy(reg, &treg, sizeof(treg));
		}
	}

	/*
	 * We don't need to check boot HART id of domain because if boot
	 * HART id is not possible/assigned to this domain then it won't
	 * be started at boot-time by sbi_domain_finalize().
	 */

	/*
	 * Check next mode
	 *
	 * We only allow next mode to be S-mode or U-mode so that we can
	 * protect M-mode context and enforce checks on memory accesses.
	 */
	if (dom->next_mode != PRV_S &&
	    dom->next_mode != PRV_U)
		return SBI_EINVAL;

	/* Check next address and next mode */
	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
				   SBI_DOMAIN_EXECUTE))
		return SBI_EINVAL;

	return 0;
}
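
/**
 * Print the details of one domain: name, boot HART, possible HARTs
 * (assigned ones marked with "*"), memory regions with their flags, and
 * next-stage boot parameters.
 */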
void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
	u32 i, k;
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_printf("Domain%d Name %s: %s\n",
		   dom->index, suffix, dom->name);

	sbi_printf("Domain%d Boot HART %s: %d\n",
		   dom->index, suffix, dom->boot_hartid);

	k = 0;
	sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
	sbi_hartmask_for_each_hart(i, dom->possible_harts)
		sbi_printf("%s%d%s", (k++) ? "," : "",
			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
	sbi_printf("\n");

	i = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;

#if __riscv_xlen == 32
		sbi_printf("Domain%d Region%02d %s: 0x%08lx-0x%08lx ",
#else
		sbi_printf("Domain%d Region%02d %s: 0x%016lx-0x%016lx ",
#endif
			   dom->index, i, suffix, rstart, rend);

		k = 0;
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
			sbi_printf("%cM", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
			sbi_printf("%cI", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s\n", (k++) ? ")" : "()");

		i++;
	}

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Address%s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Address%s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_addr);

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Arg1 %s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Arg1 %s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_arg1);

	sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
	switch (dom->next_mode) {
	case PRV_M:
		sbi_printf("M-mode\n");
		break;
	case PRV_S:
		sbi_printf("S-mode\n");
		break;
	case PRV_U:
		sbi_printf("U-mode\n");
		break;
	default:
		sbi_printf("Unknown\n");
		break;
	}

	sbi_printf("Domain%d SysReset %s: %s\n",
		   dom->index, suffix,
		   (dom->system_reset_allowed) ? "yes" : "no");
}
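
/** Print the details of every registered domain via sbi_domain_dump() */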
void sbi_domain_dump_all(const char *suffix)
{
	u32 i;
	const struct sbi_domain *dom;

	sbi_domain_for_each(i, dom) {
		sbi_domain_dump(dom, suffix);
		sbi_printf("\n");
	}
}
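
/**
 * Finalize all domains in two phases: first discover the platform's
 * per-HART domain assignments and register newly seen domains, then start
 * the boot HART of each domain, either by updating the cold-boot scratch
 * or through sbi_hsm_hart_start().
 */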
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, j, dhart;
	bool dom_exists;
	struct sbi_domain *dom, *tdom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Discover domains */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		/* Ignore invalid HART */
		if (sbi_platform_hart_invalid(plat, i))
			continue;

		/* Get domain assigned to HART */
		dom = sbi_platform_domain_get(plat, i);
		if (!dom)
			continue;

		/* Check if domain already discovered */
		dom_exists = FALSE;
		sbi_domain_for_each(j, tdom) {
			if (tdom == dom) {
				dom_exists = TRUE;
				break;
			}
		}

		/* Newly discovered domain */
		if (!dom_exists) {
			/*
			 * Ensure that we have room for Domain Index to
			 * HART ID mapping
			 */
			if (SBI_DOMAIN_MAX_INDEX <= domain_count)
				return SBI_ENOSPC;

			/* Sanitize discovered domain */
			rc = sanitize_domain(plat, dom);
			if (rc)
				return rc;

			/* Assign index to domain */
			dom->index = domain_count++;
			domidx_to_domain_table[dom->index] = dom;

			/* Clear assigned HARTs of domain */
			sbi_hartmask_clear_all(&dom->assigned_harts);
		}

		/* Assign domain to HART if HART is a possible HART */
		if (sbi_hartmask_test_hart(i, dom->possible_harts)) {
			tdom = hartid_to_domain_table[i];
			if (tdom)
				sbi_hartmask_clear_hart(i,
						&tdom->assigned_harts);
			hartid_to_domain_table[i] = dom;
			sbi_hartmask_set_hart(i, &dom->assigned_harts);
		}
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART */
		dhart = dom->boot_hartid;

		/* Ignore if boot HART not possible for this domain */
		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART assigned to a different domain */
		if (sbi_hartid_to_domain(dhart) != dom ||
		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
			continue;

		/* Startup boot HART of domain */
		if (dhart == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc)
				return rc;
		}
	}

	return 0;
}
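
/**
 * Set up the root domain: carve out the firmware memory region, add an
 * allow-everything memory region, and assign every valid HART to the root
 * domain so that the platform can reassign HARTs to its own domains later
 * in sbi_domain_finalize().
 */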
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Root domain firmware memory region */
	root_memregs[ROOT_FW_REGION].order = log2roundup(scratch->fw_size);
	root_memregs[ROOT_FW_REGION].base = scratch->fw_start &
		~((1UL << root_memregs[ROOT_FW_REGION].order) - 1UL);
	root_memregs[ROOT_FW_REGION].flags = 0;

	/* Root domain allow everything memory region */
	root_memregs[ROOT_ALL_REGION].order = __riscv_xlen;
	root_memregs[ROOT_ALL_REGION].base = 0;
	root_memregs[ROOT_ALL_REGION].flags = (SBI_DOMAIN_MEMREGION_READABLE |
					       SBI_DOMAIN_MEMREGION_WRITEABLE |
					       SBI_DOMAIN_MEMREGION_EXECUTABLE);

	/* Root domain memory region end */
	root_memregs[ROOT_END_REGION].order = 0;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Select root domain for all valid HARTs */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		if (sbi_platform_hart_invalid(plat, i))
			continue;
		sbi_hartmask_set_hart(i, &root_hmask);
		hartid_to_domain_table[i] = &root;
		sbi_hartmask_set_hart(i, &root.assigned_harts);
	}

	/* Set root domain index */
	root.index = domain_count++;
	domidx_to_domain_table[root.index] = &root;

	return 0;
}