sbi_domain.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX] = { 0 };

static u32 domain_count = 0;
static bool domain_finalized = false;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_REGION_MAX	16
static u32 root_memregs_count = 0;
static struct sbi_domain_memregion root_fw_region;
static struct sbi_domain_memregion root_memregs[ROOT_REGION_MAX + 1] = { 0 };

struct sbi_domain root = {
	.name = "root",
	.possible_harts = &root_hmask,
	.regions = root_memregs,
	.system_reset_allowed = TRUE,
};

bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
	if (dom)
		return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

	return FALSE;
}

ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
				       ulong hbase)
{
	ulong ret, bword, boff;

	if (!dom)
		return 0;

	bword = BIT_WORD(hbase);
	boff = BIT_WORD_OFFSET(hbase);

	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
	}

	return ret;
}
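
/*
 * Illustrative example (hypothetical hart assignment): if harts 1, 2 and
 * 5 are assigned to "dom", then sbi_domain_get_assigned_hartmask(dom, 0)
 * returns 0x26 (bits 1, 2 and 5 set), i.e. bit i of the result stands
 * for hartid (hbase + i). Callers can thus scan the assigned harts one
 * BITS_PER_LONG-sized window at a time.
 */
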
static void domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
	if (!reg)
		return;

	sbi_memcpy(reg, &root_fw_region, sizeof(*reg));
}

void sbi_domain_memregion_init(unsigned long addr,
			       unsigned long size,
			       unsigned long flags,
			       struct sbi_domain_memregion *reg)
{
	unsigned long base = 0, order;

	for (order = log2roundup(size); order <= __riscv_xlen; order++) {
		if (order < __riscv_xlen) {
			base = addr & ~((1UL << order) - 1UL);
			if ((base <= addr) &&
			    (addr < (base + (1UL << order))) &&
			    (base <= (addr + size - 1UL)) &&
			    ((addr + size - 1UL) < (base + (1UL << order))))
				break;
		} else {
			base = 0;
			break;
		}
	}

	if (reg) {
		reg->base = base;
		reg->order = order;
		reg->flags = flags;
	}
}
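
/*
 * Worked example (hypothetical values): for addr=0x80001000, size=0x2000
 * the loop starts at order=13 (log2roundup(0x2000)); the naturally
 * aligned candidate 0x80000000-0x80001fff contains addr but not the last
 * byte 0x80002fff, so the order is bumped until everything fits, giving
 * base=0x80000000, order=14 (0x80000000-0x80003fff). Regions are kept as
 * naturally aligned power-of-2 ranges so that they can later be
 * programmed as PMP NAPOT entries.
 */
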
bool sbi_domain_check_addr(const struct sbi_domain *dom,
			   unsigned long addr, unsigned long mode,
			   unsigned long access_flags)
{
	bool mmio = FALSE;
	struct sbi_domain_memregion *reg;
	unsigned long rstart, rend, rflags, rwx = 0;

	if (!dom)
		return FALSE;

	if (access_flags & SBI_DOMAIN_READ)
		rwx |= SBI_DOMAIN_MEMREGION_READABLE;
	if (access_flags & SBI_DOMAIN_WRITE)
		rwx |= SBI_DOMAIN_MEMREGION_WRITEABLE;
	if (access_flags & SBI_DOMAIN_EXECUTE)
		rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
	if (access_flags & SBI_DOMAIN_MMIO)
		mmio = TRUE;

	sbi_domain_for_each_memregion(dom, reg) {
		rflags = reg->flags;
		if (mode == PRV_M && !(rflags & SBI_DOMAIN_MEMREGION_MMODE))
			continue;

		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend) {
			if ((mmio && !(rflags & SBI_DOMAIN_MEMREGION_MMIO)) ||
			    (!mmio && (rflags & SBI_DOMAIN_MEMREGION_MMIO)))
				return FALSE;
			return ((rflags & rwx) == rwx) ? TRUE : FALSE;
		}
	}

	return (mode == PRV_M) ? TRUE : FALSE;
}
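
/*
 * Usage sketch (hypothetical call site): checking whether a domain's
 * S-mode software may fetch instructions from its entry point boils
 * down to
 *
 *	sbi_domain_check_addr(dom, dom->next_addr, PRV_S,
 *			      SBI_DOMAIN_EXECUTE);
 *
 * which succeeds only if a region of "dom" covers the address with the
 * EXECUTABLE flag and the expected (non-)MMIO type. M-mode accesses that
 * match no region are allowed by the final return above, while regions
 * without the MMODE flag are skipped for M-mode entirely.
 */
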
/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
	if (reg->order < 3 || __riscv_xlen < reg->order)
		return FALSE;

	if (reg->base & (BIT(reg->order) - 1))
		return FALSE;

	return TRUE;
}

/** Check if regionA is sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	ulong regA_start = regA->base;
	ulong regA_end = regA->base + (BIT(regA->order) - 1);
	ulong regB_start = regB->base;
	ulong regB_end = regB->base + (BIT(regB->order) - 1);

	if ((regB_start <= regA_start) &&
	    (regA_start < regB_end) &&
	    (regB_start < regA_end) &&
	    (regA_end <= regB_end))
		return TRUE;

	return FALSE;
}

/** Check if regionA conflicts with regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
			       const struct sbi_domain_memregion *regB)
{
	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
	    regA->flags == regB->flags)
		return TRUE;

	return FALSE;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	if (regA->order < regB->order)
		return TRUE;

	if ((regA->order == regB->order) &&
	    (regA->base < regB->base))
		return TRUE;

	return FALSE;
}
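
/*
 * Note on the sort order: regions are arranged smallest order first
 * because they are eventually mapped to RISC-V PMP entries, where the
 * lowest-numbered matching entry takes priority; a small carve-out must
 * therefore come before any larger region that contains it.
 */
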
static int sanitize_domain(const struct sbi_platform *plat,
			   struct sbi_domain *dom)
{
	u32 i, j, count;
	bool have_fw_reg;
	struct sbi_domain_memregion treg, *reg, *reg1;

	/* Check possible HARTs */
	if (!dom->possible_harts) {
		sbi_printf("%s: %s possible HART mask is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
		if (sbi_platform_hart_invalid(plat, i)) {
			sbi_printf("%s: %s possible HART mask has invalid "
				   "hart %d\n", __func__, dom->name, i);
			return SBI_EINVAL;
		}
	}

	/* Check memory regions */
	if (!dom->regions) {
		sbi_printf("%s: %s regions is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_region_valid(reg)) {
			sbi_printf("%s: %s has invalid region base=0x%lx "
				   "order=%lu flags=0x%lx\n", __func__,
				   dom->name, reg->base, reg->order,
				   reg->flags);
			return SBI_EINVAL;
		}
	}

	/* Count memory regions and check presence of firmware region */
	count = 0;
	have_fw_reg = FALSE;
	sbi_domain_for_each_memregion(dom, reg) {
		if (reg->order == root_fw_region.order &&
		    reg->base == root_fw_region.base &&
		    reg->flags == root_fw_region.flags)
			have_fw_reg = TRUE;
		count++;
	}
	if (!have_fw_reg) {
		sbi_printf("%s: %s does not have firmware region\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}

	/* Sort the memory regions */
	for (i = 0; i < (count - 1); i++) {
		reg = &dom->regions[i];
		for (j = i + 1; j < count; j++) {
			reg1 = &dom->regions[j];

			if (is_region_conflict(reg1, reg)) {
				sbi_printf("%s: %s conflict between regions "
					   "(base=0x%lx order=%lu flags=0x%lx) and "
					   "(base=0x%lx order=%lu flags=0x%lx)\n",
					   __func__, dom->name,
					   reg->base, reg->order, reg->flags,
					   reg1->base, reg1->order, reg1->flags);
				return SBI_EINVAL;
			}

			if (!is_region_before(reg1, reg))
				continue;

			sbi_memcpy(&treg, reg1, sizeof(treg));
			sbi_memcpy(reg1, reg, sizeof(treg));
			sbi_memcpy(reg, &treg, sizeof(treg));
		}
	}

	/*
	 * We don't need to check the boot HART id of the domain because
	 * if the boot HART id is not possible/assigned to this domain
	 * then it won't be started at boot-time by sbi_domain_finalize().
	 */

	/*
	 * Check next mode
	 *
	 * We only allow next mode to be S-mode or U-mode so that we can
	 * protect the M-mode context and enforce checks on memory accesses.
	 */
	if (dom->next_mode != PRV_S &&
	    dom->next_mode != PRV_U) {
		sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
			   __func__, dom->name, dom->next_mode);
		return SBI_EINVAL;
	}

	/* Check next address and next mode */
	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
				   SBI_DOMAIN_EXECUTE)) {
		sbi_printf("%s: %s next booting stage address 0x%lx can't "
			   "execute\n", __func__, dom->name, dom->next_addr);
		return SBI_EINVAL;
	}

	return 0;
}

void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
	u32 i, k;
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_printf("Domain%d Name %s: %s\n",
		   dom->index, suffix, dom->name);

	sbi_printf("Domain%d Boot HART %s: %d\n",
		   dom->index, suffix, dom->boot_hartid);

	k = 0;
	sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
	sbi_hartmask_for_each_hart(i, dom->possible_harts)
		sbi_printf("%s%d%s", (k++) ? "," : "",
			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
	sbi_printf("\n");

	i = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;

#if __riscv_xlen == 32
		sbi_printf("Domain%d Region%02d %s: 0x%08lx-0x%08lx ",
#else
		sbi_printf("Domain%d Region%02d %s: 0x%016lx-0x%016lx ",
#endif
			   dom->index, i, suffix, rstart, rend);

		k = 0;
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
			sbi_printf("%cM", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
			sbi_printf("%cI", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s\n", (k++) ? ")" : "()");

		i++;
	}

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Address%s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Address%s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_addr);

#if __riscv_xlen == 32
	sbi_printf("Domain%d Next Arg1 %s: 0x%08lx\n",
#else
	sbi_printf("Domain%d Next Arg1 %s: 0x%016lx\n",
#endif
		   dom->index, suffix, dom->next_arg1);

	sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
	switch (dom->next_mode) {
	case PRV_M:
		sbi_printf("M-mode\n");
		break;
	case PRV_S:
		sbi_printf("S-mode\n");
		break;
	case PRV_U:
		sbi_printf("U-mode\n");
		break;
	default:
		sbi_printf("Unknown\n");
		break;
	}

	sbi_printf("Domain%d SysReset %s: %s\n",
		   dom->index, suffix, (dom->system_reset_allowed) ? "yes" : "no");
}
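
/*
 * Sample output (hypothetical RV64 root domain, empty suffix), as
 * produced by the format strings above:
 *
 *	Domain0 Name : root
 *	Domain0 Boot HART : 0
 *	Domain0 HARTs : 0*,1*,2*,3*
 *	Domain0 Region00 : 0x0000000080000000-0x000000008001ffff ()
 *	Domain0 Region01 : 0x0000000000000000-0xffffffffffffffff (R,W,X)
 *	Domain0 Next Address: 0x0000000080200000
 *	Domain0 Next Arg1 : 0x0000000082200000
 *	Domain0 Next Mode : S-mode
 *	Domain0 SysReset : yes
 */
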
void sbi_domain_dump_all(const char *suffix)
{
	u32 i;
	const struct sbi_domain *dom;

	sbi_domain_for_each(i, dom) {
		sbi_domain_dump(dom, suffix);
		sbi_printf("\n");
	}
}

int sbi_domain_register(struct sbi_domain *dom,
			const struct sbi_hartmask *assign_mask)
{
	u32 i;
	int rc;
	struct sbi_domain *tdom;
	u32 cold_hartid = current_hartid();
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!dom || !assign_mask || domain_finalized)
		return SBI_EINVAL;

	/* Check if domain is already discovered */
	sbi_domain_for_each(i, tdom) {
		if (tdom == dom)
			return SBI_EALREADY;
	}

	/*
	 * Ensure that we have room for Domain Index to
	 * HART ID mapping
	 */
	if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
		sbi_printf("%s: No room for %s\n",
			   __func__, dom->name);
		return SBI_ENOSPC;
	}

	/* Sanitize discovered domain */
	rc = sanitize_domain(plat, dom);
	if (rc) {
		sbi_printf("%s: sanity checks failed for"
			   " %s (error %d)\n", __func__,
			   dom->name, rc);
		return rc;
	}

	/* Assign index to domain */
	dom->index = domain_count++;
	domidx_to_domain_table[dom->index] = dom;

	/* Clear assigned HARTs of domain */
	sbi_hartmask_clear_all(&dom->assigned_harts);

	/* Assign domain to HART if HART is a possible HART */
	sbi_hartmask_for_each_hart(i, assign_mask) {
		if (!sbi_hartmask_test_hart(i, dom->possible_harts))
			continue;

		tdom = hartid_to_domain_table[i];
		if (tdom)
			sbi_hartmask_clear_hart(i,
					&tdom->assigned_harts);
		hartid_to_domain_table[i] = dom;
		sbi_hartmask_set_hart(i, &dom->assigned_harts);

		/*
		 * If the cold boot HART is assigned to this domain then
		 * override the boot HART of this domain.
		 */
		if (i == cold_hartid &&
		    dom->boot_hartid != cold_hartid) {
			sbi_printf("Domain%d Boot HARTID forced to"
				   " %d\n", dom->index, cold_hartid);
			dom->boot_hartid = cold_hartid;
		}
	}

	return 0;
}
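
/*
 * Registration sketch (all names and values hypothetical): platform code
 * would typically register an extra domain from its domains_init()
 * callback, which sbi_domain_finalize() invokes before the finalized
 * flag is set, along these lines:
 *
 *	static struct sbi_hartmask my_hmask = { 0 };
 *	static struct sbi_domain_memregion my_regs[3] = { 0 };
 *	static struct sbi_domain my_dom = {
 *		.name = "my-domain",
 *		.possible_harts = &my_hmask,
 *		.regions = my_regs,
 *		.next_addr = 0x80400000,
 *		.next_mode = PRV_S,
 *	};
 *
 *	sbi_hartmask_set_hart(3, &my_hmask);
 *	my_regs[0] = root_fw_region;	// sanitize_domain() insists on it
 *	sbi_domain_memregion_init(0x80400000, 0x200000,
 *				  SBI_DOMAIN_MEMREGION_READABLE |
 *				  SBI_DOMAIN_MEMREGION_WRITEABLE |
 *				  SBI_DOMAIN_MEMREGION_EXECUTABLE,
 *				  &my_regs[1]);
 *	my_regs[2].order = 0;		// terminator entry
 *	return sbi_domain_register(&my_dom, &my_hmask);
 */
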
int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg)
{
	int rc;
	bool reg_merged;
	struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!reg || domain_finalized ||
	    (root.regions != root_memregs) ||
	    (ROOT_REGION_MAX <= root_memregs_count))
		return SBI_EINVAL;

	/* Check for conflicts */
	sbi_domain_for_each_memregion(&root, nreg) {
		if (is_region_conflict(reg, nreg))
			return SBI_EINVAL;
	}

	/* Append the memregion to root memregions */
	nreg = &root_memregs[root_memregs_count];
	sbi_memcpy(nreg, reg, sizeof(*reg));
	root_memregs_count++;
	root_memregs[root_memregs_count].order = 0;

	/* Sort and optimize root regions */
	do {
		/* Sanitize the root domain so that memregions are sorted */
		rc = sanitize_domain(plat, &root);
		if (rc) {
			sbi_printf("%s: sanity checks failed for"
				   " %s (error %d)\n", __func__,
				   root.name, rc);
			return rc;
		}

		/*
		 * Merge consecutive memregions with same order and flags.
		 * Only merge when the lower region is aligned to the
		 * doubled size, otherwise the merged region would not be
		 * a valid naturally aligned power-of-2 region.
		 */
		reg_merged = false;
		sbi_domain_for_each_memregion(&root, nreg) {
			nreg1 = nreg + 1;
			if (!nreg1->order)
				continue;

			if (!(nreg->base & (BIT(nreg->order + 1) - 1)) &&
			    (nreg->base + BIT(nreg->order)) == nreg1->base &&
			    nreg->order == nreg1->order &&
			    nreg->flags == nreg1->flags) {
				nreg->order++;
				while (nreg1->order) {
					nreg2 = nreg1 + 1;
					sbi_memcpy(nreg1, nreg2, sizeof(*nreg1));
					nreg1++;
				}
				reg_merged = true;
				root_memregs_count--;
			}
		}
	} while (reg_merged);

	return 0;
}
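
/*
 * Usage sketch (hypothetical values): to carve an M-mode-only range out
 * of the root domain before finalization:
 *
 *	struct sbi_domain_memregion reg;
 *
 *	sbi_domain_memregion_init(0x80100000, 0x1000,
 *				  SBI_DOMAIN_MEMREGION_MMODE, &reg);
 *	rc = sbi_domain_root_add_memregion(&reg);
 *
 * The do/while loop above then re-sorts the root regions and merges
 * aligned neighbours of equal order and flags, so the limited PMP entry
 * budget is not wasted on adjacent fragments.
 */
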
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, dhart;
	struct sbi_domain *dom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Initialize and populate domains for the platform */
	rc = sbi_platform_domains_init(plat);
	if (rc) {
		sbi_printf("%s: platform domains_init() failed (error %d)\n",
			   __func__, rc);
		return rc;
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART */
		dhart = dom->boot_hartid;

		/* Ignore if boot HART is off limits */
		if (SBI_HARTMASK_MAX_BITS <= dhart)
			continue;

		/* Ignore if boot HART is not possible for this domain */
		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART is assigned to a different domain */
		if (sbi_hartid_to_domain(dhart) != dom ||
		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
			continue;

		/* Startup boot HART of domain */
		if (dhart == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc) {
				sbi_printf("%s: failed to start boot HART %d"
					   " for %s (error %d)\n", __func__,
					   dhart, dom->name, rc);
				return rc;
			}
		}
	}

	/*
	 * Set the finalized flag so that the root domain
	 * regions can't be changed.
	 */
	domain_finalized = true;

	return 0;
}
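
/*
 * Call-order note: sbi_domain_init() below builds and registers the root
 * domain during cold boot; platform code may then add root regions via
 * sbi_domain_root_add_memregion() and register further domains from its
 * domains_init() hook, which sbi_domain_finalize() above invokes before
 * starting the boot HARTs; once domain_finalized is set, both
 * sbi_domain_register() and sbi_domain_root_add_memregion() fail with
 * SBI_EINVAL.
 */
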
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Root domain firmware memory region */
	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_size, 0,
				  &root_fw_region);
	domain_memregion_initfw(&root_memregs[root_memregs_count++]);

	/* Root domain allow everything memory region */
	sbi_domain_memregion_init(0, ~0UL,
				  (SBI_DOMAIN_MEMREGION_READABLE |
				   SBI_DOMAIN_MEMREGION_WRITEABLE |
				   SBI_DOMAIN_MEMREGION_EXECUTABLE),
				  &root_memregs[root_memregs_count++]);

	/* Root domain memory region end */
	root_memregs[root_memregs_count].order = 0;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Root domain possible and assigned HARTs */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		if (sbi_platform_hart_invalid(plat, i))
			continue;
		sbi_hartmask_set_hart(i, &root_hmask);
	}

	return sbi_domain_register(&root, &root_hmask);
}