sbi_domain.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

/*
 * We allocate an extra element because sbi_domain_for_each() expects
 * the array to be null-terminated.
 */
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX + 1] = { 0 };
struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };

static u32 domain_count = 0;
static bool domain_finalized = false;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_REGION_MAX 16
static u32 root_memregs_count = 0;
static struct sbi_domain_memregion root_fw_region;
static struct sbi_domain_memregion root_memregs[ROOT_REGION_MAX + 1] = { 0 };

struct sbi_domain root = {
	.name = "root",
	.possible_harts = &root_hmask,
	.regions = root_memregs,
	.system_reset_allowed = true,
};

bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
	if (dom)
		return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

	return false;
}

ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
				       ulong hbase)
{
	ulong ret, bword, boff;

	if (!dom)
		return 0;

	bword = BIT_WORD(hbase);
	boff = BIT_WORD_OFFSET(hbase);

	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
	}

	return ret;
}
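
/*
 * Illustrative note (editorial, not from upstream): the function above
 * returns a BITS_PER_LONG-wide window of the assigned-HART bitmap
 * starting at bit 'hbase'. When hbase is not word-aligned, the low bits
 * of the result come from the word containing hbase shifted down by
 * 'boff', and the missing high bits are pulled up from the next word.
 * For example, with BITS_PER_LONG=64 and hbase=60, bits 60..63 of
 * word 0 land in result positions 0..3 and bits 0..59 of word 1 land
 * in positions 4..63.
 */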

static void domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
	if (!reg)
		return;

	sbi_memcpy(reg, &root_fw_region, sizeof(*reg));
}

void sbi_domain_memregion_init(unsigned long addr,
			       unsigned long size,
			       unsigned long flags,
			       struct sbi_domain_memregion *reg)
{
	unsigned long base = 0, order;

	for (order = log2roundup(size); order <= __riscv_xlen; order++) {
		if (order < __riscv_xlen) {
			base = addr & ~((1UL << order) - 1UL);
			if ((base <= addr) &&
			    (addr < (base + (1UL << order))) &&
			    (base <= (addr + size - 1UL)) &&
			    ((addr + size - 1UL) < (base + (1UL << order))))
				break;
		} else {
			base = 0;
			break;
		}
	}

	if (reg) {
		reg->base = base;
		reg->order = order;
		reg->flags = flags;
	}
}
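
/*
 * Worked example (editorial, not from upstream): the loop above finds
 * the smallest naturally-aligned power-of-two region covering
 * [addr, addr + size), i.e. a PMP NAPOT-style region. For
 * addr=0x80001000 and size=0x2000 it starts at
 * order=log2roundup(0x2000)=13, where base=0x80000000 fails because
 * the last byte 0x80002fff is not below base + 0x2000; at order=14 the
 * range fits, so the region becomes base=0x80000000, order=14.
 */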

bool sbi_domain_check_addr(const struct sbi_domain *dom,
			   unsigned long addr, unsigned long mode,
			   unsigned long access_flags)
{
	bool rmmio, mmio = false;
	struct sbi_domain_memregion *reg;
	unsigned long rstart, rend, rflags, rwx = 0, rrwx = 0;

	if (!dom)
		return false;

	/*
	 * Use M_{R/W/X} bits because the SU-bits are at the
	 * same relative offsets. If the mode is not M, the SU
	 * bits will fall at same offsets after the shift.
	 */
	if (access_flags & SBI_DOMAIN_READ)
		rwx |= SBI_DOMAIN_MEMREGION_M_READABLE;
	if (access_flags & SBI_DOMAIN_WRITE)
		rwx |= SBI_DOMAIN_MEMREGION_M_WRITABLE;
	if (access_flags & SBI_DOMAIN_EXECUTE)
		rwx |= SBI_DOMAIN_MEMREGION_M_EXECUTABLE;
	if (access_flags & SBI_DOMAIN_MMIO)
		mmio = true;

	sbi_domain_for_each_memregion(dom, reg) {
		rflags = reg->flags;
		rrwx = (mode == PRV_M ?
			(rflags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK) :
			(rflags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK)
			>> SBI_DOMAIN_MEMREGION_SU_ACCESS_SHIFT);

		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend) {
			rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ?
				true : false;
			if (mmio != rmmio)
				return false;
			return ((rrwx & rwx) == rwx) ? true : false;
		}
	}

	return (mode == PRV_M) ? true : false;
}
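
/*
 * Usage sketch (editorial, illustrative only): a caller that wants to
 * verify an S-mode buffer is readable and writable before touching it
 * on behalf of the supervisor could do something like:
 *
 *	const struct sbi_domain *dom = sbi_domain_thishart_ptr();
 *
 *	if (!sbi_domain_check_addr(dom, addr, PRV_S,
 *				   SBI_DOMAIN_READ | SBI_DOMAIN_WRITE))
 *		return SBI_EINVALID_ADDR;
 *
 * Only the first region containing 'addr' decides the outcome; since
 * sanitize_domain() sorts regions smallest-first, that is always the
 * smallest enclosing region.
 */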

/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
	if (reg->order < 3 || __riscv_xlen < reg->order)
		return false;

	if (reg->order == __riscv_xlen && reg->base != 0)
		return false;

	if (reg->order < __riscv_xlen && (reg->base & (BIT(reg->order) - 1)))
		return false;

	return true;
}

/** Check if regionA is sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	ulong regA_start = regA->base;
	ulong regA_end = regA->base + (BIT(regA->order) - 1);
	ulong regB_start = regB->base;
	ulong regB_end = regB->base + (BIT(regB->order) - 1);

	if ((regB_start <= regA_start) &&
	    (regA_start < regB_end) &&
	    (regB_start < regA_end) &&
	    (regA_end <= regB_end))
		return true;

	return false;
}

/** Check if regionA conflicts with regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
			       const struct sbi_domain_memregion *regB)
{
	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
	    regA->flags == regB->flags)
		return true;

	return false;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	if (regA->order < regB->order)
		return true;

	if ((regA->order == regB->order) &&
	    (regA->base < regB->base))
		return true;

	return false;
}
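
/*
 * Illustrative note (editorial, not from upstream): this ordering
 * sorts regions by ascending order (i.e. size) and then ascending
 * base, so after sanitize_domain() runs, the smallest region that
 * contains a given address is always encountered first. This mirrors
 * RISC-V PMP semantics, where the lowest-numbered matching PMP entry
 * takes priority.
 */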

static const struct sbi_domain_memregion *find_region(
						const struct sbi_domain *dom,
						unsigned long addr)
{
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend)
			return reg;
	}

	return NULL;
}

static const struct sbi_domain_memregion *find_next_subset_region(
				const struct sbi_domain *dom,
				const struct sbi_domain_memregion *reg,
				unsigned long addr)
{
	struct sbi_domain_memregion *sreg, *ret = NULL;

	sbi_domain_for_each_memregion(dom, sreg) {
		if (sreg == reg || (sreg->base <= addr) ||
		    !is_region_subset(sreg, reg))
			continue;
		if (!ret || (sreg->base < ret->base) ||
		    ((sreg->base == ret->base) && (sreg->order < ret->order)))
			ret = sreg;
	}

	return ret;
}

static int sanitize_domain(const struct sbi_platform *plat,
			   struct sbi_domain *dom)
{
	u32 i, j, count;
	bool have_fw_reg;
	struct sbi_domain_memregion treg, *reg, *reg1;

	/* Check possible HARTs */
	if (!dom->possible_harts) {
		sbi_printf("%s: %s possible HART mask is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
		if (sbi_platform_hart_invalid(plat, i)) {
			sbi_printf("%s: %s possible HART mask has invalid "
				   "hart %d\n", __func__, dom->name, i);
			return SBI_EINVAL;
		}
	}

	/* Check memory regions */
	if (!dom->regions) {
		sbi_printf("%s: %s regions is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_region_valid(reg)) {
			sbi_printf("%s: %s has invalid region base=0x%lx "
				   "order=%lu flags=0x%lx\n", __func__,
				   dom->name, reg->base, reg->order,
				   reg->flags);
			return SBI_EINVAL;
		}
	}

	/* Count memory regions and check presence of firmware region */
	count = 0;
	have_fw_reg = false;
	sbi_domain_for_each_memregion(dom, reg) {
		if (reg->order == root_fw_region.order &&
		    reg->base == root_fw_region.base &&
		    reg->flags == root_fw_region.flags)
			have_fw_reg = true;
		count++;
	}
	if (!have_fw_reg) {
		sbi_printf("%s: %s does not have firmware region\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}

	/* Sort the memory regions */
	for (i = 0; i < (count - 1); i++) {
		reg = &dom->regions[i];
		for (j = i + 1; j < count; j++) {
			reg1 = &dom->regions[j];

			if (is_region_conflict(reg1, reg)) {
				sbi_printf("%s: %s conflict between regions "
					   "(base=0x%lx order=%lu flags=0x%lx) and "
					   "(base=0x%lx order=%lu flags=0x%lx)\n",
					   __func__, dom->name,
					   reg->base, reg->order, reg->flags,
					   reg1->base, reg1->order, reg1->flags);
				return SBI_EINVAL;
			}

			if (!is_region_before(reg1, reg))
				continue;

			sbi_memcpy(&treg, reg1, sizeof(treg));
			sbi_memcpy(reg1, reg, sizeof(treg));
			sbi_memcpy(reg, &treg, sizeof(treg));
		}
	}

	/*
	 * We don't need to check the boot HART id of the domain because
	 * if the boot HART id is not possible/assigned to this domain
	 * then it won't be started at boot-time by sbi_domain_finalize().
	 */

	/*
	 * Check next mode
	 *
	 * We only allow next mode to be S-mode or U-mode, so that we can
	 * protect M-mode context and enforce checks on memory accesses.
	 */
	if (dom->next_mode != PRV_S &&
	    dom->next_mode != PRV_U) {
		sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
			   __func__, dom->name, dom->next_mode);
		return SBI_EINVAL;
	}

	/* Check next address and next mode */
	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
				   SBI_DOMAIN_EXECUTE)) {
		sbi_printf("%s: %s next booting stage address 0x%lx can't "
			   "execute\n", __func__, dom->name, dom->next_addr);
		return SBI_EINVAL;
	}

	return 0;
}
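
/*
 * Worked example (editorial, not from upstream): given a domain with
 * the firmware region (say order=17 at 0x80000000) and an
 * all-memory region (order=__riscv_xlen at base 0), the two do not
 * conflict because their flags differ, and the swap loop above moves
 * the order=17 region in front of the all-memory one, since
 * is_region_before() prefers the smaller order.
 */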

bool sbi_domain_check_addr_range(const struct sbi_domain *dom,
				 unsigned long addr, unsigned long size,
				 unsigned long mode,
				 unsigned long access_flags)
{
	unsigned long max = addr + size;
	const struct sbi_domain_memregion *reg, *sreg;

	if (!dom)
		return false;

	while (addr < max) {
		reg = find_region(dom, addr);
		if (!reg)
			return false;

		if (!sbi_domain_check_addr(dom, addr, mode, access_flags))
			return false;

		sreg = find_next_subset_region(dom, reg, addr);
		if (sreg)
			addr = sreg->base;
		else if (reg->order < __riscv_xlen)
			addr = reg->base + (1UL << reg->order);
		else
			break;
	}

	return true;
}
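
/*
 * Illustrative note (editorial, not from upstream): the walk above
 * advances through [addr, addr + size) one region at a time. If a
 * smaller region starts inside the current one
 * (find_next_subset_region()), checking resumes at its base so that a
 * more restrictive inner region cannot be skipped; otherwise the walk
 * jumps past the end of the current region, or stops if the current
 * region already covers the whole address space.
 */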

void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
	u32 i, k;
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_printf("Domain%d Name %s: %s\n",
		   dom->index, suffix, dom->name);

	sbi_printf("Domain%d Boot HART %s: %d\n",
		   dom->index, suffix, dom->boot_hartid);

	k = 0;
	sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
	sbi_hartmask_for_each_hart(i, dom->possible_harts)
		sbi_printf("%s%d%s", (k++) ? "," : "",
			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
	sbi_printf("\n");

	i = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;

		sbi_printf("Domain%d Region%02d %s: 0x%" PRILX "-0x%" PRILX " ",
			   dom->index, i, suffix, rstart, rend);

		k = 0;
		sbi_printf("M: ");
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
			sbi_printf("%cI", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s ", (k++) ? ")" : "()");

		k = 0;
		sbi_printf("S/U: ");
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s\n", (k++) ? ")" : "()");

		i++;
	}

	sbi_printf("Domain%d Next Address%s: 0x%" PRILX "\n",
		   dom->index, suffix, dom->next_addr);

	sbi_printf("Domain%d Next Arg1 %s: 0x%" PRILX "\n",
		   dom->index, suffix, dom->next_arg1);

	sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
	switch (dom->next_mode) {
	case PRV_M:
		sbi_printf("M-mode\n");
		break;
	case PRV_S:
		sbi_printf("S-mode\n");
		break;
	case PRV_U:
		sbi_printf("U-mode\n");
		break;
	default:
		sbi_printf("Unknown\n");
		break;
	}

	sbi_printf("Domain%d SysReset %s: %s\n",
		   dom->index, suffix,
		   (dom->system_reset_allowed) ? "yes" : "no");
}

void sbi_domain_dump_all(const char *suffix)
{
	u32 i;
	const struct sbi_domain *dom;

	sbi_domain_for_each(i, dom) {
		sbi_domain_dump(dom, suffix);
		sbi_printf("\n");
	}
}

int sbi_domain_register(struct sbi_domain *dom,
			const struct sbi_hartmask *assign_mask)
{
	u32 i;
	int rc;
	struct sbi_domain *tdom;
	u32 cold_hartid = current_hartid();
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!dom || !assign_mask || domain_finalized)
		return SBI_EINVAL;

	/* Check if domain already discovered */
	sbi_domain_for_each(i, tdom) {
		if (tdom == dom)
			return SBI_EALREADY;
	}

	/*
	 * Ensure that we have room for Domain Index to
	 * HART ID mapping
	 */
	if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
		sbi_printf("%s: No room for %s\n",
			   __func__, dom->name);
		return SBI_ENOSPC;
	}

	/* Sanitize discovered domain */
	rc = sanitize_domain(plat, dom);
	if (rc) {
		sbi_printf("%s: sanity checks failed for"
			   " %s (error %d)\n", __func__,
			   dom->name, rc);
		return rc;
	}

	/* Assign index to domain */
	dom->index = domain_count++;
	domidx_to_domain_table[dom->index] = dom;

	/* Clear assigned HARTs of domain */
	sbi_hartmask_clear_all(&dom->assigned_harts);

	/* Assign domain to HART if HART is a possible HART */
	sbi_hartmask_for_each_hart(i, assign_mask) {
		if (!sbi_hartmask_test_hart(i, dom->possible_harts))
			continue;

		tdom = hartid_to_domain_table[i];
		if (tdom)
			sbi_hartmask_clear_hart(i,
					&tdom->assigned_harts);
		hartid_to_domain_table[i] = dom;
		sbi_hartmask_set_hart(i, &dom->assigned_harts);

		/*
		 * If cold boot HART is assigned to this domain then
		 * override boot HART of this domain.
		 */
		if (i == cold_hartid &&
		    dom->boot_hartid != cold_hartid) {
			sbi_printf("Domain%d Boot HARTID forced to"
				   " %d\n", dom->index, cold_hartid);
			dom->boot_hartid = cold_hartid;
		}
	}

	return 0;
}
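
/*
 * Usage sketch (editorial; all names below are hypothetical, not part
 * of OpenSBI): a platform would typically build and register a custom
 * domain from its domains_init() callback, roughly:
 *
 *	static struct sbi_hartmask mydom_harts;
 *	static struct sbi_domain_memregion mydom_regs[4];
 *	static struct sbi_domain mydom = {
 *		.name = "secure-domain",
 *		.possible_harts = &mydom_harts,
 *		.regions = mydom_regs,
 *		.next_addr = 0x80200000,
 *		.next_mode = PRV_S,
 *		.boot_hartid = 1,
 *	};
 *
 *	sbi_hartmask_set_hart(1, &mydom_harts);
 *	return sbi_domain_register(&mydom, &mydom_harts);
 *
 * The regions array must include the root firmware region and end
 * with a zero-order sentinel entry, or sanitize_domain() rejects it.
 */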

int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg)
{
	int rc;
	bool reg_merged;
	struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!reg || domain_finalized ||
	    (root.regions != root_memregs) ||
	    (ROOT_REGION_MAX <= root_memregs_count))
		return SBI_EINVAL;

	/* Check for conflicts */
	sbi_domain_for_each_memregion(&root, nreg) {
		if (is_region_conflict(reg, nreg)) {
			sbi_printf("%s: is_region_conflict check failed"
				   " 0x%lx conflicts existing 0x%lx\n", __func__,
				   reg->base, nreg->base);
			return SBI_EALREADY;
		}
	}

	/* Append the memregion to root memregions */
	nreg = &root_memregs[root_memregs_count];
	sbi_memcpy(nreg, reg, sizeof(*reg));
	root_memregs_count++;
	root_memregs[root_memregs_count].order = 0;

	/* Sort and optimize root regions */
	do {
		/* Sanitize the root domain so that memregions are sorted */
		rc = sanitize_domain(plat, &root);
		if (rc) {
			sbi_printf("%s: sanity checks failed for"
				   " %s (error %d)\n", __func__,
				   root.name, rc);
			return rc;
		}

		/* Merge consecutive memregions with same order and flags */
		reg_merged = false;
		sbi_domain_for_each_memregion(&root, nreg) {
			nreg1 = nreg + 1;
			if (!nreg1->order)
				continue;

			if (!(nreg->base & (BIT(nreg->order + 1) - 1)) &&
			    (nreg->base + BIT(nreg->order)) == nreg1->base &&
			    nreg->order == nreg1->order &&
			    nreg->flags == nreg1->flags) {
				nreg->order++;
				while (nreg1->order) {
					nreg2 = nreg1 + 1;
					sbi_memcpy(nreg1, nreg2, sizeof(*nreg1));
					nreg1++;
				}
				reg_merged = true;
				root_memregs_count--;
			}
		}
	} while (reg_merged);

	return 0;
}
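
/*
 * Worked example (editorial, not from upstream): adding two adjacent
 * 4 KB regions with identical flags at base=0x80100000 and
 * base=0x80101000 (both order=12) triggers the merge loop above:
 * 0x80100000 is aligned to 2^13, the second region starts exactly at
 * the end of the first, and orders and flags match, so the pair
 * collapses into a single order=13 region at 0x80100000 and the tail
 * of the array shifts down by one entry.
 */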

int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
				 unsigned long align, unsigned long region_flags)
{
	int rc;
	unsigned long pos, end, rsize;
	struct sbi_domain_memregion reg;

	pos = addr;
	end = addr + size;
	while (pos < end) {
		rsize = pos & (align - 1);
		if (rsize)
			rsize = 1UL << sbi_ffs(pos);
		else
			rsize = ((end - pos) < align) ?
				(end - pos) : align;

		sbi_domain_memregion_init(pos, rsize, region_flags, &reg);
		rc = sbi_domain_root_add_memregion(&reg);
		if (rc)
			return rc;
		pos += rsize;
	}

	return 0;
}
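
/*
 * Worked example (editorial, not from upstream; assumes sbi_ffs()
 * returns the zero-based index of the lowest set bit): with
 * addr=0x80001000, size=0x7000 and align=0x4000, the loop emits
 * naturally aligned power-of-two chunks, 0x1000 at 0x80001000, then
 * 0x2000 at 0x80002000, then 0x4000 at 0x80004000. In other words, it
 * peels off low-order bits until 'pos' is align-aligned and then
 * advances in align-sized (or remainder) steps.
 */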

int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, dhart;
	struct sbi_domain *dom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Initialize and populate domains for the platform */
	rc = sbi_platform_domains_init(plat);
	if (rc) {
		sbi_printf("%s: platform domains_init() failed (error %d)\n",
			   __func__, rc);
		return rc;
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART */
		dhart = dom->boot_hartid;

		/* Ignore if boot HART is off limits */
		if (SBI_HARTMASK_MAX_BITS <= dhart)
			continue;

		/* Ignore if boot HART is not possible for this domain */
		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART is assigned to a different domain */
		if (sbi_hartid_to_domain(dhart) != dom ||
		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
			continue;

		/* Startup boot HART of domain */
		if (dhart == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc) {
				sbi_printf("%s: failed to start boot HART %d"
					   " for %s (error %d)\n", __func__,
					   dhart, dom->name, rc);
				return rc;
			}
		}
	}

	/*
	 * Set the finalized flag so that the root domain
	 * regions can't be changed.
	 */
	domain_finalized = true;

	return 0;
}
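
/*
 * Illustrative note (editorial, not from upstream): after
 * sbi_domain_finalize(), the cold-boot HART continues into its own
 * domain's next booting stage via the scratch fields updated above,
 * while boot HARTs of other domains are kicked through
 * sbi_hsm_hart_start(). HARTs that are not a boot HART of any domain
 * simply remain stopped until some domain starts them via HSM.
 */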

int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	if (scratch->fw_rw_offset == 0 ||
	    (scratch->fw_rw_offset & (scratch->fw_rw_offset - 1)) != 0) {
		sbi_printf("%s: fw_rw_offset is not a power of 2 (0x%lx)\n",
			   __func__, scratch->fw_rw_offset);
		return SBI_EINVAL;
	}

	if ((scratch->fw_start & (scratch->fw_rw_offset - 1)) != 0) {
		sbi_printf("%s: fw_start and fw_rw_offset not aligned\n",
			   __func__);
		return SBI_EINVAL;
	}

	/* Root domain firmware memory region */
	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
				  &root_fw_region);
	domain_memregion_initfw(&root_memregs[root_memregs_count++]);

	sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
				  (scratch->fw_size - scratch->fw_rw_offset),
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_WRITABLE),
				  &root_memregs[root_memregs_count++]);

	/* Root domain allow everything memory region */
	sbi_domain_memregion_init(0, ~0UL,
				  (SBI_DOMAIN_MEMREGION_READABLE |
				   SBI_DOMAIN_MEMREGION_WRITEABLE |
				   SBI_DOMAIN_MEMREGION_EXECUTABLE),
				  &root_memregs[root_memregs_count++]);

	/* Root domain memory region end */
	root_memregs[root_memregs_count].order = 0;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Root domain possible and assigned HARTs */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		if (sbi_platform_hart_invalid(plat, i))
			continue;
		sbi_hartmask_set_hart(i, &root_hmask);
	}

	return sbi_domain_register(&root, &root_hmask);
}