/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

/*
 * We allocate an extra element because sbi_domain_for_each() expects
 * the array to be null-terminated.
 */
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX + 1] = { 0 };
struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };

static u32 domain_count = 0;
static bool domain_finalized = false;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_REGION_MAX 16
static u32 root_memregs_count = 0;
static struct sbi_domain_memregion root_fw_region;
static struct sbi_domain_memregion root_memregs[ROOT_REGION_MAX + 1] = { 0 };

struct sbi_domain root = {
        .name = "root",
        .possible_harts = &root_hmask,
        .regions = root_memregs,
        .system_reset_allowed = true,
        .system_suspend_allowed = true,
};
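
/** Check if the given HART is currently assigned to the given domain */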
bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
        if (dom)
                return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

        return false;
}
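
/*
 * Get one xlen-bit word of the domain's assigned HART mask such that
 * HART (hbase + i) maps to bit i of the returned value.
 */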
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
                                       ulong hbase)
{
        ulong ret, bword, boff;

        if (!dom)
                return 0;

        bword = BIT_WORD(hbase);
        boff = BIT_WORD_OFFSET(hbase);

        ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
        if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
                ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
                        (BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
        }

        return ret;
}
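
/** Copy the cached root firmware region into the given memregion */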
static void domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
        if (!reg)
                return;

        sbi_memcpy(reg, &root_fw_region, sizeof(*reg));
}
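
/*
 * Initialize a memregion as the smallest naturally-aligned power-of-two
 * region covering [addr, addr + size), falling back to the whole address
 * space if no smaller such region exists. For example, addr=0x80000000
 * with size=0x4000 yields base=0x80000000 and order=14.
 */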
void sbi_domain_memregion_init(unsigned long addr,
                               unsigned long size,
                               unsigned long flags,
                               struct sbi_domain_memregion *reg)
{
        unsigned long base = 0, order;

        for (order = log2roundup(size); order <= __riscv_xlen; order++) {
                if (order < __riscv_xlen) {
                        base = addr & ~((1UL << order) - 1UL);
                        if ((base <= addr) &&
                            (addr < (base + (1UL << order))) &&
                            (base <= (addr + size - 1UL)) &&
                            ((addr + size - 1UL) < (base + (1UL << order))))
                                break;
                } else {
                        base = 0;
                        break;
                }
        }

        if (reg) {
                reg->base = base;
                reg->order = order;
                reg->flags = flags;
        }
}
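
/*
 * Check whether the given access (access_flags) to addr is allowed for
 * the given privilege mode in the specified domain. An address matching
 * no region is accessible in M-mode only.
 */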
bool sbi_domain_check_addr(const struct sbi_domain *dom,
                           unsigned long addr, unsigned long mode,
                           unsigned long access_flags)
{
        bool rmmio, mmio = false;
        struct sbi_domain_memregion *reg;
        unsigned long rstart, rend, rflags, rwx = 0, rrwx = 0;

        if (!dom)
                return false;

        /*
         * Use M_{R/W/X} bits because the SU-bits are at the
         * same relative offsets. If the mode is not M, the SU
         * bits will fall at same offsets after the shift.
         */
        if (access_flags & SBI_DOMAIN_READ)
                rwx |= SBI_DOMAIN_MEMREGION_M_READABLE;
        if (access_flags & SBI_DOMAIN_WRITE)
                rwx |= SBI_DOMAIN_MEMREGION_M_WRITABLE;
        if (access_flags & SBI_DOMAIN_EXECUTE)
                rwx |= SBI_DOMAIN_MEMREGION_M_EXECUTABLE;
        if (access_flags & SBI_DOMAIN_MMIO)
                mmio = true;

        sbi_domain_for_each_memregion(dom, reg) {
                rflags = reg->flags;
                rrwx = (mode == PRV_M ?
                        (rflags & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK) :
                        (rflags & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK)
                        >> SBI_DOMAIN_MEMREGION_SU_ACCESS_SHIFT);

                rstart = reg->base;
                rend = (reg->order < __riscv_xlen) ?
                        rstart + ((1UL << reg->order) - 1) : -1UL;
                if (rstart <= addr && addr <= rend) {
                        rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ? true : false;
                        if (mmio != rmmio)
                                return false;
                        return ((rrwx & rwx) == rwx) ? true : false;
                }
        }

        return (mode == PRV_M) ? true : false;
}

/* Check if region complies with constraints */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
        if (reg->order < 3 || __riscv_xlen < reg->order)
                return false;

        if (reg->order == __riscv_xlen && reg->base != 0)
                return false;

        if (reg->order < __riscv_xlen && (reg->base & (BIT(reg->order) - 1)))
                return false;

        return true;
}

/** Check if regionA is a sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
                             const struct sbi_domain_memregion *regB)
{
        ulong regA_start = regA->base;
        ulong regA_end = regA->base + (BIT(regA->order) - 1);
        ulong regB_start = regB->base;
        ulong regB_end = regB->base + (BIT(regB->order) - 1);

        if ((regB_start <= regA_start) &&
            (regA_start < regB_end) &&
            (regB_start < regA_end) &&
            (regA_end <= regB_end))
                return true;

        return false;
}

/** Check if regionA conflicts with regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
                               const struct sbi_domain_memregion *regB)
{
        if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
            regA->flags == regB->flags)
                return true;

        return false;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
                             const struct sbi_domain_memregion *regB)
{
        if (regA->order < regB->order)
                return true;

        if ((regA->order == regB->order) &&
            (regA->base < regB->base))
                return true;

        return false;
}
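
/** Find the memregion of a domain which contains the given address */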
static const struct sbi_domain_memregion *find_region(
                                        const struct sbi_domain *dom,
                                        unsigned long addr)
{
        unsigned long rstart, rend;
        struct sbi_domain_memregion *reg;

        sbi_domain_for_each_memregion(dom, reg) {
                rstart = reg->base;
                rend = (reg->order < __riscv_xlen) ?
                        rstart + ((1UL << reg->order) - 1) : -1UL;
                if (rstart <= addr && addr <= rend)
                        return reg;
        }

        return NULL;
}
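
/*
 * Find the sub-region of reg with the lowest base address which still
 * lies strictly above the given address (smaller order wins on a tie).
 */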
static const struct sbi_domain_memregion *find_next_subset_region(
                                const struct sbi_domain *dom,
                                const struct sbi_domain_memregion *reg,
                                unsigned long addr)
{
        struct sbi_domain_memregion *sreg, *ret = NULL;

        sbi_domain_for_each_memregion(dom, sreg) {
                if (sreg == reg || (sreg->base <= addr) ||
                    !is_region_subset(sreg, reg))
                        continue;

                if (!ret || (sreg->base < ret->base) ||
                    ((sreg->base == ret->base) && (sreg->order < ret->order)))
                        ret = sreg;
        }

        return ret;
}
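
/*
 * Validate a domain's possible HARTs, memory regions, and next booting
 * stage details; also sorts the domain's memory regions in-place.
 */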
static int sanitize_domain(const struct sbi_platform *plat,
                           struct sbi_domain *dom)
{
        u32 i, j, count;
        bool have_fw_reg;
        struct sbi_domain_memregion treg, *reg, *reg1;

        /* Check possible HARTs */
        if (!dom->possible_harts) {
                sbi_printf("%s: %s possible HART mask is NULL\n",
                           __func__, dom->name);
                return SBI_EINVAL;
        }
        sbi_hartmask_for_each_hart(i, dom->possible_harts) {
                if (sbi_platform_hart_invalid(plat, i)) {
                        sbi_printf("%s: %s possible HART mask has invalid "
                                   "hart %d\n", __func__, dom->name, i);
                        return SBI_EINVAL;
                }
        }

        /* Check memory regions */
        if (!dom->regions) {
                sbi_printf("%s: %s regions is NULL\n",
                           __func__, dom->name);
                return SBI_EINVAL;
        }
        sbi_domain_for_each_memregion(dom, reg) {
                if (!is_region_valid(reg)) {
                        sbi_printf("%s: %s has invalid region base=0x%lx "
                                   "order=%lu flags=0x%lx\n", __func__,
                                   dom->name, reg->base, reg->order,
                                   reg->flags);
                        return SBI_EINVAL;
                }
        }

        /* Count memory regions and check presence of firmware region */
        count = 0;
        have_fw_reg = false;
        sbi_domain_for_each_memregion(dom, reg) {
                if (reg->order == root_fw_region.order &&
                    reg->base == root_fw_region.base &&
                    reg->flags == root_fw_region.flags)
                        have_fw_reg = true;
                count++;
        }
        if (!have_fw_reg) {
                sbi_printf("%s: %s does not have firmware region\n",
                           __func__, dom->name);
                return SBI_EINVAL;
        }

        /* Sort the memory regions */
        for (i = 0; i < (count - 1); i++) {
                reg = &dom->regions[i];
                for (j = i + 1; j < count; j++) {
                        reg1 = &dom->regions[j];

                        if (is_region_conflict(reg1, reg)) {
                                sbi_printf("%s: %s conflict between regions "
                                           "(base=0x%lx order=%lu flags=0x%lx) and "
                                           "(base=0x%lx order=%lu flags=0x%lx)\n",
                                           __func__, dom->name,
                                           reg->base, reg->order, reg->flags,
                                           reg1->base, reg1->order, reg1->flags);
                                return SBI_EINVAL;
                        }

                        if (!is_region_before(reg1, reg))
                                continue;

                        sbi_memcpy(&treg, reg1, sizeof(treg));
                        sbi_memcpy(reg1, reg, sizeof(treg));
                        sbi_memcpy(reg, &treg, sizeof(treg));
                }
        }

        /*
         * We don't need to check boot HART id of domain because if boot
         * HART id is not possible/assigned to this domain then it won't
         * be started at boot-time by sbi_domain_finalize().
         */

        /*
         * Check next mode
         *
         * We only allow next mode to be S-mode or U-mode, so that we can
         * protect M-mode context and enforce checks on memory accesses.
         */
        if (dom->next_mode != PRV_S &&
            dom->next_mode != PRV_U) {
                sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
                           __func__, dom->name, dom->next_mode);
                return SBI_EINVAL;
        }

        /* Check next address and next mode */
        if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
                                   SBI_DOMAIN_EXECUTE)) {
                sbi_printf("%s: %s next booting stage address 0x%lx can't "
                           "execute\n", __func__, dom->name, dom->next_addr);
                return SBI_EINVAL;
        }

        return 0;
}
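
/*
 * Check whether the given access is allowed across the whole range
 * [addr, addr + size), walking matching regions and their sub-regions.
 */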
bool sbi_domain_check_addr_range(const struct sbi_domain *dom,
                                 unsigned long addr, unsigned long size,
                                 unsigned long mode,
                                 unsigned long access_flags)
{
        unsigned long max = addr + size;
        const struct sbi_domain_memregion *reg, *sreg;

        if (!dom)
                return false;

        while (addr < max) {
                reg = find_region(dom, addr);
                if (!reg)
                        return false;

                if (!sbi_domain_check_addr(dom, addr, mode, access_flags))
                        return false;

                sreg = find_next_subset_region(dom, reg, addr);
                if (sreg)
                        addr = sreg->base;
                else if (reg->order < __riscv_xlen)
                        addr = reg->base + (1UL << reg->order);
                else
                        break;
        }

        return true;
}
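
/** Print the details of a domain on the SBI console */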
void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
        u32 i, k;
        unsigned long rstart, rend;
        struct sbi_domain_memregion *reg;

        sbi_printf("Domain%d Name %s: %s\n",
                   dom->index, suffix, dom->name);

        sbi_printf("Domain%d Boot HART %s: %d\n",
                   dom->index, suffix, dom->boot_hartid);

        k = 0;
        sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
        sbi_hartmask_for_each_hart(i, dom->possible_harts)
                sbi_printf("%s%d%s", (k++) ? "," : "",
                           i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
        sbi_printf("\n");

        i = 0;
        sbi_domain_for_each_memregion(dom, reg) {
                rstart = reg->base;
                rend = (reg->order < __riscv_xlen) ?
                        rstart + ((1UL << reg->order) - 1) : -1UL;

                sbi_printf("Domain%d Region%02d %s: 0x%" PRILX "-0x%" PRILX " ",
                           dom->index, i, suffix, rstart, rend);

                k = 0;
                sbi_printf("M: ");
                if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
                        sbi_printf("%cI", (k++) ? ',' : '(');
                if (reg->flags & SBI_DOMAIN_MEMREGION_M_READABLE)
                        sbi_printf("%cR", (k++) ? ',' : '(');
                if (reg->flags & SBI_DOMAIN_MEMREGION_M_WRITABLE)
                        sbi_printf("%cW", (k++) ? ',' : '(');
                if (reg->flags & SBI_DOMAIN_MEMREGION_M_EXECUTABLE)
                        sbi_printf("%cX", (k++) ? ',' : '(');
                sbi_printf("%s ", (k++) ? ")" : "()");

                k = 0;
                sbi_printf("S/U: ");
                if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
                        sbi_printf("%cR", (k++) ? ',' : '(');
                if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
                        sbi_printf("%cW", (k++) ? ',' : '(');
                if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
                        sbi_printf("%cX", (k++) ? ',' : '(');
                sbi_printf("%s\n", (k++) ? ")" : "()");

                i++;
        }

        sbi_printf("Domain%d Next Address %s: 0x%" PRILX "\n",
                   dom->index, suffix, dom->next_addr);

        sbi_printf("Domain%d Next Arg1 %s: 0x%" PRILX "\n",
                   dom->index, suffix, dom->next_arg1);

        sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
        switch (dom->next_mode) {
        case PRV_M:
                sbi_printf("M-mode\n");
                break;
        case PRV_S:
                sbi_printf("S-mode\n");
                break;
        case PRV_U:
                sbi_printf("U-mode\n");
                break;
        default:
                sbi_printf("Unknown\n");
                break;
        }

        sbi_printf("Domain%d SysReset %s: %s\n",
                   dom->index, suffix, (dom->system_reset_allowed) ? "yes" : "no");

        sbi_printf("Domain%d SysSuspend %s: %s\n",
                   dom->index, suffix, (dom->system_suspend_allowed) ? "yes" : "no");
}

void sbi_domain_dump_all(const char *suffix)
{
        u32 i;
        const struct sbi_domain *dom;

        sbi_domain_for_each(i, dom) {
                sbi_domain_dump(dom, suffix);
                sbi_printf("\n");
        }
}
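
/*
 * Register a new domain and assign to it those HARTs from assign_mask
 * which are also possible HARTs of the domain.
 */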
int sbi_domain_register(struct sbi_domain *dom,
                        const struct sbi_hartmask *assign_mask)
{
        u32 i;
        int rc;
        struct sbi_domain *tdom;
        u32 cold_hartid = current_hartid();
        const struct sbi_platform *plat = sbi_platform_thishart_ptr();

        /* Sanity checks */
        if (!dom || !assign_mask || domain_finalized)
                return SBI_EINVAL;

        /* Check if domain already discovered */
        sbi_domain_for_each(i, tdom) {
                if (tdom == dom)
                        return SBI_EALREADY;
        }

        /*
         * Ensure that we have room for Domain Index to
         * HART ID mapping
         */
        if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
                sbi_printf("%s: No room for %s\n",
                           __func__, dom->name);
                return SBI_ENOSPC;
        }

        /* Sanitize discovered domain */
        rc = sanitize_domain(plat, dom);
        if (rc) {
                sbi_printf("%s: sanity checks failed for"
                           " %s (error %d)\n", __func__,
                           dom->name, rc);
                return rc;
        }

        /* Assign index to domain */
        dom->index = domain_count++;
        domidx_to_domain_table[dom->index] = dom;

        /* Clear assigned HARTs of domain */
        sbi_hartmask_clear_all(&dom->assigned_harts);

        /* Assign domain to HART if HART is a possible HART */
        sbi_hartmask_for_each_hart(i, assign_mask) {
                if (!sbi_hartmask_test_hart(i, dom->possible_harts))
                        continue;

                tdom = hartid_to_domain_table[i];
                if (tdom)
                        sbi_hartmask_clear_hart(i,
                                                &tdom->assigned_harts);
                hartid_to_domain_table[i] = dom;
                sbi_hartmask_set_hart(i, &dom->assigned_harts);

                /*
                 * If cold boot HART is assigned to this domain then
                 * override boot HART of this domain.
                 */
                if (i == cold_hartid &&
                    dom->boot_hartid != cold_hartid) {
                        sbi_printf("Domain%d Boot HARTID forced to"
                                   " %d\n", dom->index, cold_hartid);
                        dom->boot_hartid = cold_hartid;
                }
        }

        return 0;
}
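
/*
 * Add a memory region to the root domain, then re-sort the root regions
 * and merge any consecutive regions with the same order and flags.
 */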
int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg)
{
        int rc;
        bool reg_merged;
        struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
        const struct sbi_platform *plat = sbi_platform_thishart_ptr();

        /* Sanity checks */
        if (!reg || domain_finalized ||
            (root.regions != root_memregs) ||
            (ROOT_REGION_MAX <= root_memregs_count))
                return SBI_EINVAL;

        /* Check for conflicts */
        sbi_domain_for_each_memregion(&root, nreg) {
                if (is_region_conflict(reg, nreg)) {
                        sbi_printf("%s: is_region_conflict check failed"
                                   " 0x%lx conflicts existing 0x%lx\n", __func__,
                                   reg->base, nreg->base);
                        return SBI_EALREADY;
                }
        }

        /* Append the memregion to root memregions */
        nreg = &root_memregs[root_memregs_count];
        sbi_memcpy(nreg, reg, sizeof(*reg));
        root_memregs_count++;
        root_memregs[root_memregs_count].order = 0;

        /* Sort and optimize root regions */
        do {
                /* Sanitize the root domain so that memregions are sorted */
                rc = sanitize_domain(plat, &root);
                if (rc) {
                        sbi_printf("%s: sanity checks failed for"
                                   " %s (error %d)\n", __func__,
                                   root.name, rc);
                        return rc;
                }

                /* Merge consecutive memregions with same order and flags */
                reg_merged = false;
                sbi_domain_for_each_memregion(&root, nreg) {
                        nreg1 = nreg + 1;
                        if (!nreg1->order)
                                continue;

                        if (!(nreg->base & (BIT(nreg->order + 1) - 1)) &&
                            (nreg->base + BIT(nreg->order)) == nreg1->base &&
                            nreg->order == nreg1->order &&
                            nreg->flags == nreg1->flags) {
                                nreg->order++;
                                while (nreg1->order) {
                                        nreg2 = nreg1 + 1;
                                        sbi_memcpy(nreg1, nreg2, sizeof(*nreg1));
                                        nreg1++;
                                }
                                reg_merged = true;
                                root_memregs_count--;
                        }
                }
        } while (reg_merged);

        return 0;
}
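
/*
 * Add an arbitrary address range to the root domain by splitting it
 * into naturally-aligned regions no larger than align. For example,
 * addr=0x1000, size=0x3000, align=0x1000 yields three 4KB regions.
 */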
int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
                                 unsigned long align, unsigned long region_flags)
{
        int rc;
        unsigned long pos, end, rsize;
        struct sbi_domain_memregion reg;

        pos = addr;
        end = addr + size;
        while (pos < end) {
                rsize = pos & (align - 1);
                if (rsize)
                        rsize = 1UL << sbi_ffs(pos);
                else
                        rsize = ((end - pos) < align) ?
                                (end - pos) : align;

                sbi_domain_memregion_init(pos, rsize, region_flags, &reg);
                rc = sbi_domain_root_add_memregion(&reg);
                if (rc)
                        return rc;
                pos += rsize;
        }

        return 0;
}
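
/*
 * Finalize all domains: let the platform populate domains, start the
 * boot HART of each domain, and set the finalized flag so that the
 * domain configuration can no longer change.
 */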
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
        int rc;
        u32 i, dhart;
        struct sbi_domain *dom;
        const struct sbi_platform *plat = sbi_platform_ptr(scratch);

        /* Initialize and populate domains for the platform */
        rc = sbi_platform_domains_init(plat);
        if (rc) {
                sbi_printf("%s: platform domains_init() failed (error %d)\n",
                           __func__, rc);
                return rc;
        }

        /* Startup boot HART of domains */
        sbi_domain_for_each(i, dom) {
                /* Domain boot HART */
                dhart = dom->boot_hartid;

                /* Ignore if boot HART is off limits */
                if (SBI_HARTMASK_MAX_BITS <= dhart)
                        continue;

                /* Ignore if boot HART not possible for this domain */
                if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
                        continue;

                /* Ignore if boot HART assigned different domain */
                if (sbi_hartid_to_domain(dhart) != dom ||
                    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
                        continue;

                /* Startup boot HART of domain */
                if (dhart == cold_hartid) {
                        scratch->next_addr = dom->next_addr;
                        scratch->next_mode = dom->next_mode;
                        scratch->next_arg1 = dom->next_arg1;
                } else {
                        rc = sbi_hsm_hart_start(scratch, NULL, dhart,
                                                dom->next_addr,
                                                dom->next_mode,
                                                dom->next_arg1);
                        if (rc) {
                                sbi_printf("%s: failed to start boot HART %d"
                                           " for %s (error %d)\n", __func__,
                                           dhart, dom->name, rc);
                                return rc;
                        }
                }
        }

        /*
         * Set the finalized flag so that the root domain
         * regions can't be changed.
         */
        domain_finalized = true;

        return 0;
}
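
/*
 * Create the root domain covering all memory and all valid HARTs,
 * then register it as the first domain.
 */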
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
        u32 i;
        const struct sbi_platform *plat = sbi_platform_ptr(scratch);

        if (scratch->fw_rw_offset == 0 ||
            (scratch->fw_rw_offset & (scratch->fw_rw_offset - 1)) != 0) {
                sbi_printf("%s: fw_rw_offset is not a power of 2 (0x%lx)\n",
                           __func__, scratch->fw_rw_offset);
                return SBI_EINVAL;
        }

        if ((scratch->fw_start & (scratch->fw_rw_offset - 1)) != 0) {
                sbi_printf("%s: fw_start and fw_rw_offset not aligned\n",
                           __func__);
                return SBI_EINVAL;
        }

        /* Root domain firmware memory region */
        sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
                                  (SBI_DOMAIN_MEMREGION_M_READABLE |
                                   SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
                                  &root_fw_region);
        domain_memregion_initfw(&root_memregs[root_memregs_count++]);

        sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
                                  (scratch->fw_size - scratch->fw_rw_offset),
                                  (SBI_DOMAIN_MEMREGION_M_READABLE |
                                   SBI_DOMAIN_MEMREGION_M_WRITABLE),
                                  &root_memregs[root_memregs_count++]);

        /* Root domain allow everything memory region */
        sbi_domain_memregion_init(0, ~0UL,
                                  (SBI_DOMAIN_MEMREGION_READABLE |
                                   SBI_DOMAIN_MEMREGION_WRITEABLE |
                                   SBI_DOMAIN_MEMREGION_EXECUTABLE),
                                  &root_memregs[root_memregs_count++]);

        /* Root domain memory region end */
        root_memregs[root_memregs_count].order = 0;

        /* Root domain boot HART id is same as coldboot HART id */
        root.boot_hartid = cold_hartid;

        /* Root domain next booting stage details */
        root.next_arg1 = scratch->next_arg1;
        root.next_addr = scratch->next_addr;
        root.next_mode = scratch->next_mode;

        /* Root domain possible and assigned HARTs */
        for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
                if (sbi_platform_hart_invalid(plat, i))
                        continue;
                sbi_hartmask_set_hart(i, &root_hmask);
        }

        return sbi_domain_register(&root, &root_hmask);
}