/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_string.h>

struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX] = { 0 };

static u32 domain_count = 0;
static bool domain_finalized = false;

static struct sbi_hartmask root_hmask = { 0 };

#define ROOT_REGION_MAX 16
static u32 root_memregs_count = 0;
static struct sbi_domain_memregion root_fw_region;
static struct sbi_domain_memregion root_memregs[ROOT_REGION_MAX + 1] = { 0 };

struct sbi_domain root = {
	.name = "root",
	.possible_harts = &root_hmask,
	.regions = root_memregs,
	.system_reset_allowed = true,
};

bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
{
	if (dom)
		return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);

	return false;
}
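
/*
 * Return a BITS_PER_LONG wide window of the domain's assigned-hart mask
 * starting at hart id hbase. The first mask word is shifted down by the
 * bit offset of hbase; if hbase is not word aligned, the low bits of the
 * following mask word are stitched into the high bits of the result.
 * For example, on RV64 with hbase = 36, bits 36..63 of word 0 land in
 * result bits 0..27 and bits 0..35 of word 1 land in result bits 28..63.
 */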
ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
				       ulong hbase)
{
	ulong ret, bword, boff;

	if (!dom)
		return 0;

	bword = BIT_WORD(hbase);
	boff = BIT_WORD_OFFSET(hbase);

	ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
	if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
		ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
			(BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
	}

	return ret;
}

static void domain_memregion_initfw(struct sbi_domain_memregion *reg)
{
	if (!reg)
		return;

	sbi_memcpy(reg, &root_fw_region, sizeof(*reg));
}
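
/*
 * Compute the smallest naturally aligned power-of-two (NAPOT) region
 * that covers [addr, addr + size - 1]. Starting from order =
 * log2roundup(size), the order grows until the aligned base at that
 * order contains both end points; an order of __riscv_xlen denotes the
 * whole address space. For example, addr = 0x80000000 with
 * size = 0x20000 yields base = 0x80000000 and order = 17.
 */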
void sbi_domain_memregion_init(unsigned long addr,
			       unsigned long size,
			       unsigned long flags,
			       struct sbi_domain_memregion *reg)
{
	unsigned long base = 0, order;

	for (order = log2roundup(size); order <= __riscv_xlen; order++) {
		if (order < __riscv_xlen) {
			base = addr & ~((1UL << order) - 1UL);
			if ((base <= addr) &&
			    (addr < (base + (1UL << order))) &&
			    (base <= (addr + size - 1UL)) &&
			    ((addr + size - 1UL) < (base + (1UL << order))))
				break;
		} else {
			base = 0;
			break;
		}
	}

	if (reg) {
		reg->base = base;
		reg->order = order;
		reg->flags = flags;
	}
}
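
/*
 * Check whether an address is accessible from a given privilege mode.
 * The requested access flags are folded into an R/W/X mask, the sorted
 * region list is scanned, and the first region containing the address
 * decides: the MMIO attribute must match and all requested permission
 * bits must be present. M-mode skips regions without the MMODE flag,
 * and an address not covered by any region is allowed for M-mode only.
 */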
bool sbi_domain_check_addr(const struct sbi_domain *dom,
			   unsigned long addr, unsigned long mode,
			   unsigned long access_flags)
{
	bool rmmio, mmio = false;
	struct sbi_domain_memregion *reg;
	unsigned long rstart, rend, rflags, rwx = 0;

	if (!dom)
		return false;

	if (access_flags & SBI_DOMAIN_READ)
		rwx |= SBI_DOMAIN_MEMREGION_READABLE;
	if (access_flags & SBI_DOMAIN_WRITE)
		rwx |= SBI_DOMAIN_MEMREGION_WRITEABLE;
	if (access_flags & SBI_DOMAIN_EXECUTE)
		rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
	if (access_flags & SBI_DOMAIN_MMIO)
		mmio = true;

	sbi_domain_for_each_memregion(dom, reg) {
		rflags = reg->flags;
		if (mode == PRV_M && !(rflags & SBI_DOMAIN_MEMREGION_MMODE))
			continue;

		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;
		if (rstart <= addr && addr <= rend) {
			rmmio = (rflags & SBI_DOMAIN_MEMREGION_MMIO) ?
				true : false;
			if (mmio != rmmio)
				return false;
			return ((rflags & rwx) == rwx) ? true : false;
		}
	}

	return (mode == PRV_M) ? true : false;
}

/*
 * Check if region complies with constraints: the order must lie in
 * [3, __riscv_xlen] (the smallest NAPOT region is 8 bytes), a full
 * address-space region must have base 0, and any other base must be
 * naturally aligned to its order.
 */
static bool is_region_valid(const struct sbi_domain_memregion *reg)
{
	if (reg->order < 3 || __riscv_xlen < reg->order)
		return false;

	if (reg->order == __riscv_xlen && reg->base != 0)
		return false;

	if (reg->order < __riscv_xlen && (reg->base & (BIT(reg->order) - 1)))
		return false;

	return true;
}

/** Check if regionA is a sub-region of regionB */
static bool is_region_subset(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	ulong regA_start = regA->base;
	ulong regA_end = regA->base + (BIT(regA->order) - 1);
	ulong regB_start = regB->base;
	ulong regB_end = regB->base + (BIT(regB->order) - 1);

	if ((regB_start <= regA_start) &&
	    (regA_start < regB_end) &&
	    (regB_start < regA_end) &&
	    (regA_end <= regB_end))
		return true;

	return false;
}

/** Check if regionA conflicts with regionB */
static bool is_region_conflict(const struct sbi_domain_memregion *regA,
			       const struct sbi_domain_memregion *regB)
{
	if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
	    regA->flags == regB->flags)
		return true;

	return false;
}

/** Check if regionA should be placed before regionB */
static bool is_region_before(const struct sbi_domain_memregion *regA,
			     const struct sbi_domain_memregion *regB)
{
	if (regA->order < regB->order)
		return true;

	if ((regA->order == regB->order) &&
	    (regA->base < regB->base))
		return true;

	return false;
}
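
/*
 * Validate and normalize a domain before it is registered: the possible
 * HART mask and region list must be present and well formed, the root
 * firmware region must appear in the region list, regions are sorted
 * ascending by (order, base) so that smaller (more specific) regions
 * are matched first by sbi_domain_check_addr(), the next booting stage
 * mode must be S-mode or U-mode, and the next address must be
 * executable under those regions.
 */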
static int sanitize_domain(const struct sbi_platform *plat,
			   struct sbi_domain *dom)
{
	u32 i, j, count;
	bool have_fw_reg;
	struct sbi_domain_memregion treg, *reg, *reg1;

	/* Check possible HARTs */
	if (!dom->possible_harts) {
		sbi_printf("%s: %s possible HART mask is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_hartmask_for_each_hart(i, dom->possible_harts) {
		if (sbi_platform_hart_invalid(plat, i)) {
			sbi_printf("%s: %s possible HART mask has invalid "
				   "hart %d\n", __func__, dom->name, i);
			return SBI_EINVAL;
		}
	}

	/* Check memory regions */
	if (!dom->regions) {
		sbi_printf("%s: %s regions is NULL\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}
	sbi_domain_for_each_memregion(dom, reg) {
		if (!is_region_valid(reg)) {
			sbi_printf("%s: %s has invalid region base=0x%lx "
				   "order=%lu flags=0x%lx\n", __func__,
				   dom->name, reg->base, reg->order,
				   reg->flags);
			return SBI_EINVAL;
		}
	}

	/* Count memory regions and check presence of firmware region */
	count = 0;
	have_fw_reg = false;
	sbi_domain_for_each_memregion(dom, reg) {
		if (reg->order == root_fw_region.order &&
		    reg->base == root_fw_region.base &&
		    reg->flags == root_fw_region.flags)
			have_fw_reg = true;
		count++;
	}
	if (!have_fw_reg) {
		sbi_printf("%s: %s does not have firmware region\n",
			   __func__, dom->name);
		return SBI_EINVAL;
	}

	/* Sort the memory regions ascending by (order, base) */
	for (i = 0; i < (count - 1); i++) {
		reg = &dom->regions[i];
		for (j = i + 1; j < count; j++) {
			reg1 = &dom->regions[j];

			if (is_region_conflict(reg1, reg)) {
				sbi_printf("%s: %s conflict between regions "
					   "(base=0x%lx order=%lu flags=0x%lx) and "
					   "(base=0x%lx order=%lu flags=0x%lx)\n",
					   __func__, dom->name,
					   reg->base, reg->order, reg->flags,
					   reg1->base, reg1->order, reg1->flags);
				return SBI_EINVAL;
			}

			if (!is_region_before(reg1, reg))
				continue;

			sbi_memcpy(&treg, reg1, sizeof(treg));
			sbi_memcpy(reg1, reg, sizeof(treg));
			sbi_memcpy(reg, &treg, sizeof(treg));
		}
	}

	/*
	 * We don't need to check the boot HART id of the domain because
	 * if the boot HART id is not possible/assigned to this domain
	 * then it won't be started at boot-time by sbi_domain_finalize().
	 */

	/*
	 * Check next mode
	 *
	 * We only allow the next mode to be S-mode or U-mode, so that we
	 * can protect the M-mode context and enforce checks on memory
	 * accesses.
	 */
	if (dom->next_mode != PRV_S &&
	    dom->next_mode != PRV_U) {
		sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
			   __func__, dom->name, dom->next_mode);
		return SBI_EINVAL;
	}

	/* Check next address and next mode */
	if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
				   SBI_DOMAIN_EXECUTE)) {
		sbi_printf("%s: %s next booting stage address 0x%lx can't "
			   "execute\n", __func__, dom->name, dom->next_addr);
		return SBI_EINVAL;
	}

	return 0;
}
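
/*
 * Print the details of a single domain. Example output for a root
 * domain on a two-HART platform (values and spacing illustrative):
 *
 *   Domain0 Name        : root
 *   Domain0 Boot HART   : 0
 *   Domain0 HARTs       : 0*,1*
 *   Domain0 Region00    : 0x0000000080000000-0x000000008001ffff ()
 *   Domain0 Region01    : 0x0000000000000000-0xffffffffffffffff (R,W,X)
 *   Domain0 Next Address: 0x0000000080200000
 *   Domain0 Next Mode   : S-mode
 *   Domain0 SysReset    : yes
 */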
void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
{
	u32 i, k;
	unsigned long rstart, rend;
	struct sbi_domain_memregion *reg;

	sbi_printf("Domain%d Name %s: %s\n",
		   dom->index, suffix, dom->name);

	sbi_printf("Domain%d Boot HART %s: %d\n",
		   dom->index, suffix, dom->boot_hartid);

	k = 0;
	sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
	sbi_hartmask_for_each_hart(i, dom->possible_harts)
		sbi_printf("%s%d%s", (k++) ? "," : "",
			   i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
	sbi_printf("\n");

	i = 0;
	sbi_domain_for_each_memregion(dom, reg) {
		rstart = reg->base;
		rend = (reg->order < __riscv_xlen) ?
			rstart + ((1UL << reg->order) - 1) : -1UL;

		sbi_printf("Domain%d Region%02d %s: 0x%" PRILX "-0x%" PRILX " ",
			   dom->index, i, suffix, rstart, rend);

		k = 0;
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
			sbi_printf("%cM", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
			sbi_printf("%cI", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
			sbi_printf("%cR", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
			sbi_printf("%cW", (k++) ? ',' : '(');
		if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
			sbi_printf("%cX", (k++) ? ',' : '(');
		sbi_printf("%s\n", (k++) ? ")" : "()");

		i++;
	}

	sbi_printf("Domain%d Next Address %s: 0x%" PRILX "\n",
		   dom->index, suffix, dom->next_addr);

	sbi_printf("Domain%d Next Arg1 %s: 0x%" PRILX "\n",
		   dom->index, suffix, dom->next_arg1);

	sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
	switch (dom->next_mode) {
	case PRV_M:
		sbi_printf("M-mode\n");
		break;
	case PRV_S:
		sbi_printf("S-mode\n");
		break;
	case PRV_U:
		sbi_printf("U-mode\n");
		break;
	default:
		sbi_printf("Unknown\n");
		break;
	}

	sbi_printf("Domain%d SysReset %s: %s\n",
		   dom->index, suffix,
		   (dom->system_reset_allowed) ? "yes" : "no");
}

void sbi_domain_dump_all(const char *suffix)
{
	u32 i;
	const struct sbi_domain *dom;

	sbi_domain_for_each(i, dom) {
		sbi_domain_dump(dom, suffix);
		sbi_printf("\n");
	}
}
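
/*
 * Register a new domain. This must happen before sbi_domain_finalize()
 * runs: the domain is sanitized, given the next free index, and each
 * HART in assign_mask that is also a possible HART of the domain is
 * moved to it (a HART previously assigned to another domain is removed
 * from that domain's assigned set). If the cold boot HART ends up
 * assigned here, it overrides the domain's configured boot HART.
 */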
int sbi_domain_register(struct sbi_domain *dom,
			const struct sbi_hartmask *assign_mask)
{
	u32 i;
	int rc;
	struct sbi_domain *tdom;
	u32 cold_hartid = current_hartid();
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!dom || !assign_mask || domain_finalized)
		return SBI_EINVAL;

	/* Check if domain already discovered */
	sbi_domain_for_each(i, tdom) {
		if (tdom == dom)
			return SBI_EALREADY;
	}

	/*
	 * Ensure that we have room for Domain Index to
	 * HART ID mapping
	 */
	if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
		sbi_printf("%s: No room for %s\n",
			   __func__, dom->name);
		return SBI_ENOSPC;
	}

	/* Sanitize discovered domain */
	rc = sanitize_domain(plat, dom);
	if (rc) {
		sbi_printf("%s: sanity checks failed for"
			   " %s (error %d)\n", __func__,
			   dom->name, rc);
		return rc;
	}

	/* Assign index to domain */
	dom->index = domain_count++;
	domidx_to_domain_table[dom->index] = dom;

	/* Clear assigned HARTs of domain */
	sbi_hartmask_clear_all(&dom->assigned_harts);

	/* Assign domain to HART if HART is a possible HART */
	sbi_hartmask_for_each_hart(i, assign_mask) {
		if (!sbi_hartmask_test_hart(i, dom->possible_harts))
			continue;

		tdom = hartid_to_domain_table[i];
		if (tdom)
			sbi_hartmask_clear_hart(i,
					&tdom->assigned_harts);
		hartid_to_domain_table[i] = dom;
		sbi_hartmask_set_hart(i, &dom->assigned_harts);

		/*
		 * If the cold boot HART is assigned to this domain then
		 * override the boot HART of this domain.
		 */
		if (i == cold_hartid &&
		    dom->boot_hartid != cold_hartid) {
			sbi_printf("Domain%d Boot HARTID forced to"
				   " %d\n", dom->index, cold_hartid);
			dom->boot_hartid = cold_hartid;
		}
	}

	return 0;
}
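
/*
 * Append a new memory region to the root domain. The region must not
 * conflict with an existing root region and can only be added before
 * finalization. After appending, the root domain is re-sanitized so
 * the list stays sorted, and neighbouring regions of equal order and
 * flags whose combined span is naturally aligned are merged into one
 * region of the next higher order, repeating until no merge applies.
 */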
int sbi_domain_root_add_memregion(const struct sbi_domain_memregion *reg)
{
	int rc;
	bool reg_merged;
	struct sbi_domain_memregion *nreg, *nreg1, *nreg2;
	const struct sbi_platform *plat = sbi_platform_thishart_ptr();

	/* Sanity checks */
	if (!reg || domain_finalized ||
	    (root.regions != root_memregs) ||
	    (ROOT_REGION_MAX <= root_memregs_count))
		return SBI_EINVAL;

	/* Check for conflicts */
	sbi_domain_for_each_memregion(&root, nreg) {
		if (is_region_conflict(reg, nreg)) {
			sbi_printf("%s: is_region_conflict check failed:"
				   " 0x%lx conflicts with existing 0x%lx\n",
				   __func__, reg->base, nreg->base);
			return SBI_EALREADY;
		}
	}

	/* Append the memregion to root memregions */
	nreg = &root_memregs[root_memregs_count];
	sbi_memcpy(nreg, reg, sizeof(*reg));
	root_memregs_count++;
	root_memregs[root_memregs_count].order = 0;

	/* Sort and optimize root regions */
	do {
		/* Sanitize the root domain so that memregions are sorted */
		rc = sanitize_domain(plat, &root);
		if (rc) {
			sbi_printf("%s: sanity checks failed for"
				   " %s (error %d)\n", __func__,
				   root.name, rc);
			return rc;
		}

		/* Merge consecutive memregions with same order and flags */
		reg_merged = false;
		sbi_domain_for_each_memregion(&root, nreg) {
			nreg1 = nreg + 1;
			if (!nreg1->order)
				continue;

			if (!(nreg->base & (BIT(nreg->order + 1) - 1)) &&
			    (nreg->base + BIT(nreg->order)) == nreg1->base &&
			    nreg->order == nreg1->order &&
			    nreg->flags == nreg1->flags) {
				nreg->order++;
				while (nreg1->order) {
					nreg2 = nreg1 + 1;
					sbi_memcpy(nreg1, nreg2,
						   sizeof(*nreg1));
					nreg1++;
				}
				reg_merged = true;
				root_memregs_count--;
			}
		}
	} while (reg_merged);

	return 0;
}
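
/*
 * Add an arbitrary [addr, addr + size) range to the root domain by
 * carving it into naturally aligned power-of-two chunks no larger than
 * align: while the cursor is misaligned, the chunk size is the lowest
 * set bit of the cursor; once aligned, chunks of up to align bytes are
 * used. For example, addr = 0x1000, size = 0x7000, align = 0x4000
 * produces chunks of 0x1000, 0x2000, and 0x4000 bytes.
 */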
int sbi_domain_root_add_memrange(unsigned long addr, unsigned long size,
				 unsigned long align,
				 unsigned long region_flags)
{
	int rc;
	unsigned long pos, end, rsize;
	struct sbi_domain_memregion reg;

	pos = addr;
	end = addr + size;
	while (pos < end) {
		rsize = pos & (align - 1);
		if (rsize)
			rsize = 1UL << sbi_ffs(pos);
		else
			rsize = ((end - pos) < align) ?
				(end - pos) : align;

		sbi_domain_memregion_init(pos, rsize, region_flags, &reg);
		rc = sbi_domain_root_add_memregion(&reg);
		if (rc)
			return rc;
		pos += rsize;
	}

	return 0;
}
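
/*
 * Finalize all domains during cold boot. The platform gets one chance
 * to populate additional domains, then the boot HART of every domain
 * is started: the cold boot HART simply has its scratch area pointed
 * at its domain's next booting stage, while every other boot HART is
 * brought up through sbi_hsm_hart_start(). Finally the finalized flag
 * is set so the root region list can no longer change.
 */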
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, dhart;
	struct sbi_domain *dom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Initialize and populate domains for the platform */
	rc = sbi_platform_domains_init(plat);
	if (rc) {
		sbi_printf("%s: platform domains_init() failed (error %d)\n",
			   __func__, rc);
		return rc;
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART */
		dhart = dom->boot_hartid;

		/* Ignore if boot HART is off limits */
		if (SBI_HARTMASK_MAX_BITS <= dhart)
			continue;

		/* Ignore if boot HART not possible for this domain */
		if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART assigned to a different domain */
		if (sbi_hartid_to_domain(dhart) != dom ||
		    !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
			continue;

		/* Startup boot HART of domain */
		if (dhart == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL, dhart,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc) {
				sbi_printf("%s: failed to start boot HART %d"
					   " for %s (error %d)\n", __func__,
					   dhart, dom->name, rc);
				return rc;
			}
		}
	}

	/*
	 * Set the finalized flag so that the root domain
	 * regions can't be changed.
	 */
	domain_finalized = true;

	return 0;
}
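
/*
 * Bootstrap the root domain during early cold boot. The firmware image
 * itself becomes the first root region with no permission flags, so
 * S-mode and U-mode cannot touch OpenSBI while M-mode (which skips
 * regions without the MMODE flag and defaults to allow) still can; a
 * second region spanning the whole address space grants R/W/X to the
 * lower privilege levels. All valid HARTs become possible HARTs of the
 * root domain, which is then registered with all of them assigned.
 */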
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Root domain firmware memory region */
	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_size, 0,
				  &root_fw_region);
	domain_memregion_initfw(&root_memregs[root_memregs_count++]);

	/* Root domain allow everything memory region */
	sbi_domain_memregion_init(0, ~0UL,
				  (SBI_DOMAIN_MEMREGION_READABLE |
				   SBI_DOMAIN_MEMREGION_WRITEABLE |
				   SBI_DOMAIN_MEMREGION_EXECUTABLE),
				  &root_memregs[root_memregs_count++]);

	/* Root domain memory region end */
	root_memregs[root_memregs_count].order = 0;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Root domain possible and assigned HARTs */
	for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
		if (sbi_platform_hart_invalid(plat, i))
			continue;
		sbi_hartmask_set_hart(i, &root_hmask);
	}

	return sbi_domain_register(&root, &root_hmask);
}