fdt_domain.c

// SPDX-License-Identifier: BSD-2-Clause
/*
 * fdt_domain.c - Flat Device Tree Domain helper routines
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <libfdt.h>
#include <libfdt_env.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/fdt/fdt_domain.h>
#include <sbi_utils/fdt/fdt_helper.h>
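
/*
 * Iterate over each OpenSBI domain instance node in the given FDT.
 *
 * Domain instance nodes are the subnodes (compatible
 * "opensbi,domain,instance") of the "opensbi,domain,config" node
 * found under /chosen. The callback @fn is invoked once per instance
 * node; a non-zero return value from @fn stops the iteration and is
 * propagated to the caller.
 */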
int fdt_iterate_each_domain(void *fdt, void *opaque,
			    int (*fn)(void *fdt, int domain_offset,
				      void *opaque))
{
	int rc, doffset, poffset;

	if (!fdt || !fn)
		return SBI_EINVAL;

	poffset = fdt_path_offset(fdt, "/chosen");
	if (poffset < 0)
		return 0;
	poffset = fdt_node_offset_by_compatible(fdt, poffset,
						"opensbi,domain,config");
	if (poffset < 0)
		return 0;

	fdt_for_each_subnode(doffset, fdt, poffset) {
		if (fdt_node_check_compatible(fdt, doffset,
					      "opensbi,domain,instance"))
			continue;

		rc = fn(fdt, doffset, opaque);
		if (rc)
			return rc;
	}

	return 0;
}
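
/*
 * Iterate over each memory region referenced by a domain instance node.
 *
 * The "regions" property of a domain instance is a list of
 * <phandle, access> pairs, where each phandle points at a node with
 * compatible "opensbi,domain,memregion". The callback @fn receives the
 * region node offset and the raw access word for every pair.
 */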
int fdt_iterate_each_memregion(void *fdt, int domain_offset, void *opaque,
			       int (*fn)(void *fdt, int domain_offset,
					 int region_offset, u32 region_access,
					 void *opaque))
{
	u32 i, rcount;
	int rc, len, region_offset;
	const u32 *regions;

	if (!fdt || (domain_offset < 0) || !fn)
		return SBI_EINVAL;

	if (fdt_node_check_compatible(fdt, domain_offset,
				      "opensbi,domain,instance"))
		return SBI_EINVAL;

	regions = fdt_getprop(fdt, domain_offset, "regions", &len);
	if (!regions)
		return 0;
	rcount = (u32)len / (sizeof(u32) * 2);

	for (i = 0; i < rcount; i++) {
		region_offset = fdt_node_offset_by_phandle(fdt,
					fdt32_to_cpu(regions[2 * i]));
		if (region_offset < 0)
			return region_offset;

		if (fdt_node_check_compatible(fdt, region_offset,
					      "opensbi,domain,memregion"))
			return SBI_EINVAL;

		rc = fn(fdt, domain_offset, region_offset,
			fdt32_to_cpu(regions[(2 * i) + 1]), opaque);
		if (rc)
			return rc;
	}

	return 0;
}
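
/*
 * fdt_iterate_each_domain() callback: record the node offset of the
 * domain instance whose name matches the requested domain name.
 */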
struct __fixup_find_domain_offset_info {
	const char *name;
	int *doffset;
};

static int __fixup_find_domain_offset(void *fdt, int doff, void *p)
{
	struct __fixup_find_domain_offset_info *fdo = p;

	if (!strncmp(fdo->name, fdt_get_name(fdt, doff, NULL),
		     strlen(fdo->name)))
		*fdo->doffset = doff;

	return 0;
}
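
/*
 * fdt_iterate_each_memregion() callback: count the device phandles
 * listed in the "devices" property of regions whose access word sets
 * none of the readable/writable/executable bits. These are the device
 * nodes that will be marked "disabled" in the domain's copy of the FDT.
 */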
#define DISABLE_DEVICES_MASK	(SBI_DOMAIN_MEMREGION_READABLE | \
				 SBI_DOMAIN_MEMREGION_WRITEABLE | \
				 SBI_DOMAIN_MEMREGION_EXECUTABLE)

static int __fixup_count_disable_devices(void *fdt, int doff, int roff,
					 u32 perm, void *p)
{
	int len;
	u32 *dcount = p;

	if (perm & DISABLE_DEVICES_MASK)
		return 0;

	len = 0;
	if (fdt_getprop(fdt, roff, "devices", &len))
		*dcount += len / sizeof(u32);

	return 0;
}
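
/*
 * fdt_iterate_each_memregion() callback: set status = "disabled" on
 * every device node referenced by the "devices" property of regions
 * whose access word sets none of the readable/writable/executable bits.
 */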
static int __fixup_disable_devices(void *fdt, int doff, int roff,
				   u32 raccess, void *p)
{
	int i, len, coff;
	const u32 *devices;

	if (raccess & DISABLE_DEVICES_MASK)
		return 0;

	len = 0;
	devices = fdt_getprop(fdt, roff, "devices", &len);
	if (!devices)
		return 0;
	len = len / sizeof(u32);

	for (i = 0; i < len; i++) {
		coff = fdt_node_offset_by_phandle(fdt,
						  fdt32_to_cpu(devices[i]));
		if (coff < 0)
			return coff;

		fdt_setprop_string(fdt, coff, "status", "disabled");
	}

	return 0;
}
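
/*
 * Fix up the FDT handed to the current HART's domain: drop the
 * "opensbi-domain" assignment property from CPU nodes, disable device
 * nodes that the domain has no access to (non-root domains only), and
 * NOP out the OpenSBI domain configuration node under /chosen.
 */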
void fdt_domain_fixup(void *fdt)
{
	u32 i, dcount;
	int err, poffset, doffset;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct __fixup_find_domain_offset_info fdo;

	/* Remove the domain assignment DT property from CPU DT nodes */
	poffset = fdt_path_offset(fdt, "/cpus");
	if (poffset < 0)
		return;
	fdt_for_each_subnode(doffset, fdt, poffset) {
		err = fdt_parse_hart_id(fdt, doffset, &i);
		if (err)
			continue;

		if (!fdt_node_is_enabled(fdt, doffset))
			continue;

		fdt_nop_property(fdt, doffset, "opensbi-domain");
	}

	/* Skip device disable for root domain */
	if (!dom->index)
		goto skip_device_disable;

	/* Find current domain DT node */
	doffset = -1;
	fdo.name = dom->name;
	fdo.doffset = &doffset;
	fdt_iterate_each_domain(fdt, &fdo, __fixup_find_domain_offset);
	if (doffset < 0)
		goto skip_device_disable;

	/* Count current domain device DT nodes to be disabled */
	dcount = 0;
	fdt_iterate_each_memregion(fdt, doffset, &dcount,
				   __fixup_count_disable_devices);
	if (!dcount)
		goto skip_device_disable;

	/* Expand FDT based on device DT nodes to be disabled */
	err = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + dcount * 32);
	if (err < 0)
		return;

	/* Again find current domain DT node */
	doffset = -1;
	fdo.name = dom->name;
	fdo.doffset = &doffset;
	fdt_iterate_each_domain(fdt, &fdo, __fixup_find_domain_offset);
	if (doffset < 0)
		goto skip_device_disable;

	/* Disable device DT nodes for current domain */
	fdt_iterate_each_memregion(fdt, doffset, NULL,
				   __fixup_disable_devices);

skip_device_disable:

	/* Remove the OpenSBI domain config DT node */
	poffset = fdt_path_offset(fdt, "/chosen");
	if (poffset < 0)
		return;
	poffset = fdt_node_offset_by_compatible(fdt, poffset,
						"opensbi,domain,config");
	if (poffset < 0)
		return;
	fdt_nop_node(fdt, poffset);
}
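
/* Static storage for domains, hart masks, and memory regions parsed from the FDT */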
#define FDT_DOMAIN_MAX_COUNT		8
#define FDT_DOMAIN_REGION_MAX_COUNT	16

static u32 fdt_domains_count;
static struct sbi_domain fdt_domains[FDT_DOMAIN_MAX_COUNT];
static struct sbi_hartmask fdt_masks[FDT_DOMAIN_MAX_COUNT];
static struct sbi_domain_memregion
	fdt_regions[FDT_DOMAIN_MAX_COUNT][FDT_DOMAIN_REGION_MAX_COUNT + 1];
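
/*
 * fdt_iterate_each_memregion() callback: convert one
 * "opensbi,domain,memregion" node ("base", "order", optional "mmio")
 * plus its access word into the next sbi_domain_memregion entry of the
 * domain currently being parsed.
 */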
static int __fdt_parse_region(void *fdt, int domain_offset,
			      int region_offset, u32 region_access,
			      void *opaque)
{
	int len;
	u32 val32;
	u64 val64;
	const u32 *val;
	u32 *region_count = opaque;
	struct sbi_domain_memregion *region;

	/*
	 * Non-root domains cannot add a region with only M-mode
	 * access permissions. M-mode regions can only be part of
	 * root domain.
	 *
	 * SU permission bits can't be all zeroes when M-mode permission
	 * bits have at least one bit set.
	 */
	if (!(region_access & SBI_DOMAIN_MEMREGION_SU_ACCESS_MASK)
	    && (region_access & SBI_DOMAIN_MEMREGION_M_ACCESS_MASK))
		return SBI_EINVAL;

	/* Find next region of the domain */
	if (FDT_DOMAIN_REGION_MAX_COUNT <= *region_count)
		return SBI_EINVAL;
	region = &fdt_regions[fdt_domains_count][*region_count];

	/* Read "base" DT property */
	val = fdt_getprop(fdt, region_offset, "base", &len);
	if (!val || len != 8)
		return SBI_EINVAL;
	val64 = fdt32_to_cpu(val[0]);
	val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	region->base = val64;

	/* Read "order" DT property */
	val = fdt_getprop(fdt, region_offset, "order", &len);
	if (!val || len != 4)
		return SBI_EINVAL;
	val32 = fdt32_to_cpu(*val);
	if (val32 < 3 || __riscv_xlen < val32)
		return SBI_EINVAL;
	region->order = val32;

	/* Read "mmio" DT property */
	region->flags = region_access & SBI_DOMAIN_MEMREGION_ACCESS_MASK;
	if (fdt_get_property(fdt, region_offset, "mmio", NULL))
		region->flags |= SBI_DOMAIN_MEMREGION_MMIO;

	(*region_count)++;

	return 0;
}
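
/*
 * fdt_iterate_each_domain() callback: build an sbi_domain from one
 * "opensbi,domain,instance" node (name, possible HARTs, memory regions,
 * boot HART, next-stage address/arg1/mode, reset/suspend permissions,
 * HART assignment) and register it. Missing boot-hart and next-stage
 * properties of the coldboot HART's domain default to the current
 * boot context.
 */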
static int __fdt_parse_domain(void *fdt, int domain_offset, void *opaque)
{
	u32 val32;
	u64 val64;
	const u32 *val;
	struct sbi_domain *dom;
	struct sbi_hartmask *mask;
	struct sbi_hartmask assign_mask;
	int *cold_domain_offset = opaque;
	struct sbi_domain_memregion *reg, *regions;
	int i, err, len, cpus_offset, cpu_offset, doffset;

	/* Sanity check on maximum domains we can handle */
	if (FDT_DOMAIN_MAX_COUNT <= fdt_domains_count)
		return SBI_EINVAL;
	dom = &fdt_domains[fdt_domains_count];
	mask = &fdt_masks[fdt_domains_count];
	regions = &fdt_regions[fdt_domains_count][0];

	/* Read DT node name */
	strncpy(dom->name, fdt_get_name(fdt, domain_offset, NULL),
		sizeof(dom->name));
	dom->name[sizeof(dom->name) - 1] = '\0';

	/* Setup possible HARTs mask */
	SBI_HARTMASK_INIT(mask);
	dom->possible_harts = mask;
	val = fdt_getprop(fdt, domain_offset, "possible-harts", &len);
	len = len / sizeof(u32);
	if (val && len) {
		for (i = 0; i < len; i++) {
			cpu_offset = fdt_node_offset_by_phandle(fdt,
						fdt32_to_cpu(val[i]));
			if (cpu_offset < 0)
				return cpu_offset;

			err = fdt_parse_hart_id(fdt, cpu_offset, &val32);
			if (err)
				return err;

			if (!fdt_node_is_enabled(fdt, cpu_offset))
				continue;

			sbi_hartmask_set_hart(val32, mask);
		}
	}

	/* Setup memregions from DT */
	val32 = 0;
	memset(regions, 0,
	       sizeof(*regions) * (FDT_DOMAIN_REGION_MAX_COUNT + 1));
	dom->regions = regions;
	err = fdt_iterate_each_memregion(fdt, domain_offset, &val32,
					 __fdt_parse_region);
	if (err)
		return err;

	/*
	 * Copy over root domain memregions which don't allow
	 * read, write and execute from lower privilege modes.
	 *
	 * These root domain memregions without read, write,
	 * and execute permissions include:
	 * 1) firmware region protecting the firmware memory
	 * 2) mmio regions protecting M-mode only mmio devices
	 */
	sbi_domain_for_each_memregion(&root, reg) {
		if ((reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE) ||
		    (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE) ||
		    (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE))
			continue;

		if (FDT_DOMAIN_REGION_MAX_COUNT <= val32)
			return SBI_EINVAL;
		memcpy(&regions[val32++], reg, sizeof(*reg));
	}

	/* Read "boot-hart" DT property */
	val32 = -1U;
	val = fdt_getprop(fdt, domain_offset, "boot-hart", &len);
	if (val && len >= 4) {
		cpu_offset = fdt_node_offset_by_phandle(fdt,
							fdt32_to_cpu(*val));
		if (cpu_offset >= 0 && fdt_node_is_enabled(fdt, cpu_offset))
			fdt_parse_hart_id(fdt, cpu_offset, &val32);
	} else {
		if (domain_offset == *cold_domain_offset)
			val32 = current_hartid();
	}
	dom->boot_hartid = val32;

	/* Read "next-arg1" DT property */
	val64 = 0;
	val = fdt_getprop(fdt, domain_offset, "next-arg1", &len);
	if (val && len >= 8) {
		val64 = fdt32_to_cpu(val[0]);
		val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	} else {
		if (domain_offset == *cold_domain_offset)
			val64 = sbi_scratch_thishart_ptr()->next_arg1;
	}
	dom->next_arg1 = val64;

	/* Read "next-addr" DT property */
	val64 = 0;
	val = fdt_getprop(fdt, domain_offset, "next-addr", &len);
	if (val && len >= 8) {
		val64 = fdt32_to_cpu(val[0]);
		val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	} else {
		if (domain_offset == *cold_domain_offset)
			val64 = sbi_scratch_thishart_ptr()->next_addr;
	}
	dom->next_addr = val64;

	/* Read "next-mode" DT property */
	val32 = 0x1;
	val = fdt_getprop(fdt, domain_offset, "next-mode", &len);
	if (val && len >= 4) {
		val32 = fdt32_to_cpu(val[0]);
		if (val32 != 0x0 && val32 != 0x1)
			val32 = 0x1;
	} else {
		if (domain_offset == *cold_domain_offset)
			val32 = sbi_scratch_thishart_ptr()->next_mode;
	}
	dom->next_mode = val32;

	/* Read "system-reset-allowed" DT property */
	if (fdt_get_property(fdt, domain_offset,
			     "system-reset-allowed", NULL))
		dom->system_reset_allowed = true;
	else
		dom->system_reset_allowed = false;

	/* Read "system-suspend-allowed" DT property */
	if (fdt_get_property(fdt, domain_offset,
			     "system-suspend-allowed", NULL))
		dom->system_suspend_allowed = true;
	else
		dom->system_suspend_allowed = false;

	/* Find /cpus DT node */
	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		return cpus_offset;

	/* HART to domain assignment mask based on CPU DT nodes */
	sbi_hartmask_clear_all(&assign_mask);
	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		err = fdt_parse_hart_id(fdt, cpu_offset, &val32);
		if (err)
			continue;

		if (SBI_HARTMASK_MAX_BITS <= val32)
			continue;

		if (!fdt_node_is_enabled(fdt, cpu_offset))
			continue;

		val = fdt_getprop(fdt, cpu_offset, "opensbi-domain", &len);
		if (!val || len < 4)
			return SBI_EINVAL;

		doffset = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
		if (doffset < 0)
			return doffset;

		if (doffset == domain_offset)
			sbi_hartmask_set_hart(val32, &assign_mask);
	}

	/* Increment domains count */
	fdt_domains_count++;

	/* Register the domain */
	return sbi_domain_register(dom, &assign_mask);
}
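
/*
 * Populate OpenSBI domains from the FDT.
 *
 * An illustrative sketch of the device tree layout this parser
 * consumes (node names, addresses and access values below are made up
 * for the example and are not authoritative):
 *
 *	chosen {
 *		opensbi-domains {
 *			compatible = "opensbi,domain,config";
 *
 *			tmem: tmem {
 *				compatible = "opensbi,domain,memregion";
 *				base = <0x0 0x80100000>;
 *				order = <20>;
 *			};
 *
 *			tdomain: trusted-domain {
 *				compatible = "opensbi,domain,instance";
 *				possible-harts = <&cpu0>;
 *				regions = <&tmem 0x7>;
 *				boot-hart = <&cpu0>;
 *				next-addr = <0x0 0x80100000>;
 *				next-arg1 = <0x0 0x0>;
 *				next-mode = <0x0>;
 *			};
 *		};
 *	};
 *
 * Each CPU node may carry an "opensbi-domain" phandle pointing at the
 * domain instance it is assigned to; the coldboot HART's assignment is
 * looked up first so that its domain can inherit boot-time defaults.
 */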
int fdt_domains_populate(void *fdt)
{
	const u32 *val;
	int cold_domain_offset;
	u32 hartid, cold_hartid;
	int err, len, cpus_offset, cpu_offset;

	/* Sanity checks */
	if (!fdt)
		return SBI_EINVAL;

	/* Find /cpus DT node */
	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		return cpus_offset;

	/* Find coldboot HART domain DT node offset */
	cold_domain_offset = -1;
	cold_hartid = current_hartid();
	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		err = fdt_parse_hart_id(fdt, cpu_offset, &hartid);
		if (err)
			continue;

		if (hartid != cold_hartid)
			continue;

		if (!fdt_node_is_enabled(fdt, cpu_offset))
			continue;

		val = fdt_getprop(fdt, cpu_offset, "opensbi-domain", &len);
		if (val && len >= 4)
			cold_domain_offset = fdt_node_offset_by_phandle(fdt,
						fdt32_to_cpu(*val));

		break;
	}

	/* Iterate over each domain in FDT and populate details */
	return fdt_iterate_each_domain(fdt, &cold_domain_offset,
				       __fdt_parse_domain);
}