fdt_domain.c

// SPDX-License-Identifier: BSD-2-Clause
/*
 * fdt_domain.c - Flat Device Tree Domain helper routines
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <libfdt.h>
#include <libfdt_env.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_scratch.h>
#include <sbi_utils/fdt/fdt_domain.h>
#include <sbi_utils/fdt/fdt_helper.h>
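
/*
 * The helpers below parse the OpenSBI domain configuration found under
 * the /chosen DT node. An illustrative (non-normative) sketch of the
 * bindings being parsed; node names, labels, and addresses here are
 * made up for illustration, and OpenSBI's docs/domain_support.md is the
 * authoritative description:
 *
 *	chosen {
 *		opensbi-domains {
 *			compatible = "opensbi,domain,config";
 *
 *			tmem: tmem {
 *				compatible = "opensbi,domain,memregion";
 *				base = <0x0 0x80100000>;
 *				order = <20>;
 *			};
 *
 *			tdomain: trusted-domain {
 *				compatible = "opensbi,domain,instance";
 *				possible-harts = <&cpu0>;
 *				regions = <&tmem 0x7>;
 *				boot-hart = <&cpu0>;
 *				next-addr = <0x0 0x80100000>;
 *				next-mode = <0x0>;
 *				system-reset-allowed;
 *			};
 *		};
 *	};
 *
 * Each CPU DT node may also carry an "opensbi-domain" property holding
 * the phandle of the domain instance that CPU is assigned to.
 */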
int fdt_iterate_each_domain(void *fdt, void *opaque,
			    int (*fn)(void *fdt, int domain_offset,
				      void *opaque))
{
	int rc, doffset, poffset;

	if (!fdt || !fn)
		return SBI_EINVAL;

	poffset = fdt_path_offset(fdt, "/chosen");
	if (poffset < 0)
		return 0;
	poffset = fdt_node_offset_by_compatible(fdt, poffset,
						"opensbi,domain,config");
	if (poffset < 0)
		return 0;

	fdt_for_each_subnode(doffset, fdt, poffset) {
		if (fdt_node_check_compatible(fdt, doffset,
					      "opensbi,domain,instance"))
			continue;

		rc = fn(fdt, doffset, opaque);
		if (rc)
			return rc;
	}

	return 0;
}
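
/*
 * Iterate over the "regions" property of a domain instance node. The
 * property is a list of (memregion phandle, access permissions) u32
 * pairs; fn() is invoked once per pair with the resolved memregion
 * node offset and the access permission word.
 */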
int fdt_iterate_each_memregion(void *fdt, int domain_offset, void *opaque,
			       int (*fn)(void *fdt, int domain_offset,
					 int region_offset, u32 region_access,
					 void *opaque))
{
	u32 i, rcount;
	int rc, len, region_offset;
	const u32 *regions;

	if (!fdt || (domain_offset < 0) || !fn)
		return SBI_EINVAL;
	if (fdt_node_check_compatible(fdt, domain_offset,
				      "opensbi,domain,instance"))
		return SBI_EINVAL;

	regions = fdt_getprop(fdt, domain_offset, "regions", &len);
	if (!regions)
		return 0;
	rcount = (u32)len / (sizeof(u32) * 2);

	for (i = 0; i < rcount; i++) {
		region_offset = fdt_node_offset_by_phandle(fdt,
					fdt32_to_cpu(regions[2 * i]));
		if (region_offset < 0)
			return region_offset;

		if (fdt_node_check_compatible(fdt, region_offset,
					      "opensbi,domain,memregion"))
			return SBI_EINVAL;

		rc = fn(fdt, domain_offset, region_offset,
			fdt32_to_cpu(regions[(2 * i) + 1]), opaque);
		if (rc)
			return rc;
	}

	return 0;
}

struct __fixup_find_domain_offset_info {
	const char *name;
	int *doffset;
};

static int __fixup_find_domain_offset(void *fdt, int doff, void *p)
{
	struct __fixup_find_domain_offset_info *fdo = p;

	if (!strncmp(fdo->name, fdt_get_name(fdt, doff, NULL),
		     strlen(fdo->name)))
		*fdo->doffset = doff;

	return 0;
}
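
/*
 * Fixup helpers: a memregion that grants no read, write, or execute
 * access to lower privilege modes is M-mode only, so any device DT
 * nodes listed in its "devices" property must be disabled in the FDT
 * seen by the current domain. The first callback below sizes the job,
 * the second marks each listed device status = "disabled".
 */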
#define DISABLE_DEVICES_MASK	(SBI_DOMAIN_MEMREGION_READABLE | \
				 SBI_DOMAIN_MEMREGION_WRITEABLE | \
				 SBI_DOMAIN_MEMREGION_EXECUTABLE)

static int __fixup_count_disable_devices(void *fdt, int doff, int roff,
					 u32 perm, void *p)
{
	int len;
	u32 *dcount = p;

	if (perm & DISABLE_DEVICES_MASK)
		return 0;

	len = 0;
	if (fdt_getprop(fdt, roff, "devices", &len))
		*dcount += len / sizeof(u32);

	return 0;
}

static int __fixup_disable_devices(void *fdt, int doff, int roff,
				   u32 raccess, void *p)
{
	int i, len, coff;
	const u32 *devices;

	if (raccess & DISABLE_DEVICES_MASK)
		return 0;

	len = 0;
	devices = fdt_getprop(fdt, roff, "devices", &len);
	if (!devices)
		return 0;
	len = len / sizeof(u32);

	for (i = 0; i < len; i++) {
		coff = fdt_node_offset_by_phandle(fdt,
						  fdt32_to_cpu(devices[i]));
		if (coff < 0)
			return coff;

		fdt_setprop_string(fdt, coff, "status", "disabled");
	}

	return 0;
}
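
/*
 * Fix up the FDT that will be passed to the next booting stage of the
 * current domain: strip the "opensbi-domain" assignment property from
 * all CPU DT nodes, disable device DT nodes belonging to M-mode only
 * memregions of the current (non-root) domain, and finally NOP out the
 * whole OpenSBI domain config DT node.
 */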
void fdt_domain_fixup(void *fdt)
{
	u32 i, dcount;
	int err, poffset, doffset;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct __fixup_find_domain_offset_info fdo;

	/* Remove the domain assignment DT property from CPU DT nodes */
	poffset = fdt_path_offset(fdt, "/cpus");
	if (poffset < 0)
		return;
	fdt_for_each_subnode(doffset, fdt, poffset) {
		err = fdt_parse_hart_id(fdt, doffset, &i);
		if (err)
			continue;

		fdt_nop_property(fdt, doffset, "opensbi-domain");
	}

	/* Skip device disable for root domain */
	if (!dom->index)
		goto skip_device_disable;

	/* Find current domain DT node */
	doffset = -1;
	fdo.name = dom->name;
	fdo.doffset = &doffset;
	fdt_iterate_each_domain(fdt, &fdo, __fixup_find_domain_offset);
	if (doffset < 0)
		goto skip_device_disable;

	/* Count current domain device DT nodes to be disabled */
	dcount = 0;
	fdt_iterate_each_memregion(fdt, doffset, &dcount,
				   __fixup_count_disable_devices);
	if (!dcount)
		goto skip_device_disable;

	/* Expand FDT based on device DT nodes to be disabled */
	err = fdt_open_into(fdt, fdt, fdt_totalsize(fdt) + dcount * 32);
	if (err < 0)
		return;

	/* Again find current domain DT node */
	doffset = -1;
	fdo.name = dom->name;
	fdo.doffset = &doffset;
	fdt_iterate_each_domain(fdt, &fdo, __fixup_find_domain_offset);
	if (doffset < 0)
		goto skip_device_disable;

	/* Disable device DT nodes for current domain */
	fdt_iterate_each_memregion(fdt, doffset, NULL,
				   __fixup_disable_devices);

skip_device_disable:

	/* Remove the OpenSBI domain config DT node */
	poffset = fdt_path_offset(fdt, "/chosen");
	if (poffset < 0)
		return;
	poffset = fdt_node_offset_by_compatible(fdt, poffset,
						"opensbi,domain,config");
	if (poffset < 0)
		return;
	fdt_nop_node(fdt, poffset);
}
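
/*
 * Limits and static storage for domains parsed from the FDT. Each
 * domain gets one memregion slot beyond FDT_DOMAIN_REGION_MAX_COUNT so
 * that, after the memset in __fdt_parse_domain(), the region array
 * always ends with a zeroed sentinel entry.
 */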
#define FDT_DOMAIN_MAX_COUNT		8
#define FDT_DOMAIN_REGION_MAX_COUNT	16

static u32 fdt_domains_count;
static struct sbi_domain fdt_domains[FDT_DOMAIN_MAX_COUNT];
static struct sbi_hartmask fdt_masks[FDT_DOMAIN_MAX_COUNT];
static struct sbi_domain_memregion
	fdt_regions[FDT_DOMAIN_MAX_COUNT][FDT_DOMAIN_REGION_MAX_COUNT + 1];
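
/*
 * Parse one memregion DT node into the next free slot of the current
 * domain's region array. "base" is a 64-bit address encoded as two u32
 * cells and "order" is the log2 of the region size, which must lie
 * between 3 and __riscv_xlen.
 */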
static int __fdt_parse_region(void *fdt, int domain_offset,
			      int region_offset, u32 region_access,
			      void *opaque)
{
	int len;
	u32 val32;
	u64 val64;
	const u32 *val;
	u32 *region_count = opaque;
	struct sbi_domain_memregion *region;

	/* Find next region of the domain */
	if (FDT_DOMAIN_REGION_MAX_COUNT <= *region_count)
		return SBI_EINVAL;
	region = &fdt_regions[fdt_domains_count][*region_count];

	/* Read "base" DT property */
	val = fdt_getprop(fdt, region_offset, "base", &len);
	if (!val || len < 8)
		return SBI_EINVAL;
	val64 = fdt32_to_cpu(val[0]);
	val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	region->base = val64;

	/* Read "order" DT property */
	val = fdt_getprop(fdt, region_offset, "order", &len);
	if (!val || len < 4)
		return SBI_EINVAL;
	val32 = fdt32_to_cpu(*val);
	if (val32 < 3 || __riscv_xlen < val32)
		return SBI_EINVAL;
	region->order = val32;

	/* Read "mmio" DT property */
	region->flags = region_access & SBI_DOMAIN_MEMREGION_ACCESS_MASK;
	if (fdt_get_property(fdt, region_offset, "mmio", NULL))
		region->flags |= SBI_DOMAIN_MEMREGION_MMIO;

	(*region_count)++;

	return 0;
}
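
/*
 * Parse one domain instance DT node: read its name, possible HARTs,
 * and memregions, inherit the root domain's M-mode only memregions,
 * fill in the boot HART and next-stage boot parameters (defaulting to
 * the coldboot HART's scratch values for the coldboot domain), derive
 * the HART assignment mask from the "opensbi-domain" CPU properties,
 * and finally register the domain.
 */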
static int __fdt_parse_domain(void *fdt, int domain_offset, void *opaque)
{
	u32 val32;
	u64 val64;
	const u32 *val;
	struct sbi_domain *dom;
	struct sbi_hartmask *mask;
	struct sbi_hartmask assign_mask;
	int *cold_domain_offset = opaque;
	struct sbi_domain_memregion *reg, *regions;
	int i, err, len, cpus_offset, cpu_offset, doffset;

	/* Sanity check on maximum domains we can handle */
	if (FDT_DOMAIN_MAX_COUNT <= fdt_domains_count)
		return SBI_EINVAL;
	dom = &fdt_domains[fdt_domains_count];
	mask = &fdt_masks[fdt_domains_count];
	regions = &fdt_regions[fdt_domains_count][0];

	/* Read DT node name */
	strncpy(dom->name, fdt_get_name(fdt, domain_offset, NULL),
		sizeof(dom->name));
	dom->name[sizeof(dom->name) - 1] = '\0';

	/* Setup possible HARTs mask */
	SBI_HARTMASK_INIT(mask);
	dom->possible_harts = mask;
	val = fdt_getprop(fdt, domain_offset, "possible-harts", &len);
	len = len / sizeof(u32);
	if (val && len) {
		for (i = 0; i < len; i++) {
			cpu_offset = fdt_node_offset_by_phandle(fdt,
							fdt32_to_cpu(val[i]));
			if (cpu_offset < 0)
				return cpu_offset;

			err = fdt_parse_hart_id(fdt, cpu_offset, &val32);
			if (err)
				return err;

			sbi_hartmask_set_hart(val32, mask);
		}
	}

	/* Setup memregions from DT */
	val32 = 0;
	memset(regions, 0,
	       sizeof(*regions) * (FDT_DOMAIN_REGION_MAX_COUNT + 1));
	dom->regions = regions;
	err = fdt_iterate_each_memregion(fdt, domain_offset, &val32,
					 __fdt_parse_region);
	if (err)
		return err;

	/*
	 * Copy over root domain memregions which don't allow
	 * read, write and execute from lower privilege modes.
	 *
	 * These root domain memregions without read, write,
	 * and execute permissions include:
	 * 1) firmware region protecting the firmware memory
	 * 2) mmio regions protecting M-mode only mmio devices
	 */
	sbi_domain_for_each_memregion(&root, reg) {
		if ((reg->flags & SBI_DOMAIN_MEMREGION_READABLE) ||
		    (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE) ||
		    (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE))
			continue;

		if (FDT_DOMAIN_REGION_MAX_COUNT <= val32)
			return SBI_EINVAL;

		memcpy(&regions[val32++], reg, sizeof(*reg));
	}

	/* Read "boot-hart" DT property */
	val32 = -1U;
	val = fdt_getprop(fdt, domain_offset, "boot-hart", &len);
	if (val && len >= 4) {
		cpu_offset = fdt_node_offset_by_phandle(fdt,
							fdt32_to_cpu(*val));
		if (cpu_offset >= 0)
			fdt_parse_hart_id(fdt, cpu_offset, &val32);
	} else {
		if (domain_offset == *cold_domain_offset)
			val32 = current_hartid();
	}
	dom->boot_hartid = val32;

	/* Read "next-arg1" DT property */
	val64 = 0;
	val = fdt_getprop(fdt, domain_offset, "next-arg1", &len);
	if (val && len >= 8) {
		val64 = fdt32_to_cpu(val[0]);
		val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	} else {
		if (domain_offset == *cold_domain_offset)
			val64 = sbi_scratch_thishart_ptr()->next_arg1;
	}
	dom->next_arg1 = val64;

	/* Read "next-addr" DT property */
	val64 = 0;
	val = fdt_getprop(fdt, domain_offset, "next-addr", &len);
	if (val && len >= 8) {
		val64 = fdt32_to_cpu(val[0]);
		val64 = (val64 << 32) | fdt32_to_cpu(val[1]);
	} else {
		if (domain_offset == *cold_domain_offset)
			val64 = sbi_scratch_thishart_ptr()->next_addr;
	}
	dom->next_addr = val64;

	/* Read "next-mode" DT property */
	val32 = 0x1;
	val = fdt_getprop(fdt, domain_offset, "next-mode", &len);
	if (val && len >= 4) {
		val32 = fdt32_to_cpu(val[0]);
		if (val32 != 0x0 && val32 != 0x1)
			val32 = 0x1;
	} else {
		if (domain_offset == *cold_domain_offset)
			val32 = sbi_scratch_thishart_ptr()->next_mode;
	}
	dom->next_mode = val32;

	/* Read "system-reset-allowed" DT property */
	if (fdt_get_property(fdt, domain_offset,
			     "system-reset-allowed", NULL))
		dom->system_reset_allowed = TRUE;
	else
		dom->system_reset_allowed = FALSE;

	/* Find /cpus DT node */
	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		return cpus_offset;

	/* HART to domain assignment mask based on CPU DT nodes */
	sbi_hartmask_clear_all(&assign_mask);
	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		err = fdt_parse_hart_id(fdt, cpu_offset, &val32);
		if (err)
			continue;

		if (SBI_HARTMASK_MAX_BITS <= val32)
			continue;

		val = fdt_getprop(fdt, cpu_offset, "opensbi-domain", &len);
		if (!val || len < 4)
			return SBI_EINVAL;

		doffset = fdt_node_offset_by_phandle(fdt, fdt32_to_cpu(*val));
		if (doffset < 0)
			return doffset;

		if (doffset == domain_offset)
			sbi_hartmask_set_hart(val32, &assign_mask);
	}

	/* Increment domains count */
	fdt_domains_count++;

	/* Register the domain */
	return sbi_domain_register(dom, &assign_mask);
}
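
/*
 * Scan the FDT and register all domains described under /chosen. The
 * domain assigned to the coldboot HART is located first so that its
 * boot HART and next-stage parameters can default to the coldboot
 * values.
 *
 * A typical call site is a platform's domains_init hook; an
 * illustrative sketch (the FDT pointer source is platform-specific
 * and assumed here):
 *
 *	static int platform_domains_init(void)
 *	{
 *		return fdt_domains_populate(fdt_get_address());
 *	}
 */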
int fdt_domains_populate(void *fdt)
{
	const u32 *val;
	int cold_domain_offset;
	u32 hartid, cold_hartid;
	int err, len, cpus_offset, cpu_offset;

	/* Sanity checks */
	if (!fdt)
		return SBI_EINVAL;

	/* Find /cpus DT node */
	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0)
		return cpus_offset;

	/* Find coldboot HART domain DT node offset */
	cold_domain_offset = -1;
	cold_hartid = current_hartid();
	fdt_for_each_subnode(cpu_offset, fdt, cpus_offset) {
		err = fdt_parse_hart_id(fdt, cpu_offset, &hartid);
		if (err)
			continue;

		if (hartid != cold_hartid)
			continue;

		val = fdt_getprop(fdt, cpu_offset, "opensbi-domain", &len);
		if (val && len >= 4)
			cold_domain_offset = fdt_node_offset_by_phandle(fdt,
							fdt32_to_cpu(*val));

		break;
	}

	/* Iterate over each domain in FDT and populate details */
	return fdt_iterate_each_domain(fdt, &cold_domain_offset,
				       __fdt_parse_domain);
}