/* sbi_hart.c */
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_fp.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>

extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void);

void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;

struct hart_features {
        unsigned long features;
        unsigned int pmp_count;
        unsigned int pmp_addr_bits;
        unsigned long pmp_gran;
        unsigned int mhpm_count;
};

static unsigned long hart_features_offset;

static void mstatus_init(struct sbi_scratch *scratch)
{
        unsigned long mstatus_val = 0;

        /* Enable FPU */
        if (misa_extension('D') || misa_extension('F'))
                mstatus_val |= MSTATUS_FS;

        /* Enable Vector context */
        if (misa_extension('V'))
                mstatus_val |= MSTATUS_VS;

        csr_write(CSR_MSTATUS, mstatus_val);

        /* Enable user/supervisor use of perf counters */
        if (misa_extension('S') &&
            sbi_hart_has_feature(scratch, SBI_HART_HAS_SCOUNTEREN))
                csr_write(CSR_SCOUNTEREN, -1);
        if (sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTEREN))
                csr_write(CSR_MCOUNTEREN, -1);

        /* Disable all interrupts */
        csr_write(CSR_MIE, 0);

        /* Disable S-mode paging */
        if (misa_extension('S'))
                csr_write(CSR_SATP, 0);
}

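/*
 * Note: fp_init() below depends on mstatus_init() having set MSTATUS_FS;
 * with FS = Off, any access to the FP registers or fcsr would trap.
 */
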
static int fp_init(struct sbi_scratch *scratch)
{
#ifdef __riscv_flen
        int i;
#endif

        if (!misa_extension('D') && !misa_extension('F'))
                return 0;

        if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
                return SBI_EINVAL;

#ifdef __riscv_flen
        for (i = 0; i < 32; i++)
                init_fp_reg(i);
        csr_write(CSR_FCSR, 0);
#endif

        return 0;
}

static int delegate_traps(struct sbi_scratch *scratch)
{
        const struct sbi_platform *plat = sbi_platform_ptr(scratch);
        unsigned long interrupts, exceptions;

        if (!misa_extension('S'))
                /* No delegation possible as mideleg does not exist */
                return 0;

        /* Send M-mode interrupts and most exceptions to S-mode */
        interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
        exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
                     (1U << CAUSE_USER_ECALL);
        if (sbi_platform_has_mfaults_delegation(plat))
                exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
                              (1U << CAUSE_LOAD_PAGE_FAULT) |
                              (1U << CAUSE_STORE_PAGE_FAULT);

        /*
         * If the hypervisor extension is available then we only handle
         * hypervisor calls (i.e. ecalls from HS-mode) in M-mode.
         *
         * HS-mode will additionally handle supervisor calls (i.e. ecalls
         * from VS-mode), guest page faults, and virtual interrupts.
         */
        if (misa_extension('H')) {
                exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL);
                exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT);
                exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT);
                exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT);
                exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
        }

        csr_write(CSR_MIDELEG, interrupts);
        csr_write(CSR_MEDELEG, exceptions);

        return 0;
}

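/*
 * For illustration: CAUSE_SUPERVISOR_ECALL is deliberately absent from
 * `exceptions`, so ecalls from (H)S-mode always trap to M-mode where the
 * SBI call is handled. With the H-extension, VS-level ecalls and guest
 * page faults are delegated and trap straight to HS-mode instead.
 */
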
void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
                              const char *prefix, const char *suffix)
{
        if (!misa_extension('S'))
                /* No delegation possible as mideleg does not exist */
                return;

#if __riscv_xlen == 32
        sbi_printf("%sMIDELEG%s: 0x%08lx\n",
                   prefix, suffix, csr_read(CSR_MIDELEG));
        sbi_printf("%sMEDELEG%s: 0x%08lx\n",
                   prefix, suffix, csr_read(CSR_MEDELEG));
#else
        sbi_printf("%sMIDELEG%s: 0x%016lx\n",
                   prefix, suffix, csr_read(CSR_MIDELEG));
        sbi_printf("%sMEDELEG%s: 0x%016lx\n",
                   prefix, suffix, csr_read(CSR_MEDELEG));
#endif
}

unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->mhpm_count;
}

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_count;
}

unsigned long sbi_hart_pmp_granularity(struct sbi_scratch *scratch)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_gran;
}

unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_addr_bits;
}

int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
        struct sbi_domain_memregion *reg;
        struct sbi_domain *dom = sbi_domain_thishart_ptr();
        unsigned int pmp_idx = 0, pmp_flags, pmp_bits, pmp_gran_log2;
        unsigned int pmp_count = sbi_hart_pmp_count(scratch);
        unsigned long pmp_addr = 0, pmp_addr_max = 0;

        if (!pmp_count)
                return 0;

        pmp_gran_log2 = log2roundup(sbi_hart_pmp_granularity(scratch));
        pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
        pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

        sbi_domain_for_each_memregion(dom, reg) {
                if (pmp_count <= pmp_idx)
                        break;

                pmp_flags = 0;
                if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
                        pmp_flags |= PMP_R;
                if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
                        pmp_flags |= PMP_W;
                if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
                        pmp_flags |= PMP_X;
                if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
                        pmp_flags |= PMP_L;

                pmp_addr = reg->base >> PMP_SHIFT;
                if (pmp_gran_log2 <= reg->order && pmp_addr < pmp_addr_max) {
                        pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
                } else {
                        sbi_printf("Can not configure pmp for domain %s "
                                   "because memory region base 0x%lx or "
                                   "order %lu is out of range\n",
                                   dom->name, reg->base, reg->order);
                }
        }

        return 0;
}

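/*
 * Illustration (not part of the build): for a power-of-two region of
 * 2^order bytes at `base`, the standard RISC-V NAPOT encoding that a
 * pmp_set() implementation is expected to produce for pmpaddrN is:
 *
 *   pmpaddr = (base | ((1UL << order) / 2 - 1)) >> PMP_SHIFT;
 *
 * e.g. a 64 KiB region (order = 16) at 0x80000000 encodes as
 * (0x80000000 | 0x7fff) >> 2 = 0x20001fff.
 */
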
/**
 * Check whether a particular hart feature is available
 *
 * @param scratch pointer to the HART scratch space
 * @param feature the feature to check
 * @returns true (feature available) or false (feature not available)
 */
bool sbi_hart_has_feature(struct sbi_scratch *scratch, unsigned long feature)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        if (hfeatures->features & feature)
                return true;
        else
                return false;
}

static unsigned long hart_get_features(struct sbi_scratch *scratch)
{
        struct hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->features;
}

static inline char *sbi_hart_feature_id2string(unsigned long feature)
{
        char *fstr = NULL;

        if (!feature)
                return NULL;

        switch (feature) {
        case SBI_HART_HAS_SCOUNTEREN:
                fstr = "scounteren";
                break;
        case SBI_HART_HAS_MCOUNTEREN:
                fstr = "mcounteren";
                break;
        case SBI_HART_HAS_TIME:
                fstr = "time";
                break;
        default:
                break;
        }

        return fstr;
}

/**
 * Get the hart features in string format
 *
 * @param scratch pointer to the HART scratch space
 * @param features_str pointer to a char array where the features string
 * will be written
 * @param nfstr length of features_str. The features string is truncated
 * if nfstr is not long enough.
 */
void sbi_hart_get_features_str(struct sbi_scratch *scratch,
                               char *features_str, int nfstr)
{
        unsigned long features, feat = 1UL;
        char *temp;
        int offset = 0;

        if (!features_str || nfstr <= 0)
                return;
        sbi_memset(features_str, 0, nfstr);

        features = hart_get_features(scratch);
        if (!features)
                goto done;

        do {
                if (features & feat) {
                        temp = sbi_hart_feature_id2string(feat);
                        if (temp) {
                                /* Account for what is already written */
                                sbi_snprintf(features_str + offset,
                                             nfstr - offset, "%s,", temp);
                                offset = offset + sbi_strlen(temp) + 1;
                        }
                }
                feat = feat << 1;
        } while (feat <= SBI_HART_HAS_LAST_FEATURE);

done:
        if (offset)
                features_str[offset - 1] = '\0';
        else
                sbi_strncpy(features_str, "none", nfstr);
}

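/*
 * Example use (hypothetical caller): on a hart where all three
 * detectable features are present, this prints
 * "Features: scounteren,mcounteren,time".
 *
 *   char str[64];
 *
 *   sbi_hart_get_features_str(scratch, str, sizeof(str));
 *   sbi_printf("Features: %s\n", str);
 */
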
static unsigned long hart_pmp_get_allowed_addr(void)
{
        unsigned long val = 0;
        struct sbi_trap_info trap = {0};

        csr_write_allowed(CSR_PMPADDR0, (ulong)&trap, PMP_ADDR_MASK);
        if (!trap.cause) {
                val = csr_read_allowed(CSR_PMPADDR0, (ulong)&trap);
                if (trap.cause)
                        val = 0;
        }

        return val;
}

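/*
 * How the return value is decoded (see hart_detect_features() below):
 * writing PMP_ADDR_MASK and reading back clears any pmpaddr bits the
 * hart does not implement. If the least-significant set bit of the
 * readback is bit G, the PMP granularity is 2^(G + 2) bytes, since
 * pmpaddr holds a physical address divided by 4. For example, a hart
 * with 4 KiB granularity reads back with bits [9:0] clear, so
 * __ffs(val) = 10 and pmp_gran = 1 << 12 = 4096.
 */
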
static void hart_detect_features(struct sbi_scratch *scratch)
{
        struct sbi_trap_info trap = {0};
        struct hart_features *hfeatures;
        unsigned long val;

        /* Reset hart features */
        hfeatures = sbi_scratch_offset_ptr(scratch, hart_features_offset);
        hfeatures->features = 0;
        hfeatures->pmp_count = 0;
        hfeatures->mhpm_count = 0;

#define __check_csr(__csr, __rdonly, __wrval, __field, __skip)          \
        val = csr_read_allowed(__csr, (ulong)&trap);                    \
        if (!trap.cause) {                                              \
                if (__rdonly) {                                         \
                        (hfeatures->__field)++;                         \
                } else {                                                \
                        csr_write_allowed(__csr, (ulong)&trap, __wrval);\
                        if (!trap.cause) {                              \
                                if (csr_swap(__csr, val) == __wrval)    \
                                        (hfeatures->__field)++;         \
                                else                                    \
                                        goto __skip;                    \
                        } else {                                        \
                                goto __skip;                            \
                        }                                               \
                }                                                       \
        } else {                                                        \
                goto __skip;                                            \
        }
#define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip)        \
        __check_csr(__csr + 0, __rdonly, __wrval, __field, __skip)      \
        __check_csr(__csr + 1, __rdonly, __wrval, __field, __skip)
#define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip)        \
        __check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip)    \
        __check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip)
#define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip)        \
        __check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip)    \
        __check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip)
#define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip)    \
        __check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip)
#define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip)   \
        __check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip)
#define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip)   \
        __check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip)

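        /*
         * Illustration: each __check_csr expansion probes one CSR through
         * the expected-trap handler. A read that traps means the CSR is
         * not implemented; for writable CSRs the probe also writes __wrval
         * and swaps the old value back, treating a value that does not
         * stick as "not implemented". The first failing CSR jumps to the
         * __skip label, so __field ends up holding the count of
         * consecutively implemented CSRs.
         */
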
        /*
         * Detect the allowed address bits & granularity. At least PMPADDR0
         * should be implemented.
         */
        val = hart_pmp_get_allowed_addr();
        if (val) {
                hfeatures->pmp_gran = 1 << (__ffs(val) + 2);
                hfeatures->pmp_addr_bits = __fls(val) + 1;
                /* Detect number of PMP regions */
                __check_csr_64(CSR_PMPADDR0, 0, val, pmp_count, __pmp_skip);
        }
__pmp_skip:

        /* Detect number of MHPM counters */
        __check_csr(CSR_MHPMCOUNTER3, 0, 1UL, mhpm_count, __mhpm_skip);
        __check_csr_4(CSR_MHPMCOUNTER4, 0, 1UL, mhpm_count, __mhpm_skip);
        __check_csr_8(CSR_MHPMCOUNTER8, 0, 1UL, mhpm_count, __mhpm_skip);
        __check_csr_16(CSR_MHPMCOUNTER16, 0, 1UL, mhpm_count, __mhpm_skip);
__mhpm_skip:

#undef __check_csr_64
#undef __check_csr_32
#undef __check_csr_16
#undef __check_csr_8
#undef __check_csr_4
#undef __check_csr_2
#undef __check_csr

        /* Detect if hart supports SCOUNTEREN feature */
        val = csr_read_allowed(CSR_SCOUNTEREN, (unsigned long)&trap);
        if (!trap.cause) {
                csr_write_allowed(CSR_SCOUNTEREN, (unsigned long)&trap, val);
                if (!trap.cause)
                        hfeatures->features |= SBI_HART_HAS_SCOUNTEREN;
        }

        /* Detect if hart supports MCOUNTEREN feature */
        val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
        if (!trap.cause) {
                csr_write_allowed(CSR_MCOUNTEREN, (unsigned long)&trap, val);
                if (!trap.cause)
                        hfeatures->features |= SBI_HART_HAS_MCOUNTEREN;
        }

        /* Detect if hart supports the time CSR */
        csr_read_allowed(CSR_TIME, (unsigned long)&trap);
        if (!trap.cause)
                hfeatures->features |= SBI_HART_HAS_TIME;
}

int sbi_hart_reinit(struct sbi_scratch *scratch)
{
        int rc;

        mstatus_init(scratch);

        rc = fp_init(scratch);
        if (rc)
                return rc;

        rc = delegate_traps(scratch);
        if (rc)
                return rc;

        return 0;
}

int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
{
        if (cold_boot) {
                if (misa_extension('H'))
                        sbi_hart_expected_trap = &__sbi_expected_trap_hext;

                hart_features_offset = sbi_scratch_alloc_offset(
                                                sizeof(struct hart_features),
                                                "HART_FEATURES");
                if (!hart_features_offset)
                        return SBI_ENOMEM;
        }

        hart_detect_features(scratch);

        return sbi_hart_reinit(scratch);
}

void __attribute__((noreturn)) sbi_hart_hang(void)
{
        while (1)
                wfi();
        __builtin_unreachable();
}

void __attribute__((noreturn))
sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
                     unsigned long next_addr, unsigned long next_mode,
                     bool next_virt)
{
#if __riscv_xlen == 32
        unsigned long val, valH;
#else
        unsigned long val;
#endif

        switch (next_mode) {
        case PRV_M:
                break;
        case PRV_S:
                if (!misa_extension('S'))
                        sbi_hart_hang();
                break;
        case PRV_U:
                if (!misa_extension('U'))
                        sbi_hart_hang();
                break;
        default:
                sbi_hart_hang();
        }

        val = csr_read(CSR_MSTATUS);
        val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
        val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
#if __riscv_xlen == 32
        if (misa_extension('H')) {
                valH = csr_read(CSR_MSTATUSH);
                if (next_virt)
                        valH = INSERT_FIELD(valH, MSTATUSH_MPV, 1);
                else
                        valH = INSERT_FIELD(valH, MSTATUSH_MPV, 0);
                csr_write(CSR_MSTATUSH, valH);
        }
#else
        if (misa_extension('H')) {
                if (next_virt)
                        val = INSERT_FIELD(val, MSTATUS_MPV, 1);
                else
                        val = INSERT_FIELD(val, MSTATUS_MPV, 0);
        }
#endif
        csr_write(CSR_MSTATUS, val);
        csr_write(CSR_MEPC, next_addr);

        if (next_mode == PRV_S) {
                csr_write(CSR_STVEC, next_addr);
                csr_write(CSR_SSCRATCH, 0);
                csr_write(CSR_SIE, 0);
                csr_write(CSR_SATP, 0);
        } else if (next_mode == PRV_U) {
                if (misa_extension('N')) {
                        csr_write(CSR_UTVEC, next_addr);
                        csr_write(CSR_USCRATCH, 0);
                        csr_write(CSR_UIE, 0);
                }
        }

        register unsigned long a0 asm("a0") = arg0;
        register unsigned long a1 asm("a1") = arg1;
        __asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
        __builtin_unreachable();
}
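
/*
 * Typical use (hypothetical values): drop from M-mode into an S-mode
 * payload at 0x80200000, passing the hartid and FDT address in a0/a1
 * per the common RISC-V boot convention:
 *
 *   sbi_hart_switch_mode(hartid, fdt_addr, 0x80200000, PRV_S, false);
 *
 * The function programs MPP/MEPC and executes mret, so it never
 * returns; MPIE is cleared first, leaving interrupts disabled until
 * the payload re-enables them.
 */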