sbi_hart.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_fp.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>

extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void);

void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;

struct hart_features {
	unsigned long features;
	unsigned int pmp_count;
	unsigned int mhpm_count;
};
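
/*
 * Per-hart feature state lives in each hart's scratch space;
 * hart_features_offset records where it was allocated within the
 * scratch area (see the cold-boot path of sbi_hart_init() below).
 */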
static unsigned long hart_features_offset;

static void mstatus_init(struct sbi_scratch *scratch)
{
	unsigned long mstatus_val = 0;

	/* Enable FPU */
	if (misa_extension('D') || misa_extension('F'))
		mstatus_val |= MSTATUS_FS;

	/* Enable Vector context */
	if (misa_extension('V'))
		mstatus_val |= MSTATUS_VS;

	csr_write(CSR_MSTATUS, mstatus_val);

	/* Enable user/supervisor use of perf counters */
	if (misa_extension('S') &&
	    sbi_hart_has_feature(scratch, SBI_HART_HAS_SCOUNTEREN))
		csr_write(CSR_SCOUNTEREN, -1);
	if (sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTEREN))
		csr_write(CSR_MCOUNTEREN, -1);

	/* Disable all interrupts */
	csr_write(CSR_MIE, 0);

	/* Disable S-mode paging */
	if (misa_extension('S'))
		csr_write(CSR_SATP, 0);
}

static int fp_init(struct sbi_scratch *scratch)
{
#ifdef __riscv_flen
	int i;
#endif

	if (!misa_extension('D') && !misa_extension('F'))
		return 0;

	if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
		return SBI_EINVAL;

#ifdef __riscv_flen
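	/* Zero every f-register, then clear accumulated FP state in fcsr */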
	for (i = 0; i < 32; i++)
		init_fp_reg(i);
	csr_write(CSR_FCSR, 0);
#endif

	return 0;
}

static int delegate_traps(struct sbi_scratch *scratch)
{
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	unsigned long interrupts, exceptions;

	if (!misa_extension('S'))
		/* No delegation possible as mideleg does not exist */
		return 0;

	/* Send M-mode interrupts and most exceptions to S-mode */
	interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
	exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
		     (1U << CAUSE_USER_ECALL);
	if (sbi_platform_has_mfaults_delegation(plat))
		exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
			      (1U << CAUSE_LOAD_PAGE_FAULT) |
			      (1U << CAUSE_STORE_PAGE_FAULT);

	/*
	 * If the hypervisor extension is available then we only handle
	 * hypervisor calls (i.e. ecalls from HS-mode) in M-mode.
	 *
	 * HS-mode will additionally handle supervisor calls (i.e. ecalls
	 * from VS-mode), guest page faults and virtual interrupts.
	 */
	if (misa_extension('H')) {
		exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL);
		exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT);
		exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT);
		exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT);
		exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
	}

	csr_write(CSR_MIDELEG, interrupts);
	csr_write(CSR_MEDELEG, exceptions);

	return 0;
}

void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
			      const char *prefix, const char *suffix)
{
	if (!misa_extension('S'))
		/* No delegation possible as mideleg does not exist */
		return;

#if __riscv_xlen == 32
	sbi_printf("%sMIDELEG%s: 0x%08lx\n",
		   prefix, suffix, csr_read(CSR_MIDELEG));
	sbi_printf("%sMEDELEG%s: 0x%08lx\n",
		   prefix, suffix, csr_read(CSR_MEDELEG));
#else
	sbi_printf("%sMIDELEG%s: 0x%016lx\n",
		   prefix, suffix, csr_read(CSR_MIDELEG));
	sbi_printf("%sMEDELEG%s: 0x%016lx\n",
		   prefix, suffix, csr_read(CSR_MEDELEG));
#endif
}

unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch)
{
	struct hart_features *hfeatures =
		sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->mhpm_count;
}

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
	struct hart_features *hfeatures =
		sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->pmp_count;
}

int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
	struct sbi_domain_memregion *reg;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	unsigned int pmp_idx = 0, pmp_flags;
	unsigned int pmp_count = sbi_hart_pmp_count(scratch);

	if (!pmp_count)
		return 0;
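
	/*
	 * Program one PMP entry per domain memory region until entries
	 * run out. An M-mode region is locked (PMP_L) so that its rule
	 * also constrains M-mode accesses; unlocked entries apply only
	 * to S/U-mode.
	 */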
	sbi_domain_for_each_memregion(dom, reg) {
		if (pmp_count <= pmp_idx)
			break;

		pmp_flags = 0;
		if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
			pmp_flags |= PMP_R;
		if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
			pmp_flags |= PMP_W;
		if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
			pmp_flags |= PMP_X;
		if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
			pmp_flags |= PMP_L;

		pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
	}

	return 0;
}

/**
 * Check whether a particular hart feature is available
 *
 * @param scratch pointer to the HART scratch space
 * @param feature the feature to check
 * @returns true (feature available) or false (feature not available)
 */
bool sbi_hart_has_feature(struct sbi_scratch *scratch, unsigned long feature)
{
	struct hart_features *hfeatures =
		sbi_scratch_offset_ptr(scratch, hart_features_offset);

	if (hfeatures->features & feature)
		return true;
	else
		return false;
}

static unsigned long hart_get_features(struct sbi_scratch *scratch)
{
	struct hart_features *hfeatures =
		sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->features;
}

static inline char *sbi_hart_feature_id2string(unsigned long feature)
{
	char *fstr = NULL;

	if (!feature)
		return NULL;

	switch (feature) {
	case SBI_HART_HAS_SCOUNTEREN:
		fstr = "scounteren";
		break;
	case SBI_HART_HAS_MCOUNTEREN:
		fstr = "mcounteren";
		break;
	case SBI_HART_HAS_TIME:
		fstr = "time";
		break;
	default:
		break;
	}

	return fstr;
}

/**
 * Get the hart features in string format
 *
 * @param scratch pointer to the HART scratch space
 * @param features_str pointer to a char array where the features string
 * will be updated
 * @param nfstr length of features_str. The feature string will be
 * truncated if nfstr is not long enough.
 */
void sbi_hart_get_features_str(struct sbi_scratch *scratch,
			       char *features_str, int nfstr)
{
	unsigned long features, feat = 1UL;
	char *temp;
	int offset = 0;

	if (!features_str || nfstr <= 0)
		return;
	sbi_memset(features_str, 0, nfstr);

	features = hart_get_features(scratch);
	if (!features)
		goto done;

	do {
		if (features & feat) {
			temp = sbi_hart_feature_id2string(feat);
			if (temp) {
				sbi_snprintf(features_str + offset,
					     nfstr - offset, "%s,", temp);
				offset = offset + sbi_strlen(temp) + 1;
			}
		}
		feat = feat << 1;
	} while (feat <= SBI_HART_HAS_LAST_FEATURE);

done:
	if (offset)
		features_str[offset - 1] = '\0';
	else
		sbi_strncpy(features_str, "none", nfstr);
}
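
/*
 * Illustrative use of sbi_hart_get_features_str(); the buffer name and
 * size are arbitrary:
 *
 *	char fstr[64];
 *
 *	sbi_hart_get_features_str(scratch, fstr, sizeof(fstr));
 *	sbi_printf("Hart features: %s\n", fstr);
 */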

static void hart_detect_features(struct sbi_scratch *scratch)
{
	struct sbi_trap_info trap = {0};
	struct hart_features *hfeatures;
	unsigned long val;

	/* Reset hart features */
	hfeatures = sbi_scratch_offset_ptr(scratch, hart_features_offset);
	hfeatures->features = 0;
	hfeatures->pmp_count = 0;
	hfeatures->mhpm_count = 0;
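
	/*
	 * Probe a CSR by trapping on the access: read it with the
	 * expected-trap handler armed, write a test value back, and
	 * count the CSR as present only if the write sticks. The final
	 * csr_swap() restores the original value and returns what was
	 * there, which must match the test value.
	 */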
#define __check_csr(__csr, __rdonly, __wrval, __field, __skip) \
	val = csr_read_allowed(__csr, (ulong)&trap); \
	if (!trap.cause) { \
		if (__rdonly) { \
			(hfeatures->__field)++; \
		} else { \
			csr_write_allowed(__csr, (ulong)&trap, __wrval); \
			if (!trap.cause) { \
				if (csr_swap(__csr, val) == __wrval) \
					(hfeatures->__field)++; \
				else \
					goto __skip; \
			} else { \
				goto __skip; \
			} \
		} \
	} else { \
		goto __skip; \
	}
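
/*
 * Unrolled variants probing runs of 2, 4, 8, 16, 32 and 64 consecutive
 * CSR numbers; they bail out to __skip at the first CSR that is absent.
 */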
#define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr(__csr + 1, __rdonly, __wrval, __field, __skip)
#define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip)
#define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip)
#define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip)
#define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip)
#define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip) \
	__check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip) \
	__check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip)

	/* Detect number of PMP regions */
	__check_csr_64(CSR_PMPADDR0, 0, 1UL, pmp_count, __pmp_skip);
__pmp_skip:

	/* Detect number of MHPM counters */
	__check_csr(CSR_MHPMCOUNTER3, 0, 1UL, mhpm_count, __mhpm_skip);
	__check_csr_4(CSR_MHPMCOUNTER4, 0, 1UL, mhpm_count, __mhpm_skip);
	__check_csr_8(CSR_MHPMCOUNTER8, 0, 1UL, mhpm_count, __mhpm_skip);
	__check_csr_16(CSR_MHPMCOUNTER16, 0, 1UL, mhpm_count, __mhpm_skip);
__mhpm_skip:

#undef __check_csr_64
#undef __check_csr_32
#undef __check_csr_16
#undef __check_csr_8
#undef __check_csr_4
#undef __check_csr_2
#undef __check_csr

	/* Detect if hart supports SCOUNTEREN feature */
	trap.cause = 0;
	val = csr_read_allowed(CSR_SCOUNTEREN, (unsigned long)&trap);
	if (!trap.cause) {
		csr_write_allowed(CSR_SCOUNTEREN, (unsigned long)&trap, val);
		if (!trap.cause)
			hfeatures->features |= SBI_HART_HAS_SCOUNTEREN;
	}

	/* Detect if hart supports MCOUNTEREN feature */
	trap.cause = 0;
	val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
	if (!trap.cause) {
		csr_write_allowed(CSR_MCOUNTEREN, (unsigned long)&trap, val);
		if (!trap.cause)
			hfeatures->features |= SBI_HART_HAS_MCOUNTEREN;
	}

	/* Detect if hart supports time CSR */
	trap.cause = 0;
	csr_read_allowed(CSR_TIME, (unsigned long)&trap);
	if (!trap.cause)
		hfeatures->features |= SBI_HART_HAS_TIME;
}

int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int rc;
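
	/*
	 * The cold-boot hart installs the hypervisor-aware expected-trap
	 * handler (when H is present) and allocates the shared scratch
	 * offset; warm-boot harts reuse the offset and only run the
	 * per-hart steps below.
	 */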
	if (cold_boot) {
		if (misa_extension('H'))
			sbi_hart_expected_trap = &__sbi_expected_trap_hext;

		hart_features_offset = sbi_scratch_alloc_offset(
						sizeof(struct hart_features),
						"HART_FEATURES");
		if (!hart_features_offset)
			return SBI_ENOMEM;
	}

	hart_detect_features(scratch);

	mstatus_init(scratch);

	rc = fp_init(scratch);
	if (rc)
		return rc;

	rc = delegate_traps(scratch);
	if (rc)
		return rc;

	return 0;
}

void __attribute__((noreturn)) sbi_hart_hang(void)
{
	while (1)
		wfi();
	__builtin_unreachable();
}

void __attribute__((noreturn))
sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
		     unsigned long next_addr, unsigned long next_mode,
		     bool next_virt)
{
#if __riscv_xlen == 32
	unsigned long val, valH;
#else
	unsigned long val;
#endif

	switch (next_mode) {
	case PRV_M:
		break;
	case PRV_S:
		if (!misa_extension('S'))
			sbi_hart_hang();
		break;
	case PRV_U:
		if (!misa_extension('U'))
			sbi_hart_hang();
		break;
	default:
		sbi_hart_hang();
	}
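
	/*
	 * Stage the target privilege level in mstatus.MPP and clear MPIE
	 * so interrupts stay disabled on entry; the mret at the end of
	 * this function then transfers control to next_addr in next_mode.
	 */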
	val = csr_read(CSR_MSTATUS);
	val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
	val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
#if __riscv_xlen == 32
	if (misa_extension('H')) {
		valH = csr_read(CSR_MSTATUSH);
		if (next_virt)
			valH = INSERT_FIELD(valH, MSTATUSH_MPV, 1);
		else
			valH = INSERT_FIELD(valH, MSTATUSH_MPV, 0);
		csr_write(CSR_MSTATUSH, valH);
	}
#else
	if (misa_extension('H')) {
		if (next_virt)
			val = INSERT_FIELD(val, MSTATUS_MPV, 1);
		else
			val = INSERT_FIELD(val, MSTATUS_MPV, 0);
	}
#endif
	csr_write(CSR_MSTATUS, val);
	csr_write(CSR_MEPC, next_addr);

	if (next_mode == PRV_S) {
		csr_write(CSR_STVEC, next_addr);
		csr_write(CSR_SSCRATCH, 0);
		csr_write(CSR_SIE, 0);
		csr_write(CSR_SATP, 0);
	} else if (next_mode == PRV_U) {
		csr_write(CSR_UTVEC, next_addr);
		csr_write(CSR_USCRATCH, 0);
		csr_write(CSR_UIE, 0);
	}
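
	/* Hand arg0/arg1 to the next stage in a0/a1 across the mret */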
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a1 asm("a1") = arg1;
	__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
	__builtin_unreachable();
}