sbi_hart.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_fp.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>
#include <sbi/sbi_hfence.h>

extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void);

void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;

static unsigned long hart_features_offset;

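/*
 * Set up the machine-mode configuration CSRs for this hart: FPU/Vector
 * context enable, counter access, mhpmevent defaults, mstateen/menvcfg
 * (where available), and finally interrupt and SATP reset.
 */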
static void mstatus_init(struct sbi_scratch *scratch)
{
        unsigned long menvcfg_val, mstatus_val = 0;
        int cidx;
        unsigned int num_mhpm = sbi_hart_mhpm_count(scratch);
        uint64_t mhpmevent_init_val = 0;
        uint64_t mstateen_val;

        /* Enable FPU */
        if (misa_extension('D') || misa_extension('F'))
                mstatus_val |= MSTATUS_FS;

        /* Enable Vector context */
        if (misa_extension('V'))
                mstatus_val |= MSTATUS_VS;

        csr_write(CSR_MSTATUS, mstatus_val);

        /*
         * Disable user-mode usage of all perf counters except the default
         * ones (CY, TM, IR).
         */
        if (misa_extension('S') &&
            sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
                csr_write(CSR_SCOUNTEREN, 7);

        /**
         * OpenSBI doesn't use any PMU counters in M-mode.
         * Supervisor-mode usage of all counters is enabled by default,
         * but the programmable counters will not count until their
         * mcountinhibit bits are cleared.
         */
        if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
                csr_write(CSR_MCOUNTEREN, -1);

        /*
         * All programmable counters will start running at runtime after
         * an S-mode request.
         */
        if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
                csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8);

        /**
         * The mhpmeventn[h] CSRs should be initialized with the overflow
         * interrupt disabled and counting inhibited in M-mode during init.
         * To keep it simple, only contiguous mhpmcounters are supported, as
         * a platform with discontiguous mhpmcounters may not make much sense.
         */
        mhpmevent_init_val |= (MHPMEVENT_OF | MHPMEVENT_MINH);
        for (cidx = 0; cidx < num_mhpm; cidx++) {
#if __riscv_xlen == 32
                csr_write_num(CSR_MHPMEVENT3 + cidx,
                              mhpmevent_init_val & 0xFFFFFFFF);
                if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
                        csr_write_num(CSR_MHPMEVENT3H + cidx,
                                      mhpmevent_init_val >> BITS_PER_LONG);
#else
                csr_write_num(CSR_MHPMEVENT3 + cidx, mhpmevent_init_val);
#endif
        }

        if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMSTATEEN)) {
                mstateen_val = csr_read(CSR_MSTATEEN0);
#if __riscv_xlen == 32
                mstateen_val |= ((uint64_t)csr_read(CSR_MSTATEEN0H)) << 32;
#endif
                mstateen_val |= SMSTATEEN_STATEN;
                mstateen_val |= SMSTATEEN0_HSENVCFG;
                if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA))
                        mstateen_val |= (SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT |
                                         SMSTATEEN0_IMSIC);
                else
                        mstateen_val &= ~(SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT |
                                          SMSTATEEN0_IMSIC);
                csr_write(CSR_MSTATEEN0, mstateen_val);
#if __riscv_xlen == 32
                csr_write(CSR_MSTATEEN0H, mstateen_val >> 32);
#endif
        }

        if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
                menvcfg_val = csr_read(CSR_MENVCFG);

                /*
                 * Set menvcfg.CBZE == 1
                 *
                 * If Zicboz extension is not available then writes to
                 * menvcfg.CBZE will be ignored because it is a WARL field.
                 */
                menvcfg_val |= ENVCFG_CBZE;

                /*
                 * Set menvcfg.CBCFE == 1
                 *
                 * If Zicbom extension is not available then writes to
                 * menvcfg.CBCFE will be ignored because it is a WARL field.
                 */
                menvcfg_val |= ENVCFG_CBCFE;

                /*
                 * Set menvcfg.CBIE == 3
                 *
                 * If Zicbom extension is not available then writes to
                 * menvcfg.CBIE will be ignored because it is a WARL field.
                 */
                menvcfg_val |= ENVCFG_CBIE_INV << ENVCFG_CBIE_SHIFT;

                /*
                 * Set menvcfg.PBMTE == 1 for RV64 or RV128
                 *
                 * If Svpbmt extension is not available then menvcfg.PBMTE
                 * will be read-only zero.
                 */
#if __riscv_xlen > 32
                menvcfg_val |= ENVCFG_PBMTE;
#endif

                /*
                 * The spec doesn't explicitly describe the reset value of
                 * menvcfg. Enable access to stimecmp if the Sstc extension
                 * is present in the hardware.
                 */
                if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSTC)) {
#if __riscv_xlen == 32
                        unsigned long menvcfgh_val;

                        menvcfgh_val = csr_read(CSR_MENVCFGH);
                        menvcfgh_val |= ENVCFGH_STCE;
                        csr_write(CSR_MENVCFGH, menvcfgh_val);
#else
                        menvcfg_val |= ENVCFG_STCE;
#endif
                }

                csr_write(CSR_MENVCFG, menvcfg_val);
        }

        /* Disable all interrupts */
        csr_write(CSR_MIE, 0);

        /* Disable S-mode paging */
        if (misa_extension('S'))
                csr_write(CSR_SATP, 0);
}

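/*
 * Zero-initialize the floating-point register file and fcsr when the
 * F/D extensions are present; fails if mstatus.FS was not enabled.
 */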
static int fp_init(struct sbi_scratch *scratch)
{
#ifdef __riscv_flen
        int i;
#endif

        if (!misa_extension('D') && !misa_extension('F'))
                return 0;

        if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
                return SBI_EINVAL;

#ifdef __riscv_flen
        for (i = 0; i < 32; i++)
                init_fp_reg(i);
        csr_write(CSR_FCSR, 0);
#endif

        return 0;
}

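/*
 * Program mideleg/medeleg so that supervisor interrupts and most
 * exceptions are handled directly in S-mode (or HS-mode when the
 * hypervisor extension is present).
 */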
static int delegate_traps(struct sbi_scratch *scratch)
{
        const struct sbi_platform *plat = sbi_platform_ptr(scratch);
        unsigned long interrupts, exceptions;

        if (!misa_extension('S'))
                /* No delegation possible as mideleg does not exist */
                return 0;

        /* Send S-mode interrupts and most exceptions straight to S-mode */
        interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
        interrupts |= sbi_pmu_irq_bit();

        exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
                     (1U << CAUSE_USER_ECALL);
        if (sbi_platform_has_mfaults_delegation(plat))
                exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
                              (1U << CAUSE_LOAD_PAGE_FAULT) |
                              (1U << CAUSE_STORE_PAGE_FAULT);

        /*
         * If the hypervisor extension is available then we only handle
         * hypervisor calls (i.e. ecalls from HS-mode) in M-mode.
         *
         * HS-mode will additionally handle supervisor calls (i.e. ecalls
         * from VS-mode), guest page faults and virtual interrupts.
         */
        if (misa_extension('H')) {
                exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL);
                exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT);
                exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT);
                exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT);
                exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
        }

        csr_write(CSR_MIDELEG, interrupts);
        csr_write(CSR_MEDELEG, exceptions);

        return 0;
}

void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
                              const char *prefix, const char *suffix)
{
        if (!misa_extension('S'))
                /* No delegation possible as mideleg does not exist */
                return;

        sbi_printf("%sMIDELEG%s: 0x%" PRILX "\n",
                   prefix, suffix, csr_read(CSR_MIDELEG));
        sbi_printf("%sMEDELEG%s: 0x%" PRILX "\n",
                   prefix, suffix, csr_read(CSR_MEDELEG));
}

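/*
 * Accessors for the per-hart feature information stored in the scratch
 * space at hart_features_offset.
 */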
unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->mhpm_count;
}

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_count;
}

unsigned long sbi_hart_pmp_granularity(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_gran;
}

unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->pmp_addr_bits;
}

unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->mhpm_bits;
}

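/*
 * Program one PMP entry per memory region of this hart's domain, then
 * flush address-translation caches so that stale, speculatively cached
 * PMP state is not used after hotplug or non-retentive suspend.
 */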
int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
        struct sbi_domain_memregion *reg;
        struct sbi_domain *dom = sbi_domain_thishart_ptr();
        unsigned int pmp_idx = 0, pmp_flags, pmp_bits, pmp_gran_log2;
        unsigned int pmp_count = sbi_hart_pmp_count(scratch);
        unsigned long pmp_addr = 0, pmp_addr_max = 0;

        if (!pmp_count)
                return 0;

        pmp_gran_log2 = log2roundup(sbi_hart_pmp_granularity(scratch));
        pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
        pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

        sbi_domain_for_each_memregion(dom, reg) {
                if (pmp_count <= pmp_idx)
                        break;

                pmp_flags = 0;
                if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
                        pmp_flags |= PMP_R;
                if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
                        pmp_flags |= PMP_W;
                if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
                        pmp_flags |= PMP_X;
                if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
                        pmp_flags |= PMP_L;

                pmp_addr = reg->base >> PMP_SHIFT;
                if (pmp_gran_log2 <= reg->order && pmp_addr < pmp_addr_max)
                        pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
                else {
                        sbi_printf("Can not configure pmp for domain %s", dom->name);
                        sbi_printf(" because memory region address %lx or size %lx is not in range\n",
                                   reg->base, reg->order);
                }
        }

        /*
         * As per section 3.7.2 of privileged specification v1.12,
         * virtual address translations can be speculatively performed
         * (even before actual access). These, along with PMP translations,
         * can be cached. This can pose a problem with CPU hotplug
         * and non-retentive suspend scenarios because PMP states are
         * not preserved.
         * It is advisable to flush the caching structures under such
         * conditions.
         */
        if (misa_extension('S')) {
                __asm__ __volatile__("sfence.vma");

                /*
                 * If hypervisor mode is supported, flush caching
                 * structures in guest mode too.
                 */
                if (misa_extension('H'))
                        __sbi_hfence_gvma_all();
        }

        return 0;
}

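/* Return the privileged spec version detected for this hart. */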
int sbi_hart_priv_version(struct sbi_scratch *scratch)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        return hfeatures->priv_version;
}

void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch,
                                   char *version_str, int nvstr)
{
        char *temp;
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        switch (hfeatures->priv_version) {
        case SBI_HART_PRIV_VER_1_10:
                temp = "v1.10";
                break;
        case SBI_HART_PRIV_VER_1_11:
                temp = "v1.11";
                break;
        case SBI_HART_PRIV_VER_1_12:
                temp = "v1.12";
                break;
        default:
                temp = "unknown";
                break;
        }

        sbi_snprintf(version_str, nvstr, "%s", temp);
}

static inline void __sbi_hart_update_extension(
                                        struct sbi_hart_features *hfeatures,
                                        enum sbi_hart_extensions ext,
                                        bool enable)
{
        if (enable)
                hfeatures->extensions |= BIT(ext);
        else
                hfeatures->extensions &= ~BIT(ext);
}

/**
 * Enable/Disable a particular hart extension
 *
 * @param scratch pointer to the HART scratch space
 * @param ext the extension to enable/disable
 * @param enable new state of the hart extension
 */
void sbi_hart_update_extension(struct sbi_scratch *scratch,
                               enum sbi_hart_extensions ext,
                               bool enable)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        __sbi_hart_update_extension(hfeatures, ext, enable);
}

/**
 * Check whether a particular hart extension is available
 *
 * @param scratch pointer to the HART scratch space
 * @param ext the extension number to check
 * @returns true (available) or false (not available)
 */
bool sbi_hart_has_extension(struct sbi_scratch *scratch,
                            enum sbi_hart_extensions ext)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);

        if (hfeatures->extensions & BIT(ext))
                return true;
        else
                return false;
}

static inline char *sbi_hart_extension_id2string(int ext)
{
        char *estr = NULL;

        switch (ext) {
        case SBI_HART_EXT_SSCOFPMF:
                estr = "sscofpmf";
                break;
        case SBI_HART_EXT_TIME:
                estr = "time";
                break;
        case SBI_HART_EXT_SMAIA:
                estr = "smaia";
                break;
        case SBI_HART_EXT_SSTC:
                estr = "sstc";
                break;
        case SBI_HART_EXT_SMSTATEEN:
                estr = "smstateen";
                break;
        default:
                break;
        }

        return estr;
}

/**
 * Get the hart extensions in string format
 *
 * @param scratch pointer to the HART scratch space
 * @param extensions_str pointer to a char array where the extensions string
 *                       will be updated
 * @param nestr length of extensions_str. The extensions string will be
 *              truncated if nestr is not long enough.
 */
void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
                                 char *extensions_str, int nestr)
{
        struct sbi_hart_features *hfeatures =
                        sbi_scratch_offset_ptr(scratch, hart_features_offset);
        int offset = 0, ext = 0;
        char *temp;

        if (!extensions_str || nestr <= 0)
                return;
        sbi_memset(extensions_str, 0, nestr);

        if (!hfeatures->extensions)
                goto done;

        do {
                if (hfeatures->extensions & BIT(ext)) {
                        temp = sbi_hart_extension_id2string(ext);
                        if (temp) {
                                sbi_snprintf(extensions_str + offset,
                                             nestr - offset,
                                             "%s,", temp);
                                offset = offset + sbi_strlen(temp) + 1;
                        }
                }

                ext++;
        } while (ext < SBI_HART_EXT_MAX);

done:
        if (offset)
                extensions_str[offset - 1] = '\0';
        else
                sbi_strncpy(extensions_str, "none", nestr);
}

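/*
 * Probe PMPADDR0 by writing all address bits and reading back the value
 * that sticks; returns 0 if the PMP CSRs are not accessible.
 */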
static unsigned long hart_pmp_get_allowed_addr(void)
{
        unsigned long val = 0;
        struct sbi_trap_info trap = {0};

        csr_write_allowed(CSR_PMPCFG0, (ulong)&trap, 0);
        if (trap.cause)
                return 0;

        csr_write_allowed(CSR_PMPADDR0, (ulong)&trap, PMP_ADDR_MASK);
        if (!trap.cause) {
                val = csr_read_allowed(CSR_PMPADDR0, (ulong)&trap);
                if (trap.cause)
                        val = 0;
        }

        return val;
}

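/*
 * Determine how many bits of MHPMCOUNTER3 (plus MHPMCOUNTER3H on RV32)
 * are writable; this is recorded as the width of the HPM counters.
 */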
static int hart_pmu_get_allowed_bits(void)
{
        unsigned long val = ~(0UL);
        struct sbi_trap_info trap = {0};
        int num_bits = 0;

        /**
         * It is assumed that platforms will implement the same number of
         * bits for all the performance counters including mcycle/minstret.
         */
        csr_write_allowed(CSR_MHPMCOUNTER3, (ulong)&trap, val);
        if (!trap.cause) {
                val = csr_read_allowed(CSR_MHPMCOUNTER3, (ulong)&trap);
                if (trap.cause)
                        return 0;
        }
        num_bits = sbi_fls(val) + 1;
#if __riscv_xlen == 32
        csr_write_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap, val);
        if (!trap.cause) {
                val = csr_read_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap);
                if (trap.cause)
                        return num_bits;
        }
        num_bits += sbi_fls(val) + 1;
#endif

        return num_bits;
}

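/*
 * Probe the hart once at boot: count PMP regions and HPM counters,
 * deduce the implemented privileged spec version from which CSRs exist,
 * and record the ISA extensions visible to OpenSBI.
 */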
static int hart_detect_features(struct sbi_scratch *scratch)
{
        struct sbi_trap_info trap = {0};
        struct sbi_hart_features *hfeatures =
                sbi_scratch_offset_ptr(scratch, hart_features_offset);
        unsigned long val, oldval;
        int rc;

        /* If hart features already detected then do nothing */
        if (hfeatures->detected)
                return 0;

        /* Clear hart features */
        hfeatures->extensions = 0;
        hfeatures->pmp_count = 0;
        hfeatures->mhpm_count = 0;

#define __check_csr(__csr, __rdonly, __wrval, __field, __skip)                 \
        oldval = csr_read_allowed(__csr, (ulong)&trap);                        \
        if (!trap.cause) {                                                     \
                if (__rdonly) {                                                \
                        (hfeatures->__field)++;                                \
                } else {                                                       \
                        csr_write_allowed(__csr, (ulong)&trap, __wrval);       \
                        if (!trap.cause) {                                     \
                                if (csr_swap(__csr, oldval) == __wrval)        \
                                        (hfeatures->__field)++;                \
                                else                                           \
                                        goto __skip;                           \
                        } else {                                               \
                                goto __skip;                                   \
                        }                                                      \
                }                                                              \
        } else {                                                               \
                goto __skip;                                                   \
        }
#define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr(__csr + 0, __rdonly, __wrval, __field, __skip)     \
        __check_csr(__csr + 1, __rdonly, __wrval, __field, __skip)
#define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip)   \
        __check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip)
#define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip)       \
        __check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip)   \
        __check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip)
#define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip)      \
        __check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip)   \
        __check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip)
#define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip)      \
        __check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip)  \
        __check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip)
#define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip)      \
        __check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip)  \
        __check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip)

        /**
         * Detect the allowed address bits & granularity. At least PMPADDR0
         * should be implemented.
         */
        val = hart_pmp_get_allowed_addr();
        if (val) {
                hfeatures->pmp_gran = 1 << (sbi_ffs(val) + 2);
                hfeatures->pmp_addr_bits = sbi_fls(val) + 1;
                /* Detect number of PMP regions. At least PMPADDR0 should be implemented */
                __check_csr_64(CSR_PMPADDR0, 0, val, pmp_count, __pmp_skip);
        }
__pmp_skip:

        /* Detect number of MHPM counters */
        __check_csr(CSR_MHPMCOUNTER3, 0, 1UL, mhpm_count, __mhpm_skip);
        hfeatures->mhpm_bits = hart_pmu_get_allowed_bits();
        __check_csr_4(CSR_MHPMCOUNTER4, 0, 1UL, mhpm_count, __mhpm_skip);
        __check_csr_8(CSR_MHPMCOUNTER8, 0, 1UL, mhpm_count, __mhpm_skip);
        __check_csr_16(CSR_MHPMCOUNTER16, 0, 1UL, mhpm_count, __mhpm_skip);

        /**
         * No need to check for MHPMCOUNTERH for RV32 as they are expected to
         * be implemented if MHPMCOUNTER is implemented.
         */
__mhpm_skip:

#undef __check_csr_64
#undef __check_csr_32
#undef __check_csr_16
#undef __check_csr_8
#undef __check_csr_4
#undef __check_csr_2
#undef __check_csr

        /* Detect if hart supports Priv v1.10 */
        val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
        if (!trap.cause)
                hfeatures->priv_version = SBI_HART_PRIV_VER_1_10;

        /* Detect if hart supports Priv v1.11 */
        val = csr_read_allowed(CSR_MCOUNTINHIBIT, (unsigned long)&trap);
        if (!trap.cause &&
            (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_10))
                hfeatures->priv_version = SBI_HART_PRIV_VER_1_11;

        /* Detect if hart supports Priv v1.12 */
        csr_read_allowed(CSR_MENVCFG, (unsigned long)&trap);
        if (!trap.cause &&
            (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_11))
                hfeatures->priv_version = SBI_HART_PRIV_VER_1_12;

        /* Counter overflow/filtering is not useful without mcounteren/mcountinhibit */
        if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
                /* Detect if hart supports sscofpmf */
                csr_read_allowed(CSR_SCOUNTOVF, (unsigned long)&trap);
                if (!trap.cause)
                        __sbi_hart_update_extension(hfeatures,
                                        SBI_HART_EXT_SSCOFPMF, true);
        }

        /* Detect if hart supports time CSR */
        csr_read_allowed(CSR_TIME, (unsigned long)&trap);
        if (!trap.cause)
                __sbi_hart_update_extension(hfeatures,
                                SBI_HART_EXT_TIME, true);

        /* Detect if hart has AIA local interrupt CSRs */
        csr_read_allowed(CSR_MTOPI, (unsigned long)&trap);
        if (!trap.cause)
                __sbi_hart_update_extension(hfeatures,
                                SBI_HART_EXT_SMAIA, true);

        /* Detect if hart supports stimecmp CSR (Sstc extension) */
        if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
                csr_read_allowed(CSR_STIMECMP, (unsigned long)&trap);
                if (!trap.cause)
                        __sbi_hart_update_extension(hfeatures,
                                        SBI_HART_EXT_SSTC, true);
        }

        /* Detect if hart supports mstateen CSRs */
        if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
                val = csr_read_allowed(CSR_MSTATEEN0, (unsigned long)&trap);
                if (!trap.cause)
                        __sbi_hart_update_extension(hfeatures,
                                        SBI_HART_EXT_SMSTATEEN, true);
        }

        /* Let platform populate extensions */
        rc = sbi_platform_extensions_init(sbi_platform_thishart_ptr(),
                                          hfeatures);
        if (rc)
                return rc;

        /* Mark hart feature detection done */
        hfeatures->detected = true;

        return 0;
}

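/*
 * Re-initialize the per-hart CSR state (mstatus, FP, trap delegation)
 * without repeating feature detection.
 */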
int sbi_hart_reinit(struct sbi_scratch *scratch)
{
        int rc;

        mstatus_init(scratch);

        rc = fp_init(scratch);
        if (rc)
                return rc;

        rc = delegate_traps(scratch);
        if (rc)
                return rc;

        return 0;
}

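/*
 * One-time hart initialization: on cold boot select the expected-trap
 * handler and allocate the feature scratch area, then detect features
 * and program the CSRs via sbi_hart_reinit().
 */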
int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
{
        int rc;

        if (cold_boot) {
                if (misa_extension('H'))
                        sbi_hart_expected_trap = &__sbi_expected_trap_hext;

                hart_features_offset = sbi_scratch_alloc_offset(
                                        sizeof(struct sbi_hart_features));
                if (!hart_features_offset)
                        return SBI_ENOMEM;
        }

        rc = hart_detect_features(scratch);
        if (rc)
                return rc;

        return sbi_hart_reinit(scratch);
}

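/* Park the calling hart forever in a WFI loop. */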
void __attribute__((noreturn)) sbi_hart_hang(void)
{
        while (1)
                wfi();
        __builtin_unreachable();
}

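/*
 * Jump to the next booting stage at next_addr in next_mode (M/S/U,
 * optionally virtualized) by programming mstatus.MPP/MPV and mepc,
 * then executing an mret with arg0/arg1 in a0/a1.
 */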
void __attribute__((noreturn))
sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
                     unsigned long next_addr, unsigned long next_mode,
                     bool next_virt)
{
#if __riscv_xlen == 32
        unsigned long val, valH;
#else
        unsigned long val;
#endif

        switch (next_mode) {
        case PRV_M:
                break;
        case PRV_S:
                if (!misa_extension('S'))
                        sbi_hart_hang();
                break;
        case PRV_U:
                if (!misa_extension('U'))
                        sbi_hart_hang();
                break;
        default:
                sbi_hart_hang();
        }

        val = csr_read(CSR_MSTATUS);
        val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
        val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
#if __riscv_xlen == 32
        if (misa_extension('H')) {
                valH = csr_read(CSR_MSTATUSH);
                valH = INSERT_FIELD(valH, MSTATUSH_MPV, next_virt);
                csr_write(CSR_MSTATUSH, valH);
        }
#else
        if (misa_extension('H'))
                val = INSERT_FIELD(val, MSTATUS_MPV, next_virt);
#endif
        csr_write(CSR_MSTATUS, val);
        csr_write(CSR_MEPC, next_addr);

        if (next_mode == PRV_S) {
                csr_write(CSR_STVEC, next_addr);
                csr_write(CSR_SSCRATCH, 0);
                csr_write(CSR_SIE, 0);
                csr_write(CSR_SATP, 0);
        } else if (next_mode == PRV_U) {
                if (misa_extension('N')) {
                        csr_write(CSR_UTVEC, next_addr);
                        csr_write(CSR_USCRATCH, 0);
                        csr_write(CSR_UIE, 0);
                }
        }

        register unsigned long a0 asm("a0") = arg0;
        register unsigned long a1 asm("a1") = arg1;
        __asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
        __builtin_unreachable();
}