sbi_hart.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_fp.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
#include <sbi/sbi_string.h>
#include <sbi/sbi_trap.h>
#include <sbi/sbi_hfence.h>

extern void __sbi_expected_trap(void);
extern void __sbi_expected_trap_hext(void);

void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;

static unsigned long hart_features_offset;

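/*
 * Program the machine-mode control CSRs for this hart: enable FP/vector
 * context saving in mstatus, open up counter access for lower privilege
 * modes, initialize the mhpmevent, mstateen and menvcfg CSRs (where the
 * detected privilege version and extensions allow it), and finally mask
 * all interrupts and disable S-mode paging.
 */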
static void mstatus_init(struct sbi_scratch *scratch)
{
	unsigned long menvcfg_val, mstatus_val = 0;
	int cidx;
	unsigned int num_mhpm = sbi_hart_mhpm_count(scratch);
	uint64_t mhpmevent_init_val = 0;
	uint64_t mstateen_val;

	/* Enable FPU */
	if (misa_extension('D') || misa_extension('F'))
		mstatus_val |= MSTATUS_FS;

	/* Enable Vector context */
	if (misa_extension('V'))
		mstatus_val |= MSTATUS_VS;

	csr_write(CSR_MSTATUS, mstatus_val);

	/* Disable user mode usage of all perf counters except default ones (CY, TM, IR) */
	if (misa_extension('S') &&
	    sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
		csr_write(CSR_SCOUNTEREN, 7);

	/**
	 * OpenSBI doesn't use any PMU counters in M-mode.
	 * Supervisor-mode usage of all counters is enabled by default,
	 * but the counters will not run until mcountinhibit is programmed
	 * accordingly.
	 */
	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_10)
		csr_write(CSR_MCOUNTEREN, -1);

	/* All programmable counters will start running at runtime after S-mode request */
	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_11)
		csr_write(CSR_MCOUNTINHIBIT, 0xFFFFFFF8);

	/**
	 * The mhpmevent[n] and mhpmevent[n]h CSRs should be initialized with
	 * the interrupt disabled and counting inhibited in M-mode during init.
	 * To keep it simple, only contiguous mhpmcounters are supported, as a
	 * platform with discontiguous mhpmcounters may not make much sense.
	 */
	mhpmevent_init_val |= (MHPMEVENT_OF | MHPMEVENT_MINH);
	for (cidx = 0; cidx < num_mhpm; cidx++) {
#if __riscv_xlen == 32
		csr_write_num(CSR_MHPMEVENT3 + cidx, mhpmevent_init_val & 0xFFFFFFFF);
		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSCOFPMF))
			csr_write_num(CSR_MHPMEVENT3H + cidx,
				      mhpmevent_init_val >> BITS_PER_LONG);
#else
		csr_write_num(CSR_MHPMEVENT3 + cidx, mhpmevent_init_val);
#endif
	}

	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMSTATEEN)) {
		mstateen_val = csr_read(CSR_MSTATEEN0);
#if __riscv_xlen == 32
		mstateen_val |= ((uint64_t)csr_read(CSR_MSTATEEN0H)) << 32;
#endif
		mstateen_val |= SMSTATEEN_STATEN;
		mstateen_val |= SMSTATEEN0_CONTEXT;
		mstateen_val |= SMSTATEEN0_HSENVCFG;

		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA))
			mstateen_val |= (SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT |
					 SMSTATEEN0_IMSIC);
		else
			mstateen_val &= ~(SMSTATEEN0_AIA | SMSTATEEN0_SVSLCT |
					  SMSTATEEN0_IMSIC);
		csr_write(CSR_MSTATEEN0, mstateen_val);
#if __riscv_xlen == 32
		csr_write(CSR_MSTATEEN0H, mstateen_val >> 32);
#endif
	}

	if (sbi_hart_priv_version(scratch) >= SBI_HART_PRIV_VER_1_12) {
		menvcfg_val = csr_read(CSR_MENVCFG);

		/*
		 * Set menvcfg.CBZE == 1
		 *
		 * If the Zicboz extension is not available then writes to
		 * menvcfg.CBZE will be ignored because it is a WARL field.
		 */
		menvcfg_val |= ENVCFG_CBZE;

		/*
		 * Set menvcfg.CBCFE == 1
		 *
		 * If the Zicbom extension is not available then writes to
		 * menvcfg.CBCFE will be ignored because it is a WARL field.
		 */
		menvcfg_val |= ENVCFG_CBCFE;

		/*
		 * Set menvcfg.CBIE == 3
		 *
		 * If the Zicbom extension is not available then writes to
		 * menvcfg.CBIE will be ignored because it is a WARL field.
		 */
		menvcfg_val |= ENVCFG_CBIE_INV << ENVCFG_CBIE_SHIFT;

		/*
		 * Set menvcfg.PBMTE == 1 for RV64 or RV128
		 *
		 * If the Svpbmt extension is not available then menvcfg.PBMTE
		 * will be read-only zero.
		 */
#if __riscv_xlen > 32
		menvcfg_val |= ENVCFG_PBMTE;
#endif

		/*
		 * The spec doesn't explicitly describe the reset value of
		 * menvcfg. Enable access to stimecmp if the Sstc extension is
		 * present in the hardware.
		 */
		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SSTC)) {
#if __riscv_xlen == 32
			unsigned long menvcfgh_val;
			menvcfgh_val = csr_read(CSR_MENVCFGH);
			menvcfgh_val |= ENVCFGH_STCE;
			csr_write(CSR_MENVCFGH, menvcfgh_val);
#else
			menvcfg_val |= ENVCFG_STCE;
#endif
		}

		csr_write(CSR_MENVCFG, menvcfg_val);
	}

	/* Disable all interrupts */
	csr_write(CSR_MIE, 0);

	/* Disable S-mode paging */
	if (misa_extension('S'))
		csr_write(CSR_SATP, 0);
}

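/*
 * Reset the floating-point state when an F/D unit is present: requires
 * mstatus.FS to have been enabled by mstatus_init(), then clears all 32
 * FP registers and fcsr (only when the build defines __riscv_flen).
 */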
static int fp_init(struct sbi_scratch *scratch)
{
#ifdef __riscv_flen
	int i;
#endif

	if (!misa_extension('D') && !misa_extension('F'))
		return 0;

	if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
		return SBI_EINVAL;

#ifdef __riscv_flen
	for (i = 0; i < 32; i++)
		init_fp_reg(i);
	csr_write(CSR_FCSR, 0);
#endif

	return 0;
}

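/*
 * Route selected interrupts and exceptions to S-mode via mideleg/medeleg
 * so that only the traps OpenSBI must handle itself remain in M-mode.
 */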
static int delegate_traps(struct sbi_scratch *scratch)
{
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	unsigned long interrupts, exceptions;

	if (!misa_extension('S'))
		/* No delegation possible as mideleg does not exist */
		return 0;

	/* Send M-mode interrupts and most exceptions to S-mode */
	interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
	interrupts |= sbi_pmu_irq_bit();

	exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
		     (1U << CAUSE_USER_ECALL);
	if (sbi_platform_has_mfaults_delegation(plat))
		exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
			      (1U << CAUSE_LOAD_PAGE_FAULT) |
			      (1U << CAUSE_STORE_PAGE_FAULT);

	/*
	 * If the hypervisor extension is available then we only handle
	 * hypervisor calls (i.e. ecalls from HS-mode) in M-mode.
	 *
	 * The HS-mode will additionally handle supervisor calls (i.e. ecalls
	 * from VS-mode), guest page faults and virtual interrupts.
	 */
	if (misa_extension('H')) {
		exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL);
		exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT);
		exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT);
		exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT);
		exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
	}

	csr_write(CSR_MIDELEG, interrupts);
	csr_write(CSR_MEDELEG, exceptions);

	return 0;
}

void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
			      const char *prefix, const char *suffix)
{
	if (!misa_extension('S'))
		/* No delegation possible as mideleg does not exist */
		return;

	sbi_printf("%sMIDELEG%s: 0x%" PRILX "\n",
		   prefix, suffix, csr_read(CSR_MIDELEG));
	sbi_printf("%sMEDELEG%s: 0x%" PRILX "\n",
		   prefix, suffix, csr_read(CSR_MEDELEG));
}

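/*
 * The accessors below return the per-hart feature values (HPM counter and
 * PMP properties) that hart_detect_features() recorded in the scratch area
 * referenced by hart_features_offset.
 */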
unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->mhpm_count;
}

unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->pmp_count;
}

unsigned long sbi_hart_pmp_granularity(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->pmp_gran;
}

unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->pmp_addr_bits;
}

unsigned int sbi_hart_mhpm_bits(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->mhpm_bits;
}

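/*
 * Program one PMP entry per memory region of this hart's domain. Regions
 * that request enforcement for all modes are locked (PMP_L), and regions
 * whose base or order does not fit the detected PMP granularity or address
 * range are reported and skipped.
 */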
int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
	struct sbi_domain_memregion *reg;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	unsigned int pmp_idx = 0, pmp_flags, pmp_bits, pmp_gran_log2;
	unsigned int pmp_count = sbi_hart_pmp_count(scratch);
	unsigned long pmp_addr = 0, pmp_addr_max = 0;

	if (!pmp_count)
		return 0;

	pmp_gran_log2 = log2roundup(sbi_hart_pmp_granularity(scratch));
	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

	sbi_domain_for_each_memregion(dom, reg) {
		if (pmp_count <= pmp_idx)
			break;

		pmp_flags = 0;

		/*
		 * If permissions are to be enforced for all modes on this
		 * region, the lock bit should be set.
		 */
		if (reg->flags & SBI_DOMAIN_MEMREGION_ENF_PERMISSIONS)
			pmp_flags |= PMP_L;

		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_READABLE)
			pmp_flags |= PMP_R;
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_WRITABLE)
			pmp_flags |= PMP_W;
		if (reg->flags & SBI_DOMAIN_MEMREGION_SU_EXECUTABLE)
			pmp_flags |= PMP_X;

		pmp_addr = reg->base >> PMP_SHIFT;
		if (pmp_gran_log2 <= reg->order && pmp_addr < pmp_addr_max)
			pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
		else {
			sbi_printf("Can not configure pmp for domain %s", dom->name);
			sbi_printf(" because memory region address %lx or size %lx is not in range\n",
				   reg->base, reg->order);
		}
	}

	/*
	 * As per section 3.7.2 of privileged specification v1.12,
	 * virtual address translations can be speculatively performed
	 * (even before actual access). These, along with PMP translations,
	 * can be cached. This can pose a problem with CPU hotplug
	 * and non-retentive suspend scenarios because PMP states are
	 * not preserved.
	 * It is advisable to flush the caching structures under such
	 * conditions.
	 */
	if (misa_extension('S')) {
		__asm__ __volatile__("sfence.vma");

		/*
		 * If hypervisor mode is supported, flush caching
		 * structures in guest mode too.
		 */
		if (misa_extension('H'))
			__sbi_hfence_gvma_all();
	}

	return 0;
}

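/*
 * Report the detected privileged architecture version, both as the
 * SBI_HART_PRIV_VER_* enumeration and as a printable string.
 */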
int sbi_hart_priv_version(struct sbi_scratch *scratch)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	return hfeatures->priv_version;
}

void sbi_hart_get_priv_version_str(struct sbi_scratch *scratch,
				   char *version_str, int nvstr)
{
	char *temp;
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	switch (hfeatures->priv_version) {
	case SBI_HART_PRIV_VER_1_10:
		temp = "v1.10";
		break;
	case SBI_HART_PRIV_VER_1_11:
		temp = "v1.11";
		break;
	case SBI_HART_PRIV_VER_1_12:
		temp = "v1.12";
		break;
	default:
		temp = "unknown";
		break;
	}

	sbi_snprintf(version_str, nvstr, "%s", temp);
}

static inline void __sbi_hart_update_extension(
					struct sbi_hart_features *hfeatures,
					enum sbi_hart_extensions ext,
					bool enable)
{
	if (enable)
		hfeatures->extensions |= BIT(ext);
	else
		hfeatures->extensions &= ~BIT(ext);
}

/**
 * Enable/Disable a particular hart extension
 *
 * @param scratch pointer to the HART scratch space
 * @param ext the extension number to update
 * @param enable new state of hart extension
 */
void sbi_hart_update_extension(struct sbi_scratch *scratch,
			       enum sbi_hart_extensions ext,
			       bool enable)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	__sbi_hart_update_extension(hfeatures, ext, enable);
}

/**
 * Check whether a particular hart extension is available
 *
 * @param scratch pointer to the HART scratch space
 * @param ext the extension number to check
 * @returns true (available) or false (not available)
 */
bool sbi_hart_has_extension(struct sbi_scratch *scratch,
			    enum sbi_hart_extensions ext)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);

	if (hfeatures->extensions & BIT(ext))
		return true;
	else
		return false;
}

static inline char *sbi_hart_extension_id2string(int ext)
{
	char *estr = NULL;

	switch (ext) {
	case SBI_HART_EXT_SSCOFPMF:
		estr = "sscofpmf";
		break;
	case SBI_HART_EXT_TIME:
		estr = "time";
		break;
	case SBI_HART_EXT_SMAIA:
		estr = "smaia";
		break;
	case SBI_HART_EXT_SSTC:
		estr = "sstc";
		break;
	case SBI_HART_EXT_SMSTATEEN:
		estr = "smstateen";
		break;
	default:
		break;
	}

	return estr;
}

/**
 * Get the hart extensions in string format
 *
 * @param scratch pointer to the HART scratch space
 * @param extensions_str pointer to a char array where the extensions string
 *			 will be updated
 * @param nestr length of extensions_str. The extensions string will be
 *		truncated if nestr is not long enough.
 */
void sbi_hart_get_extensions_str(struct sbi_scratch *scratch,
				 char *extensions_str, int nestr)
{
	struct sbi_hart_features *hfeatures =
			sbi_scratch_offset_ptr(scratch, hart_features_offset);
	int offset = 0, ext = 0;
	char *temp;

	if (!extensions_str || nestr <= 0)
		return;
	sbi_memset(extensions_str, 0, nestr);

	if (!hfeatures->extensions)
		goto done;

	do {
		if (hfeatures->extensions & BIT(ext)) {
			temp = sbi_hart_extension_id2string(ext);
			if (temp) {
				sbi_snprintf(extensions_str + offset,
					     nestr - offset,
					     "%s,", temp);
				offset = offset + sbi_strlen(temp) + 1;
			}
		}

		ext++;
	} while (ext < SBI_HART_EXT_MAX);

done:
	if (offset)
		extensions_str[offset - 1] = '\0';
	else
		sbi_strncpy(extensions_str, "none", nestr);
}

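/*
 * Probe PMPADDR0 to discover which address bits are writable: clear
 * pmpcfg0, write the PMP_ADDR_MASK pattern to pmpaddr0 and read it back.
 * hart_detect_features() later derives the PMP granularity and address
 * width from this value, e.g. if the lowest writable bit is bit 0 then
 * the granularity is 1 << (0 + 2) = 4 bytes.
 */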
static unsigned long hart_pmp_get_allowed_addr(void)
{
	unsigned long val = 0;
	struct sbi_trap_info trap = {0};

	csr_write_allowed(CSR_PMPCFG0, (ulong)&trap, 0);
	if (trap.cause)
		return 0;

	csr_write_allowed(CSR_PMPADDR0, (ulong)&trap, PMP_ADDR_MASK);
	if (!trap.cause) {
		val = csr_read_allowed(CSR_PMPADDR0, (ulong)&trap);
		if (trap.cause)
			val = 0;
	}

	return val;
}

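/*
 * Determine how many bits the hardware performance counters implement by
 * writing an all-ones value to mhpmcounter3 (and mhpmcounter3h on RV32)
 * and taking the position of the highest bit that reads back as set.
 */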
static int hart_pmu_get_allowed_bits(void)
{
	unsigned long val = ~(0UL);
	struct sbi_trap_info trap = {0};
	int num_bits = 0;

	/**
	 * It is assumed that platforms will implement the same number of
	 * bits for all the performance counters including mcycle/minstret.
	 */
	csr_write_allowed(CSR_MHPMCOUNTER3, (ulong)&trap, val);
	if (!trap.cause) {
		val = csr_read_allowed(CSR_MHPMCOUNTER3, (ulong)&trap);
		if (trap.cause)
			return 0;
	}
	num_bits = sbi_fls(val) + 1;
#if __riscv_xlen == 32
	csr_write_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap, val);
	if (!trap.cause) {
		val = csr_read_allowed(CSR_MHPMCOUNTER3H, (ulong)&trap);
		if (trap.cause)
			return num_bits;
	}
	num_bits += sbi_fls(val) + 1;
#endif

	return num_bits;
}

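/*
 * Detect the features of this hart at most once. Every probe goes through
 * the csr_read_allowed()/csr_write_allowed() helpers so that an access to
 * an unimplemented CSR is caught by the expected-trap handler and reported
 * via trap.cause instead of bringing the hart down; a non-zero trap.cause
 * simply means "not present".
 */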
static int hart_detect_features(struct sbi_scratch *scratch)
{
	struct sbi_trap_info trap = {0};
	struct sbi_hart_features *hfeatures =
		sbi_scratch_offset_ptr(scratch, hart_features_offset);
	unsigned long val, oldval;
	int rc;

	/* If hart features already detected then do nothing */
	if (hfeatures->detected)
		return 0;

	/* Clear hart features */
	hfeatures->extensions = 0;
	hfeatures->pmp_count = 0;
	hfeatures->mhpm_count = 0;

#define __check_csr(__csr, __rdonly, __wrval, __field, __skip)		\
	oldval = csr_read_allowed(__csr, (ulong)&trap);			\
	if (!trap.cause) {						\
		if (__rdonly) {						\
			(hfeatures->__field)++;				\
		} else {						\
			csr_write_allowed(__csr, (ulong)&trap, __wrval);\
			if (!trap.cause) {				\
				if (csr_swap(__csr, oldval) == __wrval)	\
					(hfeatures->__field)++;		\
				else					\
					goto __skip;			\
			} else {					\
				goto __skip;				\
			}						\
		}							\
	} else {							\
		goto __skip;						\
	}
#define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr(__csr + 1, __rdonly, __wrval, __field, __skip)
#define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip)
#define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip)
#define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip)
#define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip)
#define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip)	\
	__check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip)	\
	__check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip)

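	/*
	 * Each __check_csr() probe reads a CSR, writes a test value, and
	 * counts the CSR in the given feature field only if the test value
	 * reads back unmodified; the first failing probe jumps to the skip
	 * label. The __check_csr_2/4/8/16/32/64() wrappers simply double up
	 * the probe, so e.g. __check_csr_2(CSR_PMPADDR0, ...) expands to
	 * checks of pmpaddr0 and pmpaddr1.
	 */
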
	/**
	 * Detect the allowed address bits & granularity. At least PMPADDR0
	 * should be implemented.
	 */
	val = hart_pmp_get_allowed_addr();
	if (val) {
		hfeatures->pmp_gran = 1 << (sbi_ffs(val) + 2);
		hfeatures->pmp_addr_bits = sbi_fls(val) + 1;
		/* Detect number of PMP regions. At least PMPADDR0 should be implemented */
		__check_csr_64(CSR_PMPADDR0, 0, val, pmp_count, __pmp_skip);
	}
__pmp_skip:
	/* Detect number of MHPM counters */
	__check_csr(CSR_MHPMCOUNTER3, 0, 1UL, mhpm_count, __mhpm_skip);
	hfeatures->mhpm_bits = hart_pmu_get_allowed_bits();

	__check_csr_4(CSR_MHPMCOUNTER4, 0, 1UL, mhpm_count, __mhpm_skip);
	__check_csr_8(CSR_MHPMCOUNTER8, 0, 1UL, mhpm_count, __mhpm_skip);
	__check_csr_16(CSR_MHPMCOUNTER16, 0, 1UL, mhpm_count, __mhpm_skip);

	/**
	 * No need to check for MHPMCOUNTERH for RV32 as they are expected to
	 * be implemented if MHPMCOUNTER is implemented.
	 */
__mhpm_skip:
#undef __check_csr_64
#undef __check_csr_32
#undef __check_csr_16
#undef __check_csr_8
#undef __check_csr_4
#undef __check_csr_2
#undef __check_csr

	/* Detect if hart supports Priv v1.10 */
	val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
	if (!trap.cause)
		hfeatures->priv_version = SBI_HART_PRIV_VER_1_10;

	/* Detect if hart supports Priv v1.11 */
	val = csr_read_allowed(CSR_MCOUNTINHIBIT, (unsigned long)&trap);
	if (!trap.cause &&
	    (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_10))
		hfeatures->priv_version = SBI_HART_PRIV_VER_1_11;

	/* Detect if hart supports Priv v1.12 */
	csr_read_allowed(CSR_MENVCFG, (unsigned long)&trap);
	if (!trap.cause &&
	    (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_11))
		hfeatures->priv_version = SBI_HART_PRIV_VER_1_12;

	/* Counter overflow/filtering is not useful without mcounter/inhibit */
	if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
		/* Detect if hart supports sscofpmf */
		csr_read_allowed(CSR_SCOUNTOVF, (unsigned long)&trap);
		if (!trap.cause)
			__sbi_hart_update_extension(hfeatures,
					SBI_HART_EXT_SSCOFPMF, true);
	}

	/* Detect if hart supports time CSR */
	csr_read_allowed(CSR_TIME, (unsigned long)&trap);
	if (!trap.cause)
		__sbi_hart_update_extension(hfeatures,
					SBI_HART_EXT_TIME, true);

	/* Detect if hart has AIA local interrupt CSRs */
	csr_read_allowed(CSR_MTOPI, (unsigned long)&trap);
	if (!trap.cause)
		__sbi_hart_update_extension(hfeatures,
					SBI_HART_EXT_SMAIA, true);

	/* Detect if hart supports stimecmp CSR (Sstc extension) */
	if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
		csr_read_allowed(CSR_STIMECMP, (unsigned long)&trap);
		if (!trap.cause)
			__sbi_hart_update_extension(hfeatures,
					SBI_HART_EXT_SSTC, true);
	}

	/* Detect if hart supports mstateen CSRs */
	if (hfeatures->priv_version >= SBI_HART_PRIV_VER_1_12) {
		val = csr_read_allowed(CSR_MSTATEEN0, (unsigned long)&trap);
		if (!trap.cause)
			__sbi_hart_update_extension(hfeatures,
					SBI_HART_EXT_SMSTATEEN, true);
	}

	/* Let platform populate extensions */
	rc = sbi_platform_extensions_init(sbi_platform_thishart_ptr(),
					  hfeatures);
	if (rc)
		return rc;

	/* Mark hart feature detection done */
	hfeatures->detected = true;

	return 0;
}

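/*
 * Re-run the per-hart M-mode CSR setup: mstatus/counter/menvcfg
 * programming, FP state reset, and trap delegation.
 */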
int sbi_hart_reinit(struct sbi_scratch *scratch)
{
	int rc;

	mstatus_init(scratch);

	rc = fp_init(scratch);
	if (rc)
		return rc;

	rc = delegate_traps(scratch);
	if (rc)
		return rc;

	return 0;
}

int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int rc;

	if (cold_boot) {
		if (misa_extension('H'))
			sbi_hart_expected_trap = &__sbi_expected_trap_hext;

		hart_features_offset = sbi_scratch_alloc_offset(
					sizeof(struct sbi_hart_features));
		if (!hart_features_offset)
			return SBI_ENOMEM;
	}

	rc = hart_detect_features(scratch);
	if (rc)
		return rc;

	return sbi_hart_reinit(scratch);
}

void __attribute__((noreturn)) sbi_hart_hang(void)
{
	while (1)
		wfi();
	__builtin_unreachable();
}

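/*
 * Leave M-mode for the requested privilege mode: program mstatus.MPP (and
 * MPV for a virtualized target), point mepc at next_addr, sanitize the
 * target mode's trap/interrupt CSRs, and issue an mret with arg0/arg1 in
 * a0/a1. This function never returns.
 */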
void __attribute__((noreturn))
sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
		     unsigned long next_addr, unsigned long next_mode,
		     bool next_virt)
{
#if __riscv_xlen == 32
	unsigned long val, valH;
#else
	unsigned long val;
#endif

	switch (next_mode) {
	case PRV_M:
		break;
	case PRV_S:
		if (!misa_extension('S'))
			sbi_hart_hang();
		break;
	case PRV_U:
		if (!misa_extension('U'))
			sbi_hart_hang();
		break;
	default:
		sbi_hart_hang();
	}

	val = csr_read(CSR_MSTATUS);
	val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
	val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
#if __riscv_xlen == 32
	if (misa_extension('H')) {
		valH = csr_read(CSR_MSTATUSH);
		valH = INSERT_FIELD(valH, MSTATUSH_MPV, next_virt);
		csr_write(CSR_MSTATUSH, valH);
	}
#else
	if (misa_extension('H'))
		val = INSERT_FIELD(val, MSTATUS_MPV, next_virt);
#endif
	csr_write(CSR_MSTATUS, val);
	csr_write(CSR_MEPC, next_addr);

	if (next_mode == PRV_S) {
		csr_write(CSR_STVEC, next_addr);
		csr_write(CSR_SSCRATCH, 0);
		csr_write(CSR_SIE, 0);
		csr_write(CSR_SATP, 0);
	} else if (next_mode == PRV_U) {
		if (misa_extension('N')) {
			csr_write(CSR_UTVEC, next_addr);
			csr_write(CSR_USCRATCH, 0);
			csr_write(CSR_UIE, 0);
		}
	}

	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a1 asm("a1") = arg1;
	__asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
	__builtin_unreachable();
}