/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition */
struct sbi_hsm_data {
        atomic_t state;
        unsigned long suspend_type;
        unsigned long saved_mie;
        unsigned long saved_mip;
};
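
/*
 * State machine implemented by the functions below. Every transition is
 * performed with atomic_cmpxchg() on sbi_hsm_data.state, so concurrent
 * requests for the same hart cannot race:
 *
 *   STOPPED -> START_PENDING -> STARTED              (start path)
 *   STARTED -> STOP_PENDING  -> STOPPED              (stop/exit path)
 *   STARTED -> SUSPENDED -> RESUME_PENDING -> STARTED (suspend/resume)
 */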

static inline int __sbi_hsm_hart_get_state(u32 hartid)
{
        struct sbi_hsm_data *hdata;
        struct sbi_scratch *scratch;

        scratch = sbi_hartid_to_scratch(hartid);
        if (!scratch)
                return SBI_EINVAL;

        hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

        return atomic_read(&hdata->state);
}

int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
        if (!sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;

        return __sbi_hsm_hart_get_state(hartid);
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
                                    ulong hbase, ulong *out_hmask)
{
        int hstate;
        ulong i, hmask, dmask;
        ulong hend = sbi_scratch_last_hartid() + 1;

        *out_hmask = 0;
        if (hend <= hbase)
                return SBI_EINVAL;
        if (BITS_PER_LONG < (hend - hbase))
                hend = hbase + BITS_PER_LONG;

        dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
        for (i = hbase; i < hend; i++) {
                hmask = 1UL << (i - hbase);
                if (dmask & hmask) {
                        hstate = __sbi_hsm_hart_get_state(i);
                        if (hstate == SBI_HSM_STATE_STARTED ||
                            hstate == SBI_HSM_STATE_SUSPENDED)
                                *out_hmask |= hmask;
                }
        }

        return 0;
}
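
/*
 * Illustration (assumed values): with hbase = 8 and only harts 9 and 11
 * STARTED in dom, the call above returns 0 with
 * *out_hmask = (1UL << 1) | (1UL << 3), i.e. bit N of the output mask
 * stands for hart (hbase + N).
 */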

void sbi_hsm_prepare_next_jump(struct sbi_scratch *scratch, u32 hartid)
{
        u32 oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_START_PENDING,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_START_PENDING)
                sbi_hart_hang();
}
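
/*
 * Park a secondary hart until sbi_hsm_hart_start() moves it to
 * START_PENDING. Only M-mode software and external interrupts are
 * unmasked while waiting, so the wfi() below is woken by the IPI
 * that sbi_hsm_hart_start() sends.
 */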

static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
        unsigned long saved_mie;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* Save MIE CSR */
        saved_mie = csr_read(CSR_MIE);

        /* Set MSIE and MEIE bits to receive IPI */
        csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);

        /* Wait for state transition requested by sbi_hsm_hart_start() */
        while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING)
                wfi();

        /* Restore MIE CSR */
        csr_write(CSR_MIE, saved_mie);

        /*
         * No need to clear the IPI here because sbi_ipi_init() will
         * clear it for the current HART via sbi_platform_ipi_init().
         */
}

const struct sbi_hsm_device *sbi_hsm_get_device(void)
{
        return hsm_dev;
}

void sbi_hsm_set_device(const struct sbi_hsm_device *dev)
{
        if (!dev || hsm_dev)
                return;

        hsm_dev = dev;
}
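
/*
 * Capability probes for the registered HSM device: a device that
 * implements both hart_start and hart_stop supports full hotplug,
 * while one that implements only hart_start can merely release
 * secondary harts at boot.
 */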

static bool hsm_device_has_hart_hotplug(void)
{
        if (hsm_dev && hsm_dev->hart_start && hsm_dev->hart_stop)
                return true;
        return false;
}

static bool hsm_device_has_hart_secondary_boot(void)
{
        if (hsm_dev && hsm_dev->hart_start && !hsm_dev->hart_stop)
                return true;
        return false;
}

static int hsm_device_hart_start(u32 hartid, ulong saddr)
{
        if (hsm_dev && hsm_dev->hart_start)
                return hsm_dev->hart_start(hartid, saddr);
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_stop(void)
{
        if (hsm_dev && hsm_dev->hart_stop)
                return hsm_dev->hart_stop();
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_suspend(u32 suspend_type)
{
        if (hsm_dev && hsm_dev->hart_suspend)
                return hsm_dev->hart_suspend(suspend_type);
        return SBI_ENOTSUPP;
}

static void hsm_device_hart_resume(void)
{
        if (hsm_dev && hsm_dev->hart_resume)
                hsm_dev->hart_resume();
}

int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
        u32 i;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        if (cold_boot) {
                hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata));
                if (!hart_data_offset)
                        return SBI_ENOMEM;

                /* Initialize hart state data for every hart */
                for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
                        rscratch = sbi_hartid_to_scratch(i);
                        if (!rscratch)
                                continue;

                        hdata = sbi_scratch_offset_ptr(rscratch,
                                                       hart_data_offset);
                        ATOMIC_INIT(&hdata->state,
                                    (i == hartid) ?
                                    SBI_HSM_STATE_START_PENDING :
                                    SBI_HSM_STATE_STOPPED);
                }
        } else {
                sbi_hsm_hart_wait(scratch, hartid);
        }

        return 0;
}

void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
        u32 hstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);
        void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

        hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOP_PENDING,
                                SBI_HSM_STATE_STOPPED);
        if (hstate != SBI_HSM_STATE_STOP_PENDING)
                goto fail_exit;

        if (hsm_device_has_hart_hotplug()) {
                if (hsm_device_hart_stop() != SBI_ENOTSUPP)
                        goto fail_exit;
        }

        /*
         * As the platform lacks hotplug support, jump straight to the
         * warm-boot path and wait for interrupts there. Doing this
         * preserves the hart state and reuses the hotplug code path.
         */
        jump_warmboot();

fail_exit:
        /* Should never reach here */
        sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
        sbi_hart_hang();
}
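
/*
 * Bring a STOPPED hart into S-mode or U-mode at saddr. Two start paths
 * exist: platforms with a hotplug-capable HSM device (or a secondary-boot
 * device starting a hart for the first time) use the device's hart_start
 * callback; otherwise an IPI wakes the target hart out of
 * sbi_hsm_hart_wait().
 */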

int sbi_hsm_hart_start(struct sbi_scratch *scratch,
                       const struct sbi_domain *dom,
                       u32 hartid, ulong saddr, ulong smode, ulong arg1)
{
        unsigned long init_count;
        unsigned int hstate;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        /* For now, we only allow start mode to be S-mode or U-mode. */
        if (smode != PRV_S && smode != PRV_U)
                return SBI_EINVAL;
        if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;
        if (dom && !sbi_domain_check_addr(dom, saddr, smode,
                                          SBI_DOMAIN_EXECUTE))
                return SBI_EINVALID_ADDR;

        rscratch = sbi_hartid_to_scratch(hartid);
        if (!rscratch)
                return SBI_EINVAL;
        hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
        hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
                                SBI_HSM_STATE_START_PENDING);
        if (hstate == SBI_HSM_STATE_STARTED)
                return SBI_EALREADY;

        /*
         * If the hart is already transitioning to start or stop,
         * another start call is treated as an invalid request.
         */
        if (hstate != SBI_HSM_STATE_STOPPED)
                return SBI_EINVAL;

        init_count = sbi_init_count(hartid);
        rscratch->next_arg1 = arg1;
        rscratch->next_addr = saddr;
        rscratch->next_mode = smode;

        if (hsm_device_has_hart_hotplug() ||
            (hsm_device_has_hart_secondary_boot() && !init_count)) {
                return hsm_device_hart_start(hartid, scratch->warmboot_addr);
        } else {
                int rc = sbi_ipi_raw_send(hartid);

                if (rc)
                        return rc;
        }

        return 0;
}

int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
        int oldstate;
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        if (!dom)
                return SBI_EFAIL;

        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STARTED,
                                  SBI_HSM_STATE_STOP_PENDING);
        if (oldstate != SBI_HSM_STATE_STARTED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                return SBI_EFAIL;
        }

        if (exitnow)
                sbi_exit(scratch);

        return 0;
}
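
/*
 * Retentive suspend keeps the hart context intact, so the generic
 * fallback below only needs to wait for an interrupt. Non-retentive
 * suspend loses context and resumes through the warm-boot path, so the
 * save/restore helpers that follow preserve the S-mode interrupt CSRs
 * across the cycle.
 */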

static int __sbi_hsm_suspend_default(struct sbi_scratch *scratch)
{
        /* Wait for interrupt */
        wfi();

        return 0;
}

static void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /*
         * We will be resuming in the warm-boot path, so the MIE and MIP
         * CSRs will be back to their initial state. It is possible that
         * the HART configured a timer event before entering the suspend
         * state, so we should save the MIE and MIP CSRs and restore them
         * after resuming.
         *
         * Further, the M-mode bits in the MIP CSR are read-only and set
         * by external devices (such as the interrupt controller), whereas
         * all VS-mode bits in MIP are read-only aliases of bits in the
         * HVIP CSR.
         *
         * This means we should only save/restore the S-mode bits of the
         * MIP CSR, namely MIP.SSIP and MIP.STIP.
         */
        hdata->saved_mie = csr_read(CSR_MIE);
        hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        csr_write(CSR_MIE, hdata->saved_mie);
        csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
}

void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
{
        int oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was SUSPENDED then set RESUME_PENDING state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_SUSPENDED,
                                  SBI_HSM_STATE_RESUME_PENDING);
        if (oldstate != SBI_HSM_STATE_SUSPENDED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }

        hsm_device_hart_resume();
}

void sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch)
{
        u32 oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was RESUME_PENDING then set STARTED state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_RESUME_PENDING,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_RESUME_PENDING) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }

        /*
         * Restore some of the M-mode CSRs which were re-configured by
         * the warm-boot sequence.
         */
        __sbi_hsm_suspend_non_ret_restore(scratch);
}
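
/*
 * suspend_type encoding (per the SBI HSM extension): bit 31
 * (SBI_HSM_SUSP_NON_RET_BIT) distinguishes non-retentive from retentive
 * suspend, the *_DEFAULT values name the two standard types, and values
 * from *_PLATFORM upward are reserved for platform-specific types; the
 * range checks below reject anything in between.
 */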

int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
                         ulong raddr, ulong rmode, ulong arg1)
{
        int oldstate, ret;
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* For now, we only allow suspend from S-mode or U-mode. */

        /* Sanity check on domain assigned to current HART */
        if (!dom)
                return SBI_EINVAL;

        /* Sanity check on suspend type */
        if (SBI_HSM_SUSPEND_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_RET_PLATFORM)
                return SBI_EINVAL;
        if (SBI_HSM_SUSPEND_NON_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
                return SBI_EINVAL;

        /* Additional sanity check for non-retentive suspend */
        if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT) {
                if (rmode != PRV_S && rmode != PRV_U)
                        return SBI_EINVAL;
                if (dom && !sbi_domain_check_addr(dom, raddr, rmode,
                                                  SBI_DOMAIN_EXECUTE))
                        return SBI_EINVALID_ADDR;
        }

        /* Save the resume address and resume mode */
        scratch->next_arg1 = arg1;
        scratch->next_addr = raddr;
        scratch->next_mode = rmode;

        /* Directly move from STARTED to SUSPENDED state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STARTED,
                                  SBI_HSM_STATE_SUSPENDED);
        if (oldstate != SBI_HSM_STATE_STARTED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                ret = SBI_EDENIED;
                goto fail_restore_state;
        }

        /* Save the suspend type */
        hdata->suspend_type = suspend_type;

        /*
         * Save the context which will be restored after resuming from
         * non-retentive suspend.
         */
        if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)
                __sbi_hsm_suspend_non_ret_save(scratch);

        /* Try platform-specific suspend */
        ret = hsm_device_hart_suspend(suspend_type);
        if (ret == SBI_ENOTSUPP) {
                /* Try the generic implementation of default suspend types */
                if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT ||
                    suspend_type == SBI_HSM_SUSPEND_NON_RET_DEFAULT) {
                        ret = __sbi_hsm_suspend_default(scratch);
                }
        }

        /*
         * The platform may have coordinated a retentive suspend, or it may
         * have exited early from a non-retentive suspend. Either way, the
         * caller is not expecting a successful return, so jump to the warm
         * boot entry point to simulate resume from a non-retentive suspend.
         */
        if (ret == 0 && (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)) {
                void (*jump_warmboot)(void) =
                        (void (*)(void))scratch->warmboot_addr;

                jump_warmboot();
        }

fail_restore_state:
        /*
         * We might have successfully resumed from a retentive suspend,
         * or the suspend may have failed. In both cases, restore the
         * state of the hart.
         */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_SUSPENDED,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_SUSPENDED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }

        return ret;
}