sbi_hsm.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

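/*
 * The macro below attempts to atomically move a hart's HSM state from
 * oldstate to newstate using a compare-and-exchange. It evaluates to true
 * when the transition succeeded and to false (after printing an error)
 * when the hart was not in oldstate. Callers that require the transition
 * to succeed typically hang on failure, for example (taken from
 * sbi_hsm_hart_start_finish() further below):
 *
 *        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_START_PENDING,
 *                                         SBI_HSM_STATE_STARTED))
 *                sbi_hart_hang();
 */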
#define __sbi_hsm_hart_change_state(hdata, oldstate, newstate)              \
({                                                                           \
        long state = atomic_cmpxchg(&(hdata)->state, oldstate, newstate);   \
        if (state != (oldstate))                                             \
                sbi_printf("%s: ERR: The hart is in invalid state [%lu]\n", \
                           __func__, state);                                 \
        state == (oldstate);                                                 \
})

static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition */
struct sbi_hsm_data {
        atomic_t state;
        unsigned long suspend_type;
        unsigned long saved_mie;
        unsigned long saved_mip;
};
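
/*
 * HSM state machine as driven by this file (state values are defined in
 * sbi_hsm.h):
 *
 *   STOPPED   -> START_PENDING  -> STARTED    via sbi_hsm_hart_start()
 *   STARTED   -> STOP_PENDING   -> STOPPED    via sbi_hsm_hart_stop()/exit()
 *   STARTED   -> SUSPENDED                    via sbi_hsm_hart_suspend()
 *   SUSPENDED -> RESUME_PENDING -> STARTED    non-retentive resume (warm boot)
 *   SUSPENDED -> STARTED                      retentive resume or failed suspend
 */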

bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate,
                               long newstate)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        return __sbi_hsm_hart_change_state(hdata, oldstate, newstate);
}

int __sbi_hsm_hart_get_state(u32 hartid)
{
        struct sbi_hsm_data *hdata;
        struct sbi_scratch *scratch;

        scratch = sbi_hartid_to_scratch(hartid);
        if (!scratch)
                return SBI_EINVAL;

        hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

        return atomic_read(&hdata->state);
}

int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
        if (!sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;

        return __sbi_hsm_hart_get_state(hartid);
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
                                    ulong hbase, ulong *out_hmask)
{
        int hstate;
        ulong i, hmask, dmask;
        ulong hend = sbi_scratch_last_hartid() + 1;

        *out_hmask = 0;
        if (hend <= hbase)
                return SBI_EINVAL;
        if (BITS_PER_LONG < (hend - hbase))
                hend = hbase + BITS_PER_LONG;

        dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
        for (i = hbase; i < hend; i++) {
                hmask = 1UL << (i - hbase);
                if (dmask & hmask) {
                        hstate = __sbi_hsm_hart_get_state(i);
                        if (hstate == SBI_HSM_STATE_STARTED ||
                            hstate == SBI_HSM_STATE_SUSPENDED)
                                *out_hmask |= hmask;
                }
        }

        return 0;
}
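
/*
 * A minimal sketch of how a caller (for example, an IPI sender) might use
 * the helper above to walk all interruptible harts of a domain in
 * BITS_PER_LONG-sized windows; "dom" and "process_hart" are illustrative
 * names, not part of this file:
 *
 *        ulong i, hbase = 0, hmask;
 *
 *        while (!sbi_hsm_hart_interruptible_mask(dom, hbase, &hmask)) {
 *                for (i = 0; i < BITS_PER_LONG; i++)
 *                        if (hmask & (1UL << i))
 *                                process_hart(hbase + i);
 *                hbase += BITS_PER_LONG;
 *        }
 */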

void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
                                          u32 hartid)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_START_PENDING,
                                         SBI_HSM_STATE_STARTED))
                sbi_hart_hang();

        sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
                             scratch->next_mode, false);
}

static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
        unsigned long saved_mie;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* Save MIE CSR */
        saved_mie = csr_read(CSR_MIE);

        /* Set MSIE and MEIE bits to receive IPI */
        csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);

        /* Wait for state transition requested by sbi_hsm_hart_start() */
        while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
                wfi();
        }

        /* Restore MIE CSR */
        csr_write(CSR_MIE, saved_mie);

        /*
         * No need to clear the IPI here because sbi_ipi_init() will
         * clear it for the current HART via sbi_platform_ipi_init().
         */
}

const struct sbi_hsm_device *sbi_hsm_get_device(void)
{
        return hsm_dev;
}

void sbi_hsm_set_device(const struct sbi_hsm_device *dev)
{
        if (!dev || hsm_dev)
                return;

        hsm_dev = dev;
}

static bool hsm_device_has_hart_hotplug(void)
{
        if (hsm_dev && hsm_dev->hart_start && hsm_dev->hart_stop)
                return true;
        return false;
}

static bool hsm_device_has_hart_secondary_boot(void)
{
        if (hsm_dev && hsm_dev->hart_start && !hsm_dev->hart_stop)
                return true;
        return false;
}

static int hsm_device_hart_start(u32 hartid, ulong saddr)
{
        if (hsm_dev && hsm_dev->hart_start)
                return hsm_dev->hart_start(hartid, saddr);
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_stop(void)
{
        if (hsm_dev && hsm_dev->hart_stop)
                return hsm_dev->hart_stop();
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_suspend(u32 suspend_type)
{
        if (hsm_dev && hsm_dev->hart_suspend)
                return hsm_dev->hart_suspend(suspend_type);
        return SBI_ENOTSUPP;
}

static void hsm_device_hart_resume(void)
{
        if (hsm_dev && hsm_dev->hart_resume)
                hsm_dev->hart_resume();
}
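
/*
 * A platform typically provides the above callbacks through a statically
 * allocated struct sbi_hsm_device registered during cold boot. A minimal
 * sketch (the "foo" names are hypothetical; the field layout is assumed
 * from the callbacks used above and from sbi_hsm.h):
 *
 *        static int foo_hart_start(u32 hartid, ulong saddr) { ... }
 *        static int foo_hart_stop(void) { ... }
 *
 *        static const struct sbi_hsm_device foo_hsm = {
 *                .name       = "foo-hsm",
 *                .hart_start = foo_hart_start,
 *                .hart_stop  = foo_hart_stop,
 *        };
 *
 * Registration, e.g. from the platform's early init code:
 *
 *        sbi_hsm_set_device(&foo_hsm);
 */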

int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
        u32 i;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        if (cold_boot) {
                hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata));
                if (!hart_data_offset)
                        return SBI_ENOMEM;

                /* Initialize hart state data for every hart */
                for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
                        rscratch = sbi_hartid_to_scratch(i);
                        if (!rscratch)
                                continue;

                        hdata = sbi_scratch_offset_ptr(rscratch,
                                                       hart_data_offset);
                        ATOMIC_INIT(&hdata->state,
                                    (i == hartid) ?
                                    SBI_HSM_STATE_START_PENDING :
                                    SBI_HSM_STATE_STOPPED);
                }
        } else {
                sbi_hsm_hart_wait(scratch, hartid);
        }

        return 0;
}

void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);
        void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STOP_PENDING,
                                         SBI_HSM_STATE_STOPPED))
                goto fail_exit;

        if (hsm_device_has_hart_hotplug()) {
                if (hsm_device_hart_stop() != SBI_ENOTSUPP)
                        goto fail_exit;
        }

        /**
         * As the platform lacks hotplug support, directly jump to the warm
         * boot entry point and wait for interrupts there. We do this
         * preemptively in order to preserve the hart states and reuse the
         * code path for hotplug.
         */
        jump_warmboot();

fail_exit:
        /* It should never reach here */
        sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
        sbi_hart_hang();
}

int sbi_hsm_hart_start(struct sbi_scratch *scratch,
                       const struct sbi_domain *dom,
                       u32 hartid, ulong saddr, ulong smode, ulong arg1)
{
        unsigned long init_count;
        unsigned int hstate;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        /* For now, we only allow start mode to be S-mode or U-mode. */
        if (smode != PRV_S && smode != PRV_U)
                return SBI_EINVAL;
        if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;
        if (dom && !sbi_domain_check_addr(dom, saddr, smode,
                                          SBI_DOMAIN_EXECUTE))
                return SBI_EINVALID_ADDR;

        rscratch = sbi_hartid_to_scratch(hartid);
        if (!rscratch)
                return SBI_EINVAL;

        hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
        hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
                                SBI_HSM_STATE_START_PENDING);
        if (hstate == SBI_HSM_STATE_STARTED)
                return SBI_EALREADY;

        /**
         * If a hart is already transitioning to start or stop, another
         * start call is considered an invalid request.
         */
        if (hstate != SBI_HSM_STATE_STOPPED)
                return SBI_EINVAL;

        init_count = sbi_init_count(hartid);
        rscratch->next_arg1 = arg1;
        rscratch->next_addr = saddr;
        rscratch->next_mode = smode;

        if (hsm_device_has_hart_hotplug() ||
            (hsm_device_has_hart_secondary_boot() && !init_count)) {
                return hsm_device_hart_start(hartid, scratch->warmboot_addr);
        } else {
                int rc = sbi_ipi_raw_send(hartid);

                if (rc)
                        return rc;
        }

        return 0;
}
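
/*
 * sbi_hsm_hart_start() backs the HART_START function of the SBI HSM
 * extension. From S-mode, an OS reaches it with an ecall along the lines
 * of the following sketch (register usage per the SBI spec; the constants
 * are from sbi_ecall_interface.h):
 *
 *        a7 = SBI_EXT_HSM;  a6 = SBI_EXT_HSM_HART_START;
 *        a0 = hartid;  a1 = saddr;  a2 = arg1 (opaque);
 *        ecall;
 *
 * The target hart eventually enters saddr in the requested mode with
 * a0 = hartid and a1 = the opaque value (via scratch->next_arg1).
 */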

int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        if (!dom)
                return SBI_EFAIL;

        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
                                         SBI_HSM_STATE_STOP_PENDING))
                return SBI_EFAIL;

        if (exitnow)
                sbi_exit(scratch);

        return 0;
}

static int __sbi_hsm_suspend_default(struct sbi_scratch *scratch)
{
        /* Wait for interrupt */
        wfi();

        return 0;
}

void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /*
         * We will be resuming in the warm-boot path, so the MIE and MIP CSRs
         * will be back to their initial state. It is possible that the HART
         * has configured a timer event before going to the suspend state, so
         * we should save the MIE and MIP CSRs and restore them after resuming.
         *
         * Further, the M-mode bits in the MIP CSR are read-only and set by
         * external devices (such as the interrupt controller), whereas all
         * VS-mode bits in MIP are read-only aliases of bits in the HVIP CSR.
         *
         * This means we should only save/restore the S-mode bits of the MIP
         * CSR, such as MIP.SSIP and MIP.STIP.
         */
        hdata->saved_mie = csr_read(CSR_MIE);
        hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        csr_write(CSR_MIE, hdata->saved_mie);
        csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
}

void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was SUSPENDED then set RESUME_PENDING state */
        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
                                         SBI_HSM_STATE_RESUME_PENDING))
                sbi_hart_hang();

        hsm_device_hart_resume();
}

void __noreturn sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch,
                                           u32 hartid)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was RESUME_PENDING then set STARTED state */
        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_RESUME_PENDING,
                                         SBI_HSM_STATE_STARTED))
                sbi_hart_hang();

        /*
         * Restore some of the M-mode CSRs which were re-configured by
         * the warm-boot sequence.
         */
        __sbi_hsm_suspend_non_ret_restore(scratch);

        sbi_hart_switch_mode(hartid, scratch->next_arg1,
                             scratch->next_addr,
                             scratch->next_mode, false);
}

int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
                         ulong raddr, ulong rmode, ulong arg1)
{
        int ret;
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* Sanity check on domain assigned to current HART */
        if (!dom)
                return SBI_EFAIL;
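
        /*
         * suspend_type follows the SBI HSM encoding: the top bit
         * (SBI_HSM_SUSP_NON_RET_BIT) selects a non-retentive suspend,
         * SBI_HSM_SUSPEND_RET_DEFAULT / SBI_HSM_SUSPEND_NON_RET_DEFAULT are
         * the default types, and values starting at the *_PLATFORM constants
         * are platform-specific. The two checks below reject values falling
         * in the reserved ranges between the default and platform values.
         */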
        /* Sanity check on suspend type */
        if (SBI_HSM_SUSPEND_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_RET_PLATFORM)
                return SBI_EINVAL;
        if (SBI_HSM_SUSPEND_NON_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
                return SBI_EINVAL;

        /* Additional sanity check for non-retentive suspend */
        if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT) {
                /*
                 * For now, we only allow non-retentive suspend from
                 * S-mode or U-mode.
                 */
                if (rmode != PRV_S && rmode != PRV_U)
                        return SBI_EFAIL;
                if (dom && !sbi_domain_check_addr(dom, raddr, rmode,
                                                  SBI_DOMAIN_EXECUTE))
                        return SBI_EINVALID_ADDR;
        }

        /* Save the resume address and resume mode */
        scratch->next_arg1 = arg1;
        scratch->next_addr = raddr;
        scratch->next_mode = rmode;

        /* Directly move from STARTED to SUSPENDED state */
        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
                                         SBI_HSM_STATE_SUSPENDED))
                return SBI_EFAIL;

        /* Save the suspend type */
        hdata->suspend_type = suspend_type;

        /*
         * Save the context which will be restored after resuming from
         * non-retentive suspend.
         */
        if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)
                __sbi_hsm_suspend_non_ret_save(scratch);

        /* Try platform specific suspend */
        ret = hsm_device_hart_suspend(suspend_type);
        if (ret == SBI_ENOTSUPP) {
                /* Try generic implementation of default suspend types */
                if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT ||
                    suspend_type == SBI_HSM_SUSPEND_NON_RET_DEFAULT) {
                        ret = __sbi_hsm_suspend_default(scratch);
                }
        }

        /*
         * The platform may have coordinated a retentive suspend, or it may
         * have exited early from a non-retentive suspend. Either way, the
         * caller is not expecting a successful return, so jump to the warm
         * boot entry point to simulate resume from a non-retentive suspend.
         */
        if (ret == 0 && (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)) {
                void (*jump_warmboot)(void) =
                        (void (*)(void))scratch->warmboot_addr;

                jump_warmboot();
        }

        /*
         * We might have successfully resumed from retentive suspend, or the
         * suspend may have failed. In both cases, we restore the state of
         * the hart.
         */
        if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
                                         SBI_HSM_STATE_STARTED))
                sbi_hart_hang();

        return ret;
}
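
/*
 * An S-mode OS reaches sbi_hsm_hart_suspend() through the HART_SUSPEND
 * function of the SBI HSM extension, roughly as in the sketch below
 * (register usage per the SBI spec; constants from sbi_ecall_interface.h):
 *
 *        a7 = SBI_EXT_HSM;  a6 = SBI_EXT_HSM_HART_SUSPEND;
 *        a0 = suspend_type;  a1 = raddr;  a2 = arg1 (opaque);
 *        ecall;
 *
 * A retentive suspend returns in place once the hart wakes up, whereas a
 * non-retentive suspend restarts the hart at raddr via the warm-boot path
 * with a0 = hartid and a1 = the opaque value.
 */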