/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */
#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;

/** Per-hart data to manage state transitions */
struct sbi_hsm_data {
        atomic_t state;
        unsigned long suspend_type;
        unsigned long saved_mie;
        unsigned long saved_mip;
};

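/*
 * HSM state machine implemented by this file:
 *
 *   STOPPED        -> START_PENDING   (sbi_hsm_hart_start)
 *   START_PENDING  -> STARTED         (sbi_hsm_prepare_next_jump)
 *   STARTED        -> STOP_PENDING    (sbi_hsm_hart_stop)
 *   STOP_PENDING   -> STOPPED         (sbi_hsm_exit)
 *   STARTED        -> SUSPENDED       (sbi_hsm_hart_suspend)
 *   SUSPENDED      -> RESUME_PENDING  (sbi_hsm_hart_resume_start)
 *   RESUME_PENDING -> STARTED         (sbi_hsm_hart_resume_finish)
 *
 * Every transition is done with atomic_cmpxchg() so that concurrent
 * requests from other HARTs cannot race a HART out of a pending state.
 */
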
static inline int __sbi_hsm_hart_get_state(u32 hartid)
{
        struct sbi_hsm_data *hdata;
        struct sbi_scratch *scratch;

        scratch = sbi_hartid_to_scratch(hartid);
        if (!scratch)
                return SBI_EINVAL;

        hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

        return atomic_read(&hdata->state);
}

int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
        if (!sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;

        return __sbi_hsm_hart_get_state(hartid);
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
                                    ulong hbase, ulong *out_hmask)
{
        int hstate;
        ulong i, hmask, dmask;
        ulong hend = sbi_scratch_last_hartid() + 1;

        *out_hmask = 0;
        if (hend <= hbase)
                return SBI_EINVAL;
        if (BITS_PER_LONG < (hend - hbase))
                hend = hbase + BITS_PER_LONG;

        dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
        for (i = hbase; i < hend; i++) {
                hmask = 1UL << (i - hbase);
                if (dmask & hmask) {
                        hstate = __sbi_hsm_hart_get_state(i);
                        if (hstate == SBI_HSM_STATE_STARTED ||
                            hstate == SBI_HSM_STATE_SUSPENDED)
                                *out_hmask |= hmask;
                }
        }

        return 0;
}

void sbi_hsm_prepare_next_jump(struct sbi_scratch *scratch, u32 hartid)
{
        u32 oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_START_PENDING,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_START_PENDING)
                sbi_hart_hang();
}

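/*
 * Park a warm-booted HART until some other HART moves its state to
 * START_PENDING via sbi_hsm_hart_start() and wakes it with an IPI.
 */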
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
        unsigned long saved_mie;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* Save MIE CSR */
        saved_mie = csr_read(CSR_MIE);

        /* Set MSIE bit to receive IPI */
        csr_set(CSR_MIE, MIP_MSIP);

        /* Wait for the state transition requested by sbi_hsm_hart_start() */
        while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
                wfi();
        }

        /* Restore MIE CSR */
        csr_write(CSR_MIE, saved_mie);

        /*
         * No need to clear IPI here because the sbi_ipi_init() will
         * clear it for current HART via sbi_platform_ipi_init().
         */
}

const struct sbi_hsm_device *sbi_hsm_get_device(void)
{
        return hsm_dev;
}

void sbi_hsm_set_device(const struct sbi_hsm_device *dev)
{
        if (!dev || hsm_dev)
                return;

        hsm_dev = dev;
}

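/*
 * A HSM device that provides both hart_start() and hart_stop()
 * supports full hotplug, whereas a device with only hart_start()
 * can merely release secondary HARTs on their first boot.
 */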
static bool hsm_device_has_hart_hotplug(void)
{
        if (hsm_dev && hsm_dev->hart_start && hsm_dev->hart_stop)
                return true;
        return false;
}

static bool hsm_device_has_hart_secondary_boot(void)
{
        if (hsm_dev && hsm_dev->hart_start && !hsm_dev->hart_stop)
                return true;
        return false;
}

static int hsm_device_hart_start(u32 hartid, ulong saddr)
{
        if (hsm_dev && hsm_dev->hart_start)
                return hsm_dev->hart_start(hartid, saddr);
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_stop(void)
{
        if (hsm_dev && hsm_dev->hart_stop)
                return hsm_dev->hart_stop();
        return SBI_ENOTSUPP;
}

static int hsm_device_hart_suspend(u32 suspend_type, ulong raddr)
{
        if (hsm_dev && hsm_dev->hart_suspend)
                return hsm_dev->hart_suspend(suspend_type, raddr);
        return SBI_ENOTSUPP;
}

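/*
 * Cold boot: allocate the per-HART state area and mark every HART
 * except the booting one as STOPPED. Warm boot: park the calling
 * HART until it is explicitly started.
 */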
int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
        u32 i;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        if (cold_boot) {
                hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata),
                                                            "HART_DATA");
                if (!hart_data_offset)
                        return SBI_ENOMEM;

                /* Initialize hart state data for every hart */
                for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
                        rscratch = sbi_hartid_to_scratch(i);
                        if (!rscratch)
                                continue;

                        hdata = sbi_scratch_offset_ptr(rscratch,
                                                       hart_data_offset);
                        ATOMIC_INIT(&hdata->state,
                                    (i == hartid) ?
                                    SBI_HSM_STATE_START_PENDING :
                                    SBI_HSM_STATE_STOPPED);
                }
        } else {
                sbi_hsm_hart_wait(scratch, hartid);
        }

        return 0;
}

void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
        u32 hstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);
        void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

        hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOP_PENDING,
                                SBI_HSM_STATE_STOPPED);
        if (hstate != SBI_HSM_STATE_STOP_PENDING)
                goto fail_exit;

        if (hsm_device_has_hart_hotplug()) {
                hsm_device_hart_stop();
                /* It should never reach here */
                goto fail_exit;
        }

        /*
         * As the platform lacks hotplug support, directly jump to the
         * warm-boot path and wait for interrupts there. We do this
         * preemptively in order to preserve the hart states and reuse
         * the hotplug code path.
         */
        jump_warmboot();

fail_exit:
        /* It should never reach here */
        sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
        sbi_hart_hang();
}

int sbi_hsm_hart_start(struct sbi_scratch *scratch,
                       const struct sbi_domain *dom,
                       u32 hartid, ulong saddr, ulong smode, ulong priv)
{
        unsigned long init_count;
        unsigned int hstate;
        struct sbi_scratch *rscratch;
        struct sbi_hsm_data *hdata;

        /* For now, we only allow start mode to be S-mode or U-mode. */
        if (smode != PRV_S && smode != PRV_U)
                return SBI_EINVAL;
        if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
                return SBI_EINVAL;
        if (dom && !sbi_domain_check_addr(dom, saddr, smode,
                                          SBI_DOMAIN_EXECUTE))
                return SBI_EINVALID_ADDR;

        rscratch = sbi_hartid_to_scratch(hartid);
        if (!rscratch)
                return SBI_EINVAL;

        hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
        hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
                                SBI_HSM_STATE_START_PENDING);
        if (hstate == SBI_HSM_STATE_STARTED)
                return SBI_EALREADY;

        /*
         * If a hart is already transitioning to start or stop, another
         * start call is considered an invalid request.
         */
        if (hstate != SBI_HSM_STATE_STOPPED)
                return SBI_EINVAL;

        init_count = sbi_init_count(hartid);
        rscratch->next_arg1 = priv;
        rscratch->next_addr = saddr;
        rscratch->next_mode = smode;

        if (hsm_device_has_hart_hotplug() ||
            (hsm_device_has_hart_secondary_boot() && !init_count)) {
                return hsm_device_hart_start(hartid, scratch->warmboot_addr);
        } else {
                sbi_ipi_raw_send(hartid);
        }

        return 0;
}

int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
        int oldstate;
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        if (!dom)
                return SBI_EFAIL;

        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STARTED,
                                  SBI_HSM_STATE_STOP_PENDING);
        if (oldstate != SBI_HSM_STATE_STARTED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                return SBI_EFAIL;
        }

        if (exitnow)
                sbi_exit(scratch);

        return 0;
}

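/*
 * Generic fallbacks used when the HSM device does not implement
 * hart_suspend(): retentive suspend is a plain WFI, whereas
 * non-retentive suspend saves the relevant CSRs, waits for an
 * interrupt, and then simulates resume via the warm-boot path.
 */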
static int __sbi_hsm_suspend_ret_default(struct sbi_scratch *scratch)
{
        /* Wait for interrupt */
        wfi();
        return 0;
}

static void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /*
         * We will be resuming in the warm-boot path so the MIE and MIP
         * CSRs will be back to their initial state. It is possible that
         * the HART has configured a timer event before going to suspend
         * state so we should save the MIE and MIP CSRs and restore them
         * after resuming.
         *
         * Further, the M-mode bits in the MIP CSR are read-only and set
         * by external devices (such as an interrupt controller) whereas
         * all VS-mode bits in MIP are read-only aliases of bits in the
         * HVIP CSR.
         *
         * This means we should only save/restore the S-mode bits of the
         * MIP CSR such as MIP.SSIP and MIP.STIP.
         */
        hdata->saved_mie = csr_read(CSR_MIE);
        hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
{
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        csr_write(CSR_MIE, hdata->saved_mie);
        csr_write(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
}

static int __sbi_hsm_suspend_non_ret_default(struct sbi_scratch *scratch,
                                             ulong raddr)
{
        void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

        /*
         * Save some of the M-mode CSRs which should be restored after
         * resuming from suspend state
         */
        __sbi_hsm_suspend_non_ret_save(scratch);

        /* Wait for interrupt */
        wfi();

        /*
         * Directly jump to warm reboot to simulate resume from a
         * non-retentive suspend.
         */
        jump_warmboot();

        return 0;
}

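/*
 * The resume_start()/resume_finish() pair brackets the warm-boot
 * sequence when a HART comes back from non-retentive suspend:
 * resume_start() moves SUSPENDED to RESUME_PENDING, and
 * resume_finish() moves RESUME_PENDING to STARTED and restores the
 * CSRs saved before the suspend.
 */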
void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
{
        int oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was SUSPENDED then set RESUME_PENDING state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_SUSPENDED,
                                  SBI_HSM_STATE_RESUME_PENDING);
        if (oldstate != SBI_HSM_STATE_SUSPENDED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }
}

void sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch)
{
        u32 oldstate;
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* If current HART was RESUME_PENDING then set STARTED state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_RESUME_PENDING,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_RESUME_PENDING) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }

        /*
         * Restore some of the M-mode CSRs which were re-configured by
         * the warm-boot sequence.
         */
        __sbi_hsm_suspend_non_ret_restore(scratch);
}

int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
                         ulong raddr, ulong rmode, ulong priv)
{
        int oldstate, ret;
        const struct sbi_domain *dom = sbi_domain_thishart_ptr();
        struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
                                                            hart_data_offset);

        /* Sanity check on domain assigned to current HART */
        if (!dom)
                return SBI_EINVAL;

        /* Sanity check on suspend type */
        if (SBI_HSM_SUSPEND_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_RET_PLATFORM)
                return SBI_EINVAL;
        if (SBI_HSM_SUSPEND_NON_RET_DEFAULT < suspend_type &&
            suspend_type < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
                return SBI_EINVAL;

        /* Additional sanity check for non-retentive suspend */
        if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT) {
                /* For now, we only allow resume mode to be S-mode or U-mode. */
                if (rmode != PRV_S && rmode != PRV_U)
                        return SBI_EINVAL;
                if (dom && !sbi_domain_check_addr(dom, raddr, rmode,
                                                  SBI_DOMAIN_EXECUTE))
                        return SBI_EINVALID_ADDR;
        }

        /* Save the resume address and resume mode */
        scratch->next_arg1 = priv;
        scratch->next_addr = raddr;
        scratch->next_mode = rmode;

        /* Directly move from STARTED to SUSPENDED state */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STARTED,
                                  SBI_HSM_STATE_SUSPENDED);
        if (oldstate != SBI_HSM_STATE_STARTED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                ret = SBI_EDENIED;
                goto fail_restore_state;
        }

        /* Save the suspend type */
        hdata->suspend_type = suspend_type;

        /* Try platform specific suspend */
        ret = hsm_device_hart_suspend(suspend_type, scratch->warmboot_addr);
        if (ret == SBI_ENOTSUPP) {
                /* Try generic implementation of default suspend types */
                if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT) {
                        ret = __sbi_hsm_suspend_ret_default(scratch);
                } else if (suspend_type == SBI_HSM_SUSPEND_NON_RET_DEFAULT) {
                        ret = __sbi_hsm_suspend_non_ret_default(scratch,
                                                scratch->warmboot_addr);
                }
        }

fail_restore_state:
        /*
         * We might have successfully resumed from retentive suspend
         * or the suspend failed. In both cases, we restore the state
         * of the hart.
         */
        oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_SUSPENDED,
                                  SBI_HSM_STATE_STARTED);
        if (oldstate != SBI_HSM_STATE_SUSPENDED) {
                sbi_printf("%s: ERR: The hart is in invalid state [%u]\n",
                           __func__, oldstate);
                sbi_hart_hang();
        }

        return ret;
}