sbi_hsm.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */
#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

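/*
 * Atomically move a hart from oldstate to newstate. Evaluates to true if
 * the transition succeeded, false (with an error print) otherwise.
 */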
#define __sbi_hsm_hart_change_state(hdata, oldstate, newstate)	\
({	\
	long state = atomic_cmpxchg(&(hdata)->state, oldstate, newstate); \
	if (state != (oldstate))	\
		sbi_printf("%s: ERR: The hart is in invalid state [%lu]\n", \
			   __func__, state);	\
	state == (oldstate);	\
})

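/* Platform-specific HSM device registered via sbi_hsm_set_device() */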
static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition **/
struct sbi_hsm_data {
	atomic_t state;
	unsigned long suspend_type;
	unsigned long saved_mie;
	unsigned long saved_mip;
	atomic_t start_ticket;
};

bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate,
			       long newstate)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	return __sbi_hsm_hart_change_state(hdata, oldstate, newstate);
}

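/* Read the raw HSM state of the given hart without any domain check */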
int __sbi_hsm_hart_get_state(u32 hartid)
{
	struct sbi_hsm_data *hdata;
	struct sbi_scratch *scratch;

	scratch = sbi_hartid_to_scratch(hartid);
	if (!scratch)
		return SBI_EINVAL;

	hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

	return atomic_read(&hdata->state);
}

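/* Read the HSM state of a hart that must be assigned to the given domain */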
int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
	if (!sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;

	return __sbi_hsm_hart_get_state(hartid);
}

/*
 * Try to acquire the ticket for the given target hart to make sure only
 * one hart prepares the start of the target hart.
 * Returns true if the ticket has been acquired, false otherwise.
 *
 * The function has "acquire" semantics: no memory operations following it
 * in the current hart can be seen before it by other harts.
 * atomic_cmpxchg() provides the memory barriers needed for that.
 */
static bool hsm_start_ticket_acquire(struct sbi_hsm_data *hdata)
{
	return (atomic_cmpxchg(&hdata->start_ticket, 0, 1) == 0);
}

/*
 * Release the ticket for the given target hart.
 *
 * The function has "release" semantics: no memory operations preceding it
 * in the current hart can be seen after it by other harts.
 */
static void hsm_start_ticket_release(struct sbi_hsm_data *hdata)
{
	RISCV_FENCE(rw, w);
	atomic_write(&hdata->start_ticket, 0);
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
				    ulong hbase, ulong *out_hmask)
{
	int hstate;
	ulong i, hmask, dmask;
	ulong hend = sbi_scratch_last_hartid() + 1;

	*out_hmask = 0;
	if (hend <= hbase)
		return SBI_EINVAL;
	if (BITS_PER_LONG < (hend - hbase))
		hend = hbase + BITS_PER_LONG;

	dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
	for (i = hbase; i < hend; i++) {
		hmask = 1UL << (i - hbase);
		if (dmask & hmask) {
			hstate = __sbi_hsm_hart_get_state(i);
			if (hstate == SBI_HSM_STATE_STARTED ||
			    hstate == SBI_HSM_STATE_SUSPENDED)
				*out_hmask |= hmask;
		}
	}

	return 0;
}

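/*
 * Finish starting the current hart: move from START_PENDING to STARTED,
 * release the start ticket and jump to the next booting stage.
 */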
void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
					  u32 hartid)
{
	unsigned long next_arg1;
	unsigned long next_addr;
	unsigned long next_mode;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_START_PENDING,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	next_arg1 = scratch->next_arg1;
	next_addr = scratch->next_addr;
	next_mode = scratch->next_mode;
	hsm_start_ticket_release(hdata);

	sbi_hart_switch_mode(hartid, next_arg1, next_addr, next_mode, false);
}

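/*
 * Park a warm-booted hart in WFI until sbi_hsm_hart_start() moves it to
 * the START_PENDING state.
 */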
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
	unsigned long saved_mie;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Save MIE CSR */
	saved_mie = csr_read(CSR_MIE);

	/* Set MSIE and MEIE bits to receive IPI */
	csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);

	/* Wait for state transition requested by sbi_hsm_hart_start() */
	while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
		wfi();
	}

	/* Restore MIE CSR */
	csr_write(CSR_MIE, saved_mie);

	/*
	 * No need to clear IPI here because the sbi_ipi_init() will
	 * clear it for current HART via sbi_platform_ipi_init().
	 */
}

const struct sbi_hsm_device *sbi_hsm_get_device(void)
{
	return hsm_dev;
}

void sbi_hsm_set_device(const struct sbi_hsm_device *dev)
{
	if (!dev || hsm_dev)
		return;

	hsm_dev = dev;
}

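/* A HSM device supports hart hotplug only if it can both start and stop harts */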
static bool hsm_device_has_hart_hotplug(void)
{
	if (hsm_dev && hsm_dev->hart_start && hsm_dev->hart_stop)
		return true;
	return false;
}

static bool hsm_device_has_hart_secondary_boot(void)
{
	if (hsm_dev && hsm_dev->hart_start && !hsm_dev->hart_stop)
		return true;
	return false;
}

static int hsm_device_hart_start(u32 hartid, ulong saddr)
{
	if (hsm_dev && hsm_dev->hart_start)
		return hsm_dev->hart_start(hartid, saddr);
	return SBI_ENOTSUPP;
}

static int hsm_device_hart_stop(void)
{
	if (hsm_dev && hsm_dev->hart_stop)
		return hsm_dev->hart_stop();
	return SBI_ENOTSUPP;
}

static int hsm_device_hart_suspend(u32 suspend_type)
{
	if (hsm_dev && hsm_dev->hart_suspend)
		return hsm_dev->hart_suspend(suspend_type);
	return SBI_ENOTSUPP;
}

static void hsm_device_hart_resume(void)
{
	if (hsm_dev && hsm_dev->hart_resume)
		hsm_dev->hart_resume();
}

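/*
 * Initialize the HSM subsystem. On cold boot, allocate per-hart HSM data
 * and mark every hart STOPPED except the boot hart (START_PENDING). On
 * warm boot, wait until some other hart requests a start.
 */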
int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
	u32 i;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	if (cold_boot) {
		hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata));
		if (!hart_data_offset)
			return SBI_ENOMEM;

		/* Initialize hart state data for every hart */
		for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
			rscratch = sbi_hartid_to_scratch(i);
			if (!rscratch)
				continue;

			hdata = sbi_scratch_offset_ptr(rscratch,
						       hart_data_offset);
			ATOMIC_INIT(&hdata->state,
				    (i == hartid) ?
				    SBI_HSM_STATE_START_PENDING :
				    SBI_HSM_STATE_STOPPED);
			ATOMIC_INIT(&hdata->start_ticket, 0);
		}
	} else {
		sbi_hsm_hart_wait(scratch, hartid);
	}

	return 0;
}

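/*
 * Finish stopping the current hart: move from STOP_PENDING to STOPPED and
 * either let the HSM device stop the hart or re-enter the warm-boot path.
 */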
void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);
	void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STOP_PENDING,
					 SBI_HSM_STATE_STOPPED))
		goto fail_exit;

	if (hsm_device_has_hart_hotplug()) {
		if (hsm_device_hart_stop() != SBI_ENOTSUPP)
			goto fail_exit;
	}

	/*
	 * The platform lacks hart hotplug support, so jump directly to the
	 * warm-boot path and wait for interrupts there. We do this
	 * preemptively to preserve the hart state and reuse the hotplug
	 * code path.
	 */
	jump_warmboot();

fail_exit:
	/* It should never reach here */
	sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
	sbi_hart_hang();
}

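/*
 * Start a stopped hart: validate the request, publish the next address,
 * mode and argument via the target hart's scratch area, then wake the
 * hart through the HSM device or an IPI.
 */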
int sbi_hsm_hart_start(struct sbi_scratch *scratch,
		       const struct sbi_domain *dom,
		       u32 hartid, ulong saddr, ulong smode, ulong arg1)
{
	unsigned long init_count;
	unsigned int hstate;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;
	int rc;

	/* For now, we only allow start mode to be S-mode or U-mode. */
	if (smode != PRV_S && smode != PRV_U)
		return SBI_EINVAL;
	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;
	if (dom && !sbi_domain_check_addr(dom, saddr, smode,
					  SBI_DOMAIN_EXECUTE))
		return SBI_EINVALID_ADDR;

	rscratch = sbi_hartid_to_scratch(hartid);
	if (!rscratch)
		return SBI_EINVAL;
	hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
	if (!hsm_start_ticket_acquire(hdata))
		return SBI_EINVAL;

	init_count = sbi_init_count(hartid);
	rscratch->next_arg1 = arg1;
	rscratch->next_addr = saddr;
	rscratch->next_mode = smode;

	/*
	 * atomic_cmpxchg() is an implicit barrier. It makes sure that
	 * other harts see reading of init_count and writing to *rscratch
	 * before hdata->state is set to SBI_HSM_STATE_START_PENDING.
	 */
	hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
				SBI_HSM_STATE_START_PENDING);
	if (hstate == SBI_HSM_STATE_STARTED) {
		rc = SBI_EALREADY;
		goto err;
	}

	/*
	 * If a hart is already transitioning to the started or stopped
	 * state, another start call is treated as an invalid request.
	 */
	if (hstate != SBI_HSM_STATE_STOPPED) {
		rc = SBI_EINVAL;
		goto err;
	}

	if (hsm_device_has_hart_hotplug() ||
	    (hsm_device_has_hart_secondary_boot() && !init_count)) {
		rc = hsm_device_hart_start(hartid, scratch->warmboot_addr);
	} else {
		rc = sbi_ipi_raw_send(hartid);
	}

	if (!rc)
		return 0;
err:
	hsm_start_ticket_release(hdata);
	return rc;
}

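/*
 * Request stop of the current hart by moving it from STARTED to
 * STOP_PENDING; if exitnow is set, exit the SBI right away.
 */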
int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
	const struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!dom)
		return SBI_EFAIL;

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
					 SBI_HSM_STATE_STOP_PENDING))
		return SBI_EFAIL;

	if (exitnow)
		sbi_exit(scratch);

	return 0;
}

static int __sbi_hsm_suspend_default(struct sbi_scratch *scratch)
{
	/* Wait for interrupt */
	wfi();

	return 0;
}

void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/*
	 * We will be resuming in the warm-boot path, so the MIE and MIP
	 * CSRs will be back to their initial state. The HART may have
	 * configured a timer event before going to the suspend state, so
	 * we save the MIE and MIP CSRs here and restore them after
	 * resuming.
	 *
	 * Further, the M-mode bits in the MIP CSR are read-only and set by
	 * external devices (such as the interrupt controller), whereas all
	 * VS-mode bits in MIP are read-only aliases of bits in the HVIP
	 * CSR.
	 *
	 * This means we should only save/restore the S-mode bits of the
	 * MIP CSR, namely MIP.SSIP and MIP.STIP.
	 */
	hdata->saved_mie = csr_read(CSR_MIE);
	hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	csr_write(CSR_MIE, hdata->saved_mie);
	csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
}

void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* If current HART was SUSPENDED then set RESUME_PENDING state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
					 SBI_HSM_STATE_RESUME_PENDING))
		sbi_hart_hang();

	hsm_device_hart_resume();
}

void __noreturn sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch,
					   u32 hartid)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* If current HART was RESUME_PENDING then set STARTED state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_RESUME_PENDING,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	/*
	 * Restore some of the M-mode CSRs which were re-configured by
	 * the warm-boot sequence.
	 */
	__sbi_hsm_suspend_non_ret_restore(scratch);

	sbi_hart_switch_mode(hartid, scratch->next_arg1,
			     scratch->next_addr,
			     scratch->next_mode, false);
}

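/*
 * Suspend the current hart: validate the suspend type, record the resume
 * address and mode, save non-retentive context if needed, then try the
 * platform suspend and fall back to the generic WFI-based default.
 */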
int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
			 ulong raddr, ulong rmode, ulong arg1)
{
	int ret;
	const struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Sanity check on domain assigned to current HART */
	if (!dom)
		return SBI_EFAIL;

	/* Sanity check on suspend type */
	if (SBI_HSM_SUSPEND_RET_DEFAULT < suspend_type &&
	    suspend_type < SBI_HSM_SUSPEND_RET_PLATFORM)
		return SBI_EINVAL;
	if (SBI_HSM_SUSPEND_NON_RET_DEFAULT < suspend_type &&
	    suspend_type < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return SBI_EINVAL;

	/* Additional sanity check for non-retentive suspend */
	if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT) {
		/*
		 * For now, we only allow non-retentive suspend from
		 * S-mode or U-mode.
		 */
		if (rmode != PRV_S && rmode != PRV_U)
			return SBI_EFAIL;
		if (dom && !sbi_domain_check_addr(dom, raddr, rmode,
						  SBI_DOMAIN_EXECUTE))
			return SBI_EINVALID_ADDR;
	}

	/* Save the resume address and resume mode */
	scratch->next_arg1 = arg1;
	scratch->next_addr = raddr;
	scratch->next_mode = rmode;

	/* Directly move from STARTED to SUSPENDED state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
					 SBI_HSM_STATE_SUSPENDED))
		return SBI_EFAIL;

	/* Save the suspend type */
	hdata->suspend_type = suspend_type;

	/*
	 * Save context which will be restored after resuming from
	 * non-retentive suspend.
	 */
	if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)
		__sbi_hsm_suspend_non_ret_save(scratch);

	/* Try platform specific suspend */
	ret = hsm_device_hart_suspend(suspend_type);
	if (ret == SBI_ENOTSUPP) {
		/* Try generic implementation of default suspend types */
		if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT ||
		    suspend_type == SBI_HSM_SUSPEND_NON_RET_DEFAULT) {
			ret = __sbi_hsm_suspend_default(scratch);
		}
	}

	/*
	 * The platform may have coordinated a retentive suspend, or it may
	 * have exited early from a non-retentive suspend. Either way, the
	 * caller is not expecting a successful return, so jump to the warm
	 * boot entry point to simulate resume from a non-retentive suspend.
	 */
	if (ret == 0 && (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)) {
		void (*jump_warmboot)(void) =
			(void (*)(void))scratch->warmboot_addr;

		jump_warmboot();
	}

	/*
	 * We may have successfully resumed from a retentive suspend, or the
	 * suspend may have failed. In either case, restore the hart state.
	 */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	return ret;
}