
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 */
#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/riscv_atomic.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition */
struct sbi_hsm_data {
	atomic_t state;
};
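
/*
 * HSM state machine implemented below (per the SBI HSM extension):
 * STOPPED -> START_PENDING -> STARTED -> STOP_PENDING -> STOPPED.
 * Every transition is done with an atomic compare-and-swap on
 * hdata->state so that racing callers observe a consistent state.
 */

/*
 * Read the raw HSM state of a hart without any domain ownership check.
 * Returns SBI_EINVAL if the hartid has no scratch space.
 */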
static inline int __sbi_hsm_hart_get_state(u32 hartid)
{
	struct sbi_hsm_data *hdata;
	struct sbi_scratch *scratch;

	scratch = sbi_hartid_to_scratch(hartid);
	if (!scratch)
		return SBI_EINVAL;

	hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);
	return atomic_read(&hdata->state);
}
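
/*
 * Get the HSM state of a hart, checking first that the hart is
 * assigned to the given domain.
 */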
int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
	if (!sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;

	return __sbi_hsm_hart_get_state(hartid);
}
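
/* Check whether a hart of the given domain is in the STARTED state */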
static bool sbi_hsm_hart_started(const struct sbi_domain *dom, u32 hartid)
{
	if (sbi_hsm_hart_get_state(dom, hartid) == SBI_HSM_STATE_STARTED)
		return TRUE;
	else
		return FALSE;
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
				    ulong hbase, ulong *out_hmask)
{
	ulong i, hmask, dmask;
	ulong hend = sbi_scratch_last_hartid() + 1;

	*out_hmask = 0;
	if (hend <= hbase)
		return SBI_EINVAL;
	if (BITS_PER_LONG < (hend - hbase))
		hend = hbase + BITS_PER_LONG;

	dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
	for (i = hbase; i < hend; i++) {
		hmask = 1UL << (i - hbase);
		if ((dmask & hmask) &&
		    (__sbi_hsm_hart_get_state(i) == SBI_HSM_STATE_STARTED))
			*out_hmask |= hmask;
	}

	return 0;
}
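
/*
 * Transition the calling hart from START_PENDING to STARTED just before
 * it jumps to the next booting stage. Any other current state indicates
 * a state machine violation, so the hart is parked.
 */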
void sbi_hsm_prepare_next_jump(struct sbi_scratch *scratch, u32 hartid)
{
	u32 oldstate;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_START_PENDING,
				  SBI_HSM_STATE_STARTED);
	if (oldstate != SBI_HSM_STATE_START_PENDING)
		sbi_hart_hang();
}
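
/*
 * Park a stopped hart in WFI until some other hart moves it to
 * START_PENDING via sbi_hsm_hart_start() and sends it an IPI.
 */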
static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
	unsigned long saved_mie;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Save MIE CSR */
	saved_mie = csr_read(CSR_MIE);

	/* Set MSIE bit to receive IPI */
	csr_set(CSR_MIE, MIP_MSIP);

	/* Wait until some other hart requests a start via sbi_hsm_hart_start() */
	while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
		wfi();
	}

	/* Restore MIE CSR */
	csr_write(CSR_MIE, saved_mie);

	/* Clear current HART IPI */
	sbi_platform_ipi_clear(plat, hartid);
}
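
/*
 * Initialize the HSM subsystem. The cold-boot hart allocates the
 * per-hart state storage, marks itself START_PENDING and every other
 * hart STOPPED; warm-boot harts simply wait until they are asked to
 * start.
 */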
int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
	u32 i;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	if (cold_boot) {
		hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata),
							    "HART_DATA");
		if (!hart_data_offset)
			return SBI_ENOMEM;

		/* Initialize hart state data for every hart */
		for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
			rscratch = sbi_hartid_to_scratch(i);
			if (!rscratch)
				continue;

			hdata = sbi_scratch_offset_ptr(rscratch,
						       hart_data_offset);
			ATOMIC_INIT(&hdata->state,
				    (i == hartid) ?
				    SBI_HSM_STATE_START_PENDING :
				    SBI_HSM_STATE_STOPPED);
		}
	} else {
		sbi_hsm_hart_wait(scratch, hartid);
	}

	return 0;
}
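
/*
 * Complete a stop request on the calling hart: move STOP_PENDING ->
 * STOPPED, then either let the platform power the hart off or, when
 * hotplug is unsupported, re-enter the warm-boot path to park in WFI.
 */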
void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
	u32 hstate;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);
	void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;

	hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOP_PENDING,
				SBI_HSM_STATE_STOPPED);
	if (hstate != SBI_HSM_STATE_STOP_PENDING)
		goto fail_exit;

	if (sbi_platform_has_hart_hotplug(plat)) {
		sbi_platform_hart_stop(plat);
		/* It should never reach here */
		goto fail_exit;
	}

	/**
	 * As the platform lacks hart hotplug support, jump directly to the
	 * warm-boot path and wait for interrupts there. We do this
	 * preemptively in order to preserve the hart state and reuse the
	 * hotplug code path.
	 */
	jump_warmboot();

fail_exit:
	/* It should never reach here */
	sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
	sbi_hart_hang();
}
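
/*
 * Start a stopped target hart at the given address in the given
 * privilege mode. S-mode software typically reaches this function via
 * the SBI HSM extension; as an informational sketch of that calling
 * convention (not part of this file), the caller issues an ecall with
 * a7 = SBI_EXT_HSM, a6 = SBI_EXT_HSM_HART_START, a0 = hartid,
 * a1 = saddr and a2 = priv.
 */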
int sbi_hsm_hart_start(struct sbi_scratch *scratch,
		       const struct sbi_domain *dom,
		       u32 hartid, ulong saddr, ulong smode, ulong priv)
{
	unsigned long init_count;
	unsigned int hstate;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* For now, we only allow start mode to be S-mode or U-mode. */
	if (smode != PRV_S && smode != PRV_U)
		return SBI_EINVAL;
	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;
	if (dom && !sbi_domain_check_addr(dom, saddr, smode,
					  SBI_DOMAIN_EXECUTE))
		return SBI_EINVAL;

	rscratch = sbi_hartid_to_scratch(hartid);
	if (!rscratch)
		return SBI_EINVAL;
	hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
	hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
				SBI_HSM_STATE_START_PENDING);
	if (hstate == SBI_HSM_STATE_STARTED)
		return SBI_EALREADY;

	/**
	 * If the hart is already transitioning to the started or stopped
	 * state, another start call is considered an invalid request.
	 */
	if (hstate != SBI_HSM_STATE_STOPPED)
		return SBI_EINVAL;

	init_count = sbi_init_count(hartid);
	rscratch->next_arg1 = priv;
	rscratch->next_addr = saddr;
	rscratch->next_mode = smode;

	if (sbi_platform_has_hart_hotplug(plat) ||
	    (sbi_platform_has_hart_secondary_boot(plat) && !init_count)) {
		return sbi_platform_hart_start(plat, hartid,
					       scratch->warmboot_addr);
	} else {
		sbi_platform_ipi_send(plat, hartid);
	}

	return 0;
}
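
/*
 * Stop the calling hart: mark it STOP_PENDING and, if exitnow is set,
 * immediately take the exit path via sbi_exit(). Only a hart in the
 * STARTED state may request a stop.
 */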
int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
	int oldstate;
	u32 hartid = current_hartid();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!sbi_hsm_hart_started(sbi_domain_thishart_ptr(), hartid))
		return SBI_EINVAL;

	oldstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STARTED,
				  SBI_HSM_STATE_STOP_PENDING);
	if (oldstate != SBI_HSM_STATE_STARTED) {
		sbi_printf("%s: ERR: The hart is in an invalid state [%u]\n",
			   __func__, oldstate);
		return SBI_EDENIED;
	}

	if (exitnow)
		sbi_exit(scratch);

	return 0;
}