sbi_ipi.c
/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 *   Nick Kossifidis <mick@ics.forth.gr>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_platform.h>
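/*
 * Per-hart IPI state kept in the scratch area. Each set bit in ipi_type
 * marks one pending IPI event for the owning hart.
 */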
struct sbi_ipi_data {
	unsigned long ipi_type;
};

static unsigned long ipi_data_off;
static const struct sbi_ipi_device *ipi_dev = NULL;
static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX];
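/*
 * Send a single IPI event to one remote hart: run the event's optional
 * update() hook, set the event bit in the remote hart's scratch area,
 * trigger the interrupt through the registered IPI device, and finally
 * run the event's optional sync() hook on the sending hart.
 */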
static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartid,
			u32 event, void *data)
{
	int ret;
	struct sbi_scratch *remote_scratch = NULL;
	struct sbi_ipi_data *ipi_data;
	const struct sbi_ipi_event_ops *ipi_ops;

	if ((SBI_IPI_EVENT_MAX <= event) ||
	    !ipi_ops_array[event])
		return SBI_EINVAL;
	ipi_ops = ipi_ops_array[event];

	remote_scratch = sbi_hartid_to_scratch(remote_hartid);
	if (!remote_scratch)
		return SBI_EINVAL;

	ipi_data = sbi_scratch_offset_ptr(remote_scratch, ipi_data_off);

	if (ipi_ops->update) {
		ret = ipi_ops->update(scratch, remote_scratch,
				      remote_hartid, data);
		if (ret < 0)
			return ret;
	}

	/*
	 * Set IPI type on remote hart's scratch area and
	 * trigger the interrupt
	 */
	atomic_raw_set_bit(event, &ipi_data->ipi_type);
	smp_wmb();

	if (ipi_dev && ipi_dev->ipi_send)
		ipi_dev->ipi_send(remote_hartid);

	if (ipi_ops->sync)
		ipi_ops->sync(scratch);

	return 0;
}
/**
 * As this function only handles scalar values of the hart mask, hmask
 * must cover all online harts if the intention is to send IPIs to all
 * the harts. If hmask is zero, no IPIs will be sent.
 */
int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
{
	int rc;
	ulong i, m;
	struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();

	if (hbase != -1UL) {
		rc = sbi_hsm_hart_interruptible_mask(dom, hbase, &m);
		if (rc)
			return rc;
		m &= hmask;

		/* Send IPIs */
		for (i = hbase; m; i++, m >>= 1) {
			if (m & 1UL)
				sbi_ipi_send(scratch, i, event, data);
		}
	} else {
		hbase = 0;
		while (!sbi_hsm_hart_interruptible_mask(dom, hbase, &m)) {
			/* Send IPIs */
			for (i = hbase; m; i++, m >>= 1) {
				if (m & 1UL)
					sbi_ipi_send(scratch, i, event, data);
			}
			hbase += BITS_PER_LONG;
		}
	}

	return 0;
}
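/*
 * Register an IPI event and return its dynamically allocated event ID
 * (or a negative error code). The ops structure must provide at least
 * a process() callback; update() and sync() are optional.
 */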
int sbi_ipi_event_create(const struct sbi_ipi_event_ops *ops)
{
	int i, ret = SBI_ENOSPC;

	if (!ops || !ops->process)
		return SBI_EINVAL;

	for (i = 0; i < SBI_IPI_EVENT_MAX; i++) {
		if (!ipi_ops_array[i]) {
			ret = i;
			ipi_ops_array[i] = ops;
			break;
		}
	}

	return ret;
}
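/*
 * Usage sketch (not part of this file): a client would typically
 * register an event once at cold boot and then raise it on any set of
 * harts, mirroring the built-in ipi_smode_ops pattern below. All names
 * in this sketch are hypothetical.
 *
 *	static void my_event_process(struct sbi_scratch *scratch)
 *	{
 *		// runs on each receiving hart
 *	}
 *
 *	static struct sbi_ipi_event_ops my_event_ops = {
 *		.name = "MY_EVENT",
 *		.process = my_event_process,
 *	};
 *
 *	static u32 my_event = SBI_IPI_EVENT_MAX;
 *
 *	// at init: my_event = sbi_ipi_event_create(&my_event_ops);
 *	// to raise it on harts 0-3: sbi_ipi_send_many(0xf, 0, my_event, NULL);
 */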
void sbi_ipi_event_destroy(u32 event)
{
	if (SBI_IPI_EVENT_MAX <= event)
		return;

	ipi_ops_array[event] = NULL;
}
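/*
 * Built-in S-mode IPI event: the receiving hart injects a supervisor
 * software interrupt by setting MIP.SSIP.
 */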
static void sbi_ipi_process_smode(struct sbi_scratch *scratch)
{
	csr_set(CSR_MIP, MIP_SSIP);
}

static struct sbi_ipi_event_ops ipi_smode_ops = {
	.name = "IPI_SMODE",
	.process = sbi_ipi_process_smode,
};

static u32 ipi_smode_event = SBI_IPI_EVENT_MAX;

int sbi_ipi_send_smode(ulong hmask, ulong hbase)
{
	return sbi_ipi_send_many(hmask, hbase, ipi_smode_event, NULL);
}

void sbi_ipi_clear_smode(void)
{
	csr_clear(CSR_MIP, MIP_SSIP);
}
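/*
 * Built-in halt IPI event: the receiving hart stops itself through the
 * hart state management (HSM) framework.
 */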
static void sbi_ipi_process_halt(struct sbi_scratch *scratch)
{
	sbi_hsm_hart_stop(scratch, TRUE);
}

static struct sbi_ipi_event_ops ipi_halt_ops = {
	.name = "IPI_HALT",
	.process = sbi_ipi_process_halt,
};

static u32 ipi_halt_event = SBI_IPI_EVENT_MAX;

int sbi_ipi_send_halt(ulong hmask, ulong hbase)
{
	return sbi_ipi_send_many(hmask, hbase, ipi_halt_event, NULL);
}
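/*
 * IPI receive path, called when a machine software interrupt is taken:
 * acknowledge the device IPI, atomically fetch-and-clear this hart's
 * pending event bits, and run the process() callback of every event
 * that was set.
 */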
void sbi_ipi_process(void)
{
	unsigned long ipi_type;
	unsigned int ipi_event;
	const struct sbi_ipi_event_ops *ipi_ops;
	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
	struct sbi_ipi_data *ipi_data =
			sbi_scratch_offset_ptr(scratch, ipi_data_off);
	u32 hartid = current_hartid();

	if (ipi_dev && ipi_dev->ipi_clear)
		ipi_dev->ipi_clear(hartid);

	ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);
	ipi_event = 0;
	while (ipi_type) {
		if (!(ipi_type & 1UL))
			goto skip;

		ipi_ops = ipi_ops_array[ipi_event];
		if (ipi_ops && ipi_ops->process)
			ipi_ops->process(scratch);

skip:
		ipi_type = ipi_type >> 1;
		ipi_event++;
	}
}
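/*
 * Trigger the device-level IPI for a target hart without setting any
 * event bit in its scratch area.
 */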
void sbi_ipi_raw_send(u32 target_hart)
{
	if (ipi_dev && ipi_dev->ipi_send)
		ipi_dev->ipi_send(target_hart);
}
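/*
 * The IPI device can be registered only once (typically by the platform
 * during cold boot); later calls to sbi_ipi_set_device() are ignored.
 */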
const struct sbi_ipi_device *sbi_ipi_get_device(void)
{
	return ipi_dev;
}

void sbi_ipi_set_device(const struct sbi_ipi_device *dev)
{
	if (!dev || ipi_dev)
		return;

	ipi_dev = dev;
}
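/*
 * Initialize IPI support: on cold boot, allocate the per-hart scratch
 * slot and register the built-in S-mode and halt events; on warm boot,
 * verify that cold boot already did so. In both cases clear this hart's
 * pending events, initialize the platform IPI hardware, and enable the
 * machine software interrupt.
 */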
int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int ret;
	struct sbi_ipi_data *ipi_data;

	if (cold_boot) {
		ipi_data_off = sbi_scratch_alloc_offset(sizeof(*ipi_data),
							"IPI_DATA");
		if (!ipi_data_off)
			return SBI_ENOMEM;

		ret = sbi_ipi_event_create(&ipi_smode_ops);
		if (ret < 0)
			return ret;
		ipi_smode_event = ret;

		ret = sbi_ipi_event_create(&ipi_halt_ops);
		if (ret < 0)
			return ret;
		ipi_halt_event = ret;
	} else {
		if (!ipi_data_off)
			return SBI_ENOMEM;
		if (SBI_IPI_EVENT_MAX <= ipi_smode_event ||
		    SBI_IPI_EVENT_MAX <= ipi_halt_event)
			return SBI_ENOSPC;
	}

	ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
	ipi_data->ipi_type = 0x00;

	/*
	 * Initialize platform IPI support. This will also clear any
	 * pending IPIs for current/calling HART.
	 */
	ret = sbi_platform_ipi_init(sbi_platform_ptr(scratch), cold_boot);
	if (ret)
		return ret;

	/* Enable software interrupts */
	csr_set(CSR_MIE, MIP_MSIP);

	return 0;
}
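/*
 * Tear down IPI support on this hart: mask the machine software
 * interrupt, drain any pending IPIs, and let the platform clean up.
 */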
void sbi_ipi_exit(struct sbi_scratch *scratch)
{
	/* Disable software interrupts */
	csr_clear(CSR_MIE, MIP_MSIP);

	/* Process pending IPIs */
	sbi_ipi_process();

	/* Platform exit */
	sbi_platform_ipi_exit(sbi_platform_ptr(scratch));
}