/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 *   Nick Kossifidis <mick@ics.forth.gr>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_unpriv.h>
#include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_timer.h>
#include <plat/string.h>
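
/*
 * Compare the flush range in @next against the one recorded in @curr:
 * if @next fully covers @curr, widen @curr in place and report
 * SBI_FIFO_UPDATED; if @next lies entirely within @curr, report
 * SBI_FIFO_SKIP; otherwise leave the entry alone and report
 * SBI_FIFO_UNCHANGED.
 */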
static inline int __sbi_tlb_fifo_range_check(struct sbi_tlb_info *curr,
					     struct sbi_tlb_info *next)
{
	unsigned long curr_end;
	unsigned long next_end;
	int ret = SBI_FIFO_UNCHANGED;

	if (!curr || !next)
		return ret;

	next_end = next->start + next->size;
	curr_end = curr->start + curr->size;
	if (next->start <= curr->start && next_end > curr_end) {
		curr->start = next->start;
		curr->size = next->size;
		ret = SBI_FIFO_UPDATED;
	} else if (next->start >= curr->start && next_end <= curr_end) {
		ret = SBI_FIFO_SKIP;
	}

	return ret;
}

/**
 * Callback to decide whether an in-place fifo update is required, or
 * whether the next entry can be skipped. The following cases are handled:
 *
 * Case1:
 *	If the next flush request range lies within one of the existing
 *	entries, skip the next entry.
 * Case2:
 *	If the flush request range in the current fifo entry lies within
 *	the next flush request, update the current entry.
 * Case3:
 *	If a complete VMA flush is requested, all entries can be deleted
 *	and the new request enqueued. This is not done for the ASID case,
 *	as that would require iterating over the fifo again to figure out
 *	which entries belong to that ASID.
 */
int sbi_tlb_fifo_update_cb(void *in, void *data)
{
	struct sbi_tlb_info *curr;
	struct sbi_tlb_info *next;
	int ret = SBI_FIFO_UNCHANGED;

	if (!in || !data)
		return ret;

	curr = (struct sbi_tlb_info *)data;
	next = (struct sbi_tlb_info *)in;
	if (next->type == SBI_TLB_FLUSH_VMA_ASID &&
	    curr->type == SBI_TLB_FLUSH_VMA_ASID) {
		if (next->asid == curr->asid)
			ret = __sbi_tlb_fifo_range_check(curr, next);
	} else if (next->type == SBI_TLB_FLUSH_VMA &&
		   curr->type == SBI_TLB_FLUSH_VMA) {
		if (next->size == SBI_TLB_FLUSH_ALL)
			ret = SBI_FIFO_RESET;
		else
			ret = __sbi_tlb_fifo_range_check(curr, next);
	}

	return ret;
}
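
/*
 * Send a single IPI event to @hartid. For SFENCE.VMA events, the
 * tlb_info payload is first merged into (or enqueued on) the remote
 * hart's tlb fifo before the event bit is set and the platform IPI
 * is triggered.
 */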
static int sbi_ipi_send(struct sbi_scratch *scratch, u32 hartid, u32 event,
			void *data)
{
	struct sbi_scratch *remote_scratch = NULL;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_fifo *ipi_tlb_fifo;
	int ret = SBI_FIFO_UNCHANGED;

	if (sbi_platform_hart_disabled(plat, hartid))
		return -1;

	/*
	 * Set the IPI type on the remote hart's scratch area and
	 * trigger the interrupt.
	 */
	remote_scratch = sbi_hart_id_to_scratch(scratch, hartid);
	if (event == SBI_IPI_EVENT_SFENCE_VMA ||
	    event == SBI_IPI_EVENT_SFENCE_VMA_ASID) {
		ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(remote_scratch);
		ret = sbi_fifo_inplace_update(ipi_tlb_fifo, data,
					      sbi_tlb_fifo_update_cb);
		/*
		 * The request was absorbed by an existing fifo entry,
		 * so no new enqueue or IPI is needed.
		 */
		if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED)
			goto done;

		while (sbi_fifo_enqueue(ipi_tlb_fifo, data) < 0) {
			/*
			 * For now, busy loop until there is space in the
			 * fifo. There may be cases where the target hart
			 * is also enqueueing into the source hart's fifo;
			 * both harts may then busy loop, leading to a
			 * deadlock.
			 * TODO: Introduce a wait/wakeup event mechanism
			 * to handle this properly.
			 */
			__asm__ __volatile__("nop");
			__asm__ __volatile__("nop");
		}
	}
	atomic_raw_set_bit(event, &sbi_ipi_data_ptr(remote_scratch)->ipi_type);
	mb();
	sbi_platform_ipi_send(plat, hartid);
	if (event != SBI_IPI_EVENT_SOFT)
		sbi_platform_ipi_sync(plat, hartid);

done:
	return 0;
}
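
/*
 * Send an IPI event to all available harts selected by @pmask, or to
 * every available hart when @pmask is NULL. The current hart, if
 * selected, is signalled last.
 */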
int sbi_ipi_send_many(struct sbi_scratch *scratch, ulong *pmask, u32 event,
		      void *data)
{
	ulong i, m;
	ulong mask = sbi_hart_available_mask();
	u32 hartid = sbi_current_hartid();

	if (pmask)
		mask &= load_ulong(pmask);

	/* Send IPIs to every other hart on the set */
	for (i = 0, m = mask; m; i++, m >>= 1)
		if ((m & 1UL) && (i != hartid))
			sbi_ipi_send(scratch, i, event, data);

	/*
	 * If the current hart is on the set, send an IPI
	 * to it as well.
	 */
	if (mask & (1UL << hartid))
		sbi_ipi_send(scratch, hartid, event, data);

	return 0;
}
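
/* Clear the pending supervisor software interrupt on the current hart. */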
void sbi_ipi_clear_smode(struct sbi_scratch *scratch)
{
	csr_clear(CSR_MIP, MIP_SSIP);
}
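
/* Flush all TLB entries on the local hart. */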
static void sbi_ipi_tlb_flush_all(void)
{
	__asm__ __volatile__("sfence.vma" : : : "memory");
}
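
/*
 * Flush a virtual address range on the local hart, one page at a time.
 * A zero range or SBI_TLB_FLUSH_ALL degenerates to a full TLB flush.
 */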
static void sbi_ipi_sfence_vma(struct sbi_tlb_info *tinfo)
{
	unsigned long start = tinfo->start;
	unsigned long size = tinfo->size;
	unsigned long i;

	if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
		sbi_ipi_tlb_flush_all();
		return;
	}

	for (i = 0; i < size; i += PAGE_SIZE) {
		__asm__ __volatile__("sfence.vma %0"
				     :
				     : "r"(start + i)
				     : "memory");
	}
}
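
/*
 * Flush a virtual address range for a given ASID on the local hart.
 * A zero range flushes everything; SBI_TLB_FLUSH_ALL flushes the
 * entire MM context of that ASID.
 */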
static void sbi_ipi_sfence_vma_asid(struct sbi_tlb_info *tinfo)
{
	unsigned long start = tinfo->start;
	unsigned long size = tinfo->size;
	unsigned long asid = tinfo->asid;
	unsigned long i;

	if (start == 0 && size == 0) {
		sbi_ipi_tlb_flush_all();
		return;
	}

	/* Flush entire MM context for a given ASID */
	if (size == SBI_TLB_FLUSH_ALL) {
		__asm__ __volatile__("sfence.vma x0, %0"
				     :
				     : "r"(asid)
				     : "memory");
		return;
	}

	for (i = 0; i < size; i += PAGE_SIZE) {
		__asm__ __volatile__("sfence.vma %0, %1"
				     :
				     : "r"(start + i), "r"(asid)
				     : "memory");
	}
}
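
/*
 * IPI handler for the current hart: clear the platform IPI, then
 * service every pending event bit in the scratch area, draining the
 * tlb fifo for SFENCE.VMA events.
 */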
void sbi_ipi_process(struct sbi_scratch *scratch)
{
	volatile unsigned long ipi_type;
	struct sbi_tlb_info tinfo;
	unsigned int ipi_event;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);
	struct sbi_fifo *ipi_tlb_fifo = sbi_tlb_fifo_head_ptr(scratch);
	u32 hartid = sbi_current_hartid();

	sbi_platform_ipi_clear(plat, hartid);

	do {
		ipi_type = sbi_ipi_data_ptr(scratch)->ipi_type;
		rmb();
		ipi_event = __ffs(ipi_type);
		switch (ipi_event) {
		case SBI_IPI_EVENT_SOFT:
			csr_set(CSR_MIP, MIP_SSIP);
			break;
		case SBI_IPI_EVENT_FENCE_I:
			__asm__ __volatile__("fence.i");
			break;
		case SBI_IPI_EVENT_SFENCE_VMA:
		case SBI_IPI_EVENT_SFENCE_VMA_ASID:
			while (!sbi_fifo_dequeue(ipi_tlb_fifo, &tinfo)) {
				if (tinfo.type == SBI_TLB_FLUSH_VMA)
					sbi_ipi_sfence_vma(&tinfo);
				else if (tinfo.type == SBI_TLB_FLUSH_VMA_ASID)
					sbi_ipi_sfence_vma_asid(&tinfo);
				memset(&tinfo, 0, SBI_TLB_INFO_SIZE);
			}
			break;
		case SBI_IPI_EVENT_HALT:
			sbi_hart_hang();
			break;
		}
		ipi_type = atomic_raw_clear_bit(
			ipi_event, &sbi_ipi_data_ptr(scratch)->ipi_type);
	} while (ipi_type > 0);
}
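
/*
 * Initialize IPI support for the given hart: reset the pending event
 * bits, set up the tlb_info fifo, enable M-mode software interrupts,
 * and let the platform do its own IPI setup.
 */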
int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
{
	struct sbi_fifo *tlb_info_q = sbi_tlb_fifo_head_ptr(scratch);

	sbi_ipi_data_ptr(scratch)->ipi_type = 0x00;
	sbi_fifo_init(tlb_info_q, sbi_tlb_fifo_mem_ptr(scratch),
		      SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);

	/* Enable software interrupts */
	csr_set(CSR_MIE, MIP_MSIP);

	return sbi_platform_ipi_init(sbi_platform_ptr(scratch), cold_boot);
}