/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra <atish.patra@wdc.com>
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_barrier.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_fifo.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_tlb.h>
#include <plat/string.h>
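
/*
 * Offsets into each hart's scratch space, allocated once at cold boot in
 * sbi_tlb_fifo_init(): one for the per-hart TLB request fifo header and
 * one for its backing entry storage.
 */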
static unsigned long ipi_tlb_fifo_off;
static unsigned long ipi_tlb_fifo_mem_off;
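
/*
 * Compare a queued flush request (curr) against an incoming one (next):
 * if next's range fully covers curr, widen curr in place (SBI_FIFO_UPDATED);
 * if next's range lies entirely inside curr, drop next (SBI_FIFO_SKIP);
 * otherwise leave the fifo untouched (SBI_FIFO_UNCHANGED).
 */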
static inline int __sbi_tlb_fifo_range_check(struct sbi_tlb_info *curr,
                                             struct sbi_tlb_info *next)
{
        unsigned long curr_end;
        unsigned long next_end;
        int ret = SBI_FIFO_UNCHANGED;

        if (!curr || !next)
                return ret;

        next_end = next->start + next->size;
        curr_end = curr->start + curr->size;
        if (next->start <= curr->start && next_end > curr_end) {
                curr->start = next->start;
                curr->size = next->size;
                ret = SBI_FIFO_UPDATED;
        } else if (next->start >= curr->start && next_end <= curr_end) {
                ret = SBI_FIFO_SKIP;
        }

        return ret;
}

/**
 * Callback to decide if an in-place fifo update is required or if the
 * next entry can be skipped. Here are the different cases that are being
 * handled.
 *
 * Case1:
 *    if the next flush request range lies within one of the existing
 *    entries, skip the next entry.
 * Case2:
 *    if the flush request range in the current fifo entry lies within
 *    the next flush request, update the current entry.
 * Case3:
 *    if a complete VMA flush is requested, then all entries can be
 *    deleted and the new request can be enqueued. This is not done for
 *    the ASID case as that would mean iterating over the fifo again to
 *    figure out which entries belong to that ASID.
 */
static int sbi_tlb_fifo_update_cb(void *in, void *data)
{
        struct sbi_tlb_info *curr;
        struct sbi_tlb_info *next;
        int ret = SBI_FIFO_UNCHANGED;

        if (!in || !data)
                return ret;

        curr = (struct sbi_tlb_info *)data;
        next = (struct sbi_tlb_info *)in;

        if (next->type == SBI_TLB_FLUSH_VMA_ASID &&
            curr->type == SBI_TLB_FLUSH_VMA_ASID) {
                if (next->asid == curr->asid)
                        ret = __sbi_tlb_fifo_range_check(curr, next);
        } else if (next->type == SBI_TLB_FLUSH_VMA &&
                   curr->type == SBI_TLB_FLUSH_VMA) {
                if (next->size == SBI_TLB_FLUSH_ALL)
                        ret = SBI_FIFO_RESET;
                else
                        ret = __sbi_tlb_fifo_range_check(curr, next);
        }

        return ret;
}
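
/*
 * Queue a remote TLB flush request into the target hart's fifo. A return
 * value of 1 means the request was absorbed by an entry already queued
 * (for which an IPI should already be pending), so the caller need not
 * raise a new IPI; 0 means a fresh entry was enqueued. A caller might
 * build a request as in this illustrative sketch (num_pages and
 * remote_scratch are placeholder names):
 *
 *    struct sbi_tlb_info tinfo = {
 *            .start = start & ~(PAGE_SIZE - 1),
 *            .size  = num_pages * PAGE_SIZE,
 *            .type  = SBI_TLB_FLUSH_VMA,
 *    };
 *    sbi_tlb_fifo_update(remote_scratch, event, &tinfo);
 */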
int sbi_tlb_fifo_update(struct sbi_scratch *scratch, u32 event, void *data)
{
        int ret;
        struct sbi_fifo *ipi_tlb_fifo;
        struct sbi_tlb_info *tinfo = data;

        ipi_tlb_fifo = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);

        /*
         * If the address range to flush is too big then simply
         * upgrade it to a flush-all because we can only flush
         * 4KB at a time.
         */
        if (tinfo->size >= SBI_TLB_FLUSH_MAX_SIZE) {
                tinfo->start = 0;
                tinfo->size = SBI_TLB_FLUSH_ALL;
        }

        ret = sbi_fifo_inplace_update(ipi_tlb_fifo, data,
                                      sbi_tlb_fifo_update_cb);
        if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED)
                return 1;

        while (sbi_fifo_enqueue(ipi_tlb_fifo, data) < 0) {
                /*
                 * For now, busy loop until there is space in the fifo.
                 * There may be a case where the target hart is also
                 * enqueuing into the source hart's fifo; both harts
                 * would then busy loop, leading to a deadlock.
                 * TODO: Introduce a wait/wakeup event mechanism to
                 * handle this properly.
                 */
                __asm__ __volatile__("nop");
                __asm__ __volatile__("nop");
        }

        return 0;
}
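
/* Flush the entire local TLB; a bare sfence.vma covers all addresses and ASIDs. */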
static void sbi_tlb_flush_all(void)
{
        __asm__ __volatile__("sfence.vma" : : : "memory");
}
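
/*
 * Flush a virtual address range on the local hart. Since sfence.vma with
 * an address argument only covers one page, walk the range in PAGE_SIZE
 * steps; a zero start and size, or SBI_TLB_FLUSH_ALL, degrades to a full
 * flush.
 */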
static void sbi_tlb_fifo_sfence_vma(struct sbi_tlb_info *tinfo)
{
        unsigned long start = tinfo->start;
        unsigned long size = tinfo->size;
        unsigned long i;

        if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
                sbi_tlb_flush_all();
                return;
        }

        for (i = 0; i < size; i += PAGE_SIZE) {
                __asm__ __volatile__("sfence.vma %0"
                                     :
                                     : "r"(start + i)
                                     : "memory");
        }
}
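
/*
 * Flush a virtual address range for a single ASID on the local hart,
 * again page by page. With rs2 holding the ASID, sfence.vma only
 * invalidates translations tagged with that ASID.
 */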
static void sbi_tlb_fifo_sfence_vma_asid(struct sbi_tlb_info *tinfo)
{
        unsigned long start = tinfo->start;
        unsigned long size = tinfo->size;
        unsigned long asid = tinfo->asid;
        unsigned long i;

        if (start == 0 && size == 0) {
                sbi_tlb_flush_all();
                return;
        }

        /* Flush entire MM context for a given ASID */
        if (size == SBI_TLB_FLUSH_ALL) {
                __asm__ __volatile__("sfence.vma x0, %0"
                                     :
                                     : "r"(asid)
                                     : "memory");
                return;
        }

        for (i = 0; i < size; i += PAGE_SIZE) {
                __asm__ __volatile__("sfence.vma %0, %1"
                                     :
                                     : "r"(start + i), "r"(asid)
                                     : "memory");
        }
}
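
/*
 * Drain this hart's TLB request fifo, typically from the IPI handler on
 * the receiving hart: dequeue each request and dispatch it to the
 * matching local flush routine.
 */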
void sbi_tlb_fifo_process(struct sbi_scratch *scratch, u32 event)
{
        struct sbi_tlb_info tinfo;
        struct sbi_fifo *ipi_tlb_fifo =
                        sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);

        while (!sbi_fifo_dequeue(ipi_tlb_fifo, &tinfo)) {
                if (tinfo.type == SBI_TLB_FLUSH_VMA)
                        sbi_tlb_fifo_sfence_vma(&tinfo);
                else if (tinfo.type == SBI_TLB_FLUSH_VMA_ASID)
                        sbi_tlb_fifo_sfence_vma_asid(&tinfo);
                memset(&tinfo, 0, SBI_TLB_INFO_SIZE);
        }
}
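
/*
 * Set up the per-hart TLB fifo. The cold boot hart allocates the scratch
 * offsets shared by all harts; every hart (cold or warm) then initializes
 * its own fifo header and backing storage at those offsets.
 */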
int sbi_tlb_fifo_init(struct sbi_scratch *scratch, bool cold_boot)
{
        void *ipi_tlb_mem;
        struct sbi_fifo *ipi_tlb_q;

        if (cold_boot) {
                ipi_tlb_fifo_off = sbi_scratch_alloc_offset(sizeof(*ipi_tlb_q),
                                                            "IPI_TLB_FIFO");
                if (!ipi_tlb_fifo_off)
                        return SBI_ENOMEM;
                ipi_tlb_fifo_mem_off = sbi_scratch_alloc_offset(
                                SBI_TLB_FIFO_NUM_ENTRIES * SBI_TLB_INFO_SIZE,
                                "IPI_TLB_FIFO_MEM");
                if (!ipi_tlb_fifo_mem_off) {
                        sbi_scratch_free_offset(ipi_tlb_fifo_off);
                        return SBI_ENOMEM;
                }
        } else {
                if (!ipi_tlb_fifo_off || !ipi_tlb_fifo_mem_off)
                        return SBI_ENOMEM;
        }

        ipi_tlb_q = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_off);
        ipi_tlb_mem = sbi_scratch_offset_ptr(scratch, ipi_tlb_fifo_mem_off);

        sbi_fifo_init(ipi_tlb_q, ipi_tlb_mem,
                      SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);

        return 0;
}