imsic.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_io.h>
#include <sbi/riscv_encoding.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_irqchip.h>
#include <sbi/sbi_error.h>
#include <sbi_utils/irqchip/imsic.h>

#define IMSIC_MMIO_PAGE_LE		0x00
#define IMSIC_MMIO_PAGE_BE		0x04

#define IMSIC_MIN_ID			63
#define IMSIC_MAX_ID			2047

#define IMSIC_EIDELIVERY		0x70

#define IMSIC_EITHRESHOLD		0x72

#define IMSIC_TOPEI			0x76
#define IMSIC_TOPEI_ID_SHIFT		16
#define IMSIC_TOPEI_ID_MASK		0x7ff
#define IMSIC_TOPEI_PRIO_MASK		0x7ff

#define IMSIC_EIP0			0x80
#define IMSIC_EIP63			0xbf
#define IMSIC_EIPx_BITS			32

#define IMSIC_EIE0			0xc0
#define IMSIC_EIE63			0xff
#define IMSIC_EIEx_BITS			32

#define IMSIC_DISABLE_EIDELIVERY	0
#define IMSIC_ENABLE_EIDELIVERY		1
#define IMSIC_DISABLE_EITHRESHOLD	1
#define IMSIC_ENABLE_EITHRESHOLD	0

#define IMSIC_IPI_ID			1

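/*
 * Helpers for accessing the IMSIC interrupt-file registers through the
 * indirect M-mode CSR interface: the register number is written to
 * miselect and the value is then read/written/set/cleared via mireg.
 */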
#define imsic_csr_write(__c, __v)	\
do {					\
	csr_write(CSR_MISELECT, __c);	\
	csr_write(CSR_MIREG, __v);	\
} while (0)

#define imsic_csr_read(__c)		\
({					\
	unsigned long __v;		\
	csr_write(CSR_MISELECT, __c);	\
	__v = csr_read(CSR_MIREG);	\
	__v;				\
})

#define imsic_csr_set(__c, __v)		\
do {					\
	csr_write(CSR_MISELECT, __c);	\
	csr_set(CSR_MIREG, __v);	\
} while (0)

#define imsic_csr_clear(__c, __v)	\
do {					\
	csr_write(CSR_MISELECT, __c);	\
	csr_clear(CSR_MIREG, __v);	\
} while (0)

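/* Per-hart mapping from hartid to IMSIC data and interrupt file number */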
static struct imsic_data *imsic_hartid2data[SBI_HARTMASK_MAX_BITS];
static int imsic_hartid2file[SBI_HARTMASK_MAX_BITS];

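/*
 * Associate a hartid with its M-mode IMSIC data and the interrupt file
 * number within that IMSIC so that later lookups (e.g. by imsic_ipi_send())
 * can find the target interrupt file.
 */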
int imsic_map_hartid_to_data(u32 hartid, struct imsic_data *imsic, int file)
{
	if (!imsic || !imsic->targets_mmode ||
	    (SBI_HARTMASK_MAX_BITS <= hartid))
		return SBI_EINVAL;

	imsic_hartid2data[hartid] = imsic;
	imsic_hartid2file[hartid] = file;
	return 0;
}

struct imsic_data *imsic_get_data(u32 hartid)
{
	if (SBI_HARTMASK_MAX_BITS <= hartid)
		return NULL;

	return imsic_hartid2data[hartid];
}

int imsic_get_target_file(u32 hartid)
{
	if ((SBI_HARTMASK_MAX_BITS <= hartid) ||
	    !imsic_hartid2data[hartid])
		return SBI_ENOENT;

	return imsic_hartid2file[hartid];
}

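/*
 * M-mode external interrupt handler: repeatedly claim the highest-priority
 * pending interrupt identity via mtopei and dispatch it. Only the IPI
 * identity is handled here; anything else is reported as unhandled.
 */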
static int imsic_external_irqfn(struct sbi_trap_regs *regs)
{
	ulong mirq;

	while ((mirq = csr_swap(CSR_MTOPEI, 0))) {
		mirq = (mirq >> IMSIC_TOPEI_ID_SHIFT);

		switch (mirq) {
		case IMSIC_IPI_ID:
			sbi_ipi_process();
			break;
		default:
			sbi_printf("%s: unhandled IRQ%d\n",
				   __func__, (u32)mirq);
			break;
		}
	}

	return 0;
}

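/*
 * Raise an IPI by writing IMSIC_IPI_ID to the little-endian MMIO page of
 * the target hart's M-mode interrupt file. The file offset is computed
 * from the per-hart file number and resolved against the regset list.
 */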
static void imsic_ipi_send(u32 target_hart)
{
	unsigned long reloff;
	struct imsic_regs *regs;
	struct imsic_data *data = imsic_hartid2data[target_hart];
	int file = imsic_hartid2file[target_hart];

	if (!data || !data->targets_mmode)
		return;

	regs = &data->regs[0];
	reloff = file * (1UL << data->guest_index_bits) * IMSIC_MMIO_PAGE_SZ;
	while (regs->size && (regs->size <= reloff)) {
		reloff -= regs->size;
		regs++;
	}

	if (regs->size && (reloff < regs->size))
		writel(IMSIC_IPI_ID,
		       (void *)(regs->addr + reloff + IMSIC_MMIO_PAGE_LE));
}

static struct sbi_ipi_device imsic_ipi_device = {
	.name		= "aia-imsic",
	.ipi_send	= imsic_ipi_send
};

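/*
 * Set or clear a range of interrupt identities in the local interrupt
 * file: pend selects the eip (pending) or eie (enable) register array,
 * and val selects whether the corresponding bits are set or cleared.
 */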
static void imsic_local_eix_update(unsigned long base_id,
				   unsigned long num_id, bool pend, bool val)
{
	unsigned long i, isel, ireg;
	unsigned long id = base_id, last_id = base_id + num_id;

	while (id < last_id) {
		isel = id / __riscv_xlen;
		isel *= __riscv_xlen / IMSIC_EIPx_BITS;
		isel += (pend) ? IMSIC_EIP0 : IMSIC_EIE0;

		ireg = 0;
		for (i = id & (__riscv_xlen - 1);
		     (id < last_id) && (i < __riscv_xlen); i++) {
			ireg |= BIT(i);
			id++;
		}

		if (val)
			imsic_csr_set(isel, ireg);
		else
			imsic_csr_clear(isel, ireg);
	}
}

void imsic_local_irqchip_init(void)
{
	/*
	 * This function is expected to be called from:
	 * 1) nascent_init() platform callback which is called
	 *    very early on each HART in the boot-up path and
	 *    the HSM resume path.
	 * 2) irqchip_init() platform callback which is called
	 *    in the boot-up path.
	 */

	/* Setup threshold to allow all enabled interrupts */
	imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);

	/* Enable interrupt delivery */
	imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);

	/* Enable IPI */
	imsic_local_eix_update(IMSIC_IPI_ID, 1, false, true);
}

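/*
 * Per-hart (warm) initialization: disable all interrupt identities,
 * clear any stale IPI pending bit, then run the local initialization
 * which enables delivery and the IPI identity.
 */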
int imsic_warm_irqchip_init(void)
{
	struct imsic_data *imsic = imsic_hartid2data[current_hartid()];

	/* Sanity checks */
	if (!imsic || !imsic->targets_mmode)
		return SBI_EINVAL;

	/* Disable all interrupts */
	imsic_local_eix_update(1, imsic->num_ids, false, false);

	/* Clear IPI pending */
	imsic_local_eix_update(IMSIC_IPI_ID, 1, true, false);

	/* Local IMSIC initialization */
	imsic_local_irqchip_init();

	return 0;
}

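/*
 * Validate IMSIC geometry: the guest/hart/group index bits and the group
 * index shift must fit within an address, num_ids must be in range and
 * one less than a multiple of 64, every regset size must be a multiple of
 * the per-hart stride, and all regsets must share the same base address
 * once the index fields are masked out.
 */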
int imsic_data_check(struct imsic_data *imsic)
{
	u32 i, tmp;
	unsigned long base_addr, addr, mask;

	/* Sanity checks */
	if (!imsic ||
	    (imsic->num_ids < IMSIC_MIN_ID) ||
	    (IMSIC_MAX_ID < imsic->num_ids))
		return SBI_EINVAL;

	tmp = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
	if (tmp < imsic->guest_index_bits)
		return SBI_EINVAL;

	tmp = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
	      imsic->guest_index_bits;
	if (tmp < imsic->hart_index_bits)
		return SBI_EINVAL;

	tmp = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
	      imsic->guest_index_bits - imsic->hart_index_bits;
	if (tmp < imsic->group_index_bits)
		return SBI_EINVAL;

	tmp = IMSIC_MMIO_PAGE_SHIFT + imsic->guest_index_bits +
	      imsic->hart_index_bits;
	if (imsic->group_index_shift < tmp)
		return SBI_EINVAL;
	tmp = imsic->group_index_bits + imsic->group_index_shift - 1;
	if (tmp >= BITS_PER_LONG)
		return SBI_EINVAL;

	/*
	 * Number of interrupt identities should be 1 less than
	 * a multiple of 64
	 */
	if ((imsic->num_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID)
		return SBI_EINVAL;

	/* We should have at least one regset */
	if (!imsic->regs[0].size)
		return SBI_EINVAL;

	/* Match pattern of each regset */
	base_addr = imsic->regs[0].addr;
	base_addr &= ~((1UL << (imsic->guest_index_bits +
				imsic->hart_index_bits +
				IMSIC_MMIO_PAGE_SHIFT)) - 1);
	base_addr &= ~(((1UL << imsic->group_index_bits) - 1) <<
			imsic->group_index_shift);
	for (i = 0; i < IMSIC_MAX_REGS && imsic->regs[i].size; i++) {
		mask = (1UL << imsic->guest_index_bits) * IMSIC_MMIO_PAGE_SZ;
		mask -= 1UL;
		if (imsic->regs[i].size & mask)
			return SBI_EINVAL;

		addr = imsic->regs[i].addr;
		addr &= ~((1UL << (imsic->guest_index_bits +
				   imsic->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT)) - 1);
		addr &= ~(((1UL << imsic->group_index_bits) - 1) <<
			   imsic->group_index_shift);
		if (base_addr != addr)
			return SBI_EINVAL;
	}

	return 0;
}

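/*
 * One-time (cold) initialization done on the boot hart: validate the
 * IMSIC description, register the external interrupt handler, reserve
 * the IMSIC MMIO regions in the root domain, and register the IPI device.
 */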
int imsic_cold_irqchip_init(struct imsic_data *imsic)
{
	int i, rc;
	struct sbi_domain_memregion reg;

	/* Sanity checks */
	rc = imsic_data_check(imsic);
	if (rc)
		return rc;

	/* We only initialize M-mode IMSIC */
	if (!imsic->targets_mmode)
		return SBI_EINVAL;

	/* Setup external interrupt function for IMSIC */
	sbi_irqchip_set_irqfn(imsic_external_irqfn);

	/* Add IMSIC regions to the root domain */
	for (i = 0; i < IMSIC_MAX_REGS && imsic->regs[i].size; i++) {
		sbi_domain_memregion_init(imsic->regs[i].addr,
					  imsic->regs[i].size,
					  (SBI_DOMAIN_MEMREGION_MMIO |
					   SBI_DOMAIN_MEMREGION_M_READABLE |
					   SBI_DOMAIN_MEMREGION_M_WRITABLE),
					  &reg);
		rc = sbi_domain_root_add_memregion(&reg);
		if (rc)
			return rc;
	}

	/* Register IPI device */
	sbi_ipi_set_device(&imsic_ipi_device);

	return 0;
}