irq-mvebu-sei.c

// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "mvebu-sei: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
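
/*
 * The SEI (System Error Interrupt) block aggregates up to 64 interrupt
 * sources behind a single parent SPI: a range of wired interrupts coming
 * from the AP itself and a range of MSI-like interrupts triggered by the
 * CPs through a doorbell register (see the per-SoC caps at the bottom of
 * this file).
 */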
/* Cause register */
#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
/* Mask register */
#define GICP_SEMR(idx) (0x20 + ((idx) * 0x4))
#define GICP_SET_SEI_OFFSET 0x30

#define SEI_IRQ_COUNT_PER_REG 32
#define SEI_IRQ_REG_COUNT 2
#define SEI_IRQ_COUNT (SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
#define SEI_IRQ_REG_IDX(irq_id) ((irq_id) / SEI_IRQ_COUNT_PER_REG)
#define SEI_IRQ_REG_BIT(irq_id) ((irq_id) % SEI_IRQ_COUNT_PER_REG)

struct mvebu_sei_interrupt_range {
        u32 first;
        u32 size;
};

struct mvebu_sei_caps {
        struct mvebu_sei_interrupt_range ap_range;
        struct mvebu_sei_interrupt_range cp_range;
};

struct mvebu_sei {
        struct device *dev;
        void __iomem *base;
        struct resource *res;
        struct irq_domain *sei_domain;
        struct irq_domain *ap_domain;
        struct irq_domain *cp_domain;
        const struct mvebu_sei_caps *caps;

        /* Lock on MSI allocations/releases */
        struct mutex cp_msi_lock;
        DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);

        /* Lock on IRQ masking register */
        raw_spinlock_t mask_lock;
};
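
/*
 * Pending bits in GICP_SECR are acked by writing the corresponding bit back
 * (i.e. the cause registers behave as write-one-to-clear), while setting a
 * bit in GICP_SEMR masks that source.
 */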
static void mvebu_sei_ack_irq(struct irq_data *d)
{
        struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
        u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);

        writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
                       sei->base + GICP_SECR(reg_idx));
}

static void mvebu_sei_mask_irq(struct irq_data *d)
{
        struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
        u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
        unsigned long flags;

        /* 1 disables the interrupt */
        raw_spin_lock_irqsave(&sei->mask_lock, flags);
        reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
        reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
        writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
        raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
}

static void mvebu_sei_unmask_irq(struct irq_data *d)
{
        struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
        u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
        unsigned long flags;

        /* 0 enables the interrupt */
        raw_spin_lock_irqsave(&sei->mask_lock, flags);
        reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
        reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
        writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
        raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
}

static int mvebu_sei_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val,
                                  bool force)
{
        return -EINVAL;
}

static int mvebu_sei_set_irqchip_state(struct irq_data *d,
                                       enum irqchip_irq_state which,
                                       bool state)
{
        /* We can only clear the pending state by acking the interrupt */
        if (which != IRQCHIP_STATE_PENDING || state)
                return -EINVAL;

        mvebu_sei_ack_irq(d);

        return 0;
}

static struct irq_chip mvebu_sei_irq_chip = {
        .name = "SEI",
        .irq_ack = mvebu_sei_ack_irq,
        .irq_mask = mvebu_sei_mask_irq,
        .irq_unmask = mvebu_sei_unmask_irq,
        .irq_set_affinity = mvebu_sei_set_affinity,
        .irq_set_irqchip_state = mvebu_sei_set_irqchip_state,
};

static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
{
        if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
                return -EINVAL;

        return 0;
}

static struct irq_chip mvebu_sei_ap_irq_chip = {
        .name = "AP SEI",
        .irq_ack = irq_chip_ack_parent,
        .irq_mask = irq_chip_mask_parent,
        .irq_unmask = irq_chip_unmask_parent,
        .irq_set_affinity = irq_chip_set_affinity_parent,
        .irq_set_type = mvebu_sei_ap_set_type,
};
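
/*
 * A CP raises its interrupt by writing the interrupt number into the
 * GICP_SET_SEI doorbell register: the composed MSI message therefore
 * carries the doorbell address and the hwirq translated into the
 * top-level SEI numbering (offset by cp_range.first).
 */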
static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
                                         struct msi_msg *msg)
{
        struct mvebu_sei *sei = data->chip_data;
        phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;

        msg->data = data->hwirq + sei->caps->cp_range.first;
        msg->address_lo = lower_32_bits(set);
        msg->address_hi = upper_32_bits(set);
}

static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
{
        if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        return 0;
}

static struct irq_chip mvebu_sei_cp_irq_chip = {
        .name = "CP SEI",
        .irq_ack = irq_chip_ack_parent,
        .irq_mask = irq_chip_mask_parent,
        .irq_unmask = irq_chip_unmask_parent,
        .irq_set_affinity = irq_chip_set_affinity_parent,
        .irq_set_type = mvebu_sei_cp_set_type,
        .irq_compose_msi_msg = mvebu_sei_cp_compose_msi_msg,
};

static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                  unsigned int nr_irqs, void *arg)
{
        struct mvebu_sei *sei = domain->host_data;
        struct irq_fwspec *fwspec = arg;

        /* Not much to do, just setup the irqdata */
        irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
                                      &mvebu_sei_irq_chip, sei);

        return 0;
}

static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
                                  unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static const struct irq_domain_ops mvebu_sei_domain_ops = {
        .alloc = mvebu_sei_domain_alloc,
        .free = mvebu_sei_domain_free,
};
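
/*
 * The "wired" AP child domain maps its hwirqs 1:1 onto the parent SEI
 * domain, simply offset by ap_range.first.
 */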
static int mvebu_sei_ap_translate(struct irq_domain *domain,
                                  struct irq_fwspec *fwspec,
                                  unsigned long *hwirq,
                                  unsigned int *type)
{
        *hwirq = fwspec->param[0];
        *type = IRQ_TYPE_LEVEL_HIGH;

        return 0;
}

static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs, void *arg)
{
        struct mvebu_sei *sei = domain->host_data;
        struct irq_fwspec fwspec;
        unsigned long hwirq;
        unsigned int type;
        int err;

        mvebu_sei_ap_translate(domain, arg, &hwirq, &type);

        fwspec.fwnode = domain->parent->fwnode;
        fwspec.param_count = 1;
        fwspec.param[0] = hwirq + sei->caps->ap_range.first;

        err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (err)
                return err;

        irq_domain_set_info(domain, virq, hwirq,
                            &mvebu_sei_ap_irq_chip, sei,
                            handle_level_irq, NULL, NULL);
        irq_set_probe(virq);

        return 0;
}

static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
        .translate = mvebu_sei_ap_translate,
        .alloc = mvebu_sei_ap_alloc,
        .free = irq_domain_free_irqs_parent,
};

static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
{
        mutex_lock(&sei->cp_msi_lock);
        clear_bit(hwirq, sei->cp_msi_bitmap);
        mutex_unlock(&sei->cp_msi_lock);
}
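
/*
 * CP hwirqs are allocated dynamically, MSI-style: a free bit is claimed
 * from cp_msi_bitmap and mapped onto the parent SEI domain at
 * cp_range.first + hwirq. Only single-interrupt allocations are supported.
 */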
static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs,
                                     void *args)
{
        struct mvebu_sei *sei = domain->host_data;
        struct irq_fwspec fwspec;
        unsigned long hwirq;
        int ret;

        /* The software only supports single allocations for now */
        if (nr_irqs != 1)
                return -ENOTSUPP;

        mutex_lock(&sei->cp_msi_lock);
        hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
                                    sei->caps->cp_range.size);
        if (hwirq < sei->caps->cp_range.size)
                set_bit(hwirq, sei->cp_msi_bitmap);
        mutex_unlock(&sei->cp_msi_lock);

        if (hwirq == sei->caps->cp_range.size)
                return -ENOSPC;

        fwspec.fwnode = domain->parent->fwnode;
        fwspec.param_count = 1;
        fwspec.param[0] = hwirq + sei->caps->cp_range.first;

        ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
        if (ret)
                goto free_irq;

        irq_domain_set_info(domain, virq, hwirq,
                            &mvebu_sei_cp_irq_chip, sei,
                            handle_edge_irq, NULL, NULL);

        return 0;

free_irq:
        mvebu_sei_cp_release_irq(sei, hwirq);
        return ret;
}

static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
                                     unsigned int virq, unsigned int nr_irqs)
{
        struct mvebu_sei *sei = domain->host_data;
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);

        if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
                dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
                return;
        }

        mvebu_sei_cp_release_irq(sei, d->hwirq);
        irq_domain_free_irqs_parent(domain, virq, 1);
}

static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
        .alloc = mvebu_sei_cp_domain_alloc,
        .free = mvebu_sei_cp_domain_free,
};

static struct irq_chip mvebu_sei_msi_irq_chip = {
        .name = "SEI pMSI",
        .irq_ack = irq_chip_ack_parent,
        .irq_set_type = irq_chip_set_type_parent,
};

static struct msi_domain_ops mvebu_sei_msi_ops = {
};

static struct msi_domain_info mvebu_sei_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
        .ops = &mvebu_sei_msi_ops,
        .chip = &mvebu_sei_msi_irq_chip,
};
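
/*
 * All SEI sources are funnelled through the single parent SPI: walk both
 * cause registers and dispatch every pending bit to its mapped virq.
 */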
static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
{
        struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        u32 idx;

        chained_irq_enter(chip, desc);

        for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
                unsigned long irqmap;
                int bit;

                irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
                for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
                        unsigned long hwirq;
                        unsigned int virq;

                        hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
                        virq = irq_find_mapping(sei->sei_domain, hwirq);
                        if (likely(virq)) {
                                generic_handle_irq(virq);
                                continue;
                        }

                        dev_warn(sei->dev,
                                 "Spurious IRQ detected (hwirq %lu)\n", hwirq);
                }
        }

        chained_irq_exit(chip, desc);
}

static void mvebu_sei_reset(struct mvebu_sei *sei)
{
        u32 reg_idx;

        /* Clear IRQ cause registers, mask all interrupts */
        for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
                writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
                writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
        }
}
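
/*
 * The probe builds the domain hierarchy: a root SEI domain (DOMAIN_BUS_NEXUS)
 * covering all ap_range.size + cp_range.size lines, with two child domains
 * stacked on top of it, a DOMAIN_BUS_WIRED one for the AP interrupts and a
 * DOMAIN_BUS_GENERIC_MSI one, itself wrapped by a platform MSI domain, for
 * the CP interrupts.
 */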
static int mvebu_sei_probe(struct platform_device *pdev)
{
        struct device_node *node = pdev->dev.of_node;
        struct irq_domain *plat_domain;
        struct mvebu_sei *sei;
        u32 parent_irq;
        int ret;

        sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
        if (!sei)
                return -ENOMEM;

        sei->dev = &pdev->dev;

        mutex_init(&sei->cp_msi_lock);
        raw_spin_lock_init(&sei->mask_lock);

        sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sei->base = devm_ioremap_resource(sei->dev, sei->res);
        if (IS_ERR(sei->base)) {
                dev_err(sei->dev, "Failed to remap SEI resource\n");
                return PTR_ERR(sei->base);
        }

        /* Retrieve the SEI capabilities with the interrupt ranges */
        sei->caps = of_device_get_match_data(&pdev->dev);
        if (!sei->caps) {
                dev_err(sei->dev,
                        "Could not retrieve controller capabilities\n");
                return -EINVAL;
        }

        /*
         * Reserve the single (top-level) parent SPI IRQ from which all the
         * interrupts handled by this driver will be signaled.
         */
        parent_irq = irq_of_parse_and_map(node, 0);
        if (parent_irq <= 0) {
                dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
                return -ENODEV;
        }

        /* Create the root SEI domain */
        sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
                                                   (sei->caps->ap_range.size +
                                                    sei->caps->cp_range.size),
                                                   &mvebu_sei_domain_ops,
                                                   sei);
        if (!sei->sei_domain) {
                dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
                ret = -ENOMEM;
                goto dispose_irq;
        }

        irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);

        /* Create the 'wired' domain */
        sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
                                                     sei->caps->ap_range.size,
                                                     of_node_to_fwnode(node),
                                                     &mvebu_sei_ap_domain_ops,
                                                     sei);
        if (!sei->ap_domain) {
                dev_err(sei->dev, "Failed to create AP IRQ domain\n");
                ret = -ENOMEM;
                goto remove_sei_domain;
        }

        irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);

        /* Create the 'MSI' domain */
        sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
                                                     sei->caps->cp_range.size,
                                                     of_node_to_fwnode(node),
                                                     &mvebu_sei_cp_domain_ops,
                                                     sei);
        if (!sei->cp_domain) {
                pr_err("Failed to create CPs IRQ domain\n");
                ret = -ENOMEM;
                goto remove_ap_domain;
        }

        irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);

        plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
                                                     &mvebu_sei_msi_domain_info,
                                                     sei->cp_domain);
        if (!plat_domain) {
                pr_err("Failed to create CPs MSI domain\n");
                ret = -ENOMEM;
                goto remove_cp_domain;
        }

        mvebu_sei_reset(sei);

        irq_set_chained_handler_and_data(parent_irq,
                                         mvebu_sei_handle_cascade_irq,
                                         sei);

        return 0;

remove_cp_domain:
        irq_domain_remove(sei->cp_domain);
remove_ap_domain:
        irq_domain_remove(sei->ap_domain);
remove_sei_domain:
        irq_domain_remove(sei->sei_domain);
dispose_irq:
        irq_dispose_mapping(parent_irq);

        return ret;
}
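
/* AP806: SEI lines 0-20 are wired AP interrupts, lines 21-63 are CP MSIs */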
static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
        .ap_range = {
                .first = 0,
                .size = 21,
        },
        .cp_range = {
                .first = 21,
                .size = 43,
        },
};

static const struct of_device_id mvebu_sei_of_match[] = {
        {
                .compatible = "marvell,ap806-sei",
                .data = &mvebu_sei_ap806_caps,
        },
        {},
};

static struct platform_driver mvebu_sei_driver = {
        .probe = mvebu_sei_probe,
        .driver = {
                .name = "mvebu-sei",
                .of_match_table = mvebu_sei_of_match,
        },
};
builtin_platform_driver(mvebu_sei_driver);