irq-mvebu-odmi.c

/*
 * Copyright (C) 2016 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define pr_fmt(fmt) "GIC-ODMI: " fmt

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define GICP_ODMIN_SET			0x40
#define GICP_ODMI_INT_NUM_SHIFT		12
#define GICP_ODMIN_GM_EP_R0		0x110
#define GICP_ODMIN_GM_EP_R1		0x114
#define GICP_ODMIN_GM_EA_R0		0x108
#define GICP_ODMIN_GM_EA_R1		0x118

/*
 * We don't support the group events, so we simply have 8 interrupts
 * per frame.
 */
#define NODMIS_SHIFT		3
#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)
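/*
 * Note on the hwirq encoding used below: a domain hwirq packs both the
 * frame and the interrupt within that frame. The frame index lives in
 * the bits above NODMIS_SHIFT, the per-frame interrupt number ("odmin")
 * in the low NODMIS_SHIFT bits, so frame = hwirq >> NODMIS_SHIFT and
 * odmin = hwirq & NODMIS_MASK. For example, hwirq 11 is frame 1, odmin 3.
 */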
struct odmi_data {
	struct resource res;
	void __iomem *base;
	unsigned int spi_base;
};

static struct odmi_data *odmis;
static unsigned long *odmis_bm;
static unsigned int odmis_count;

/* Protects odmis_bm */
static DEFINE_SPINLOCK(odmis_bm_lock);
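/*
 * Compose the MSI message for an ODMI: the doorbell address is the
 * ODMIN_SET register of the frame that owns the interrupt, and the
 * payload carries the per-frame interrupt number shifted into the
 * INT_NUM field. For instance, odmin 3 yields
 * msg->data = 3 << GICP_ODMI_INT_NUM_SHIFT = 0x3000.
 */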
static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct odmi_data *odmi;
	phys_addr_t addr;
	unsigned int odmin;

	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
		return;

	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
	odmin = d->hwirq & NODMIS_MASK;

	addr = odmi->res.start + GICP_ODMIN_SET;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
}
static struct irq_chip odmi_irq_chip = {
	.name			= "ODMI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= odmi_compose_msi_msg,
};
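/*
 * Allocate one ODMI: pick a free bit from the global bitmap, then
 * allocate the matching SPI in the parent GIC domain. spi_base appears
 * to be expressed as an absolute GIC interrupt ID, so 32 (the SGI/PPI
 * range) is subtracted to get the SPI number the GIC binding expects in
 * the fwspec. ODMIs are always configured as edge-rising.
 */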
static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct odmi_data *odmi = NULL;
	struct irq_fwspec fwspec;
	struct irq_data *d;
	unsigned int hwirq, odmin;
	int ret;

	spin_lock(&odmis_bm_lock);
	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
		spin_unlock(&odmis_bm_lock);
		return -ENOSPC;
	}

	__set_bit(hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);

	odmi = &odmis[hwirq >> NODMIS_SHIFT];
	odmin = hwirq & NODMIS_MASK;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = GIC_SPI;
	fwspec.param[1] = odmi->spi_base - 32 + odmin;
	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret) {
		pr_err("Cannot allocate parent IRQ\n");
		spin_lock(&odmis_bm_lock);
		__clear_bit(odmin, odmis_bm);
		spin_unlock(&odmis_bm_lock);
		return ret;
	}

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &odmi_irq_chip, NULL);

	return 0;
}
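/*
 * Free path: release the parent SPI first, then return the ODMI to the
 * bitmap so it can be handed out again.
 */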
static void odmi_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
		pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	/* Actually free the MSI */
	spin_lock(&odmis_bm_lock);
	__clear_bit(d->hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);
}

static const struct irq_domain_ops odmi_domain_ops = {
	.alloc	= odmi_irq_domain_alloc,
	.free	= odmi_irq_domain_free,
};
static struct irq_chip odmi_msi_irq_chip = {
	.name	= "ODMI",
};

static struct msi_domain_ops odmi_msi_ops = {
};

static struct msi_domain_info odmi_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &odmi_msi_ops,
	.chip	= &odmi_msi_irq_chip,
};
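/*
 * Probe an ODMI controller node: read the number of frames from
 * "marvell,odmi-frames", map each frame's registers and record its
 * entry in "marvell,spi-base", then build the stacked domains: a linear
 * inner domain on top of the GIC (one hwirq per ODMI) and a platform
 * MSI domain on top of that for the actual MSI users.
 */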
static int __init mvebu_odmi_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_domain *inner_domain, *plat_domain;
	int ret, i;

	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
		return -EINVAL;

	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
	if (!odmis)
		return -ENOMEM;

	odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
			   sizeof(long), GFP_KERNEL);
	if (!odmis_bm) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		ret = of_address_to_resource(node, i, &odmi->res);
		if (ret)
			goto err_unmap;

		odmi->base = of_io_request_and_map(node, i, "odmi");
		if (IS_ERR(odmi->base)) {
			ret = PTR_ERR(odmi->base);
			goto err_unmap;
		}

		if (of_property_read_u32_index(node, "marvell,spi-base",
					       i, &odmi->spi_base)) {
			ret = -EINVAL;
			goto err_unmap;
		}
	}

	inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						odmis_count * NODMIS_PER_FRAME,
						&odmi_domain_ops, NULL);
	if (!inner_domain) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	inner_domain->parent = irq_find_host(parent);

	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
						     &odmi_msi_domain_info,
						     inner_domain);
	if (!plat_domain) {
		ret = -ENOMEM;
		goto err_remove_inner;
	}

	return 0;

err_remove_inner:
	irq_domain_remove(inner_domain);
err_unmap:
	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		if (odmi->base && !IS_ERR(odmi->base))
			iounmap(odmis[i].base);
	}
	kfree(odmis_bm);
err_alloc:
	kfree(odmis);
	return ret;
}
IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
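/*
 * Illustrative only: a device-tree node of roughly this shape is what
 * the init code above expects. The unit address, reg ranges and
 * spi-base values below are made up for the example, not taken from
 * any real SoC; only the compatible string and the two "marvell,*"
 * properties come from this driver.
 *
 *	odmi: odmi@300000 {
 *		compatible = "marvell,odmi-controller";
 *		interrupt-controller;
 *		msi-controller;
 *		marvell,odmi-frames = <2>;
 *		reg = <0x300000 0x4000>, <0x304000 0x4000>;
 *		marvell,spi-base = <128>, <136>;
 *	};
 */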