irq-csky-mpintc.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/reg_ops.h>
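
/*
 * Register layout as mapped in csky_mpintc_init(): one global INTCG block
 * followed by one INTCL_SIZE-sized local (per-cpu) INTCL block per cpu,
 * with INTCL_base = INTCG_base + INTCG_SIZE.
 */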
static struct irq_domain *root_domain;
static void __iomem *INTCG_base;
static void __iomem *INTCL_base;

#define IPI_IRQ         15
#define INTC_IRQS       256
#define COMM_IRQ_BASE   32

#define INTCG_SIZE      0x8000
#define INTCL_SIZE      0x1000

#define INTCG_ICTLR     0x0
#define INTCG_CICFGR    0x100
#define INTCG_CIDSTR    0x1000

#define INTCL_PICTLR    0x0
#define INTCL_CFGR      0x14
#define INTCL_SIGR      0x60
#define INTCL_RDYIR     0x6c
#define INTCL_SENR      0xa0
#define INTCL_CENR      0xa4
#define INTCL_CACR      0xb4

static DEFINE_PER_CPU(void __iomem *, intcl_reg);

static unsigned long *__trigger;
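
/*
 * Each interrupt's trigger mode occupies two bits in the trigger
 * configuration registers. The macros below compute the word offset, bit
 * offset, value and clear-mask for a given hwirq, and select the per-cpu
 * CFGR for private irqs (hwirq < COMM_IRQ_BASE) or the global CICFGR for
 * shared ones.
 */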
#define IRQ_OFFSET(irq)         (((irq) < COMM_IRQ_BASE) ? (irq) : ((irq) - COMM_IRQ_BASE))

#define TRIG_BYTE_OFFSET(i)     ((((i) * 2) / 32) * 4)
#define TRIG_BIT_OFFSET(i)      (((i) * 2) % 32)

#define TRIG_VAL(trigger, irq)  ((trigger) << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
#define TRIG_VAL_MSK(irq)       (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))

#define TRIG_BASE(irq) \
        (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + (((irq) < COMM_IRQ_BASE) ? \
        (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))

static DEFINE_SPINLOCK(setup_lock);

static void setup_trigger(unsigned long irq, unsigned long trigger)
{
        unsigned int tmp;

        spin_lock(&setup_lock);

        /* setup trigger */
        tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);

        writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));

        spin_unlock(&setup_lock);
}
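
/*
 * Low-level entry: read the pending hwirq number from this cpu's RDYIR
 * register and dispatch it through the root irq domain.
 */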
static void csky_mpintc_handler(struct pt_regs *regs)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        handle_domain_irq(root_domain,
                          readl_relaxed(reg_base + INTCL_RDYIR), regs);
}

static void csky_mpintc_enable(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        setup_trigger(d->hwirq, __trigger[d->hwirq]);

        writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}

static void csky_mpintc_disable(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        writel_relaxed(d->hwirq, reg_base + INTCL_CENR);
}

static void csky_mpintc_eoi(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
}
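
/*
 * Trigger encodings cached in __trigger[] and programmed via
 * setup_trigger() when the irq is enabled:
 *   0 - level high, 1 - level low, 2 - edge rising, 3 - edge falling.
 */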
static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
{
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_LEVEL_HIGH:
                __trigger[d->hwirq] = 0;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                __trigger[d->hwirq] = 1;
                break;
        case IRQ_TYPE_EDGE_RISING:
                __trigger[d->hwirq] = 2;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                __trigger[d->hwirq] = 3;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

#ifdef CONFIG_SMP
static int csky_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val,
                                 bool force)
{
        unsigned int cpu;
        unsigned int offset = 4 * (d->hwirq - COMM_IRQ_BASE);

        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
        else
                cpu = cpumask_first(mask_val);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* Record the effective affinity while cpu is still a cpu number. */
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        /*
         * The csky,mpintc supports automatic delivery, but an external irq
         * can only be delivered to a single cpu or to all cpus; it cannot
         * be delivered to an arbitrary subset described by a cpumask. So
         * automatic delivery is used only when mask_val equals
         * cpu_present_mask; otherwise the irq is routed to the single cpu
         * selected above (marked by setting bit 31 of the destination
         * register value).
         */
        if (cpumask_equal(mask_val, cpu_present_mask))
                cpu = 0;
        else
                cpu |= BIT(31);

        writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);

        return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip csky_irq_chip = {
        .name           = "C-SKY SMP Intc",
        .irq_eoi        = csky_mpintc_eoi,
        .irq_enable     = csky_mpintc_enable,
        .irq_disable    = csky_mpintc_disable,
        .irq_set_type   = csky_mpintc_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = csky_irq_set_affinity,
#endif
};
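
/*
 * hwirqs below COMM_IRQ_BASE are per-cpu (private) interrupts and use the
 * percpu flow handler; the remaining shared interrupts use the fasteoi
 * flow and are acked through csky_mpintc_eoi().
 */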
static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        if (hwirq < COMM_IRQ_BASE) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &csky_irq_chip,
                                         handle_percpu_irq);
        } else {
                irq_set_chip_and_handler(irq, &csky_irq_chip,
                                         handle_fasteoi_irq);
        }

        return 0;
}

static int csky_irq_domain_xlate_cells(struct irq_domain *d,
                struct device_node *ctrlr, const u32 *intspec,
                unsigned int intsize, unsigned long *out_hwirq,
                unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;

        *out_hwirq = intspec[0];
        if (intsize > 1)
                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        else
                *out_type = IRQ_TYPE_LEVEL_HIGH;

        return 0;
}

static const struct irq_domain_ops csky_irqdomain_ops = {
        .map    = csky_irqdomain_map,
        .xlate  = csky_irq_domain_xlate_cells,
};
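
/*
 * IPIs are sent by writing the destination cpu mask and the reserved
 * IPI_IRQ number into this cpu's SIGR register; only the first word of
 * the cpumask is used, matching the 8-bit CPUMASK field noted below.
 */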
#ifdef CONFIG_SMP
static void csky_mpintc_send_ipi(const struct cpumask *mask)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        /*
         * INTCL_SIGR[3:0]  INTID
         * INTCL_SIGR[15:8] CPUMASK
         */
        writel_relaxed((*cpumask_bits(mask)) << 8 | IPI_IRQ,
                       reg_base + INTCL_SIGR);
}
#endif

/*
 * C-SKY multi processor interrupt controller.
 *
 * Probe: map the INTCG and per-cpu INTCL register windows at the physical
 * base read from cr<31, 14>, enable the global and each present cpu's
 * local controller, register the linear irq domain, install the low-level
 * entry handler and, on SMP, the IPI mapping.
 */
static int __init
csky_mpintc_init(struct device_node *node, struct device_node *parent)
{
        int ret;
        unsigned int cpu, nr_irq;
#ifdef CONFIG_SMP
        unsigned int ipi_irq;
#endif

        if (parent)
                return 0;

        ret = of_property_read_u32(node, "csky,num-irqs", &nr_irq);
        if (ret < 0)
                nr_irq = INTC_IRQS;

        __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
        if (__trigger == NULL)
                return -ENXIO;

        if (INTCG_base == NULL) {
                INTCG_base = ioremap(mfcr("cr<31, 14>"),
                                     INTCL_SIZE * nr_cpu_ids + INTCG_SIZE);
                if (INTCG_base == NULL)
                        return -EIO;

                INTCL_base = INTCG_base + INTCG_SIZE;

                writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
        }

        root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
                                            NULL);
        if (!root_domain)
                return -ENXIO;

        /* for every cpu */
        for_each_present_cpu(cpu) {
                per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);

                writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
        }

        set_handle_irq(&csky_mpintc_handler);

#ifdef CONFIG_SMP
        ipi_irq = irq_create_mapping(root_domain, IPI_IRQ);
        if (!ipi_irq)
                return -EIO;

        set_send_ipi(&csky_mpintc_send_ipi, ipi_irq);
#endif

        return 0;
}

IRQCHIP_DECLARE(csky_mpintc, "csky,mpintc", csky_mpintc_init);