irq-hip04.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hisilicon HiP04 INTC
 *
 * Copyright (C) 2002-2014 ARM Limited.
 * Copyright (c) 2013-2014 Hisilicon Ltd.
 * Copyright (c) 2013-2014 Linaro Ltd.
 *
 * Interrupt architecture for the HIP04 INTC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which forwards interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"

#define HIP04_MAX_IRQS		510

struct hip04_irq_data {
	void __iomem *dist_base;
	void __iomem *cpu_base;
	struct irq_domain *domain;
	unsigned int nr_irqs;
};
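
/*
 * Serialises read-modify-write sequences on the distributor registers,
 * which are shared by all CPU interfaces.
 */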
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_HIP04_CPU_IF 16
static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;

static struct hip04_irq_data hip04_data __read_mostly;
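
/*
 * The helpers below fetch the hip04_irq_data instance stashed as chip
 * data by hip04_irq_domain_map(), rather than using the global directly.
 */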
static inline void __iomem *hip04_dist_base(struct irq_data *d)
{
	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
	return hip04_data->dist_base;
}

static inline void __iomem *hip04_cpu_base(struct irq_data *d)
{
	struct hip04_irq_data *hip04_data = irq_data_get_irq_chip_data(d);
	return hip04_data->cpu_base;
}

static inline unsigned int hip04_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void hip04_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void hip04_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void hip04_eoi_irq(struct irq_data *d)
{
	writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
}

static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = hip04_dist_base(d);
	unsigned int irq = hip04_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && irq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16);
		ret = 0;
	}

	raw_spin_unlock(&irq_controller_lock);

	return ret;
}

#ifdef CONFIG_SMP
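/*
 * HIP04 widens the GIC's 8-bit CPU target fields to 16 bits, so each
 * 32-bit GIC_DIST_TARGET word holds the targets for two interrupts;
 * hence the (irq % 2) * 16 shift and the word-aligned register offset.
 */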
static int hip04_irq_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val,
				  bool force)
{
	void __iomem *reg;
	unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
	mask = 0xffff << shift;
	bit = hip04_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
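
/*
 * Raise an SGI on the CPUs in @mask by writing their physical CPU
 * interface bits (bits [23:8] here, given the 16-bit map) together
 * with the SGI number to GIC_DIST_SOFTINT.
 */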
static void hip04_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= hip04_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 8 | d->hwirq, hip04_data.dist_base + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif
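
/*
 * Top-level interrupt entry point: acknowledge the highest-priority
 * pending interrupt via GIC_CPU_INTACK and dispatch it through the IRQ
 * domain. IDs above HIP04_MAX_IRQS are not valid interrupt sources and
 * cause INTACK to be read again.
 */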
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	void __iomem *cpu_base = hip04_data.cpu_base;

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (irqnr <= HIP04_MAX_IRQS)
			handle_domain_irq(hip04_data.domain, irqnr, regs);
	} while (irqnr > HIP04_MAX_IRQS);
}

static struct irq_chip hip04_irq_chip = {
	.name			= "HIP04 INTC",
	.irq_mask		= hip04_mask_irq,
	.irq_unmask		= hip04_unmask_irq,
	.irq_eoi		= hip04_eoi_irq,
	.irq_set_type		= hip04_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= hip04_irq_set_affinity,
	.ipi_send_mask		= hip04_ipi_send_mask,
#endif
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
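
/*
 * Discover which CPU interface bit the GIC assigned to this CPU by
 * scanning the banked GIC_DIST_TARGET registers for the per-CPU IRQs
 * (0-31), which read back as the interface mask of the accessing CPU.
 */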
static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
{
	void __iomem *base = intc->dist_base;
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 2) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
		mask |= mask >> 16;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
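
/*
 * One-time distributor setup, run from hip04_of_init() on the boot CPU:
 * route all SPIs to the boot CPU, apply the common GIC configuration,
 * then enable the distributor.
 */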
static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
{
	unsigned int i;
	u32 cpumask;
	unsigned int nr_irqs = intc->nr_irqs;
	void __iomem *base = intc->dist_base;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = hip04_get_cpumask(intc);
	cpumask |= cpumask << 16;
	for (i = 32; i < nr_irqs; i += 2)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));

	gic_dist_config(base, nr_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
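
/*
 * Per-CPU interface setup, run on each CPU as it comes online via the
 * CPUHP_AP_IRQ_HIP04_STARTING hotplug callback below.
 */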
static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
{
	void __iomem *dist_base = intc->dist_base;
	void __iomem *base = intc->cpu_base;
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_HIP04_CPU_IF);
	cpu_mask = hip04_get_cpumask(intc);
	hip04_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		if (i != cpu)
			hip04_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
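
/*
 * Select handler and flow per hwirq range: 0-15 are SGIs used as IPIs,
 * 16-31 are per-CPU PPIs, and everything from 32 up is a shared SPI.
 */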
static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 16) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_fasteoi_ipi);
	} else if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
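
/*
 * Translate a devicetree interrupt specifier into a hwirq number: a
 * single cell below 16 names an SGI directly, while the usual GIC-style
 * three-cell form is <type number flags>, with type 0 denoting an SPI.
 */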
static int hip04_irq_domain_xlate(struct irq_domain *d,
				  struct device_node *controller,
				  const u32 *intspec, unsigned int intsize,
				  unsigned long *out_hwirq,
				  unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;
	if (intsize == 1 && intspec[0] < 16) {
		*out_hwirq = intspec[0];
		*out_type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int hip04_irq_starting_cpu(unsigned int cpu)
{
	hip04_irq_cpu_init(&hip04_data);
	return 0;
}

static const struct irq_domain_ops hip04_irq_domain_ops = {
	.map	= hip04_irq_domain_map,
	.xlate	= hip04_irq_domain_xlate,
};
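
/*
 * Probe entry point, invoked via IRQCHIP_DECLARE() when a
 * "hisilicon,hip04-intc" node is found: map the distributor and CPU
 * interface, size the controller, create the IRQ domain and install
 * the handler, then bring up the distributor and the boot CPU.
 */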
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
	int nr_irqs, irq_base, i;

	if (WARN_ON(!node))
		return -ENODEV;

	hip04_data.dist_base = of_iomap(node, 0);
	WARN(!hip04_data.dist_base, "failed to map hip04 intc dist registers\n");

	hip04_data.cpu_base = of_iomap(node, 1);
	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		hip04_cpu_map[i] = 0xffff;

	/*
	 * Find out how many interrupts are supported.
	 * The HIP04 INTC only supports up to 510 interrupt sources.
	 */
	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
	nr_irqs = (nr_irqs + 1) * 32;
	if (nr_irqs > HIP04_MAX_IRQS)
		nr_irqs = HIP04_MAX_IRQS;

	hip04_data.nr_irqs = nr_irqs;

	irq_base = irq_alloc_descs(-1, 0, nr_irqs, numa_node_id());
	if (irq_base < 0) {
		pr_err("failed to allocate IRQ numbers\n");
		return -EINVAL;
	}

	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
						  0,
						  &hip04_irq_domain_ops,
						  &hip04_data);
	if (WARN_ON(!hip04_data.domain))
		return -EINVAL;

#ifdef CONFIG_SMP
	set_smp_ipi_range(irq_base, 16);
#endif
	set_handle_irq(hip04_handle_irq);

	hip04_irq_dist_init(&hip04_data);

	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "irqchip/hip04:starting",
			  hip04_irq_starting_cpu, NULL);

	return 0;
}

IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);