irq-bcm7038-l1.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7038 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Author: Kevin Cernekee
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
#ifdef CONFIG_ARM
#include <asm/smp_plat.h>
#endif

#define IRQS_PER_WORD		32
#define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
#define MAX_WORDS		8
struct bcm7038_l1_cpu;

struct bcm7038_l1_chip {
	raw_spinlock_t		lock;
	unsigned int		n_words;
	struct irq_domain	*domain;
	struct bcm7038_l1_cpu	*cpus[NR_CPUS];
#ifdef CONFIG_PM_SLEEP
	struct list_head	list;
	u32			wake_mask[MAX_WORDS];
#endif
	u32			irq_fwd_mask[MAX_WORDS];
	u8			affinity[MAX_WORDS * IRQS_PER_WORD];
};

struct bcm7038_l1_cpu {
	void __iomem		*map_base;
	u32			mask_cache[];
};
/*
 * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another:
 *
 * 7038:
 *   0x1000_1400: W0_STATUS
 *   0x1000_1404: W1_STATUS
 *   0x1000_1408: W0_MASK_STATUS
 *   0x1000_140c: W1_MASK_STATUS
 *   0x1000_1410: W0_MASK_SET
 *   0x1000_1414: W1_MASK_SET
 *   0x1000_1418: W0_MASK_CLEAR
 *   0x1000_141c: W1_MASK_CLEAR
 *
 * 7445:
 *   0xf03e_1500: W0_STATUS
 *   0xf03e_1504: W1_STATUS
 *   0xf03e_1508: W2_STATUS
 *   0xf03e_150c: W3_STATUS
 *   0xf03e_1510: W4_STATUS
 *   0xf03e_1514: W0_MASK_STATUS
 *   0xf03e_1518: W1_MASK_STATUS
 *   [...]
 */
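
/*
 * Worked example (illustrative only, not part of the upstream driver):
 * with the 7445 layout above there are n_words = 5 status words, so the
 * helpers below compute e.g.
 *   reg_mask_status(intc, 0) = (1 * 5 + 0) * sizeof(u32) = 0x14,
 * matching W0_MASK_STATUS (0xf03e_1514 - 0xf03e_1500), and
 *   reg_mask_set(intc, 2) = (2 * 5 + 2) * sizeof(u32) = 0x30.
 */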
static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
				      unsigned int word)
{
	return (0 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
					   unsigned int word)
{
	return (1 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (2 * intc->n_words + word) * sizeof(u32);
}

static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc,
					unsigned int word)
{
	return (3 * intc->n_words + word) * sizeof(u32);
}
static inline u32 l1_readl(void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return ioread32be(reg);
	else
		return readl(reg);
}

static inline void l1_writel(u32 val, void __iomem *reg)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		iowrite32be(val, reg);
	else
		writel(val, reg);
}
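
/*
 * Note (added for clarity): the helpers above select big-endian MMIO
 * accessors (ioread32be/iowrite32be) only on big-endian MIPS builds;
 * every other configuration uses the little-endian readl()/writel().
 */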
static void bcm7038_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm7038_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#ifdef CONFIG_SMP
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending, flags;
		int hwirq;

		raw_spin_lock_irqsave(&intc->lock, flags);
		pending = l1_readl(cpu->map_base + reg_status(intc, idx)) &
			  ~cpu->mask_cache[idx];
		raw_spin_unlock_irqrestore(&intc->lock, flags);

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			generic_handle_irq(irq_find_mapping(intc->domain,
							    base + hwirq));
		}
	}

	chained_irq_exit(chip, desc);
}
static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] &= ~mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_clr(intc, word));
}

static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	intc->cpus[cpu_idx]->mask_cache[word] |= mask;
	l1_writel(mask, intc->cpus[cpu_idx]->map_base +
			reg_mask_set(intc, word));
}
static void bcm7038_l1_unmask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm7038_l1_mask(struct irq_data *d)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm7038_l1_mask(d, intc->affinity[d->hwirq]);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
static int bcm7038_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	irq_hw_number_t hw = d->hwirq;
	u32 word = hw / IRQS_PER_WORD;
	u32 mask = BIT(hw % IRQS_PER_WORD);
	unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask);
	bool was_disabled;

	raw_spin_lock_irqsave(&intc->lock, flags);

	/* Remember whether the IRQ was masked on its previous CPU */
	was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &
			  mask);
	__bcm7038_l1_mask(d, intc->affinity[hw]);
	intc->affinity[hw] = first_cpu;
	if (!was_disabled)
		__bcm7038_l1_unmask(d, first_cpu);

	raw_spin_unlock_irqrestore(&intc->lock, flags);
	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));

	return 0;
}
#ifdef CONFIG_SMP
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
	struct cpumask *mask = irq_data_get_affinity_mask(d);
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	/* This CPU was not on the affinity mask */
	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * Multiple CPU affinity, remove this CPU from the affinity
		 * mask
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Only CPU, put on the lowest online CPU */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(d, &new_affinity, false);
}
#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
				      unsigned int idx,
				      struct bcm7038_l1_chip *intc)
{
	struct resource res;
	resource_size_t sz;
	struct bcm7038_l1_cpu *cpu;
	unsigned int i, n_words, parent_irq;
	int ret;

	if (of_address_to_resource(dn, idx, &res))
		return -EINVAL;
	sz = resource_size(&res);
	n_words = sz / REG_BYTES_PER_IRQ_WORD;
	if (n_words > MAX_WORDS)
		return -EINVAL;
	else if (!intc->n_words)
		intc->n_words = n_words;
	else if (intc->n_words != n_words)
		return -EINVAL;

	ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
					 intc->irq_fwd_mask, n_words);
	if (ret != 0 && ret != -EINVAL) {
		/* property exists but has the wrong number of words */
		pr_err("invalid brcm,int-fwd-mask property\n");
		return -EINVAL;
	}

	cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
					GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	cpu->map_base = ioremap(res.start, sz);
	if (!cpu->map_base)
		return -ENOMEM;

	for (i = 0; i < n_words; i++) {
		l1_writel(~intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_set(intc, i));
		l1_writel(intc->irq_fwd_mask[i],
			  cpu->map_base + reg_mask_clr(intc, i));
		cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
	}

	parent_irq = irq_of_parse_and_map(dn, idx);
	if (!parent_irq) {
		pr_err("failed to map parent interrupt %d\n", parent_irq);
		return -EINVAL;
	}

	if (of_property_read_bool(dn, "brcm,irq-can-wake"))
		enable_irq_wake(parent_irq);

	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
					 intc);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
 * used because the struct chip_type suspend/resume hooks are not called
 * unless chip_type is hooked onto a generic_chip. Since this driver does
 * not use generic_chip, we need to manually hook our resume/suspend to
 * syscore_ops.
 */
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);

static int bcm7038_l1_suspend(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;
	u32 val;

	/* Wakeup interrupt should only come from the boot cpu */
#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
			l1_writel(~val,
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(val,
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}

	return 0;
}
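
/*
 * Illustrative sketch (assumed values, not part of the upstream driver):
 * if wake_mask[0] = 0x00000010 and irq_fwd_mask[0] = 0x00000003, then
 * val = 0x00000013, so suspend above leaves only W0 bits 0, 1 and 4
 * unmasked on the boot CPU and masks everything else; resume below
 * restores the saved mask_cache.
 */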
static void bcm7038_l1_resume(void)
{
	struct bcm7038_l1_chip *intc;
	int boot_cpu, word;

#ifdef CONFIG_SMP
	boot_cpu = cpu_logical_map(0);
#else
	boot_cpu = 0;
#endif

	list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
		for (word = 0; word < intc->n_words; word++) {
			l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
			l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
				intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
		}
	}
}

static struct syscore_ops bcm7038_l1_syscore_ops = {
	.suspend	= bcm7038_l1_suspend,
	.resume		= bcm7038_l1_resume,
};
static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
	struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (on)
		intc->wake_mask[word] |= mask;
	else
		intc->wake_mask[word] &= ~mask;
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	return 0;
}
#endif
static struct irq_chip bcm7038_l1_irq_chip = {
	.name			= "bcm7038-l1",
	.irq_mask		= bcm7038_l1_mask,
	.irq_unmask		= bcm7038_l1_unmask,
	.irq_set_affinity	= bcm7038_l1_set_affinity,
#ifdef CONFIG_SMP
	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake		= bcm7038_l1_set_wake,
#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	struct bcm7038_l1_chip *intc = d->host_data;
	u32 mask = BIT(hw_irq % IRQS_PER_WORD);
	u32 word = hw_irq / IRQS_PER_WORD;

	if (intc->irq_fwd_mask[word] & mask)
		return -EPERM;

	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}

static const struct irq_domain_ops bcm7038_l1_domain_ops = {
	.xlate			= irq_domain_xlate_onecell,
	.map			= bcm7038_l1_map,
};
static int __init bcm7038_l1_of_init(struct device_node *dn,
				     struct device_node *parent)
{
	struct bcm7038_l1_chip *intc;
	int idx, ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	raw_spin_lock_init(&intc->lock);
	for_each_possible_cpu(idx) {
		ret = bcm7038_l1_init_one(dn, idx, intc);
		if (ret < 0) {
			if (idx)
				break;
			pr_err("failed to remap intc L1 registers\n");
			goto out_free;
		}
	}

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					     &bcm7038_l1_domain_ops,
					     intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

#ifdef CONFIG_PM_SLEEP
	/* Add bcm7038_l1_chip into a list */
	raw_spin_lock(&bcm7038_l1_intcs_lock);
	list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
	raw_spin_unlock(&bcm7038_l1_intcs_lock);

	if (list_is_singular(&bcm7038_l1_intcs_list))
		register_syscore_ops(&bcm7038_l1_syscore_ops);
#endif

	pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
		dn, IRQS_PER_WORD * intc->n_words);

	return 0;

out_unmap:
	for_each_possible_cpu(idx) {
		struct bcm7038_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}
IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
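
/*
 * Illustrative device tree fragment (a sketch only; the second register
 * base, the parent label and the interrupt numbers are assumed, not taken
 * from a real board file). The driver expects one register window and one
 * parent interrupt per possible CPU, and a single interrupt cell to match
 * irq_domain_xlate_onecell; the first base and 0x50 size follow from the
 * 7445 layout comment above (5 words * 16 bytes of registers):
 *
 *	periph_intc: interrupt-controller@f03e1500 {
 *		compatible = "brcm,bcm7038-l1-intc";
 *		reg = <0xf03e1500 0x50>, <0xf03e1600 0x50>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupt-parent = <&cpu_intc>;
 *		interrupts = <2>, <3>;
 *	};
 */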