irq-mmp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-mmp/irq.c
 *
 * Generic IRQ handling, GPIO IRQ demultiplexing, etc.
 * Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
 *
 * Author:	Bin Yang <bin.yang@marvell.com>
 *		Haojian Zhuang <haojian.zhuang@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>
#include <asm/hardirq.h>

#define MAX_ICU_NR		16

#define PJ1_INT_SEL		0x10c
#define PJ4_INT_SEL		0x104

/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
#define SEL_INT_PENDING		(1 << 6)
#define SEL_INT_NUM_MASK	0x3f

#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)

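/*
 * Per-controller state.  icu_data[0] always describes the main ICU,
 * whose per-interrupt conf registers sit at mmp_icu_base; the remaining
 * entries describe cascaded MUX controllers, each with a single status
 * register and a single mask register.
 */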
struct icu_chip_data {
	int			nr_irqs;
	unsigned int		virq_base;
	unsigned int		cascade_irq;
	void __iomem		*reg_status;
	void __iomem		*reg_mask;
	unsigned int		conf_enable;
	unsigned int		conf_disable;
	unsigned int		conf_mask;
	unsigned int		conf2_mask;
	unsigned int		clr_mfp_irq_base;
	unsigned int		clr_mfp_hwirq;
	struct irq_domain	*domain;
};

struct mmp_intc_conf {
	unsigned int	conf_enable;
	unsigned int	conf_disable;
	unsigned int	conf_mask;
	unsigned int	conf2_mask;
};

static void __iomem *mmp_icu_base;
static void __iomem *mmp_icu2_base;
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;

extern void mmp2_clear_pmic_int(void);

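/*
 * The two controller types mask differently: the main ICU masks an
 * interrupt by clearing its route bits (conf_mask) in the per-interrupt
 * conf register, while a MUX controller masks by setting the line's bit
 * in the shared mask register.
 */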
static void icu_mask_ack_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
#ifdef CONFIG_CPU_MMP2
		if ((data->virq_base == data->clr_mfp_irq_base)
			&& (hwirq == data->clr_mfp_hwirq))
			mmp2_clear_pmic_int();
#endif
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_mask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));

		if (data->conf2_mask) {
			/*
			 * ICU1 (above) only controls PJ4 MP1; if using SMP,
			 * we need to also mask the MP2 and MM cores via ICU2.
			 */
			r = readl_relaxed(mmp_icu2_base + (hwirq << 2));
			r &= ~data->conf2_mask;
			writel_relaxed(r, mmp_icu2_base + (hwirq << 2));
		}
	} else {
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

static void icu_unmask_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_enable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
		r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}

struct irq_chip icu_irq_chip = {
	.name		= "icu_irq",
	.irq_mask	= icu_mask_irq,
	.irq_mask_ack	= icu_mask_ack_irq,
	.irq_unmask	= icu_unmask_irq,
};

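/*
 * Chained handler for the MUX controllers: find the icu_data[] entry
 * cascaded off the parent interrupt, then dispatch every line that is
 * pending in reg_status and not masked in reg_mask, until the status
 * register reads clean.
 */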
static void icu_mux_irq_demux(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain;
	struct icu_chip_data *data;
	int i;
	unsigned long mask, status, n;

	chained_irq_enter(chip, desc);
	for (i = 1; i < max_icu_nr; i++) {
		if (irq == icu_data[i].cascade_irq) {
			domain = icu_data[i].domain;
			data = (struct icu_chip_data *)domain->host_data;
			break;
		}
	}
	if (i >= max_icu_nr) {
		pr_err("Spurious irq %d in MMP INTC\n", irq);
		goto out;
	}

	mask = readl_relaxed(data->reg_mask);
	while (1) {
		status = readl_relaxed(data->reg_status) & ~mask;
		if (status == 0)
			break;
		for_each_set_bit(n, &status, BITS_PER_LONG) {
			generic_handle_irq(icu_data[i].virq_base + n);
		}
	}

out:
	chained_irq_exit(chip, desc);
}

static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	return 0;
}

static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	return 0;
}

static const struct irq_domain_ops mmp_irq_domain_ops = {
	.map		= mmp_irq_domain_map,
	.xlate		= mmp_irq_domain_xlate,
};

static const struct mmp_intc_conf mmp_conf = {
	.conf_enable	= 0x51,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};

static const struct mmp_intc_conf mmp2_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};

static struct mmp_intc_conf mmp3_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
	.conf2_mask	= 0xf0,
};

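/*
 * Top-level entry points.  The INT_SEL register latches the number of a
 * pending interrupt in its low six bits, with SEL_INT_PENDING flagging
 * whether anything is pending at all; PJ1 and PJ4 cores expose it at
 * different offsets.
 */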
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
{
	int hwirq;

	hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
	if (!(hwirq & SEL_INT_PENDING))
		return;
	hwirq &= SEL_INT_NUM_MASK;
	handle_domain_irq(icu_data[0].domain, hwirq, regs);
}

static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
{
	int hwirq;

	hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
	if (!(hwirq & SEL_INT_PENDING))
		return;
	hwirq &= SEL_INT_NUM_MASK;
	handle_domain_irq(icu_data[0].domain, hwirq, regs);
}

/* MMP (ARMv5) */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	}
	irq_set_default_host(icu_data[0].domain);
	set_handle_irq(mmp_handle_irq);
}

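/*
 * The legacy (non-DT) MMP2 setup below wires up the main ICU plus seven
 * MUX controllers at fixed register offsets, handing out consecutive
 * Linux IRQ numbers as it goes.
 */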
/* MMP2 (ARMv7) */
void __init mmp2_init_icu(void)
{
	int irq, end;

	max_icu_nr = 8;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	icu_data[1].reg_status = mmp_icu_base + 0x150;
	icu_data[1].reg_mask = mmp_icu_base + 0x168;
	icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
				       icu_data[0].nr_irqs;
	icu_data[1].clr_mfp_hwirq = 1;	/* offset to IRQ_MMP2_PMIC_BASE */
	icu_data[1].nr_irqs = 2;
	icu_data[1].cascade_irq = 4;
	icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
						   icu_data[1].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[1]);
	icu_data[2].reg_status = mmp_icu_base + 0x154;
	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
	icu_data[2].nr_irqs = 2;
	icu_data[2].cascade_irq = 5;
	icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
						   icu_data[2].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[2]);
	icu_data[3].reg_status = mmp_icu_base + 0x180;
	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
	icu_data[3].nr_irqs = 3;
	icu_data[3].cascade_irq = 9;
	icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
						   icu_data[3].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[3]);
	icu_data[4].reg_status = mmp_icu_base + 0x158;
	icu_data[4].reg_mask = mmp_icu_base + 0x170;
	icu_data[4].nr_irqs = 5;
	icu_data[4].cascade_irq = 17;
	icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
						   icu_data[4].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[4]);
	icu_data[5].reg_status = mmp_icu_base + 0x15c;
	icu_data[5].reg_mask = mmp_icu_base + 0x174;
	icu_data[5].nr_irqs = 15;
	icu_data[5].cascade_irq = 35;
	icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
						   icu_data[5].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[5]);
	icu_data[6].reg_status = mmp_icu_base + 0x160;
	icu_data[6].reg_mask = mmp_icu_base + 0x178;
	icu_data[6].nr_irqs = 2;
	icu_data[6].cascade_irq = 51;
	icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
						   icu_data[6].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[6]);
	icu_data[7].reg_status = mmp_icu_base + 0x188;
	icu_data[7].reg_mask = mmp_icu_base + 0x184;
	icu_data[7].nr_irqs = 2;
	icu_data[7].cascade_irq = 55;
	icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
						   icu_data[7].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[7]);
	end = icu_data[7].virq_base + icu_data[7].nr_irqs;
	for (irq = 0; irq < end; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		if (irq == icu_data[1].cascade_irq ||
		    irq == icu_data[2].cascade_irq ||
		    irq == icu_data[3].cascade_irq ||
		    irq == icu_data[4].cascade_irq ||
		    irq == icu_data[5].cascade_irq ||
		    irq == icu_data[6].cascade_irq ||
		    irq == icu_data[7].cascade_irq) {
			irq_set_chip(irq, &icu_irq_chip);
			irq_set_chained_handler(irq, icu_mux_irq_demux);
		} else {
			irq_set_chip_and_handler(irq, &icu_irq_chip,
						 handle_level_irq);
		}
	}
	irq_set_default_host(icu_data[0].domain);
	set_handle_irq(mmp2_handle_irq);
}

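/* Device Tree support, probed via the IRQCHIP_DECLARE() entries below. */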
#ifdef CONFIG_OF
static int __init mmp_init_bases(struct device_node *node)
{
	int ret, nr_irqs, irq, i = 0;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Missing mrvl,intc-nr-irqs property\n");
		return ret;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return -ENOMEM;
	}

	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	for (irq = 0; irq < nr_irqs; irq++) {
		ret = irq_create_mapping(icu_data[0].domain, irq);
		if (!ret) {
			pr_err("Failed to map hwirq\n");
			goto err;
		}
		if (!irq)
			icu_data[0].virq_base = ret;
	}
	icu_data[0].nr_irqs = nr_irqs;
	return 0;
err:
	if (icu_data[0].virq_base) {
		for (i = 0; i < irq; i++)
			irq_dispose_mapping(icu_data[0].virq_base + i);
	}
	irq_domain_remove(icu_data[0].domain);
	iounmap(mmp_icu_base);
	return -EINVAL;
}

static int __init mmp_of_init(struct device_node *node,
			      struct device_node *parent)
{
	int ret;

	ret = mmp_init_bases(node);
	if (ret < 0)
		return ret;

	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	set_handle_irq(mmp_handle_irq);
	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);

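/*
 * For reference, a minimal controller node matching the binding above.
 * This is an illustrative sketch, not copied from any particular dts;
 * the base, size and interrupt count mirror what icu_init_irq() uses:
 *
 *	intc: interrupt-controller@d4282000 {
 *		compatible = "mrvl,mmp-intc";
 *		reg = <0xd4282000 0x1000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		mrvl,intc-nr-irqs = <64>;
 *	};
 */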
static int __init mmp2_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	ret = mmp_init_bases(node);
	if (ret < 0)
		return ret;

	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	set_handle_irq(mmp2_handle_irq);
	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);

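/*
 * MMP3 adds a second ICU register range (mapped as mmp_icu2_base) that
 * gates delivery to the other cores; conf2_mask names the bits cleared
 * there when icu_mask_irq() masks an interrupt.
 */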
static int __init mmp3_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	mmp_icu2_base = of_iomap(node, 1);
	if (!mmp_icu2_base) {
		pr_err("Failed to get interrupt controller register #2\n");
		return -ENODEV;
	}

	ret = mmp_init_bases(node);
	if (ret < 0) {
		iounmap(mmp_icu2_base);
		return ret;
	}

	icu_data[0].conf_enable = mmp3_conf.conf_enable;
	icu_data[0].conf_disable = mmp3_conf.conf_disable;
	icu_data[0].conf_mask = mmp3_conf.conf_mask;
	icu_data[0].conf2_mask = mmp3_conf.conf2_mask;

	if (!parent) {
		/* This is the main interrupt controller. */
		set_handle_irq(mmp2_handle_irq);
	}

	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);

static int __init mmp2_mux_of_init(struct device_node *node,
				   struct device_node *parent)
{
	int i, ret, irq, j = 0;
	u32 nr_irqs, mfp_irq;
	u32 reg[4];

	if (!parent)
		return -ENODEV;

	i = max_icu_nr;
	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
				   &nr_irqs);
	if (ret) {
		pr_err("Missing mrvl,intc-nr-irqs property\n");
		return -EINVAL;
	}

	/*
	 * For historical reasons, the "reg" property of the
	 * mrvl,mmp2-mux-intc is not a regular "reg" property containing
	 * addresses on the parent bus, but offsets from the intc's base.
	 * That is why we can't use of_address_to_resource() here.
	 */
	ret = of_property_read_variable_u32_array(node, "reg", reg,
						  ARRAY_SIZE(reg),
						  ARRAY_SIZE(reg));
	if (ret < 0) {
		pr_err("Missing reg property\n");
		return -EINVAL;
	}
	icu_data[i].reg_status = mmp_icu_base + reg[0];
	icu_data[i].reg_mask = mmp_icu_base + reg[2];
	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
	if (!icu_data[i].cascade_irq)
		return -EINVAL;

	icu_data[i].virq_base = 0;
	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
						   &mmp_irq_domain_ops,
						   &icu_data[i]);
	for (irq = 0; irq < nr_irqs; irq++) {
		ret = irq_create_mapping(icu_data[i].domain, irq);
		if (!ret) {
			pr_err("Failed to map hwirq\n");
			goto err;
		}
		if (!irq)
			icu_data[i].virq_base = ret;
	}
	icu_data[i].nr_irqs = nr_irqs;
	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
				  &mfp_irq)) {
		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
		icu_data[i].clr_mfp_hwirq = mfp_irq;
	}
	irq_set_chained_handler(icu_data[i].cascade_irq,
				icu_mux_irq_demux);
	max_icu_nr++;
	return 0;
err:
	if (icu_data[i].virq_base) {
		for (j = 0; j < irq; j++)
			irq_dispose_mapping(icu_data[i].virq_base + j);
	}
	irq_domain_remove(icu_data[i].domain);
	return -EINVAL;
}
IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);

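/*
 * For reference, an illustrative MUX node (not copied from a particular
 * dts).  "reg" carries <status-offset size>, <mask-offset size> pairs
 * relative to the main ICU base, matching how reg[0] and reg[2] are
 * consumed above; the offsets and counts mirror icu_data[1] in
 * mmp2_init_icu():
 *
 *	intcmux4: interrupt-controller@d4282150 {
 *		compatible = "mrvl,mmp2-mux-intc";
 *		interrupts = <4>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0x150 0x4>, <0x168 0x4>;
 *		mrvl,intc-nr-irqs = <2>;
 *	};
 */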
#endif