/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);

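/*
 * External IRQs 0-3 live in the first PERF config register; chips with
 * more than four external lines (the 6368 below) use a second register
 * for the remaining ones.
 */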
static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

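/*
 * When external IRQs are cascaded through the internal controller, map
 * the internal status bit back to its external Linux IRQ number before
 * dispatching.
 */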
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

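/*
 * Decide whether a given CPU should see this interrupt: the CPU must
 * be online, and on SMP it must also be part of the requested (or
 * previously recorded) affinity mask.
 */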
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}

/*
 * Dispatch internal device IRQs (uart, enet, watchdog, ...). Do not
 * prioritize any interrupt relative to another: the static counter
 * resumes the scan wherever it stopped the last time we left this
 * function. The macro builds 32-bit and 64-bit variants of the
 * dispatch, mask and unmask helpers.
 */
#define BUILD_IPIC_INTERNAL(width) \
void __dispatch_internal_##width(int cpu) \
{ \
	u32 pending[width / 32]; \
	unsigned int src, tgt; \
	bool irqs_pending = false; \
	static unsigned int i[2]; \
	unsigned int *next = &i[cpu]; \
	unsigned long flags; \
	\
	/* read registers in reverse order */ \
	spin_lock_irqsave(&ipic_lock, flags); \
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
		u32 val; \
		\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val; \
		\
		if (val) \
			irqs_pending = true; \
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
	\
	if (!irqs_pending) \
		return; \
	\
	while (1) { \
		unsigned int to_call = *next; \
		\
		*next = (*next + 1) & (width - 1); \
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
			handle_internal(to_call); \
			break; \
		} \
	} \
} \
\
static void __internal_irq_mask_##width(struct irq_data *d) \
{ \
	u32 val; \
	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
	unsigned reg = (irq / 32) ^ (width/32 - 1); \
	unsigned bit = irq & 0x1f; \
	unsigned long flags; \
	int cpu; \
	\
	spin_lock_irqsave(&ipic_lock, flags); \
	for_each_present_cpu(cpu) { \
		if (!irq_mask_addr[cpu]) \
			break; \
		\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32)); \
		val &= ~(1 << bit); \
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32)); \
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
} \
\
static void __internal_irq_unmask_##width(struct irq_data *d, \
					  const struct cpumask *m) \
{ \
	u32 val; \
	unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
	unsigned reg = (irq / 32) ^ (width/32 - 1); \
	unsigned bit = irq & 0x1f; \
	unsigned long flags; \
	int cpu; \
	\
	spin_lock_irqsave(&ipic_lock, flags); \
	for_each_present_cpu(cpu) { \
		if (!irq_mask_addr[cpu]) \
			break; \
		\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32)); \
		if (enable_irq_for_cpu(cpu, d, m)) \
			val |= (1 << bit); \
		else \
			val &= ~(1 << bit); \
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32)); \
	} \
	spin_unlock_irqrestore(&ipic_lock, flags); \
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);

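/*
 * Top-level MIPS dispatch: poll CAUSE against the enabled STATUS bits
 * and hand each pending line to its handler until nothing is left.
 */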
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}

/*
 * Internal IRQ operations: only mask/unmask on the PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}

/*
 * External IRQ operations: mask/unmask and clear on the PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}

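/*
 * Program the trigger mode of an external line. The hardware encodes
 * the mode with three bits: LEVELSENSE (level vs. edge), SENSE
 * (high/rising vs. low/falling) and BOTHEDGE.
 */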
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given!\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;

	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

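/*
 * On SMP, affinity of an internal IRQ is applied by rewriting the
 * per-CPU mask registers; a currently masked IRQ picks up the new
 * affinity the next time it is unmasked.
 */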
#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,
	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,
	.irq_set_type	= bcm63xx_external_irq_set_type,
};

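/*
 * Per-SoC setup: resolve the PERF status/mask register addresses, the
 * width of the internal controller (32 or 64 bits), the number of
 * external IRQ lines, and whether they are cascaded through the
 * internal controller.
 */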
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}

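/*
 * Register chips and handlers for all internal and external IRQs, then
 * claim the MIPS CPU lines used as cascade inputs with no_action so
 * nothing else can grab them.
 */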
void __init arch_init_irq(void)
{
	int i, irq;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i) {
			irq = MIPS_CPU_IRQ_BASE + i;
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
			}
		}
	}

	irq = MIPS_CPU_IRQ_BASE + 2;
	if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		irq = MIPS_CPU_IRQ_BASE + 3;
		if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}