irq-owl-sirq.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Actions Semi Owl SoCs SIRQ interrupt controller driver
 *
 * Copyright (C) 2014 Actions Semi Inc.
 * David Liu <liuwei@actions-semi.com>
 *
 * Author: Parthiban Nallathambi <pn@denx.de>
 * Author: Saravanan Sekar <sravanhome@gmail.com>
 * Author: Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>

#define NUM_SIRQ                        3

#define INTC_EXTCTL_PENDING             BIT(0)
#define INTC_EXTCTL_CLK_SEL             BIT(4)
#define INTC_EXTCTL_EN                  BIT(5)
#define INTC_EXTCTL_TYPE_MASK           GENMASK(7, 6)
#define INTC_EXTCTL_TYPE_HIGH           0
#define INTC_EXTCTL_TYPE_LOW            BIT(6)
#define INTC_EXTCTL_TYPE_RISING         BIT(7)
#define INTC_EXTCTL_TYPE_FALLING        (BIT(6) | BIT(7))

/* S500 & S700 SIRQ control register masks */
#define INTC_EXTCTL_SIRQ0_MASK          GENMASK(23, 16)
#define INTC_EXTCTL_SIRQ1_MASK          GENMASK(15, 8)
#define INTC_EXTCTL_SIRQ2_MASK          GENMASK(7, 0)

/* S900 SIRQ control register offsets, relative to controller base address */
#define INTC_EXTCTL0                    0x0000
#define INTC_EXTCTL1                    0x0328
#define INTC_EXTCTL2                    0x032c

struct owl_sirq_params {
        /* INTC_EXTCTL reg shared for all three SIRQ lines */
        bool reg_shared;
        /* INTC_EXTCTL reg offsets relative to controller base address */
        u16 reg_offset[NUM_SIRQ];
};

struct owl_sirq_chip_data {
        const struct owl_sirq_params    *params;
        void __iomem                    *base;
        raw_spinlock_t                  lock;
        u32                             ext_irqs[NUM_SIRQ];
};

/* S500 & S700 SoCs */
static const struct owl_sirq_params owl_sirq_s500_params = {
        .reg_shared = true,
        .reg_offset = { 0, 0, 0 },
};

/* S900 SoC */
static const struct owl_sirq_params owl_sirq_s900_params = {
        .reg_shared = false,
        .reg_offset = { INTC_EXTCTL0, INTC_EXTCTL1, INTC_EXTCTL2 },
};

static u32 owl_field_get(u32 val, u32 index)
{
        switch (index) {
        case 0:
                return FIELD_GET(INTC_EXTCTL_SIRQ0_MASK, val);
        case 1:
                return FIELD_GET(INTC_EXTCTL_SIRQ1_MASK, val);
        case 2:
        default:
                return FIELD_GET(INTC_EXTCTL_SIRQ2_MASK, val);
        }
}

static u32 owl_field_prep(u32 val, u32 index)
{
        switch (index) {
        case 0:
                return FIELD_PREP(INTC_EXTCTL_SIRQ0_MASK, val);
        case 1:
                return FIELD_PREP(INTC_EXTCTL_SIRQ1_MASK, val);
        case 2:
        default:
                return FIELD_PREP(INTC_EXTCTL_SIRQ2_MASK, val);
        }
}

static u32 owl_sirq_read_extctl(struct owl_sirq_chip_data *data, u32 index)
{
        u32 val;

        val = readl_relaxed(data->base + data->params->reg_offset[index]);
        if (data->params->reg_shared)
                val = owl_field_get(val, index);

        return val;
}
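/*
 * On S500/S700 all three SIRQ lines live in a single shared EXTCTL register
 * (SIRQ0 in bits 23:16, SIRQ1 in bits 15:8, SIRQ2 in bits 7:0), so updating
 * one line must be a read-modify-write that preserves the other two fields;
 * on S900 each line has its own register and the value is written directly.
 */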
static void owl_sirq_write_extctl(struct owl_sirq_chip_data *data,
                                  u32 extctl, u32 index)
{
        u32 val;

        if (data->params->reg_shared) {
                val = readl_relaxed(data->base + data->params->reg_offset[index]);
                val &= ~owl_field_prep(0xff, index);
                extctl = owl_field_prep(extctl, index) | val;
        }

        writel_relaxed(extctl, data->base + data->params->reg_offset[index]);
}

static void owl_sirq_clear_set_extctl(struct owl_sirq_chip_data *d,
                                      u32 clear, u32 set, u32 index)
{
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&d->lock, flags);
        val = owl_sirq_read_extctl(d, index);
        val &= ~clear;
        val |= set;
        owl_sirq_write_extctl(d, val, index);
        raw_spin_unlock_irqrestore(&d->lock, flags);
}

static void owl_sirq_eoi(struct irq_data *data)
{
        struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

        /*
         * Software must clear the external interrupt pending bit when the
         * interrupt type is edge triggered, so per-SIRQ clearing is needed.
         */
        if (!irqd_is_level_type(data))
                owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_PENDING,
                                          data->hwirq);

        irq_chip_eoi_parent(data);
}

static void owl_sirq_mask(struct irq_data *data)
{
        struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

        owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_EN, 0, data->hwirq);
        irq_chip_mask_parent(data);
}

static void owl_sirq_unmask(struct irq_data *data)
{
        struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

        owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_EN, data->hwirq);
        irq_chip_unmask_parent(data);
}

/*
 * The GIC does not handle falling-edge or active-low interrupts, so the SIRQ
 * controller is programmed to convert falling edge to rising edge and active
 * low to active high before forwarding the signal.
 */
static int owl_sirq_set_type(struct irq_data *data, unsigned int type)
{
        struct owl_sirq_chip_data *chip_data = irq_data_get_irq_chip_data(data);
        u32 sirq_type;

        switch (type) {
        case IRQ_TYPE_LEVEL_LOW:
                sirq_type = INTC_EXTCTL_TYPE_LOW;
                type = IRQ_TYPE_LEVEL_HIGH;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
                sirq_type = INTC_EXTCTL_TYPE_HIGH;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                sirq_type = INTC_EXTCTL_TYPE_FALLING;
                type = IRQ_TYPE_EDGE_RISING;
                break;
        case IRQ_TYPE_EDGE_RISING:
                sirq_type = INTC_EXTCTL_TYPE_RISING;
                break;
        default:
                return -EINVAL;
        }

        owl_sirq_clear_set_extctl(chip_data, INTC_EXTCTL_TYPE_MASK, sirq_type,
                                  data->hwirq);

        return irq_chip_set_type_parent(data, type);
}

static struct irq_chip owl_sirq_chip = {
        .name           = "owl-sirq",
        .irq_mask       = owl_sirq_mask,
        .irq_unmask     = owl_sirq_unmask,
        .irq_eoi        = owl_sirq_eoi,
        .irq_set_type   = owl_sirq_set_type,
        .irq_retrigger  = irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
        .irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};
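/*
 * The SIRQ domain is stacked on top of the GIC domain: each of the three
 * SIRQ lines maps 1:1 onto a dedicated GIC SPI, and the callbacks above
 * update the EXTCTL register before forwarding the operation to the parent
 * chip via the irq_chip_*_parent() helpers.
 */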
static int owl_sirq_domain_translate(struct irq_domain *d,
                                     struct irq_fwspec *fwspec,
                                     unsigned long *hwirq,
                                     unsigned int *type)
{
        if (!is_of_node(fwspec->fwnode))
                return -EINVAL;

        if (fwspec->param_count != 2 || fwspec->param[0] >= NUM_SIRQ)
                return -EINVAL;

        *hwirq = fwspec->param[0];
        *type = fwspec->param[1];

        return 0;
}

static int owl_sirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                 unsigned int nr_irqs, void *data)
{
        struct owl_sirq_chip_data *chip_data = domain->host_data;
        struct irq_fwspec *fwspec = data;
        struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
        unsigned int type;
        int ret;

        if (WARN_ON(nr_irqs != 1))
                return -EINVAL;

        ret = owl_sirq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        switch (type) {
        case IRQ_TYPE_EDGE_RISING:
        case IRQ_TYPE_LEVEL_HIGH:
                break;
        case IRQ_TYPE_EDGE_FALLING:
                type = IRQ_TYPE_EDGE_RISING;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                type = IRQ_TYPE_LEVEL_HIGH;
                break;
        default:
                return -EINVAL;
        }

        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &owl_sirq_chip,
                                      chip_data);

        parent_fwspec.fwnode = domain->parent->fwnode;
        parent_fwspec.param_count = 3;
        parent_fwspec.param[0] = GIC_SPI;
        parent_fwspec.param[1] = chip_data->ext_irqs[hwirq];
        parent_fwspec.param[2] = type;

        return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}

static const struct irq_domain_ops owl_sirq_domain_ops = {
        .translate      = owl_sirq_domain_translate,
        .alloc          = owl_sirq_domain_alloc,
        .free           = irq_domain_free_irqs_common,
};
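/*
 * Consumer usage sketch (illustrative only; the node name and cell values
 * below are made-up examples, not taken from a real DTS): a device wired to
 * SIRQ line 0 references this controller with the two-cell specifier that
 * owl_sirq_domain_translate() accepts, <line trigger-type> with line 0..2:
 *
 *      pmic@65 {
 *              ...
 *              interrupt-parent = <&sirq>;
 *              interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
 *      };
 *
 * owl_sirq_domain_alloc() then rewrites this into a three-cell GIC SPI
 * specifier using the parent interrupt number cached in ext_irqs[0].
 */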
static int __init owl_sirq_init(const struct owl_sirq_params *params,
                                struct device_node *node,
                                struct device_node *parent)
{
        struct irq_domain *domain, *parent_domain;
        struct owl_sirq_chip_data *chip_data;
        int ret, i;

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("%pOF: failed to find sirq parent domain\n", node);
                return -ENXIO;
        }

        chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
        if (!chip_data)
                return -ENOMEM;

        raw_spin_lock_init(&chip_data->lock);

        chip_data->params = params;

        chip_data->base = of_iomap(node, 0);
        if (!chip_data->base) {
                pr_err("%pOF: failed to map sirq registers\n", node);
                ret = -ENXIO;
                goto out_free;
        }

        for (i = 0; i < NUM_SIRQ; i++) {
                struct of_phandle_args irq;

                ret = of_irq_parse_one(node, i, &irq);
                if (ret) {
                        pr_err("%pOF: failed to parse interrupt %d\n", node, i);
                        goto out_unmap;
                }

                if (WARN_ON(irq.args_count != 3)) {
                        ret = -EINVAL;
                        goto out_unmap;
                }

                chip_data->ext_irqs[i] = irq.args[1];

                /* Set 24MHz external interrupt clock freq */
                owl_sirq_clear_set_extctl(chip_data, 0, INTC_EXTCTL_CLK_SEL, i);
        }

        domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_SIRQ, node,
                                          &owl_sirq_domain_ops, chip_data);
        if (!domain) {
                pr_err("%pOF: failed to add domain\n", node);
                ret = -ENOMEM;
                goto out_unmap;
        }

        return 0;

out_unmap:
        iounmap(chip_data->base);
out_free:
        kfree(chip_data);

        return ret;
}
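/*
 * Controller node sketch (illustrative only; the unit address, register
 * range and SPI numbers are placeholders, not taken from a real DTS)
 * matching what owl_sirq_init() expects: one register range and exactly
 * three parent interrupts, each a three-cell GIC specifier whose second
 * cell becomes ext_irqs[i]:
 *
 *      sirq: interrupt-controller@... {
 *              compatible = "actions,s500-sirq";
 *              reg = <... 0x4>;
 *              interrupt-controller;
 *              #interrupt-cells = <2>;
 *              interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
 *                           <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
 *                           <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 *      };
 */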
static int __init owl_sirq_s500_of_init(struct device_node *node,
                                        struct device_node *parent)
{
        return owl_sirq_init(&owl_sirq_s500_params, node, parent);
}

IRQCHIP_DECLARE(owl_sirq_s500, "actions,s500-sirq", owl_sirq_s500_of_init);
IRQCHIP_DECLARE(owl_sirq_s700, "actions,s700-sirq", owl_sirq_s500_of_init);

static int __init owl_sirq_s900_of_init(struct device_node *node,
                                        struct device_node *parent)
{
        return owl_sirq_init(&owl_sirq_s900_params, node, parent);
}

IRQCHIP_DECLARE(owl_sirq_s900, "actions,s900-sirq", owl_sirq_s900_of_init);