// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
 * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */

#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct irq_sim_work_ctx {
	struct irq_work		work;
	int			irq_base;
	unsigned int		irq_count;
	unsigned long		*pending;
	struct irq_domain	*domain;
};

struct irq_sim_irq_ctx {
	int			irqnum;
	bool			enabled;
	struct irq_sim_work_ctx	*work_ctx;
};

struct irq_sim_devres {
	struct irq_domain	*domain;
};

static void irq_sim_irqmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = true;
}

static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
	/* We only support rising and falling edge trigger types. */
	if (type & ~IRQ_TYPE_EDGE_BOTH)
		return -EINVAL;

	irqd_set_trigger_type(data, type);

	return 0;
}

static int irq_sim_get_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool *state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled)
			*state = test_bit(hwirq, irq_ctx->work_ctx->pending);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Injection path: irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true)
 * lands here. If the simulated line is unmasked, it is marked pending and
 * the irq_work that will fire it is queued.
 */
static int irq_sim_set_irqchip_state(struct irq_data *data,
				     enum irqchip_irq_state which, bool state)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
	irq_hw_number_t hwirq = irqd_to_hwirq(data);

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		if (irq_ctx->enabled) {
			assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
			if (state)
				irq_work_queue(&irq_ctx->work_ctx->work);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct irq_chip irq_sim_irqchip = {
	.name			= "irq_sim",
	.irq_mask		= irq_sim_irqmask,
	.irq_unmask		= irq_sim_irqunmask,
	.irq_set_type		= irq_sim_set_type,
	.irq_get_irqchip_state	= irq_sim_get_irqchip_state,
	.irq_set_irqchip_state	= irq_sim_set_irqchip_state,
};

/*
 * Runs from the irq_work callback: drain the pending bitmap and invoke the
 * flow handler of every simulated interrupt that has been injected.
 */
static void irq_sim_handle_irq(struct irq_work *work)
{
	struct irq_sim_work_ctx *work_ctx;
	unsigned int offset = 0;
	int irqnum;

	work_ctx = container_of(work, struct irq_sim_work_ctx, work);

	while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
		offset = find_next_bit(work_ctx->pending,
				       work_ctx->irq_count, offset);
		clear_bit(offset, work_ctx->pending);
		irqnum = irq_find_mapping(work_ctx->domain, offset);
		handle_simple_irq(irq_to_desc(irqnum));
	}
}

static int irq_sim_domain_map(struct irq_domain *domain,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;
	struct irq_sim_irq_ctx *irq_ctx;

	irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
	if (!irq_ctx)
		return -ENOMEM;

	irq_set_chip(virq, &irq_sim_irqchip);
	irq_set_chip_data(virq, irq_ctx);
	irq_set_handler(virq, handle_simple_irq);
	irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	irq_ctx->work_ctx = work_ctx;

	return 0;
}

static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
	struct irq_sim_irq_ctx *irq_ctx;
	struct irq_data *irqd;

	irqd = irq_domain_get_irq_data(domain, virq);
	irq_ctx = irq_data_get_irq_chip_data(irqd);

	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(irqd);
	kfree(irq_ctx);
}

static const struct irq_domain_ops irq_sim_domain_ops = {
	.map		= irq_sim_domain_map,
	.unmap		= irq_sim_domain_unmap,
};

/**
 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
 *                         allocate a range of dummy interrupts.
 *
 * @fwnode:	struct fwnode_handle to be associated with this domain.
 * @num_irqs:	Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
					 unsigned int num_irqs)
{
	struct irq_sim_work_ctx *work_ctx;

	work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
	if (!work_ctx)
		goto err_out;

	work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
	if (!work_ctx->pending)
		goto err_free_work_ctx;

	work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
						    &irq_sim_domain_ops,
						    work_ctx);
	if (!work_ctx->domain)
		goto err_free_bitmap;

	work_ctx->irq_count = num_irqs;
	init_irq_work(&work_ctx->work, irq_sim_handle_irq);

	return work_ctx->domain;

err_free_bitmap:
	bitmap_free(work_ctx->pending);
err_free_work_ctx:
	kfree(work_ctx);
err_out:
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
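
/*
 * Illustrative sketch, not part of the original irq_sim.c: one way a test
 * driver could consume the domain returned by irq_domain_create_sim(). The
 * identifiers demo_handler() and demo_fire_one() are hypothetical and exist
 * only for this example; irq_create_mapping(), request_irq() and
 * irq_set_irqchip_state() are the regular genirq interfaces.
 */
static irqreturn_t demo_handler(int irq, void *data)
{
	/* Invoked once the interrupt injected below is dispatched. */
	return IRQ_HANDLED;
}

static int demo_fire_one(struct irq_domain *sim_domain)
{
	unsigned int virq;
	int ret;

	/* Map hwirq 0 of the simulator domain to a Linux interrupt number. */
	virq = irq_create_mapping(sim_domain, 0);
	if (!virq)
		return -ENXIO;

	ret = request_irq(virq, demo_handler, 0, "irq_sim_demo", NULL);
	if (ret) {
		irq_dispose_mapping(virq);
		return ret;
	}

	/* Inject the interrupt: set the pending bit and queue the irq_work. */
	ret = irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);

	/* A real user would keep the line requested for as long as needed. */
	free_irq(virq, NULL);
	return ret;
}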

/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain:	The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
	struct irq_sim_work_ctx *work_ctx = domain->host_data;

	irq_work_sync(&work_ctx->work);
	bitmap_free(work_ctx->pending);
	kfree(work_ctx);

	irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);
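
/*
 * Illustrative sketch, not part of the original irq_sim.c: tearing down a
 * simulator created with irq_domain_create_sim(). demo_teardown() is a
 * hypothetical helper; mappings created by the client (as in demo_fire_one()
 * above) should be disposed of before the domain itself is removed.
 */
static void demo_teardown(struct irq_domain *sim_domain, unsigned int virq)
{
	irq_dispose_mapping(virq);
	irq_domain_remove_sim(sim_domain);
}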

static void devm_irq_domain_release_sim(struct device *dev, void *res)
{
	struct irq_sim_devres *this = res;

	irq_domain_remove_sim(this->domain);
}

/**
 * devm_irq_domain_create_sim - Create a new interrupt simulator for
 *                              a managed device.
 *
 * @dev:	Device to initialize the simulator object for.
 * @fwnode:	struct fwnode_handle to be associated with this domain.
 * @num_irqs:	Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
					      struct fwnode_handle *fwnode,
					      unsigned int num_irqs)
{
	struct irq_sim_devres *dr;

	dr = devres_alloc(devm_irq_domain_release_sim,
			  sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	dr->domain = irq_domain_create_sim(fwnode, num_irqs);
	if (IS_ERR(dr->domain)) {
		devres_free(dr);
		return dr->domain;
	}

	devres_add(dev, dr);
	return dr->domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
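
/*
 * Illustrative sketch, not part of the original irq_sim.c: devres-managed
 * usage from a driver's probe path. demo_probe() and the choice of 8 lines
 * are hypothetical; passing a NULL fwnode simply leaves the domain without
 * an associated firmware node. The domain is released automatically when
 * @dev is unbound, so no explicit irq_domain_remove_sim() call is needed.
 */
static int demo_probe(struct device *dev)
{
	struct irq_domain *sim_domain;

	sim_domain = devm_irq_domain_create_sim(dev, NULL, 8);
	if (IS_ERR(sim_domain))
		return PTR_ERR(sim_domain);

	/* Mappings can now be created against sim_domain as shown above. */
	return 0;
}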