// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Imagination Technologies Ltd
 * Author: Qais Yousef <qais.yousef@imgtec.com>
 *
 * This file contains driver APIs to the IPI subsystem.
 */

#define pr_fmt(fmt) "genirq/ipi: " fmt

#include <linux/irqdomain.h>
#include <linux/irq.h>

/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain:	IPI domain
 * @dest:	cpumask of CPUs which can receive the IPI
 *
 * Allocate a virq that can be used to send an IPI to any CPU in @dest.
 *
 * Return: Linux IRQ number on success, or a negative error code on failure.
 */
int irq_reserve_ipi(struct irq_domain *domain,
		    const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
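
/*
 * Editor's illustrative sketch, not part of the original file: how an
 * irqchip driver might reserve an IPI for all possible CPUs. The function
 * name example_setup_ipi() and the ipi_domain argument are hypothetical.
 */
static int __maybe_unused example_setup_ipi(struct irq_domain *ipi_domain)
{
	int virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);

	if (virq < 0)
		return virq;	/* reservation failed */

	/*
	 * For an IPI-per-cpu domain, virq is the base of a consecutive
	 * range covering every CPU in the mask; for an IPI-single domain
	 * it is the only Linux irq number.
	 */
	return virq;
}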

/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq:	Linux IRQ number to be destroyed
 * @dest:	cpumask of CPUs which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system
 * destroying all virqs associated with them.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
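
/*
 * Editor's illustrative sketch, not part of the original file: tearing down
 * the reservation made in example_setup_ipi() above. The mask must be a
 * subset of the one passed to irq_reserve_ipi().
 */
static void __maybe_unused example_teardown_ipi(unsigned int virq)
{
	irq_destroy_ipi(virq, cpu_possible_mask);
}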

/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
 * @irq:	Linux IRQ number
 * @cpu:	the target CPU
 *
 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Return: hwirq value on success, or INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);
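
/*
 * Editor's illustrative sketch, not part of the original file: a coprocessor
 * driver looking up the hardware IPI number it must hand to remote firmware.
 * example_get_coproc_hwirq() and its parameters are hypothetical.
 */
static int __maybe_unused example_get_coproc_hwirq(unsigned int virq,
						   unsigned int target_cpu)
{
	irq_hw_number_t hwirq = ipi_get_hwirq(virq, target_cpu);

	if (hwirq == INVALID_HWIRQ)
		return -EINVAL;

	/* hwirq would now be passed to the coprocessor, e.g. via a mailbox */
	return 0;
}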

/*
 * Common sanity checks for the IPI send paths: the chip must provide at
 * least one send method and the destination must lie within the mask the
 * IPI was reserved for. A NULL @dest means a single-cpu send to @cpu.
 */
static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
			   const struct cpumask *dest, unsigned int cpu)
{
	struct cpumask *ipimask = irq_data_get_affinity_mask(data);

	if (!chip || !ipimask)
		return -EINVAL;

	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {
		if (!cpumask_subset(dest, ipimask))
			return -EINVAL;
	} else {
		if (!cpumask_test_cpu(cpu, ipimask))
			return -EINVAL;
	}
	return 0;
}

/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:	pointer to irq_desc of the IRQ
 * @cpu:	destination CPU, must be in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned int irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
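
/*
 * Editor's illustrative sketch, not part of the original file: arch code
 * would typically resolve the irq_desc once at setup time and then call the
 * unchecked fast path on every send. example_arch_send_ipi() is hypothetical.
 */
static void __maybe_unused example_arch_send_ipi(unsigned int virq,
						 unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);	/* normally cached */

	if (desc)
		__ipi_send_single(desc, cpu);
}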

/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:	pointer to irq_desc of the IRQ
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned int irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
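
/*
 * Editor's illustrative sketch, not part of the original file: broadcasting
 * to all online CPUs through the fast path, assuming the IPI was reserved
 * for a superset such as cpu_possible_mask.
 */
static void __maybe_unused example_arch_send_ipi_all(unsigned int virq)
{
	struct irq_desc *desc = irq_to_desc(virq);	/* normally cached */

	if (desc)
		__ipi_send_mask(desc, cpu_online_mask);
}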

/**
 * ipi_send_single - Send an IPI to a single CPU
 * @virq:	Linux IRQ number from irq_reserve_ipi()
 * @cpu:	destination CPU, must be in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int ipi_send_single(unsigned int virq, unsigned int cpu)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;

	return __ipi_send_single(desc, cpu);
}
EXPORT_SYMBOL_GPL(ipi_send_single);

/**
 * ipi_send_mask - Send an IPI to target CPU(s)
 * @virq:	Linux IRQ number from irq_reserve_ipi()
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(virq);
	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;

	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;

	return __ipi_send_mask(desc, dest);
}
EXPORT_SYMBOL_GPL(ipi_send_mask);
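
/*
 * Editor's illustrative sketch, not part of the original file: a driver
 * using the checked entry points with the virq from irq_reserve_ipi().
 * example_driver_notify() and its parameters are hypothetical; the online
 * mask is assumed to be a subset of the reserved mask.
 */
static int __maybe_unused example_driver_notify(unsigned int virq,
						unsigned int first_cpu)
{
	int ret;

	/* kick one CPU... */
	ret = ipi_send_single(virq, first_cpu);
	if (ret)
		return ret;

	/* ...then everything currently online */
	return ipi_send_mask(virq, cpu_online_mask);
}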