up.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>
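
/*
 * Run @func(@info) on @cpu.  On UP the only valid target is CPU 0, so any
 * other @cpu yields -ENXIO.  @func is invoked directly with local interrupts
 * disabled, mirroring the IPI-handler context it would get on SMP; @wait is
 * meaningless here because the call is always synchronous.
 */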
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
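
/*
 * On UP the "asynchronous" variant degenerates to a synchronous call:
 * csd->func is invoked immediately with interrupts disabled, and @cpu is
 * ignored since CPU 0 is the only CPU there is.
 */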
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
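
/*
 * "Each CPU" is just CPU 0 here, so this reduces to one direct call of
 * @func(@info) with interrupts disabled; @wait is irrelevant because there
 * is nobody else to wait for.
 */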
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(on_each_cpu);
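
/* Masked variant: @func runs only if CPU 0 is set in @mask. */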
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
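
/*
 * Conditional variant: @func(@info) runs only when @cond_func(0, @info)
 * returns true; @mask and @wait are unused on UP.
 */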
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
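
/* Unmasked wrapper: a NULL mask means "all CPUs", i.e. CPU 0 on UP. */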
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);
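
/*
 * Unlike the helpers above, @func runs in plain task context with interrupts
 * enabled, and its return value is passed back to the caller.  With @phys
 * set, the vCPU is pinned to physical CPU 0 via hypervisor_pin_vcpu() for
 * the duration of the call.
 */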
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
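
/*
 * Hypothetical usage sketch (not part of this file): a caller written for
 * SMP behaves identically against these UP stubs, e.g.
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t counter = ATOMIC_INIT(0);
 *	smp_call_function_single(0, bump_counter, &counter, 1);
 *
 * On UP this simply runs bump_counter(&counter) on CPU 0 with IRQs off.
 */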