// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);

static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
# ifdef CONFIG_HOTPLUG_CPU
static void tick_broadcast_oneshot_offline(unsigned int cpu);
# endif
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
# ifdef CONFIG_HOTPLUG_CPU
static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
# endif
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);

const struct clock_event_device *tick_get_wakeup_device(int cpu)
{
	return tick_get_oneshot_wakeup_device(cpu);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

#ifdef CONFIG_TICK_ONESHOT
static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
{
	return per_cpu(tick_oneshot_wakeup_device, cpu);
}

static void tick_oneshot_wakeup_handler(struct clock_event_device *wd)
{
	/*
	 * If we woke up early and the tick was reprogrammed in the
	 * meantime then this may be spurious but harmless.
	 */
	tick_receive_broadcast();
}
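
/*
 * A CPU local wakeup device can take over instead of the global broadcast
 * device, provided it is per-CPU, oneshot capable, not affected by
 * CLOCK_EVT_FEAT_C3STOP and affine to exactly this CPU.
 */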
static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
					   int cpu)
{
	struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu);

	if (!newdev)
		goto set_device;

	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (!(newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return false;

	if (curdev && newdev->rating <= curdev->rating)
		return false;

	if (!try_module_get(newdev->owner))
		return false;

	newdev->event_handler = tick_oneshot_wakeup_handler;
set_device:
	clockevents_exchange_device(curdev, newdev);
	per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
	return true;
}
#else
static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
{
	return NULL;
}

static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
					   int cpu)
{
	return false;
}
#endif

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (tick_set_oneshot_wakeup_device(dev, cpu))
		return;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * If the system already runs in oneshot mode, switch the newly
	 * registered broadcast device to oneshot mode explicitly.
	 */
	if (tick_broadcast_oneshot_active()) {
		tick_broadcast_switch_to_oneshot();
		return;
	}

	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	tick_clock_notify();
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}
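
/*
 * Update the frequency of the broadcast device. Serialized against the
 * broadcast handlers via tick_broadcast_lock.
 */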
int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}
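
/*
 * Last resort broadcast function, used when a device depends on
 * broadcasting but provides no broadcast callback of its own.
 */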
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;

	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
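
/*
 * Called on the target CPU from the broadcast IPI (or from the per-CPU
 * wakeup device handler): invoke the local tick device's event handler.
 */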
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}

/*
 * Broadcast the event to the cpus which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		fallthrough;
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
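/*
 * Shut the broadcast device down if, after a CPU went offline, no CPU
 * depends on periodic broadcasting anymore.
 */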
static void tick_shutdown_broadcast(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}
}

/*
 * Remove a CPU from broadcasting
 */
void tick_broadcast_offline(unsigned int cpu)
{
	raw_spin_lock(&tick_broadcast_lock);
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);
	tick_broadcast_oneshot_offline(cpu);
	tick_shutdown_broadcast();
	raw_spin_unlock(&tick_broadcast_lock);
}
#endif
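
/*
 * Shut the broadcast device down across suspend. tick_resume_broadcast()
 * restarts it according to the broadcast mode and mask.
 */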
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}
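
/*
 * Program the broadcast device for the next expiring event and, if the
 * device supports dynamic IRQ affinity, steer its interrupt to the CPU
 * which owns that event.
 */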
static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
					      CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() unconditionally
		 * reports CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast()!
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
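
/*
 * With a hrtimer based broadcast device, the CPU to which the broadcast
 * hrtimer is bound cannot enter deep idle while an event is pending.
 * Returns -EBUSY for that CPU, 0 otherwise.
 */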
static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

static int ___tick_broadcast_oneshot_control(enum tick_broadcast_state state,
					     struct tick_device *td,
					     int cpu)
{
	struct clock_event_device *bc, *dev = td->evtdev;
	int ret = 0;
	ktime_t now;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
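
/*
 * Handle idle entry/exit via a per-CPU oneshot wakeup device, if one is
 * registered: on entry the tick device is stopped and the wakeup device is
 * armed for the tick's next event. A non-zero return tells the caller to
 * fall back to the broadcast device.
 */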
static int tick_oneshot_wakeup_control(enum tick_broadcast_state state,
				       struct tick_device *td,
				       int cpu)
{
	struct clock_event_device *dev, *wd;

	dev = td->evtdev;
	if (td->mode != TICKDEV_MODE_ONESHOT)
		return -EINVAL;

	wd = tick_get_oneshot_wakeup_device(cpu);
	if (!wd)
		return -ENODEV;

	switch (state) {
	case TICK_BROADCAST_ENTER:
		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
		clockevents_switch_state(wd, CLOCK_EVT_STATE_ONESHOT);
		clockevents_program_event(wd, dev->next_event, 1);
		break;
	case TICK_BROADCAST_EXIT:
		/* We may have transitioned to oneshot mode while idle */
		if (clockevent_get_state(wd) != CLOCK_EVT_STATE_ONESHOT)
			return -ENODEV;
	}

	return 0;
}

int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	if (!tick_oneshot_wakeup_control(state, td, cpu))
		return 0;

	if (tick_broadcast_device.evtdev)
		return ___tick_broadcast_oneshot_control(state, td, cpu);

	/*
	 * If there is no broadcast or wakeup device, tell the caller not
	 * to go into deep idle.
	 */
	return -EBUSY;
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
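/*
 * Called during hotplug removal of @deadcpu. If the hrtimer based broadcast
 * device was bound to the dead CPU, reprogramming it from here moves the
 * broadcast assignment over to this CPU.
 */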
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dying CPU from broadcasting
 */
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
	if (tick_get_oneshot_wakeup_device(cpu))
		tick_set_oneshot_wakeup_device(NULL, cpu);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
}
#endif

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif
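
/*
 * Allocate the cpumasks at early boot. GFP_NOWAIT because allocations at
 * this point must not sleep.
 */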
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}