clockevents.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (WARN_ON(!evt->mult))
                evt->mult = 1;
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion is
         * not equal to latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1ULL << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}
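/*
 * Illustrative example (not part of the original code, numbers are assumed):
 * for a hypothetical 1 MHz device with shift = 32, mult is roughly
 * 2^32 / 1000 ~= 4294967. Converting latch = 1 tick then gives
 *
 *      clc  = (1 << 32) + (mult - 1)   ~= 4299262262
 *      clc / mult                      ~= 1001 ns
 *
 * i.e. about the expected 1000 ns per tick, rounded up so that converting
 * the result back to device ticks never programs less than one tick.
 */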
/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_switch_state(struct clock_event_device *dev,
                                      enum clock_event_state state)
{
        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* Transition with new state-specific callbacks */
        switch (state) {
        case CLOCK_EVT_STATE_DETACHED:
                /* The clockevent device is getting replaced. Shut it down. */
        case CLOCK_EVT_STATE_SHUTDOWN:
                if (dev->set_state_shutdown)
                        return dev->set_state_shutdown(dev);
                return 0;

        case CLOCK_EVT_STATE_PERIODIC:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
                        return -ENOSYS;
                if (dev->set_state_periodic)
                        return dev->set_state_periodic(dev);
                return 0;

        case CLOCK_EVT_STATE_ONESHOT:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return -ENOSYS;
                if (dev->set_state_oneshot)
                        return dev->set_state_oneshot(dev);
                return 0;

        case CLOCK_EVT_STATE_ONESHOT_STOPPED:
                /* Core internal bug */
                if (WARN_ONCE(!clockevent_state_oneshot(dev),
                              "Current state: %d\n",
                              clockevent_get_state(dev)))
                        return -EINVAL;

                if (dev->set_state_oneshot_stopped)
                        return dev->set_state_oneshot_stopped(dev);
                else
                        return -ENOSYS;

        default:
                return -ENOSYS;
        }
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev: device to modify
 * @state: new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
                              enum clock_event_state state)
{
        if (clockevent_get_state(dev) != state) {
                if (__clockevents_switch_state(dev, state))
                        return;

                clockevent_set_state(dev, state);

                /*
                 * A nsec2cyc multiplier of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (clockevent_state_oneshot(dev)) {
                        if (WARN_ON(!dev->mult))
                                dev->mult = 1;
                }
        }
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
        dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev: device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
        int ret = 0;

        if (dev->tick_resume)
                ret = dev->tick_resume(dev);

        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}
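/*
 * Illustrative note (not part of the original code): starting from a failed
 * min_delta_ns below 5 usec, successive calls raise the minimum by 50% each
 * time, e.g. 5000 -> 7500 -> 11250 -> 16875 -> ... nsec, until it is clamped
 * at MIN_DELTA_LIMIT (one jiffie, i.e. NSEC_PER_SEC / HZ, which is 4,000,000
 * nsec for an assumed HZ=250). Once programming fails even at the limit,
 * reprogramming gives up with -ETIME.
 */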
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (clockevent_state_shutdown(dev))
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta, if that fails as well get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta = 0;
        int i;

        for (i = 0; i < 10; i++) {
                delta += dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (clockevent_state_shutdown(dev))
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;
        }
        return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (WARN_ON_ONCE(expires < 0))
                return -ETIME;

        dev->next_event = expires;

        if (clockevent_state_shutdown(dev))
                return 0;

        /* We must be in ONESHOT state here */
        WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
                  clockevent_get_state(dev));

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
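/*
 * Illustrative example (not part of the original code, numbers are assumed):
 * with the hypothetical 1 MHz / shift = 32 device from above, an expiry
 * 500 usec in the future gives delta = 500000 nsec (already inside the
 * min/max bounds), and
 *
 *      clc = 500000 * 4294967 >> 32    ~= 500 device ticks
 *
 * which is the value handed to set_next_event().
 */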
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || !clockevent_state_detached(dev))
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (clockevent_state_detached(ced)) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        /* Initialize state to DETACHED */
        clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        if (dev->cpumask == cpu_all_mask) {
                WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
                     dev->name);
                dev->cpumask = cpu_possible_mask;
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
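/*
 * Illustrative example (not part of the original code, numbers are assumed):
 * for a hypothetical 32-bit timer running at 100 MHz, max_delta_ticks =
 * 0xffffffff gives sec = 4294967295 / 100000000 ~= 42, so mult/shift are
 * computed for a 42 second range; a 64-bit timer at the same frequency would
 * instead be clamped to the 600 second (10 minute) limit.
 */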
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
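/*
 * Illustrative usage sketch (not part of the original code; all identifiers
 * below are hypothetical driver names): a timer driver typically fills in a
 * struct clock_event_device and hands it to clockevents_config_and_register():
 *
 *      static struct clock_event_device my_timer_ce = {
 *              .name                   = "my-timer",
 *              .features               = CLOCK_EVT_FEAT_ONESHOT,
 *              .rating                 = 300,
 *              .set_next_event         = my_timer_set_next_event,
 *              .set_state_shutdown     = my_timer_shutdown,
 *      };
 *
 *      my_timer_ce.cpumask = cpumask_of(smp_processor_id());
 *      clockevents_config_and_register(&my_timer_ce, my_timer_rate, 2, 0xffffffff);
 *
 * where my_timer_rate is the counter frequency in Hz and 2/0xffffffff are the
 * minimum/maximum programmable ticks of the assumed hardware.
 */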
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (clockevent_state_oneshot(dev))
                return clockevents_program_event(dev, dev->next_event, false);

        if (clockevent_state_periodic(dev))
                return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(!clockevent_state_detached(new));
                clockevents_shutdown(new);
        }
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend && !clockevent_state_detached(dev))
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume && !clockevent_state_detached(dev))
                        dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/**
 * tick_offline_cpu - Take CPU out of the broadcast mechanism
 * @cpu: The outgoing CPU
 *
 * Called on the outgoing CPU after it took itself offline.
 */
void tick_offline_cpu(unsigned int cpu)
{
        raw_spin_lock(&clockevents_lock);
        tick_broadcast_offline(cpu);
        raw_spin_unlock(&clockevents_lock);
}
# endif

/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        tick_shutdown(cpu);
        /*
         * Unregister the clock event devices which were
         * released from the users in the notify chain.
         */
        list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                list_del(&dev->list);
        /*
         * Now check whether the CPU has left unused per cpu devices
         */
        list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                if (cpumask_test_cpu(cpu, dev->cpumask) &&
                    cpumask_weight(dev->cpumask) == 1 &&
                    !tick_is_broadcast_device(dev)) {
                        BUG_ON(!clockevent_state_detached(dev));
                        list_del(&dev->list);
                }
        }

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
static struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
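/*
 * Illustrative usage note (not part of the original code): with CONFIG_SYSFS
 * these attributes typically appear under /sys/devices/system/clockevents/,
 * one directory per CPU, e.g.
 *
 *      # cat /sys/devices/system/clockevents/clockevent0/current_device
 *      # echo <device name> > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The exact paths depend on how the driver core lays out the "clockevents"
 * subsystem on a given kernel.
 */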
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name      = "broadcast",
        .id             = 0,
        .bus            = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */