// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
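
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical init/cleanup lifecycle for an SRCU domain.  The names
 * my_srcu_domain, my_domain_init(), and my_domain_exit() are hypothetical;
 * init_srcu_struct(), srcu_barrier(), and cleanup_srcu_struct() are the
 * real SRCU APIs being demonstrated.
 */
#if 0	/* usage sketch only, not compiled */
static struct srcu_struct my_srcu_domain;

static int __init my_domain_init(void)
{
	/* Initialize the domain before any reader, updater, or call_srcu(). */
	return init_srcu_struct(&my_srcu_domain);
}

static void my_domain_exit(void)
{
	/* Wait for any callbacks queued via call_srcu() to be invoked... */
	srcu_barrier(&my_srcu_domain);
	/* ...then tear the domain down; cleanup_srcu_struct() WARNs otherwise. */
	cleanup_srcu_struct(&my_srcu_domain);
}
#endif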

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = ssp->srcu_lock_nesting[idx] - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
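
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a read-side critical section against a hypothetical SRCU-protected
 * pointer.  struct my_data, my_data_ptr, my_srcu_domain, and my_reader()
 * are made up; srcu_read_lock(), srcu_dereference(), and
 * srcu_read_unlock() are the real read-side APIs, and the unlock path
 * lands in __srcu_read_unlock() above.
 */
#if 0	/* usage sketch only, not compiled */
struct my_data {
	int value;
};
static struct my_data __rcu *my_data_ptr;
static struct srcu_struct my_srcu_domain;

static int my_reader(void)
{
	struct my_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu_domain);
	p = srcu_dereference(my_data_ptr, &my_srcu_domain);
	if (p)
		val = p->value;		/* SRCU readers are allowed to sleep here. */
	srcu_read_unlock(&my_srcu_domain, idx);
	return val;
}
#endif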

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned short cookie;

	cookie = get_state_synchronize_srcu(ssp);
	if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);
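
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * deferring a kfree() until all current SRCU readers are done, via
 * call_srcu().  struct my_data, my_srcu_domain, my_data_free_cb(), and
 * my_data_retire() are hypothetical, and the kfree() would need
 * <linux/slab.h>; call_srcu() itself is the real API being shown.
 */
#if 0	/* usage sketch only, not compiled */
struct my_data {
	struct rcu_head rh;
	int value;
};
static struct srcu_struct my_srcu_domain;

static void my_data_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_data, rh));
}

static void my_data_retire(struct my_data *p)
{
	/* Non-blocking: the free happens after a later grace period. */
	call_srcu(&my_srcu_domain, &p->rh, my_data_free_cb);
}
#endif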

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
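
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a blocking updater that publishes a new version of a hypothetical
 * SRCU-protected pointer and waits out pre-existing readers before freeing
 * the old version.  struct my_data, my_data_ptr, my_srcu_domain, and
 * my_data_replace() are made up; the caller is assumed to hold whatever
 * lock serializes updaters, and kfree() would need <linux/slab.h>.
 */
#if 0	/* usage sketch only, not compiled */
struct my_data {
	int value;
};
static struct my_data __rcu *my_data_ptr;
static struct srcu_struct my_srcu_domain;

static void my_data_replace(struct my_data *newp)
{
	struct my_data *oldp;

	oldp = rcu_dereference_protected(my_data_ptr, 1);	/* updater-side access */
	rcu_assign_pointer(my_data_ptr, newp);			/* publish new version */
	synchronize_srcu(&my_srcu_domain);			/* wait for old readers */
	kfree(oldp);						/* now safe to free */
}
#endif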

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret & USHRT_MAX;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
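
/*
 * Worked example (editorial addition, not part of the original file):
 * ->srcu_idx advances by one when a grace period starts and by one when
 * it ends, so a full grace period advances it by two and an odd value
 * means a grace period is in flight.  With ->srcu_idx == 4 (idle), the
 * cookie is (4 + 3) & ~0x1 = 6, the value ->srcu_idx will have once the
 * next grace period completes.  With ->srcu_idx == 5 (grace period in
 * progress), the cookie is (5 + 3) & ~0x1 = 8, deliberately skipping the
 * in-flight grace period, which might not have waited on this caller.
 */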

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that the poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret = get_state_synchronize_srcu(ssp);

	srcu_gp_start_if_needed(ssp);
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);

	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
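
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * using the polling interface to avoid blocking.  my_srcu_domain,
 * my_cookie, my_update_start(), and my_update_can_finish() are
 * hypothetical; start_poll_synchronize_srcu() and
 * poll_state_synchronize_srcu() are the real APIs being shown.
 */
#if 0	/* usage sketch only, not compiled */
static struct srcu_struct my_srcu_domain;
static unsigned long my_cookie;

static void my_update_start(void)
{
	/* Kick off a grace period and remember when it will have ended. */
	my_cookie = start_poll_synchronize_srcu(&my_srcu_domain);
}

static bool my_update_can_finish(void)
{
	/* Non-blocking check, e.g. from a periodic work item or timer. */
	return poll_state_synchronize_srcu(&my_srcu_domain, my_cookie);
}
#endif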

/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}