percpu-rwsem.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/errno.h>
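
/*
 * Set up a per-CPU rw_semaphore: allocate the per-CPU reader counters and
 * initialize the rcu_sync, writer rcuwait and waiter queue state. Returns
 * -ENOMEM if the per-CPU allocation fails. Normally reached through the
 * percpu_init_rwsem() wrapper, which supplies the lockdep class key.
 */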
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
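
/*
 * Tear down the rcu_sync state and release the per-CPU reader counters.
 * A semaphore whose allocation failed (read_count == NULL) is silently
 * ignored; see the XXX note in the body.
 */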
void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);
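
/*
 * Reader trylock: optimistically bump this CPU's read_count, then check
 * sem->block. Callers must have preemption disabled so that a failing
 * attempt decrements the counter on the same CPU that incremented it
 * (see the comment in the body).
 */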
static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */
	smp_mb(); /* A matches D */

	/*
	 * If !sem->block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}
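
/*
 * Writer trylock: only one writer can flip sem->block from 0 to 1, which
 * provides writer-writer exclusion. On success the atomic_xchg() implies
 * the full barrier that pairs with the reader's smp_mb() (D matches A,
 * see percpu_down_write()).
 */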
static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}
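
/*
 * Common trylock helper for the wait/wake paths; the reader variant needs
 * preemption disabled around the per-CPU counter update.
 */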
static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}
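
/*
 * Enqueue ourselves FIFO on sem->waiters unless a last-moment trylock
 * succeeds, then sleep until percpu_rwsem_wake_function() acquires the
 * lock on our behalf and signals it by clearing wq_entry.private.
 */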
static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(), if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
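
/*
 * Reader slow path behind percpu_down_read() and percpu_down_read_trylock(),
 * entered with preemption disabled after the fast path failed; for the
 * blocking case preemption is re-enabled around the sleep.
 */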
bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
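
/*
 * Sum a per-CPU counter over all possible CPUs. The sum is not taken
 * atomically; it is only meaningful here because, with sem->block set,
 * a zero sum is stable (see readers_active_check() below).
 */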
#define per_cpu_sum(var)					\
({								\
	typeof(var) __sum = 0;					\
	int cpu;						\
	compiletime_assert_atomic_type(__sum);			\
	for_each_possible_cpu(cpu)				\
		__sum += per_cpu(var, cpu);			\
	__sum;							\
})

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero. If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */
	smp_mb(); /* C matches B */

	return true;
}
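
/*
 * Usage sketch: percpu_down_write() blocks new readers and waits for the
 * existing ones to drain; the reader side pairs percpu_down_read() with
 * percpu_up_read() (fast-path helpers in include/linux/percpu-rwsem.h):
 *
 *	percpu_down_write(&sem);
 *	... exclusive section ...
 *	percpu_up_write(&sem);
 *
 *	percpu_down_read(&sem);
 *	... shared section ...
 *	percpu_up_read(&sem);
 */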
void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If they don't see our store of sem->block, then we are guaranteed to
	 * see their sem->read_count increment, and therefore will wait for
	 * them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem),
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal the writer is done, no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
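
/*
 * Deferred destruction: percpu_free_rwsem() can sleep (rcu_sync_dtor() may
 * wait for RCU), so percpu_rwsem_async_destroy() merely queues the semaphore
 * on destroy_list and the actual freeing happens later from workqueue
 * context.
 */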
static LIST_HEAD(destroy_list);
static DEFINE_SPINLOCK(destroy_list_lock);

static void destroy_list_workfn(struct work_struct *work)
{
	struct percpu_rw_semaphore_atomic *sem, *sem2;
	LIST_HEAD(to_destroy);

	spin_lock(&destroy_list_lock);
	list_splice_init(&destroy_list, &to_destroy);
	spin_unlock(&destroy_list_lock);

	if (list_empty(&to_destroy))
		return;

	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
		percpu_free_rwsem(&sem->rw_sem);
		kfree(sem);
	}
}

static DECLARE_WORK(destroy_list_work, destroy_list_workfn);

void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
{
	spin_lock(&destroy_list_lock);
	list_add_tail(&sem->destroy_list_entry, &destroy_list);
	spin_unlock(&destroy_list_lock);

	schedule_work(&destroy_list_work);
}
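
/*
 * Usage sketch for the deferred path (hypothetical caller, names are
 * illustrative only):
 *
 *	struct percpu_rw_semaphore_atomic *s;
 *
 *	s = kzalloc(sizeof(*s), GFP_KERNEL);
 *	if (!s || percpu_init_rwsem(&s->rw_sem))
 *		goto fail;
 *	...
 *	percpu_rwsem_async_destroy(s);
 *
 * The final call is safe even in contexts where percpu_free_rwsem() would
 * not be, since the actual teardown runs from destroy_list_workfn().
 */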