percpu-refcount.c

// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't
 * try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, because when we go to
 * shutdown, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * percpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
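
/*
 * Illustrative sketch of the usual lifecycle, using a hypothetical embedding
 * object ("struct foo" and its helpers are made up for illustration): the
 * user embeds a percpu_ref, keeps the initial reference from
 * percpu_ref_init(), and drops it with percpu_ref_kill(); ->release() then
 * runs once every other reference has been put.
 */
struct foo {
	struct percpu_ref	ref;
	/* ... object payload ... */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, ref);

	/* the last reference is gone; may run from RCU callback, must not sleep */
	percpu_ref_exit(&foo->ref);
	kfree(foo);
}

static __maybe_unused struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;	/* caller holds the initial reference */
}

static __maybe_unused void foo_use_then_destroy(struct foo *foo)
{
	percpu_ref_get(&foo->ref);	/* cheap percpu increment */
	/* ... use foo ... */
	percpu_ref_put(&foo->ref);	/* cheap percpu decrement */

	/* drop the initial reference; foo_release() runs when all users are done */
	percpu_ref_kill(&foo->ref);
}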

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
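
/*
 * Return the percpu counter pointer with the low mode/state flag bits
 * masked off, or NULL once __percpu_ref_exit() has freed the percpu
 * counters.
 */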
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
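
/*
 * Free the percpu counters, if any, and mark @ref as exited by leaving only
 * __PERCPU_REF_ATOMIC_DEAD in ->percpu_count_ptr, so percpu_count_ptr()
 * returns NULL from here on.
 */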
static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
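	/*
	 * Stash the final atomic count in the otherwise unused flag bits of
	 * ->percpu_count_ptr so percpu_ref_is_zero() keeps working after
	 * ->data is torn down below.
	 */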
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
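
/*
 * Runs from RCU callback context once the switch to atomic mode is visible
 * everywhere: invoke and clear ->confirm_switch, wake up waiters, free the
 * percpu counters if re-init is not allowed, and drop the reference taken
 * when the switch was scheduled.
 */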
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}
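
/*
 * RCU callback for the percpu -> atomic switch: after the grace period all
 * CPUs operate on the atomic counter, so fold the percpu counts into it,
 * remove the bias, and hand off to percpu_ref_call_confirm_rcu().
 */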
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	WARN_ONCE(atomic_long_read(&data->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  data->release, atomic_long_read(&data->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}
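
/*
 * Begin switching @ref to atomic mode. An extra reference keeps @ref alive
 * until the RCU callback confirms the switch; a non-NULL ->confirm_switch
 * doubles as the "switch in progress" marker.
 */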
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}
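
/*
 * Switch @ref back to percpu mode: re-add the bias, zero the percpu
 * counters, then clear __PERCPU_REF_ATOMIC with release semantics so that
 * percpu users never see stale counter values. A no-op if @ref is already
 * in percpu mode or may not be reinitialized.
 */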
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
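
/*
 * Switch @ref to the mode implied by its current state: atomic if atomic
 * mode was requested or @ref has been killed, percpu otherwise. Must be
 * called with percpu_ref_switch_lock held; may drop the lock and sleep
 * while an earlier atomic switch completes.
 */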
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
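
/*
 * Illustrative sketch of the confirm_kill pattern (the "foo_device" type and
 * helpers are hypothetical and only assume the API above): the confirmation
 * callback flags that the kill has been observed on all CPUs and wakes the
 * shutdown path, which waits for that before proceeding. foo->refs and
 * foo->refs_deadwq are assumed to have been set up elsewhere with
 * percpu_ref_init() and init_waitqueue_head().
 */
struct foo_device {
	struct percpu_ref	refs;
	bool			refs_dead;
	wait_queue_head_t	refs_deadwq;
};

static void foo_refs_dead(struct percpu_ref *ref)
{
	struct foo_device *foo = container_of(ref, struct foo_device, refs);

	/* from here on, no CPU can succeed in percpu_ref_tryget_live(&foo->refs) */
	foo->refs_dead = true;
	wake_up(&foo->refs_deadwq);
}

static __maybe_unused void foo_shutdown(struct foo_device *foo)
{
	/* drop the initial reference and schedule the kill confirmation */
	percpu_ref_kill_and_confirm(&foo->refs, foo_refs_dead);

	/* wait until the kill is visible on all CPUs */
	wait_event(foo->refs_deadwq, foo->refs_dead);

	/* ->release() will run once all in-flight references are put */
}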

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);