/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                  INT_MAX   REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                    <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *        int old = atomic_fetch_add_relaxed(1, &r->refs);
 *        // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *        if (old < 0)
 *                atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 *
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *        (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *        0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
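
/*
 * A minimal illustrative sketch, assuming a hypothetical object type
 * 'struct foo' with an embedded refcount_t, mapping the ordering rules above
 * onto the usual put path:
 *
 *        struct foo {
 *                refcount_t ref;
 *        };
 *
 *        // Dropping the last reference: release ordering guarantees that all
 *        // earlier accesses to *f are complete before the count can reach
 *        // zero, and the acquire ordering on the 1->0 transition keeps
 *        // kfree() from being hoisted above the decrement.
 *        void foo_put(struct foo *f)
 *        {
 *                if (refcount_dec_and_test(&f->ref))
 *                        kfree(f);
 *        }
 *
 * Increments, by contrast, are relaxed: whatever made 'f' visible to the
 * caller (a lock acquire, an RCU dependent load) already provides the
 * ordering needed before refcount_inc(&f->ref).
 */
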
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
        atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)        { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX            INT_MAX
#define REFCOUNT_SATURATED      (INT_MIN / 2)

enum refcount_saturation_type {
        REFCOUNT_ADD_NOT_ZERO_OVF,
        REFCOUNT_ADD_OVF,
        REFCOUNT_ADD_UAF,
        REFCOUNT_SUB_UAF,
        REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
        atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
        return atomic_read(&r->refs);
}
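
/*
 * A minimal illustrative sketch, assuming a hypothetical 'struct foo' and
 * allocation path: a refcount is typically initialised to 1, either
 * statically with REFCOUNT_INIT() or at object-creation time with
 * refcount_set():
 *
 *        static struct foo default_foo = {
 *                .ref = REFCOUNT_INIT(1),
 *        };
 *
 *        struct foo *foo_alloc(void)
 *        {
 *                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *                if (f)
 *                        refcount_set(&f->ref, 1);  // creator holds the first reference
 *                return f;
 *        }
 */
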
static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
        int old = refcount_read(r);

        do {
                if (!old)
                        break;
        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

        if (oldp)
                *oldp = old;

        if (unlikely(old < 0 || old + i < 0))
                refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

        return old;
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
        return __refcount_add_not_zero(i, r, NULL);
}
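
/*
 * A minimal illustrative sketch, assuming a hypothetical 'struct foo': a
 * batched get that takes one reference per prospective user in a single
 * operation, and takes none at all if the object is already being torn down:
 *
 *        bool foo_get_many(struct foo *f, int nr_users)
 *        {
 *                return refcount_add_not_zero(nr_users, &f->ref);
 *        }
 */
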
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
        int old = atomic_fetch_add_relaxed(i, &r->refs);

        if (oldp)
                *oldp = old;

        if (unlikely(!old))
                refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
        else if (unlikely(old < 0 || old + i < 0))
                refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
        __refcount_add(i, r, NULL);
}

static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
        return __refcount_add_not_zero(1, r, oldp);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
        return __refcount_inc_not_zero(r, NULL);
}
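
/*
 * A minimal illustrative sketch of the lockless-lookup pattern this is meant
 * for, assuming a hypothetical 'struct foo' and an RCU-protected lookup
 * helper foo_lookup_rcu(): RCU keeps the memory stable while
 * refcount_inc_not_zero() refuses to resurrect an object whose last
 * reference has already been dropped:
 *
 *        struct foo *foo_find_get(unsigned long key)
 *        {
 *                struct foo *f;
 *
 *                rcu_read_lock();
 *                f = foo_lookup_rcu(key);
 *                if (f && !refcount_inc_not_zero(&f->ref))
 *                        f = NULL;        // lost the race against the final put
 *                rcu_read_unlock();
 *
 *                return f;
 *        }
 */
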
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
        __refcount_add(1, r, oldp);
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering, it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
        __refcount_inc(r, NULL);
}
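
/*
 * A minimal illustrative sketch, assuming a hypothetical 'struct foo': the
 * caller already holds a reference (so the count cannot be 0) and hands out
 * an additional one, e.g. when publishing the object to a second owner:
 *
 *        struct foo *foo_get(struct foo *f)
 *        {
 *                refcount_inc(&f->ref);
 *                return f;
 *        }
 */
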
static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
        int old = atomic_fetch_sub_release(i, &r->refs);

        if (oldp)
                *oldp = old;

        if (old == i) {
                smp_acquire__after_ctrl_dep();
                return true;
        }

        if (unlikely(old < 0 || old - i < 0))
                refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

        return false;
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
        return __refcount_sub_and_test(i, r, NULL);
}
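
/*
 * A minimal illustrative sketch, assuming a hypothetical 'struct foo': the
 * counterpart to a batched get, dropping several references at once and
 * freeing the object if that was the last of them:
 *
 *        void foo_put_many(struct foo *f, int nr_users)
 *        {
 *                if (refcount_sub_and_test(nr_users, &f->ref))
 *                        kfree(f);
 *        }
 */
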
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
        return __refcount_sub_and_test(1, r, oldp);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
        return __refcount_dec_and_test(r, NULL);
}

static inline void __refcount_dec(refcount_t *r, int *oldp)
{
        int old = atomic_fetch_sub_release(1, &r->refs);

        if (oldp)
                *oldp = old;

        if (unlikely(old <= 1))
                refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
        __refcount_dec(r, NULL);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
                                                       spinlock_t *lock,
                                                       unsigned long *flags);
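
/*
 * A minimal illustrative sketch, assuming a hypothetical 'struct foo' that
 * sits on a list protected by a global spinlock foo_list_lock:
 * refcount_dec_and_lock() takes the lock only when the count drops to zero,
 * so the final put can unlink the object before freeing it while the fast
 * path never touches the lock:
 *
 *        void foo_release(struct foo *f)
 *        {
 *                if (refcount_dec_and_lock(&f->ref, &foo_list_lock)) {
 *                        list_del(&f->node);
 *                        spin_unlock(&foo_list_lock);
 *                        kfree(f);
 *                }
 *        }
 */
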
#endif /* _LINUX_REFCOUNT_H */