// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
	refcount_set(r, REFCOUNT_SATURATED);

	switch (t) {
	case REFCOUNT_ADD_NOT_ZERO_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_UAF:
		REFCOUNT_WARN("addition on 0; use-after-free");
		break;
	case REFCOUNT_SUB_UAF:
		REFCOUNT_WARN("underflow; use-after-free");
		break;
	case REFCOUNT_DEC_LEAK:
		REFCOUNT_WARN("decrement hit 0; leaking memory");
		break;
	default:
		REFCOUNT_WARN("unknown saturation event!?");
	}
}
EXPORT_SYMBOL(refcount_warn_saturate);
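
/*
 * Illustrative sketch, not part of the original file: one way a caller can
 * reach the REFCOUNT_ADD_UAF case above. Incrementing a refcount that has
 * already dropped to 0 means the object may already be freed, so the
 * increment saturates the counter and warns instead of resurrecting it:
 *
 *	refcount_t ref;
 *
 *	refcount_set(&ref, 1);
 *	if (refcount_dec_and_test(&ref))
 *		;			// last reference: owner would free here
 *	refcount_inc(&ref);		// WARNs "addition on 0; use-after-free"
 */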

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg helper, because the latter would allow
 * implementing unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
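
/*
 * Usage sketch (illustrative only; 'struct foo' and foo_try_delete() are
 * hypothetical): refcount_dec_if_one() as a try-delete operator. The delete
 * only proceeds when the caller holds the sole reference, so no other CPU
 * can observe the object between the 1 -> 0 transition and the free:
 *
 *	static bool foo_try_delete(struct foo *f)
 *	{
 *		if (!refcount_dec_if_one(&f->ref))
 *			return false;	// other references remain; do nothing
 *		kfree(f);		// we held the last reference
 *		return true;
 *	}
 */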

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
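
/*
 * Usage sketch (illustrative, not from the original file): callers rarely
 * use refcount_dec_not_one() directly; it is the lockless fast path of the
 * dec-and-lock helpers below, which only take a lock when the count might
 * drop to 0:
 *
 *	if (refcount_dec_not_one(&obj->ref))
 *		return;		// fast path: count was > 1 (or saturated)
 *	// slow path: count is 1, serialize the final 1 -> 0 drop under a lock
 */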

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top of include/linux/refcount.h.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
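
/*
 * Usage sketch (illustrative; 'struct foo', foo_lock and foo_put() are
 * hypothetical): the classic dec-and-lock pattern, where the final put must
 * hold the same mutex that lookups take, so a concurrent lookup can never
 * find an object whose refcount has already hit 0:
 *
 *	static DEFINE_MUTEX(foo_lock);
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->ref, &foo_lock)) {
 *			list_del(&f->node);	// unreachable for new lookups
 *			mutex_unlock(&foo_lock);
 *			kfree(f);
 *		}
 *	}
 */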

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top of include/linux/refcount.h.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
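
/*
 * Usage sketch (illustrative; 'struct bar' and its fields are hypothetical):
 * the same shape as the mutex variant above, but with a spinlock, for put
 * paths that cannot sleep:
 *
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	static void bar_put(struct bar *b)
 *	{
 *		if (refcount_dec_and_lock(&b->ref, &bar_lock)) {
 *			hlist_del(&b->hash_node);
 *			spin_unlock(&bar_lock);
 *			kfree(b);
 *		}
 *	}
 */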

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
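
/*
 * Usage sketch (illustrative; 'struct baz' and baz_lock are hypothetical):
 * the irqsave variant is for locks that are also taken from IRQ context;
 * the final put disables interrupts while holding the lock and restores the
 * saved flags afterwards:
 *
 *	static DEFINE_SPINLOCK(baz_lock);
 *
 *	static void baz_put(struct baz *b)
 *	{
 *		unsigned long flags;
 *
 *		if (refcount_dec_and_lock_irqsave(&b->ref, &baz_lock, &flags)) {
 *			list_del(&b->node);
 *			spin_unlock_irqrestore(&baz_lock, flags);
 *			kfree(b);
 *		}
 *	}
 */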