lockref.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
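
/*
 * Illustrative sketch only: the real lockless fast path lives in
 * lib/lockref.c, and helper names such as try_cmpxchg64_relaxed()
 * vary between kernel versions.  An increment in the style the
 * comment above describes looks roughly like this:
 *
 *	struct lockref old;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		struct lockref new = old;
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count,
 *					  new.lock_count))
 *			return;
 *	}
 *	spin_lock(&lockref->lock);
 *	lockref->count++;
 *	spin_unlock(&lockref->lock);
 *
 * On cmpxchg failure try_cmpxchg64_relaxed() refreshes 'old', so the
 * loop re-checks the lock word before retrying; once the embedded
 * spinlock reads as locked, the code falls back to really taking it.
 */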
#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
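
/*
 * There is no initializer helper in this header; users set the two
 * fields up directly.  A sketch of typical setup ('ref' is just an
 * example variable, mirroring what fs/dcache.c does for
 * dentry->d_lockref):
 *
 *	struct lockref ref;
 *
 *	spin_lock_init(&ref.lock);
 *	ref.count = 1;
 */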
extern void lockref_get(struct lockref *);		/* unconditional increment */
extern int lockref_put_return(struct lockref *);	/* new count, or -1 if dead or locked */
extern int lockref_get_not_zero(struct lockref *);	/* 1 on success, 0 if count was zero or dead */
extern int lockref_put_not_zero(struct lockref *);	/* 1 on success, 0 if count would reach zero */
extern int lockref_get_or_lock(struct lockref *);	/* 1 on success, 0 with lock held otherwise */
extern int lockref_put_or_lock(struct lockref *);	/* 1 on success, 0 with lock held otherwise */

extern void lockref_mark_dead(struct lockref *);	/* caller must hold the lock */
extern int lockref_get_not_dead(struct lockref *);	/* 1 on success, 0 if dead */
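
/*
 * Typical call pattern for dropping a reference, sketched after the
 * way dput() has used this API ('obj' and its 'ref' member are made
 * up for the example):
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;
 *	... final teardown of obj, with obj->ref.lock held ...
 *	spin_unlock(&obj->ref.lock);
 *
 * A return of 0 means the count was <= 1 (or the lockref was dead),
 * nothing was decremented, and the spinlock is now held, leaving the
 * caller to do the last-reference work under the lock.
 */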
/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
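
/*
 * Sketch of the "dead" protocol: marking must happen with the lock
 * held (lockref_mark_dead() asserts this), after which concurrent
 * lockref_get_not_dead() callers fail and back off.  'obj' is again
 * a made-up example:
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *
 * In the dcache this is how a dentry being killed stops new lockless
 * references from being taken.
 */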
#endif /* __LINUX_LOCKREF_H */