atomic64.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS 16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
        raw_spinlock_t lock;
        char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
        [0 ... (NR_LOCKS - 1)] = {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
        },
};

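/*
 * Map an atomic64_t's address to one of the NR_LOCKS spinlocks above.
 * The address is hashed at cacheline granularity, folding in higher
 * address bits, so that variables in different cachelines tend to
 * spread across different locks.
 */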
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long) v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

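/*
 * Note that even a plain read takes the lock: on the 32-bit machines
 * this file targets, a 64-bit load is not a single atomic access, so
 * the lock is what prevents a reader from seeing a half-updated
 * counter.
 */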
s64 atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);

        raw_spin_lock_irqsave(lock, flags);
        v->counter = i;
        raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

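/*
 * The macros below stamp out the arithmetic and bitwise operations:
 * ATOMIC64_OP() generates the void atomic64_<op>() form,
 * ATOMIC64_OP_RETURN() the variant returning the new value, and
 * ATOMIC64_FETCH_OP() the variant returning the value prior to the
 * operation.  All of them perform the update under the hashed lock.
 */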
#define ATOMIC64_OP(op, c_op) \
void atomic64_##op(s64 a, atomic64_t *v) \
{ \
        unsigned long flags; \
        raw_spinlock_t *lock = lock_addr(v); \
 \
        raw_spin_lock_irqsave(lock, flags); \
        v->counter c_op a; \
        raw_spin_unlock_irqrestore(lock, flags); \
} \
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op) \
s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
        unsigned long flags; \
        raw_spinlock_t *lock = lock_addr(v); \
        s64 val; \
 \
        raw_spin_lock_irqsave(lock, flags); \
        val = (v->counter c_op a); \
        raw_spin_unlock_irqrestore(lock, flags); \
        return val; \
} \
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op) \
s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
        unsigned long flags; \
        raw_spinlock_t *lock = lock_addr(v); \
        s64 val; \
 \
        raw_spin_lock_irqsave(lock, flags); \
        val = v->counter; \
        v->counter c_op a; \
        raw_spin_unlock_irqrestore(lock, flags); \
        return val; \
} \
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op) \
        ATOMIC64_OP(op, c_op) \
        ATOMIC64_OP_RETURN(op, c_op) \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
        ATOMIC64_OP(op, c_op) \
        ATOMIC64_OP_RETURN(op, c_op) \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

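/*
 * Decrement @v only if the result stays non-negative.  The (possibly
 * unstored) decremented value is returned either way, so a negative
 * return value tells the caller that no change was made.
 */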
s64 atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter - 1;
        if (val >= 0)
                v->counter = val;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

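/*
 * Compare-and-exchange: store @n only if the counter still holds @o.
 * The value observed under the lock is returned, so the caller can
 * check whether the exchange took place by comparing it against @o.
 */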
s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val == o)
                v->counter = n;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

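/*
 * Unconditionally replace the counter with @new and return the value
 * it held beforehand.
 */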
s64 atomic64_xchg(atomic64_t *v, s64 new)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        v->counter = new;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_xchg);

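/*
 * Add @a to @v unless the counter currently equals @u; the old value
 * is returned in either case.
 */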
s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        s64 val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val != u)
                v->counter += a;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);
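
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical 64-bit statistics counter built on the operations
 * implemented above.  The names stats_bytes, stats_account() and
 * stats_drain() are made up for the example; the atomic64_* calls and
 * ATOMIC64_INIT() are the generic API this file backs.
 *
 *        static atomic64_t stats_bytes = ATOMIC64_INIT(0);
 *
 *        static void stats_account(u32 len)
 *        {
 *                atomic64_add(len, &stats_bytes);
 *        }
 *
 *        static s64 stats_drain(void)
 *        {
 *                return atomic64_xchg(&stats_bytes, 0);
 *        }
 */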