atomic32.c

// SPDX-License-Identifier: GPL-2.0
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
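
/*
 * Background (annotation, not in the original source): this 32-bit
 * port has no usable compare-and-swap instruction, so atomic_t and
 * the cmpxchg()/xchg() family are emulated with spinlocks. On SMP,
 * a small hash of locks keyed by the operand's address keeps
 * unrelated atomics from all contending on one global lock.
 */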

#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)	(&dummy)

#endif /* SMP */
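
/*
 * Note (annotation): ATOMIC_FETCH_OP() generates atomic_fetch_add()
 * and friends, which return the value the counter held *before* the
 * operation; ATOMIC_OP_RETURN() generates atomic_add_return(), which
 * returns the value *after*. Both take the per-address hash lock with
 * interrupts disabled, so the read-modify-write is atomic with respect
 * to other CPUs and to interrupt handlers on this CPU.
 */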

#define ATOMIC_FETCH_OP(op, c_op) \
int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int ret; \
	unsigned long flags; \
	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
	ret = v->counter; \
	v->counter c_op i; \
 \
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
	return ret; \
} \
EXPORT_SYMBOL(atomic_fetch_##op);

#define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \
{ \
	int ret; \
	unsigned long flags; \
	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 \
	ret = (v->counter c_op i); \
 \
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
	return ret; \
} \
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
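
/*
 * Illustrative usage (annotation; 'refs' is a hypothetical caller's
 * variable, not part of this file):
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *	int old = atomic_fetch_add(1, &refs);	// old == 1, refs is now 2
 *	int now = atomic_add_return(1, &refs);	// now == 3
 */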

int atomic_xchg(atomic_t *v, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_xchg);
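
/*
 * Note (annotation): atomic_cmpxchg() returns the value it observed,
 * not a success flag. A typical caller retries until the observed
 * value matches the one its update was computed from:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */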

int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);
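
/*
 * Note (annotation): atomic_fetch_add_unless() adds 'a' only when the
 * counter does not currently hold 'u', and returns the old value
 * either way. The generic atomic headers build atomic_add_unless()
 * and atomic_inc_not_zero() on top of this primitive.
 */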

int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_fetch_add_unless);
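
/*
 * Note (annotation): even the plain store in atomic_set() needs the
 * hash lock. Every RMW above is a lock-protected read-then-write; a
 * bare v->counter = i landing between those two halves would be
 * silently overwritten when the RMW completes.
 */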

/* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter = i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);
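
/*
 * Note (annotation): the ___set_bit()/___clear_bit()/___change_bit()
 * helpers below back this architecture's atomic bitops
 * (test_and_set_bit() and friends). Each returns (old & mask):
 * nonzero iff the targeted bit was already set before the operation.
 */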

unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long old, flags;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	old = *addr;
	*addr = old ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return old & mask;
}
EXPORT_SYMBOL(___change_bit);
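
/*
 * Note (annotation): __cmpxchg_u32(), __cmpxchg_u64() and __xchg_u32()
 * below are the out-of-line bodies behind this architecture's generic
 * cmpxchg()/cmpxchg64()/xchg() macros. Like everything else in this
 * file, they are atomic only against other hash-lock users, not
 * against plain stores from code that bypasses these helpers.
 */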

unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
	unsigned long flags;
	u32 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	prev = *ptr;
	*ptr = new;
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

	return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);
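
/*
 * Closing note (annotation): every helper in this file disables
 * interrupts while holding the hash lock, so the emulated operations
 * are safe against both other CPUs and interrupt handlers on the
 * local CPU. On !CONFIG_SMP builds, all addresses map to the single
 * 'dummy' lock, which then only serves to mask interrupts.
 */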