riscv_atomic.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/sbi_types.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h>
#include <sbi/sbi_bits.h>
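
/*
 * Plain accessors for atomic_t: atomic_read() issues a read barrier
 * after loading the counter and atomic_write() issues a write barrier
 * after storing it, using ordinary loads/stores rather than AMO
 * instructions.
 */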
long atomic_read(atomic_t *atom)
{
        long ret = atom->counter;
        rmb();
        return ret;
}

void atomic_write(atomic_t *atom, long value)
{
        atom->counter = value;
        wmb();
}
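
/*
 * atomic_add_return()/atomic_sub_return() use a single 32-bit amoadd
 * with acquire+release ordering. The AMO yields the old counter value,
 * so the new value is reconstructed by adding (or subtracting) `value`
 * before returning; subtraction is an add of the negated operand.
 */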
long atomic_add_return(atomic_t *atom, long value)
{
        long ret;

        __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
                             : "+A"(atom->counter), "=r"(ret)
                             : "r"(value)
                             : "memory");

        return ret + value;
}

long atomic_sub_return(atomic_t *atom, long value)
{
        long ret;

        __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
                             : "+A"(atom->counter), "=r"(ret)
                             : "r"(-value)
                             : "memory");

        return ret - value;
}
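
/*
 * __xchg() is an unconditional exchange built from an LR/SC retry
 * loop: lr.w/lr.d loads the old value, sc.w.rl/sc.d.rl attempts to
 * store the new one, and the loop retries until the store-conditional
 * succeeds. The .rl suffix plus the trailing "fence rw, rw" make the
 * operation fully ordered. The "rJ" constraint with the %z operand
 * modifier lets the compiler substitute x0 when the new value is the
 * constant zero. Only 4- and 8-byte objects are supported.
 */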
#define __xchg(ptr, new, size) \
        ({ \
                __typeof__(ptr) __ptr = (ptr); \
                __typeof__(*(ptr)) __new = (new); \
                __typeof__(*(ptr)) __ret; \
                register unsigned int __rc; \
                switch (size) { \
                case 4: \
                        __asm__ __volatile__("0: lr.w %0, %2\n" \
                                             " sc.w.rl %1, %z3, %2\n" \
                                             " bnez %1, 0b\n" \
                                             " fence rw, rw\n" \
                                             : "=&r"(__ret), "=&r"(__rc), \
                                               "+A"(*__ptr) \
                                             : "rJ"(__new) \
                                             : "memory"); \
                        break; \
                case 8: \
                        __asm__ __volatile__("0: lr.d %0, %2\n" \
                                             " sc.d.rl %1, %z3, %2\n" \
                                             " bnez %1, 0b\n" \
                                             " fence rw, rw\n" \
                                             : "=&r"(__ret), "=&r"(__rc), \
                                               "+A"(*__ptr) \
                                             : "rJ"(__new) \
                                             : "memory"); \
                        break; \
                default: \
                        break; \
                } \
                __ret; \
        })

#define xchg(ptr, n) \
        ({ \
                __typeof__(*(ptr)) _n_ = (n); \
                (__typeof__(*(ptr))) __xchg((ptr), _n_, sizeof(*(ptr))); \
        })
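
/*
 * __cmpxchg() is a compare-and-swap based on the same LR/SC pattern:
 * the loaded value is compared against `old` and the store-conditional
 * is only attempted on a match, so the macro always evaluates to the
 * value that was observed in memory. A caller detects success by
 * checking whether that value equals `old`.
 */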
#define __cmpxchg(ptr, old, new, size) \
        ({ \
                __typeof__(ptr) __ptr = (ptr); \
                __typeof__(*(ptr)) __old = (old); \
                __typeof__(*(ptr)) __new = (new); \
                __typeof__(*(ptr)) __ret; \
                register unsigned int __rc; \
                switch (size) { \
                case 4: \
                        __asm__ __volatile__("0: lr.w %0, %2\n" \
                                             " bne %0, %z3, 1f\n" \
                                             " sc.w.rl %1, %z4, %2\n" \
                                             " bnez %1, 0b\n" \
                                             " fence rw, rw\n" \
                                             "1:\n" \
                                             : "=&r"(__ret), "=&r"(__rc), \
                                               "+A"(*__ptr) \
                                             : "rJ"(__old), "rJ"(__new) \
                                             : "memory"); \
                        break; \
                case 8: \
                        __asm__ __volatile__("0: lr.d %0, %2\n" \
                                             " bne %0, %z3, 1f\n" \
                                             " sc.d.rl %1, %z4, %2\n" \
                                             " bnez %1, 0b\n" \
                                             " fence rw, rw\n" \
                                             "1:\n" \
                                             : "=&r"(__ret), "=&r"(__rc), \
                                               "+A"(*__ptr) \
                                             : "rJ"(__old), "rJ"(__new) \
                                             : "memory"); \
                        break; \
                default: \
                        break; \
                } \
                __ret; \
        })

#define cmpxchg(ptr, o, n) \
        ({ \
                __typeof__(*(ptr)) _o_ = (o); \
                __typeof__(*(ptr)) _n_ = (n); \
                (__typeof__(*(ptr))) \
                        __cmpxchg((ptr), _o_, _n_, sizeof(*(ptr))); \
        })
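
/*
 * arch_atomic_cmpxchg() returns the previous counter value. When the
 * toolchain advertises the A extension (__riscv_atomic), the GCC
 * __sync built-in is used; otherwise the LR/SC based cmpxchg() macro
 * above serves as the fallback.
 */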
long arch_atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
{
#ifdef __riscv_atomic
        return __sync_val_compare_and_swap(&atom->counter, oldval, newval);
#else
        return cmpxchg(&atom->counter, oldval, newval);
#endif
}

long arch_atomic_xchg(atomic_t *atom, long newval)
{
        /* Atomically set the new value and return the old value. */
#ifdef __riscv_atomic
        /*
         * The name of the GCC built-in __sync_lock_test_and_set() is
         * misleading; a more descriptive name would be
         * __sync_val_exchange(), since it stores newval and returns
         * the previous value.
         */
        return __sync_lock_test_and_set(&atom->counter, newval);
#else
        return xchg(&atom->counter, newval);
#endif
}

unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
                                  unsigned int newval)
{
        /* Atomically set the new value and return the old value. */
#ifdef __riscv_atomic
        /*
         * The name of the GCC built-in __sync_lock_test_and_set() is
         * misleading; a more descriptive name would be
         * __sync_val_exchange().
         */
        return __sync_lock_test_and_set(ptr, newval);
#else
        return xchg(ptr, newval);
#endif
}
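
/*
 * __AMO() expands to the word (.w) or doubleword (.d) form of an AMO
 * mnemonic based on the native word size, so the bit helpers below
 * always operate on whole unsigned longs.
 */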
#if (BITS_PER_LONG == 64)
#define __AMO(op) "amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op) "amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif
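
/*
 * __atomic_op_bit_ord() applies an AMO (e.g. amoor/amoand) to the
 * unsigned long containing bit `nr`: BIT_WORD(nr) selects the word in
 * the array and BIT_MASK(nr) builds the single-bit mask, which the
 * `mod` argument passes through (__NOP) or inverts (__NOT). The
 * expression evaluates to the previous value of the affected word.
 */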
#define __atomic_op_bit_ord(op, mod, nr, addr, ord) \
        ({ \
                unsigned long __res, __mask; \
                __mask = BIT_MASK(nr); \
                __asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
                                     : "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
                                     : "r"(mod(__mask)) \
                                     : "memory"); \
                __res; \
        })

#define __atomic_op_bit(op, mod, nr, addr) \
        __atomic_op_bit_ord(op, mod, nr, addr, .aqrl)

/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))
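
/*
 * The wrappers below atomically set or clear bit `nr` and return the
 * previous value of the word that holds it; most callers ignore the
 * return value. The atomic_t variants reinterpret the counter as an
 * unsigned long bitmap.
 */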
inline int atomic_raw_set_bit(int nr, volatile unsigned long *addr)
{
        return __atomic_op_bit(or, __NOP, nr, addr);
}

inline int atomic_raw_clear_bit(int nr, volatile unsigned long *addr)
{
        return __atomic_op_bit(and, __NOT, nr, addr);
}

inline int atomic_set_bit(int nr, atomic_t *atom)
{
        return atomic_raw_set_bit(nr, (unsigned long *)&atom->counter);
}

inline int atomic_clear_bit(int nr, atomic_t *atom)
{
        return atomic_raw_clear_bit(nr, (unsigned long *)&atom->counter);
}
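
/*
 * Illustrative usage sketch: the helper names below are hypothetical
 * and not part of the API above. atomic_raw_xchg_uint() returns the
 * previous value, so exchanging in 1 acquires a simple flag lock only
 * when the old value was 0.
 */
static inline void example_flag_lock(volatile unsigned int *flag)
{
        /* Spin until the previous value was 0, i.e. the flag was free. */
        while (atomic_raw_xchg_uint(flag, 1))
                ;
}

static inline void example_flag_unlock(volatile unsigned int *flag)
{
        /* Exchange back to 0 to release the flag. */
        atomic_raw_xchg_uint(flag, 0);
}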