riscv_atomic.c

/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Anup Patel <anup.patel@wdc.com>
 */

#include <sbi/sbi_bitops.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h>
long atomic_read(atomic_t *atom)
{
	long ret = atom->counter;
	rmb();
	return ret;
}

void atomic_write(atomic_t *atom, long value)
{
	atom->counter = value;
	wmb();
}
long atomic_add_return(atomic_t *atom, long value)
{
	long ret;
#if __SIZEOF_LONG__ == 4
	__asm__ __volatile__("	amoadd.w.aqrl %1, %2, %0"
			     : "+A"(atom->counter), "=r"(ret)
			     : "r"(value)
			     : "memory");
#elif __SIZEOF_LONG__ == 8
	__asm__ __volatile__("	amoadd.d.aqrl %1, %2, %0"
			     : "+A"(atom->counter), "=r"(ret)
			     : "r"(value)
			     : "memory");
#endif
	return ret + value;
}

long atomic_sub_return(atomic_t *atom, long value)
{
	return atomic_add_return(atom, -value);
}
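
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * shared reference count built on the add/sub return primitives. The
 * names "refcnt" and "release_resource" are assumptions of the sketch.
 *
 *	static atomic_t refcnt = { .counter = 1 };
 *
 *	void get(void)
 *	{
 *		atomic_add_return(&refcnt, 1);
 *	}
 *
 *	void put(void)
 *	{
 *		if (atomic_sub_return(&refcnt, 1) == 0)
 *			release_resource();
 *	}
 */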
#define __axchg(ptr, new, size) \
	({ \
		__typeof__(ptr) __ptr = (ptr); \
		__typeof__(new) __new = (new); \
		__typeof__(*(ptr)) __ret; \
		switch (size) { \
		case 4: \
			__asm__ __volatile__ ( \
				"	amoswap.w.aqrl %0, %2, %1\n" \
				: "=r" (__ret), "+A" (*__ptr) \
				: "r" (__new) \
				: "memory"); \
			break; \
		case 8: \
			__asm__ __volatile__ ( \
				"	amoswap.d.aqrl %0, %2, %1\n" \
				: "=r" (__ret), "+A" (*__ptr) \
				: "r" (__new) \
				: "memory"); \
			break; \
		default: \
			break; \
		} \
		__ret; \
	})

#define axchg(ptr, x) \
	({ \
		__typeof__(*(ptr)) _x_ = (x); \
		(__typeof__(*(ptr))) __axchg((ptr), _x_, sizeof(*(ptr))); \
	})
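
/*
 * Note: axchg() performs the exchange with a single amoswap.{w,d}.aqrl,
 * so the expression evaluates to the previous contents of *ptr and the
 * access carries both acquire and release ordering.
 *
 * Illustrative usage (hypothetical caller): swap in a new value and keep
 * the old one; "slot" is an assumed variable.
 *
 *	unsigned long slot = 0;
 *	unsigned long prev = axchg(&slot, 1UL);
 */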
#define __xchg(ptr, new, size) \
	({ \
		__typeof__(ptr) __ptr = (ptr); \
		__typeof__(*(ptr)) __new = (new); \
		__typeof__(*(ptr)) __ret; \
		register unsigned int __rc; \
		switch (size) { \
		case 4: \
			__asm__ __volatile__("0:	lr.w %0, %2\n" \
					     "	sc.w.rl %1, %z3, %2\n" \
					     "	bnez %1, 0b\n" \
					     "	fence rw, rw\n" \
					     : "=&r"(__ret), "=&r"(__rc), \
					       "+A"(*__ptr) \
					     : "rJ"(__new) \
					     : "memory"); \
			break; \
		case 8: \
			__asm__ __volatile__("0:	lr.d %0, %2\n" \
					     "	sc.d.rl %1, %z3, %2\n" \
					     "	bnez %1, 0b\n" \
					     "	fence rw, rw\n" \
					     : "=&r"(__ret), "=&r"(__rc), \
					       "+A"(*__ptr) \
					     : "rJ"(__new) \
					     : "memory"); \
			break; \
		default: \
			break; \
		} \
		__ret; \
	})

#define xchg(ptr, n) \
	({ \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) __xchg((ptr), _n_, sizeof(*(ptr))); \
	})
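
/*
 * Note: xchg() produces the same result as axchg(), but builds the
 * exchange from an LR/SC retry loop instead of a single AMO: the
 * store-conditional uses .rl ordering and the trailing "fence rw, rw"
 * keeps later memory accesses from moving before the exchange.
 */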
#define __cmpxchg(ptr, old, new, size) \
	({ \
		__typeof__(ptr) __ptr = (ptr); \
		__typeof__(*(ptr)) __old = (old); \
		__typeof__(*(ptr)) __new = (new); \
		__typeof__(*(ptr)) __ret; \
		register unsigned int __rc; \
		switch (size) { \
		case 4: \
			__asm__ __volatile__("0:	lr.w %0, %2\n" \
					     "	bne %0, %z3, 1f\n" \
					     "	sc.w.rl %1, %z4, %2\n" \
					     "	bnez %1, 0b\n" \
					     "	fence rw, rw\n" \
					     "1:\n" \
					     : "=&r"(__ret), "=&r"(__rc), \
					       "+A"(*__ptr) \
					     : "rJ"(__old), "rJ"(__new) \
					     : "memory"); \
			break; \
		case 8: \
			__asm__ __volatile__("0:	lr.d %0, %2\n" \
					     "	bne %0, %z3, 1f\n" \
					     "	sc.d.rl %1, %z4, %2\n" \
					     "	bnez %1, 0b\n" \
					     "	fence rw, rw\n" \
					     "1:\n" \
					     : "=&r"(__ret), "=&r"(__rc), \
					       "+A"(*__ptr) \
					     : "rJ"(__old), "rJ"(__new) \
					     : "memory"); \
			break; \
		default: \
			break; \
		} \
		__ret; \
	})

#define cmpxchg(ptr, o, n) \
	({ \
		__typeof__(*(ptr)) _o_ = (o); \
		__typeof__(*(ptr)) _n_ = (n); \
		(__typeof__(*(ptr))) \
			__cmpxchg((ptr), _o_, _n_, sizeof(*(ptr))); \
	})
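
/*
 * Illustrative usage (hypothetical caller): the usual cmpxchg() retry
 * idiom for a read-modify-write that is not a single AMO, here an atomic
 * increment of an assumed long variable "shared".
 *
 *	long observed = shared;
 *	for ( ; ; ) {
 *		long prev = cmpxchg(&shared, observed, observed + 1);
 *		if (prev == observed)
 *			break;
 *		observed = prev;
 *	}
 */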
long atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
{
#ifdef __riscv_atomic
	return __sync_val_compare_and_swap(&atom->counter, oldval, newval);
#else
	return cmpxchg(&atom->counter, oldval, newval);
#endif
}

long atomic_xchg(atomic_t *atom, long newval)
{
	/* Atomically set new value and return old value. */
#ifdef __riscv_atomic
	return axchg(&atom->counter, newval);
#else
	return xchg(&atom->counter, newval);
#endif
}

unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
				  unsigned int newval)
{
	/* Atomically set new value and return old value. */
#ifdef __riscv_atomic
	return axchg(ptr, newval);
#else
	return xchg(ptr, newval);
#endif
}

unsigned long atomic_raw_xchg_ulong(volatile unsigned long *ptr,
				    unsigned long newval)
{
	/* Atomically set new value and return old value. */
#ifdef __riscv_atomic
	return axchg(ptr, newval);
#else
	return xchg(ptr, newval);
#endif
}
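
/*
 * Illustrative sketch (hypothetical caller): a minimal test-and-set lock
 * built on atomic_raw_xchg_uint(), with 1 meaning locked and 0 free; the
 * variable name "lock" is an assumption of the sketch.
 *
 *	static volatile unsigned int lock;
 *
 *	while (atomic_raw_xchg_uint(&lock, 1))
 *		;
 *	... critical section ...
 *	atomic_raw_xchg_uint(&lock, 0);
 */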
#if (__SIZEOF_POINTER__ == 8)
#define __AMO(op) "amo" #op ".d"
#elif (__SIZEOF_POINTER__ == 4)
#define __AMO(op) "amo" #op ".w"
#else
#error "Unexpected __SIZEOF_POINTER__"
#endif

#define __atomic_op_bit_ord(op, mod, nr, addr, ord) \
	({ \
		unsigned long __res, __mask; \
		__mask = BIT_MASK(nr); \
		__asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
				     : "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
				     : "r"(mod(__mask)) \
				     : "memory"); \
		__res; \
	})

#define __atomic_op_bit(op, mod, nr, addr) \
	__atomic_op_bit_ord(op, mod, nr, addr, .aqrl)
/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))

inline int atomic_raw_set_bit(int nr, volatile unsigned long *addr)
{
	return __atomic_op_bit(or, __NOP, nr, addr);
}

inline int atomic_raw_clear_bit(int nr, volatile unsigned long *addr)
{
	return __atomic_op_bit(and, __NOT, nr, addr);
}

inline int atomic_set_bit(int nr, atomic_t *atom)
{
	return atomic_raw_set_bit(nr, (unsigned long *)&atom->counter);
}

inline int atomic_clear_bit(int nr, atomic_t *atom)
{
	return atomic_raw_clear_bit(nr, (unsigned long *)&atom->counter);
}
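
/*
 * Illustrative usage (hypothetical caller): the AMO returns the previous
 * value of the word containing the bit, so a test-and-set can check
 * whether the bit was already set. "flags", FLAG_BUSY and already_busy()
 * are assumptions of the sketch, with FLAG_BUSY assumed below 32 so the
 * int return value still carries the bit of interest.
 *
 *	static volatile unsigned long flags;
 *
 *	if (atomic_raw_set_bit(FLAG_BUSY, &flags) & BIT_MASK(FLAG_BUSY))
 *		already_busy();
 */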