spinlock.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

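/*
 * Test-and-set trylock: amoswap.w atomically swaps 1 into the lock word
 * and returns the previous value in "busy".  A previous value of 0 means
 * the lock was free and is now held; the acquire barrier keeps the
 * critical section from being reordered before the acquisition.
 */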
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}

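/*
 * Test-and-test-and-set: spin on a plain read of the lock word and only
 * attempt the atomic swap once the lock is observed free, so waiters do
 * not keep writing the contended line with atomic swaps.
 */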
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}

/***********************************************************/

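/*
 * Reader/writer locks: lock->lock holds the number of readers, or -1
 * while a writer owns the lock.
 *
 * A reader loops with LR/SC: retry the load while the count is negative
 * (writer present), otherwise increment the reader count; a failed SC
 * also retries from the load.
 */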
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

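/*
 * A writer waits for the lock word to reach 0 (no readers, no writer)
 * and then stores -1 to claim exclusive ownership, retrying on SC
 * failure.
 */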
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

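/*
 * The trylock variants mirror the loops above, except that contention
 * branches forward to the local label 1 instead of retrying: "busy" is
 * then the non-zero lock word that was observed, and 0 only after a
 * successful SC, so !busy reports whether the lock was taken.
 */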
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

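/*
 * A reader unlocks by atomically adding -1 to the reader count; the
 * release barrier orders the critical section before the decrement.
 */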
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		RISCV_RELEASE_BARRIER
		"	amoadd.w x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}

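/* A writer unlocks by publishing 0 with a store-release. */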
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */