
/*
 * include/asm-arm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp-based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
#else

/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it satisfies
 * the defined semantics for our purpose, while being smaller and faster
 * than a real atomic decrement or atomic swap). The idea is to attempt
 * decrementing the lock value only once. If, once decremented, it isn't zero,
 * or if its store-back fails due to a dispute on the exclusive store, we
 * simply bail out immediately through the slow path where the lock will be
 * reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"	/* __res = count->counter */
		"sub	%0, %0, #1	\n\t"	/* decrement it */
		"strex	%1, %0, [%2]	"	/* store back; __ex_flag = 0 on success */

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		/* it didn't reach 0 or the exclusive store failed: slow path */
		fail_fn(count);
}
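
/*
 * For reference, the fast path above behaves roughly like the C sketch
 * below. This is an illustration only: it ignores the atomicity that the
 * ldrex/strex pair provides, and "stored_ok" is a hypothetical flag that
 * stands in for the strex result.
 *
 *	int val = atomic_read(count) - 1;	(ldrex + sub)
 *	stored_ok = attempt to write val back;	(strex)
 *	if (val != 0 || !stored_ok)
 *		fail_fn(count);			(contended: slow path)
 */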

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);

	return __res;
}

/*
 * The same trick is used for the unlock fast path. However, the original
 * value, rather than the result, is used to test for success in order to
 * have better generated assembly.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"ldrex	%0, [%3]	\n\t"	/* __orig = count->counter */
		"add	%1, %0, #1	\n\t"	/* __res = __orig + 1 */
		"strex	%2, %1, [%3]	"	/* store back; __ex_flag = 0 on success */

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		/* the count wasn't 0 or the store failed: slow path */
		fail_fn(count);
}
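
/*
 * A similar C-level sketch for the unlock fast path above (again an
 * illustration only; "stored_ok" is a hypothetical stand-in for the
 * strex result):
 *
 *	int orig = atomic_read(count);		(ldrex)
 *	stored_ok = attempt to write orig + 1;	(add + strex)
 *	if (orig != 0 || !stored_ok)
 *		fail_fn(count);			(possible waiters: slow path)
 */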

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * For __mutex_fastpath_trylock we use another construct which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics like cmpxchg would, but it is
 * lighter and less generic than a true cmpxchg implementation.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"1: ldrex	%0, [%3]	\n\t"	/* __orig = count->counter */
		"subs	%1, %0, #1	\n\t"	/* __res = __orig - 1, set flags */
		"strexeq	%2, %1, [%3]	\n\t"	/* was 1 (unlocked): try to store 0 */
		"movlt	%0, #0		\n\t"	/* was <= 0 (locked): report failure */
		"cmpeq	%2, #0		\n\t"	/* store attempted: did it succeed? */
		"bgt	1b		"		/* exclusive store failed: retry */

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	return __orig;
}
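
/*
 * Semantically this is close to what a cmpxchg-based trylock would do,
 * roughly along the lines of the sketch below (not the actual generic
 * implementation):
 *
 *	if (atomic_cmpxchg(count, 1, 0) == 1)
 *		return 1;			(lock acquired)
 *	return 0;				(already held)
 *
 * except that only the single value 1 is ever compared and exchanged,
 * hence the "single value cmpxchg" description above.
 */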

#endif
#endif