spinlock_up.h

#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>
/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
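
/*
 * Illustrative sketch (an assumption based on spinlock_types_up.h,
 * where the debug-mode __ARCH_SPIN_LOCK_UNLOCKED initializer is { 1 }):
 * because the values are inverted, a lock sitting in zeroed but
 * never-initialized memory already reads as "locked", so the init bug
 * surfaces on first use instead of going unnoticed:
 *
 *	arch_spinlock_t good = __ARCH_SPIN_LOCK_UNLOCKED;
 *	arch_spinlock_t bad  = { 0 };	// init forgotten: reads as held
 *
 *	arch_spin_trylock(&good);	// returns 1: slock was 1 (free)
 *	arch_spin_trylock(&bad);	// returns 0: "already locked"
 */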
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)		((x)->slock == 0)
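
/*
 * Taking the lock is a plain store of 0 ("locked"); there is no other
 * CPU to spin against on UP. The barrier() stops the compiler from
 * hoisting critical-section accesses above the store.
 */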
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}
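
/*
 * Trylock samples the old value, takes the lock unconditionally, and
 * reports success iff the lock still read "unlocked" (> 0) beforehand.
 */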
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}
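
/*
 * Unlock is the mirror image: here the barrier() keeps critical-section
 * accesses from sinking below the store that marks the lock free again.
 */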
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
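/*
 * On UP, readers and writers can never contend, so each operation below
 * reduces to a compiler barrier; the (void)(lock) cast only evaluates
 * the argument to keep it "used" and warning-free.
 */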
#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
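/*
 * Non-debug versions: the lock carries no state at all, so every
 * operation is just a compiler barrier around the ignored lock
 * argument, trylock always succeeds, and the lock never reads as held.
 */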
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })

#endif /* DEBUG_SPINLOCK */

#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */