/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
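
/* A minimal caller-side sketch of the public rwsem API from <linux/rwsem.h>,
 * shown for orientation only ('my_sem' is a hypothetical name; the contended
 * cases below are what funnel into the slow paths in this file):
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);             .. shared: many readers may hold it ..
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);            .. exclusive: one writer, no readers ..
 *      downgrade_write(&my_sem);       .. atomically become a reader ..
 *      up_read(&my_sem);
 */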

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
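
/* For reference when reading the count arithmetic below: the bias constants
 * are arch-specific (asm/rwsem.h). A typical 32-bit layout, reproduced here
 * as an illustrative assumption rather than a definition this file owns:
 *
 *      #define RWSEM_UNLOCKED_VALUE    0x00000000
 *      #define RWSEM_ACTIVE_BIAS       0x00000001
 *      #define RWSEM_ACTIVE_MASK       0x0000ffff
 *      #define RWSEM_WAITING_BIAS      (-0x00010000)
 *      #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
 *      #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * With this layout the low 16 bits count active lockers, and the high bits
 * go negative by one unit per queued waiter (an active writer also holds one
 * waiting unit).
 */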

/*
 * handle the lock release, waking up processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        signed long oldcount, woken, loop;

        if (downgrading)
                goto dont_wake_writers;
        /* if we came through an up_xxxx() call, we only wake someone up
         * if we can transition the active part of the count from 0 -> 1
         */
 try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
                                                - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        /* try to grant a single write lock if there's a writer at the front
         * of the queue - note we leave the 'active part' of the count
         * incremented by 1 and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;
        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
         * It is allocated on the waiter's stack and may become invalid at
         * any time after that point (due to a wakeup from another source).
         */
        list_del(&waiter->list);
        tsk = waiter->task;
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        goto out;

        /* don't want to wake any writers */
 dont_wake_writers:
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                goto out;

        /* grant an infinite number of read locks to the readers at the front
         * of the queue
         * - note we increment the 'active part' of the count by the number of
         *   readers before waking any processes up
         */
 readers_only:
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);

        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
        if (!downgrading)
                /* we'd already done one increment earlier */
                woken -= RWSEM_ACTIVE_BIAS;

        rwsem_atomic_add(woken, sem);

        next = sem->wait_list.next;
        for (; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;

        /* undo the change to count, but check for a transition 1->0 */
 undo:
        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                goto out;
        goto try_again;
}
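
/* Worked example of the reader-wake arithmetic above, using the illustrative
 * 32-bit bias values noted earlier (an assumption, not something this file
 * defines). Waking three queued readers from an up_write():
 *
 *      per reader: RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS
 *                = 0x00000001 - (-0x00010000) = 0x00010001
 *      woken     = 3 * 0x00010001 - RWSEM_ACTIVE_BIAS = 0x00030002
 *
 * Together with the +RWSEM_ACTIVE_BIAS already applied at try_again, the net
 * change is 0x00030003: three waiting units removed, three active readers
 * added.
 */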

/*
 * wait for a lock to be granted
 */
static struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
                         struct rwsem_waiter *waiter, signed long adjustment)
{
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        spin_lock_irq(&sem->wait_lock);
        waiter->task = tsk;
        get_task_struct(tsk);

        list_add_tail(&waiter->list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* if there are no active locks, wake the front queued process(es) up */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter->task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
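
/* The 'adjustment' passed in by the two callers below, decoded with the
 * illustrative 32-bit bias values (again an assumption about asm/rwsem.h):
 *
 *      reader: RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS = -0x00010001
 *              (gives back the +1 its fast path took, adds a waiting unit)
 *      writer: -RWSEM_ACTIVE_BIAS = -0x00000001
 *              (its fast path added WAITING + ACTIVE; dropping ACTIVE
 *               leaves exactly one waiting unit in place)
 */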

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_READ;
        rwsem_down_failed_common(sem, &waiter,
                                RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
        return sem;
}

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
        return sem;
}
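
/* For orientation: a hypothetical C rendition of the per-arch fast paths
 * that fall back into the slow paths above. Real implementations live in
 * asm/rwsem.h (often as inline asm); the exact comparisons are assumptions
 * based on the illustrative bias layout, not code this file provides:
 *
 *      static inline void __down_read(struct rw_semaphore *sem)
 *      {
 *              if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
 *                      rwsem_down_read_failed(sem);
 *      }
 *
 *      static inline void __down_write(struct rw_semaphore *sem)
 *      {
 *              if (rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem)
 *                                      != RWSEM_ACTIVE_WRITE_BIAS)
 *                      rwsem_down_write_failed(sem);
 *      }
 */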

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
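
/* The matching release fast paths (a sketch, same caveats as above): on the
 * assumed layout, __up_read()/__up_write() subtract their bias and call
 * rwsem_wake() only when the new count is negative while its active part has
 * reached zero, i.e. waiters are queued and the last active locker just left.
 */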

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}
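
/* Downgrade in terms of the illustrative bias values (an assumption): a held
 * write lock contributes RWSEM_ACTIVE_WRITE_BIAS = -0x00010000 + 1, and the
 * downgrade_write() fast path adds -RWSEM_WAITING_BIAS (+0x00010000), leaving
 * just one active-reader unit. If the result is still negative, other tasks
 * are queued, so the fast path calls rwsem_downgrade_wake() above to let the
 * readers at the front run alongside the downgraded holder.
 */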

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);