/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#ifdef CONFIG_PREEMPT_BKL

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        struct task_struct *task = current;
        int saved_lock_depth = task->lock_depth;

        BUG_ON(saved_lock_depth < 0);

        task->lock_depth = -1;
        preempt_enable_no_resched();

        down(&kernel_sem);

        preempt_disable();
        task->lock_depth = saved_lock_depth;

        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        up(&kernel_sem);
}

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
        struct task_struct *task = current;
        int depth = task->lock_depth + 1;

        if (likely(!depth))
                /*
                 * No recursion worries - we set up lock_depth _after_
                 */
                down(&kernel_sem);

        task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        struct task_struct *task = current;

        BUG_ON(task->lock_depth < 0);

        if (likely(--task->lock_depth < 0))
                up(&kernel_sem);
}
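
/*
 * Illustrative sketch, not part of this file: because lock_kernel()
 * only takes kernel_sem when lock_depth goes from -1 to 0, code that
 * already holds the BKL can call into another BKL user without
 * deadlocking.  The two functions below are hypothetical.
 */
#if 0
static void legacy_inner(void)
{
        lock_kernel();          /* depth 0 -> 1: no semaphore operation */
        /* ... touch BKL-protected state ... */
        unlock_kernel();        /* depth 1 -> 0: semaphore still held */
}

static void legacy_outer(void)
{
        lock_kernel();          /* depth -1 -> 0: down(&kernel_sem) */
        legacy_inner();
        unlock_kernel();        /* depth 0 -> -1: up(&kernel_sem) */
}
#endif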

#else

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);

/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (test_thread_flag(TIF_NEED_RESCHED))
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        _raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled..
                 */
                do {
                        preempt_enable();
                        while (spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!_raw_spin_trylock(&kernel_flag));
        }
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
        _raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
        /*
         * the BKL is not covered by lockdep, so we open-code the
         * unlocking sequence (and thus avoid the dep-chain ops):
         */
        _raw_spin_unlock(&kernel_flag);
        preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
        int depth = current->lock_depth + 1;

        if (likely(!depth))
                __lock_kernel();

        current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);

        if (likely(--current->lock_depth < 0))
                __unlock_kernel();
}

#endif

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
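
/*
 * Illustrative sketch, not part of this file: the exported pair is
 * what legacy code - e.g. an old-style ioctl handler - wraps around
 * sections that still rely on BKL serialization.  The handler below
 * is hypothetical.
 */
#if 0
static int legacy_ioctl(struct inode *inode, struct file *filp,
                        unsigned int cmd, unsigned long arg)
{
        int ret = 0;

        lock_kernel();          /* serialize against all other BKL users */
        /* ... legacy, BKL-protected work on cmd/arg ... */
        unlock_kernel();

        return ret;
}
#endif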