// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"
#include <trace/hooks/sched.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
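
/*
 * For orientation: the common wake_up*() wrappers in include/linux/wait.h
 * funnel into __wake_up(). A sketch of the mapping (exact forms may differ
 * between kernel versions):
 *
 *	wake_up(x)		   => __wake_up(x, TASK_NORMAL, 1, NULL)
 *	wake_up_all(x)		   => __wake_up(x, TASK_NORMAL, 0, NULL)
 *	wake_up_interruptible(x)   => __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 *
 * i.e. wake_up() is an exclusive-count-of-one wakeup (all non-exclusive
 * waiters plus one exclusive waiter), while wake_up_all() passes
 * nr_exclusive == 0 and therefore wakes every waiter.
 */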

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	int wake_flags = WF_SYNC;

	if (unlikely(!wq_head))
		return;

	trace_android_vh_set_wake_flags(&wake_flags, &mode);
	__wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}
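
/*
 * Callers normally reach __wake_up_pollfree() through the
 * wake_up_pollfree() wrapper in include/linux/wait.h, which (roughly,
 * per the macro definition; shape may vary across versions) skips the
 * call when nobody is waiting:
 *
 *	#define wake_up_pollfree(x)				\
 *	({							\
 *		wait_queue_head_t *__wq_head = (x);		\
 *		if (waitqueue_active(__wq_head))		\
 *			__wake_up_pollfree(__wq_head);		\
 *	})
 */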

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
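
/*
 * An illustrative open-coded wait loop built on the primitives above
 * ("condition" is a placeholder for the caller's wakeup condition;
 * DEFINE_WAIT() installs autoremove_wake_function(), defined further
 * down in this file):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 *
 * The condition is (re)checked only after prepare_to_wait() has both
 * queued the entry and set the task state, so a wakeup racing with the
 * check cannot be lost: it either makes the condition visible here or
 * puts the task back to TASK_RUNNING before schedule().
 */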

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
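
/*
 * prepare_to_wait_event() is the workhorse behind the wait_event*()
 * macro family. Stripped of the macro plumbing, the loop inside
 * ___wait_event() looks roughly like this (simplified sketch;
 * "condition", "state" and "interruptible" stand in for macro
 * arguments):
 *
 *	init_wait_entry(&wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *
 *		if (condition)
 *			break;
 *		if (interruptible && err)
 *			return err;	// -ERESTARTSYS from above
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */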

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
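
/*
 * An illustrative caller, in the spirit of the
 * wait_event_interruptible_locked() family ("condition" is the caller's
 * predicate; since wq->lock is held across the condition check, the
 * check/sleep pair is race-free):
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)
 *			break;
 *	}
 *	if (!list_empty(&wait.entry))
 *		list_del_init(&wait.entry);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */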

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
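
/*
 * Default wake callback installed by init_wait_entry() (and therefore by
 * the DEFINE_WAIT()/wait_event*() machinery): on a successful wakeup it
 * also removes the entry from the queue, which is what lets finish_wait()
 * usually take the lockless list_empty_careful() fast path above.
 */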
__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
			int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
			int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);