/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_WOKEN           0x02
#define WQ_FLAG_BOOKMARK        0x04
#define WQ_FLAG_CUSTOM          0x08
#define WQ_FLAG_DONE            0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
        unsigned int            flags;
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        entry;
};

struct wait_queue_head {
        spinlock_t              lock;
        struct list_head        head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private        = tsk, \
        .func           = default_wake_function, \
        .entry          = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock), \
        .head           = { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head) \
        do { \
                static struct lock_class_key __key; \
 \
                __init_waitqueue_head((wq_head), #wq_head, &__key); \
        } while (0)
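/*
 * Example (editor's illustrative sketch, not part of the original header):
 * a wait queue head can either be defined statically or be embedded in a
 * structure and initialized at runtime. The names `my_static_wq', `my_dev'
 * and `my_dev_init' are hypothetical.
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *      struct my_dev {
 *              struct wait_queue_head wq;
 *      };
 *
 *      static void my_dev_init(struct my_dev *dev)
 *      {
 *              init_waitqueue_head(&dev->wq);
 *      }
 *
 * init_waitqueue_head() must be used for dynamically allocated heads; note
 * that it defines one static lockdep class key per call site.
 */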
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
        wq_entry->flags         = 0;
        wq_entry->private       = p;
        wq_entry->func          = default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
        wq_entry->flags         = 0;
        wq_entry->private       = NULL;
        wq_entry->func          = func;
}
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
        return !list_empty(&wq_head->head);
}
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
        return list_is_singular(&wq_head->head);
}
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
        /*
         * We need to be sure we are in sync with the
         * add_wait_queue modifications to the wait queue.
         *
         * This memory barrier should be paired with one on the
         * waiting side.
         */
        smp_mb();
        return waitqueue_active(wq_head);
}
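/*
 * Example (editor's illustrative sketch): a waker can use wq_has_sleeper()
 * to skip the wakeup entirely when nobody is waiting. The smp_mb() inside
 * wq_has_sleeper() pairs with the barrier implied by the sleeper's
 * prepare_to_wait()/set_current_state(). `my_wq' and `my_cond' are
 * hypothetical.
 *
 *      my_cond = true;                 // publish the condition first
 *      if (wq_has_sleeper(&my_wq))     // implied smp_mb() orders store vs. check
 *              wake_up(&my_wq);
 */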
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m) \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
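/*
 * Example (editor's illustrative sketch): a driver supporting poll()
 * typically registers its waitqueue in ->poll() (poll_wait() comes from
 * <linux/poll.h>, not this header) and reports readiness with one of the
 * *_poll() wakeups, so only waiters interested in the event mask (the key)
 * are woken. `my_dev', `my_poll' and `data_avail' are hypothetical.
 *
 *      static __poll_t my_poll(struct file *file, poll_table *wait)
 *      {
 *              struct my_dev *dev = file->private_data;
 *
 *              poll_wait(file, &dev->wq, wait);
 *              return data_avail(dev) ? EPOLLIN | EPOLLRDNORM : 0;
 *      }
 *
 *      // producer side, after making data available:
 *      wake_up_interruptible_poll(&dev->wq, EPOLLIN | EPOLLRDNORM);
 */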
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
        /*
         * For performance reasons, we don't always take the queue lock here.
         * Therefore, we might race with someone removing the last entry from
         * the queue, and proceed while they still hold the queue lock.
         * However, rcu_read_lock() is required to be held in such cases, so we
         * can safely proceed with an RCU-delayed free.
         */
        if (waitqueue_active(wq_head))
                __wake_up_pollfree(wq_head);
}
#define ___wait_cond_timeout(condition) \
({ \
        bool __cond = (condition); \
        if (__cond && !__ret) \
                __ret = 1; \
        __cond || !__ret; \
})
#define ___wait_is_interruptible(state) \
        (!__builtin_constant_p(state) || \
         state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
        __label__ __out; \
        struct wait_queue_entry __wq_entry; \
        long __ret = ret;       /* explicit shadow */ \
 \
        init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
        for (;;) { \
                long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state); \
 \
                if (condition) \
                        break; \
 \
                if (___wait_is_interruptible(state) && __int) { \
                        __ret = __int; \
                        goto __out; \
                } \
 \
                cmd; \
        } \
        finish_wait(&wq_head, &__wq_entry); \
__out:  __ret; \
})
#define __wait_event(wq_head, condition) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition) \
do { \
        might_sleep(); \
        if (condition) \
                break; \
        __wait_event(wq_head, condition); \
} while (0)
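/*
 * Example (editor's illustrative sketch): the classic pairing of
 * wait_event() with wake_up(). `my_wq' and `my_done' are hypothetical;
 * the condition should depend only on state that the waker updates
 * before calling wake_up().
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      static bool my_done;
 *
 *      // waiter: sleeps in TASK_UNINTERRUPTIBLE until my_done is set
 *      wait_event(my_wq, my_done);
 *
 *      // waker: update the condition, then wake the queue
 *      my_done = true;
 *      wake_up(&my_wq);
 */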
#define __io_wait_event(wq_head, condition) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition) \
do { \
        might_sleep(); \
        if (condition) \
                break; \
        __io_wait_event(wq_head, condition); \
} while (0)

#define __wait_event_freezable(wq_head, condition) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable(wq_head, condition); \
        __ret; \
})
#define __wait_event_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_UNINTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_timeout(wq_head, condition, timeout); \
        __ret; \
})
#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
        __ret; \
})
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
                            cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            cmd1; schedule(); cmd2)
/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_interruptible(wq_head, condition) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible(wq_head, condition); \
        __ret; \
})
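/*
 * Example (editor's illustrative sketch): an interruptible wait must check
 * the return value and propagate -ERESTARTSYS so that a pending signal can
 * restart or abort the syscall cleanly. `my_wq' and `my_ready' are
 * hypothetical.
 *
 *      static int my_wait_for_ready(void)
 *      {
 *              int ret;
 *
 *              ret = wait_event_interruptible(my_wq, my_ready);
 *              if (ret)
 *                      return ret;     // -ERESTARTSYS: a signal arrived
 *              // my_ready was observed true
 *              return 0;
 *      }
 */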
#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_timeout(wq_head, \
                                                           condition, timeout); \
        __ret; \
})
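/*
 * Example (editor's illustrative sketch): distinguishing the three possible
 * outcomes of wait_event_interruptible_timeout(). `my_wq', `my_ready' and
 * MY_TIMEOUT_MS are hypothetical.
 *
 *      long ret = wait_event_interruptible_timeout(my_wq, my_ready,
 *                                      msecs_to_jiffies(MY_TIMEOUT_MS));
 *      if (ret == 0)
 *              return -ETIMEDOUT;      // timed out, condition still false
 *      if (ret < 0)
 *              return ret;             // -ERESTARTSYS: interrupted by a signal
 *      // ret > 0: condition became true with `ret' jiffies remaining
 */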
#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
        int __ret = 0; \
        struct hrtimer_sleeper __t; \
 \
        hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
                                      HRTIMER_MODE_REL); \
        if ((timeout) != KTIME_MAX) \
                hrtimer_start_range_ns(&__t.timer, timeout, \
                                       current->timer_slack_ns, \
                                       HRTIMER_MODE_REL); \
 \
        __ret = ___wait_event(wq_head, condition, state, 0, 0, \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule()); \
 \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        __ret; \
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})
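/*
 * Example (editor's illustrative sketch): the hrtimeout variants take a
 * ktime_t rather than jiffies, which suits sub-jiffy deadlines. `my_wq'
 * and `my_ready' are hypothetical.
 *
 *      long ret = wait_event_interruptible_hrtimeout(my_wq, my_ready,
 *                                                    ms_to_ktime(5));
 *      if (ret == -ETIME)
 *              ...     // 5 ms elapsed, condition still false
 *      else if (ret == -ERESTARTSYS)
 *              ...     // interrupted by a signal
 *      else
 *              ...     // ret == 0: condition became true
 */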
#define __wait_event_interruptible_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_interruptible_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_killable_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
                      schedule())

#define wait_event_killable_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_killable_exclusive(wq, condition); \
        __ret; \
})

#define __wait_event_freezable_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_freezable_exclusive(wq, condition); \
        __ret; \
})
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition) \
do { \
        might_sleep(); \
        if (!(condition)) \
                ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up that
 * reaches this process does not consider any further processes.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
        might_sleep(); \
        if (!(condition)) \
                ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)
#define __wait_event_idle_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_IDLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
        __ret; \
})
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_IDLE, 1, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up that
 * reaches this process does not consider any further processes.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
        __ret; \
})
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
        int __ret; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                __ret = fn(&(wq), &__wait); \
                if (__ret) \
                        break; \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up that
 * reaches this process does not consider any further processes.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-up that
 * reaches this process does not consider any further processes.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
#define __wait_event_killable(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition) \
({ \
        int __ret = 0; \
        might_sleep(); \
        if (!(condition)) \
                __ret = __wait_event_killable(wq_head, condition); \
        __ret; \
})
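/*
 * Example (editor's illustrative sketch): wait_event_killable() fits where
 * an interruptible wait would be too fragile (any signal aborts it) but an
 * uninterruptible one would leave the task unkillable; only fatal signals
 * wake it. `my_wq' and `my_ready' are hypothetical.
 *
 *      int ret = wait_event_killable(my_wq, my_ready);
 *      if (ret)
 *              return ret;     // -ERESTARTSYS: a fatal signal is pending
 */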
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      TASK_KILLABLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
        long __ret = timeout; \
        might_sleep(); \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_killable_timeout(wq_head, \
                                                      condition, timeout); \
        __ret; \
})
#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            spin_unlock_irq(&lock); \
                            cmd; \
                            schedule(); \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *      condition is checked under the lock. This
 *      is expected to be called with the lock
 *      taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *      and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *      sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq_head, condition, lock, cmd); \
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *      condition is checked under the lock. This
 *      is expected to be called with the lock
 *      taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *      and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq_head, condition, lock, ); \
} while (0)
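/*
 * Example (editor's illustrative sketch): waiting for a condition that is
 * protected by a spinlock taken with spin_lock_irq(); the macro drops and
 * reacquires the lock around schedule(). `my_lock', `my_wq', `my_list' and
 * `struct my_item' are hypothetical.
 *
 *      spin_lock_irq(&my_lock);
 *      wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *      // my_lock is held again here and the condition is true
 *      item = list_first_entry(&my_list, struct my_item, node);
 *      spin_unlock_irq(&my_lock);
 */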
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      spin_unlock_irq(&lock); \
                      cmd; \
                      schedule(); \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *      The condition is checked under the lock. This is expected to
 *      be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *      schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *      sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq_head, \
                                                            condition, lock, cmd); \
        __ret; \
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *      The condition is checked under the lock. This is expected
 *      to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *      and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq_head, \
                                                            condition, lock,); \
        __ret; \
})
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
        ___wait_event(wq_head, ___wait_cond_timeout(condition), \
                      state, 0, timeout, \
                      spin_unlock_irq(&lock); \
                      __ret = schedule_timeout(__ret); \
                      spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *      true or a timeout elapses. The condition is checked under
 *      the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *      and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
                                                  timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_lock_irq_timeout( \
                                        wq_head, condition, lock, timeout, \
                                        TASK_INTERRUPTIBLE); \
        __ret; \
})
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_lock_irq_timeout( \
                                        wq_head, condition, lock, timeout, \
                                        TASK_UNINTERRUPTIBLE); \
        __ret; \
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        struct wait_queue_entry name = { \
                .private        = current, \
                .func           = function, \
                .entry          = LIST_HEAD_INIT((name).entry), \
        }
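/*
 * Example (editor's illustrative sketch): the wait_woken() pattern pairs
 * DEFINE_WAIT_FUNC(..., woken_wake_function) with add_wait_queue(). It
 * tolerates a wakeup landing between the condition test and the sleep,
 * because woken_wake_function() records WQ_FLAG_WOKEN in the entry.
 * `my_wq' and `my_ready' are hypothetical.
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&my_wq, &wait);
 *      while (!my_ready)       // a real loop would also check signal_pending()
 *              wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 *      remove_wait_queue(&my_wq, &wait);
 */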
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait) \
do { \
        (wait)->private = current; \
        (wait)->func = autoremove_wake_function; \
        INIT_LIST_HEAD(&(wait)->entry); \
        (wait)->flags = 0; \
} while (0)
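/*
 * Example (editor's illustrative sketch): the open-coded wait loop that the
 * wait_event*() macros expand to, useful when extra work is needed between
 * the condition check and schedule(). `my_wq' and `my_ready' are
 * hypothetical.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (my_ready)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 */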
bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif /* _LINUX_WAIT_H */