task_work.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>
#include <linux/kasan.h>	/* kasan_record_aux_stack() */

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and
 * thus can't process this @work. Otherwise @work->func() will be called
 * when the @task goes through one of the aforementioned transitions, or
 * exits.
 *
 * If the targeted task is exiting, then an error is returned and the work
 * item is not queued. It's up to the caller to arrange for an alternative
 * mechanism in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;
	unsigned long flags;

	/* record the work call stack in order to print it in KASAN reports */
	kasan_record_aux_stack(work);

	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		/*
		 * Only grab the sighand lock if we don't already have some
		 * task_work pending. This pairs with the smp_store_mb()
		 * in get_signal(), see comment there.
		 */
		if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
		    lock_task_sighand(task, &flags)) {
			task->jobctl |= JOBCTL_TASK_WORK;
			signal_wake_up(task, 0);
			unlock_task_sighand(task, &flags);
		}
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
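
/*
 * Illustrative sketch (not part of the original file): how a typical
 * caller queues work against a task. The struct my_data, my_func() and
 * queue_example() names are hypothetical; init_task_work() and
 * container_of() are the real helpers. Guarded with #if 0 so it does
 * not affect the build.
 */
#if 0
struct my_data {
	struct callback_head twork;	/* embedded list node */
	int value;
};

static void my_func(struct callback_head *cb)
{
	/* Runs in the context of the targeted task. */
	struct my_data *data = container_of(cb, struct my_data, twork);

	pr_info("task_work ran, value=%d\n", data->value);
	kfree(data);
}

static int queue_example(struct task_struct *task)
{
	struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;
	data->value = 42;
	init_task_work(&data->twork, my_func);
	if (task_work_add(task, &data->twork, TWA_SIGNAL)) {
		/* @task is already exiting, the callback will never run */
		kfree(data);
		return -ESRCH;
	}
	return 0;
}
#endif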

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, so we will find it again. Or
	 * we raced with task_work_run() and *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
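
/*
 * Illustrative sketch (not part of the original file): cancelling a
 * pending work before it runs. Reuses the hypothetical my_data/my_func
 * from the sketch above; on success the caller owns the callback_head
 * again and must free its container itself.
 */
#if 0
static void cancel_example(void)
{
	struct callback_head *cb = task_work_cancel(current, my_func);

	if (cb)	/* my_func() will not be called for this entry */
		kfree(container_of(cb, struct my_data, twork));
}
#endif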

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It cannot remove
		 * the first entry (== work): its cmpxchg(task_works) must
		 * fail. But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
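
/*
 * For reference (not part of this file): the core kernel drives
 * task_work_run() from the exit path via exit_task_work(), defined in
 * <linux/task_work.h> essentially as below, and from the return-to-user
 * path via tracehook_notify_resume() when ->task_works is non-NULL.
 */
#if 0
static inline void exit_task_work(struct task_struct *task)
{
	task_work_run();
}
#endif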