tasks.h

  1. /* SPDX-License-Identifier: GPL-2.0+ */
  2. /*
  3. * Task-based RCU implementations.
  4. *
  5. * Copyright (C) 2020 Paul E. McKenney
  6. */
  7. #ifdef CONFIG_TASKS_RCU_GENERIC
  8. ////////////////////////////////////////////////////////////////////////
  9. //
  10. // Generic data structures.
  11. struct rcu_tasks;
  12. typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
  13. typedef void (*pregp_func_t)(void);
  14. typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
  15. typedef void (*postscan_func_t)(struct list_head *hop);
  16. typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
  17. typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  18. /**
  19. * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
  20. * @cbs_head: Head of callback list.
  21. * @cbs_tail: Tail pointer for callback list.
  22. * @cbs_wq: Wait queue allowing new callbacks to get the kthread's attention.
  23. * @cbs_lock: Lock protecting callback list.
  24. * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  25. * @gp_func: This flavor's grace-period-wait function.
  26. * @gp_state: Grace period's most recent state transition (debugging).
  27. * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
  28. * @init_fract: Initial backoff sleep interval.
  29. * @gp_jiffies: Time of last @gp_state transition.
  30. * @gp_start: Most recent grace-period start in jiffies.
  31. * @n_gps: Number of grace periods completed since boot.
  32. * @n_ipis: Number of IPIs sent to encourage grace periods to end.
  33. * @n_ipis_fails: Number of IPI-send failures.
  34. * @pregp_func: This flavor's pre-grace-period function (optional).
  35. * @pertask_func: This flavor's per-task scan function (optional).
  36. * @postscan_func: This flavor's post-task scan function (optional).
  37. * @holdouts_func: This flavor's holdout-list scan function (optional).
  38. * @postgp_func: This flavor's post-grace-period function (optional).
  39. * @call_func: This flavor's call_rcu()-equivalent function.
  40. * @name: This flavor's textual name.
  41. * @kname: This flavor's kthread name.
  42. */
  43. struct rcu_tasks {
  44. struct rcu_head *cbs_head;
  45. struct rcu_head **cbs_tail;
  46. struct wait_queue_head cbs_wq;
  47. raw_spinlock_t cbs_lock;
  48. int gp_state;
  49. int gp_sleep;
  50. int init_fract;
  51. unsigned long gp_jiffies;
  52. unsigned long gp_start;
  53. unsigned long n_gps;
  54. unsigned long n_ipis;
  55. unsigned long n_ipis_fails;
  56. struct task_struct *kthread_ptr;
  57. rcu_tasks_gp_func_t gp_func;
  58. pregp_func_t pregp_func;
  59. pertask_func_t pertask_func;
  60. postscan_func_t postscan_func;
  61. holdouts_func_t holdouts_func;
  62. postgp_func_t postgp_func;
  63. call_rcu_func_t call_func;
  64. char *name;
  65. char *kname;
  66. };
  67. #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
  68. static struct rcu_tasks rt_name = \
  69. { \
  70. .cbs_tail = &rt_name.cbs_head, \
  71. .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
  72. .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \
  73. .gp_func = gp, \
  74. .call_func = call, \
  75. .name = n, \
  76. .kname = #rt_name, \
  77. }
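/*
 * Illustrative sketch (rcu_tasks_toy and its two functions are hypothetical,
 * shown only for illustration): a new flavor would be wired up through
 * DEFINE_RCU_TASKS() roughly as below, given a grace-period-wait function
 * and a call_rcu()-style queuing function of its own.  The real flavors
 * later in this file (rcu_tasks, rcu_tasks_rude, rcu_tasks_trace) follow
 * exactly this pattern.
 *
 *	void call_rcu_tasks_toy(struct rcu_head *rhp, rcu_callback_t func);
 *	static void rcu_tasks_toy_wait_gp(struct rcu_tasks *rtp);
 *	DEFINE_RCU_TASKS(rcu_tasks_toy, rcu_tasks_toy_wait_gp,
 *			 call_rcu_tasks_toy, "RCU Tasks Toy");
 */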
  78. /* Track exiting tasks in order to allow them to be waited for. */
  79. DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
  80. /* Avoid IPIing CPUs early in the grace period. */
  81. #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
  82. static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
  83. module_param(rcu_task_ipi_delay, int, 0644);
  84. /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
  85. #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
  86. static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
  87. module_param(rcu_task_stall_timeout, int, 0644);
  88. /* RCU tasks grace-period state for debugging. */
  89. #define RTGS_INIT 0
  90. #define RTGS_WAIT_WAIT_CBS 1
  91. #define RTGS_WAIT_GP 2
  92. #define RTGS_PRE_WAIT_GP 3
  93. #define RTGS_SCAN_TASKLIST 4
  94. #define RTGS_POST_SCAN_TASKLIST 5
  95. #define RTGS_WAIT_SCAN_HOLDOUTS 6
  96. #define RTGS_SCAN_HOLDOUTS 7
  97. #define RTGS_POST_GP 8
  98. #define RTGS_WAIT_READERS 9
  99. #define RTGS_INVOKE_CBS 10
  100. #define RTGS_WAIT_CBS 11
  101. #ifndef CONFIG_TINY_RCU
  102. static const char * const rcu_tasks_gp_state_names[] = {
  103. "RTGS_INIT",
  104. "RTGS_WAIT_WAIT_CBS",
  105. "RTGS_WAIT_GP",
  106. "RTGS_PRE_WAIT_GP",
  107. "RTGS_SCAN_TASKLIST",
  108. "RTGS_POST_SCAN_TASKLIST",
  109. "RTGS_WAIT_SCAN_HOLDOUTS",
  110. "RTGS_SCAN_HOLDOUTS",
  111. "RTGS_POST_GP",
  112. "RTGS_WAIT_READERS",
  113. "RTGS_INVOKE_CBS",
  114. "RTGS_WAIT_CBS",
  115. };
  116. #endif /* #ifndef CONFIG_TINY_RCU */
  117. ////////////////////////////////////////////////////////////////////////
  118. //
  119. // Generic code.
  120. /* Record grace-period phase and time. */
  121. static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
  122. {
  123. rtp->gp_state = newstate;
  124. rtp->gp_jiffies = jiffies;
  125. }
  126. #ifndef CONFIG_TINY_RCU
  127. /* Return state name. */
  128. static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
  129. {
  130. int i = data_race(rtp->gp_state); // Let KCSAN detect update races
  131. int j = READ_ONCE(i); // Prevent the compiler from reading twice
  132. if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
  133. return "???";
  134. return rcu_tasks_gp_state_names[j];
  135. }
  136. #endif /* #ifndef CONFIG_TINY_RCU */
  137. // Enqueue a callback for the specified flavor of Tasks RCU.
  138. static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
  139. struct rcu_tasks *rtp)
  140. {
  141. unsigned long flags;
  142. bool needwake;
  143. rhp->next = NULL;
  144. rhp->func = func;
  145. raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
  146. needwake = !rtp->cbs_head;
  147. WRITE_ONCE(*rtp->cbs_tail, rhp);
  148. rtp->cbs_tail = &rhp->next;
  149. raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
  150. /* We can't create the thread unless interrupts are enabled. */
  151. if (needwake && READ_ONCE(rtp->kthread_ptr))
  152. wake_up(&rtp->cbs_wq);
  153. }
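/*
 * Minimal sketch of the tail-pointer enqueue idiom used above, restated in
 * isolation (struct toy_list and toy_enqueue() are hypothetical and shown
 * only for illustration):
 *
 *	struct toy_list {
 *		struct rcu_head *head;	// First callback, or NULL if empty.
 *		struct rcu_head **tail;	// &head when empty, else &last->next.
 *	};
 *
 *	static void toy_enqueue(struct toy_list *l, struct rcu_head *rhp)
 *	{
 *		rhp->next = NULL;
 *		*l->tail = rhp;		// Correct for empty and non-empty lists alike.
 *		l->tail = &rhp->next;	// The new element is now the tail.
 *	}
 *
 * Because ->cbs_tail always points at the link to fill in next, enqueueing
 * needs no empty-list special case, and rcu_tasks_kthread() below can
 * snapshot the whole list simply by resetting ->cbs_head and ->cbs_tail.
 */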
  154. // Wait for a grace period for the specified flavor of Tasks RCU.
  155. static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
  156. {
  157. /* Complain if the scheduler has not started. */
  158. RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
  159. "synchronize_rcu_tasks called too soon");
  160. /* Wait for the grace period. */
  161. wait_rcu_gp(rtp->call_func);
  162. }
  163. /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
  164. static int __noreturn rcu_tasks_kthread(void *arg)
  165. {
  166. unsigned long flags;
  167. struct rcu_head *list;
  168. struct rcu_head *next;
  169. struct rcu_tasks *rtp = arg;
  170. /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
  171. housekeeping_affine(current, HK_FLAG_RCU);
  172. WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
  173. /*
  174. * Each pass through the following loop makes one check for
  175. * newly arrived callbacks, and, if there are some, waits for
  176. * one RCU-tasks grace period and then invokes the callbacks.
  177. * This loop is terminated by the system going down. ;-)
  178. */
  179. for (;;) {
  180. set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
  181. /* Pick up any new callbacks. */
  182. raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
  183. smp_mb__after_spinlock(); // Order updates vs. GP.
  184. list = rtp->cbs_head;
  185. rtp->cbs_head = NULL;
  186. rtp->cbs_tail = &rtp->cbs_head;
  187. raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
  188. /* If there were none, wait a bit and start over. */
  189. if (!list) {
  190. wait_event_interruptible(rtp->cbs_wq,
  191. READ_ONCE(rtp->cbs_head));
  192. if (!rtp->cbs_head) {
  193. WARN_ON(signal_pending(current));
  194. set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
  195. schedule_timeout_idle(HZ/10);
  196. }
  197. continue;
  198. }
  199. // Wait for one grace period.
  200. set_tasks_gp_state(rtp, RTGS_WAIT_GP);
  201. rtp->gp_start = jiffies;
  202. rtp->gp_func(rtp);
  203. rtp->n_gps++;
  204. /* Invoke the callbacks. */
  205. set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
  206. while (list) {
  207. next = list->next;
  208. local_bh_disable();
  209. list->func(list);
  210. local_bh_enable();
  211. list = next;
  212. cond_resched();
  213. }
  214. /* Paranoid sleep to keep this from entering a tight loop */
  215. schedule_timeout_idle(rtp->gp_sleep);
  216. }
  217. }
  218. /* Spawn RCU-tasks grace-period kthread. */
  219. static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
  220. {
  221. struct task_struct *t;
  222. t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
  223. if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
  224. return;
  225. smp_mb(); /* Ensure others see full kthread. */
  226. }
  227. #ifndef CONFIG_TINY_RCU
  228. /*
  229. * Print any non-default Tasks RCU settings.
  230. */
  231. static void __init rcu_tasks_bootup_oddness(void)
  232. {
  233. #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
  234. if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
  235. pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
  236. #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
  237. #ifdef CONFIG_TASKS_RCU
  238. pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
  239. #endif /* #ifdef CONFIG_TASKS_RCU */
  240. #ifdef CONFIG_TASKS_RUDE_RCU
  241. pr_info("\tRude variant of Tasks RCU enabled.\n");
  242. #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
  243. #ifdef CONFIG_TASKS_TRACE_RCU
  244. pr_info("\tTracing variant of Tasks RCU enabled.\n");
  245. #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
  246. }
  247. #endif /* #ifndef CONFIG_TINY_RCU */
  248. #ifndef CONFIG_TINY_RCU
  249. /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
  250. static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
  251. {
  252. pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
  253. rtp->kname,
  254. tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
  255. jiffies - data_race(rtp->gp_jiffies),
  256. data_race(rtp->n_gps),
  257. data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
  258. ".k"[!!data_race(rtp->kthread_ptr)],
  259. ".C"[!!data_race(rtp->cbs_head)],
  260. s);
  261. }
  262. #endif /* #ifndef CONFIG_TINY_RCU */
  263. static void exit_tasks_rcu_finish_trace(struct task_struct *t);
  264. #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
  265. ////////////////////////////////////////////////////////////////////////
  266. //
  267. // Shared code between task-list-scanning variants of Tasks RCU.
  268. /* Wait for one RCU-tasks grace period. */
  269. static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
  270. {
  271. struct task_struct *g, *t;
  272. unsigned long lastreport;
  273. LIST_HEAD(holdouts);
  274. int fract;
  275. set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
  276. rtp->pregp_func();
  277. /*
  278. * There were callbacks, so we need to wait for an RCU-tasks
  279. * grace period. Start off by scanning the task list for tasks
  280. * that are not already voluntarily blocked. Mark these tasks
  281. * and make a list of them in holdouts.
  282. */
  283. set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
  284. rcu_read_lock();
  285. for_each_process_thread(g, t)
  286. rtp->pertask_func(t, &holdouts);
  287. rcu_read_unlock();
  288. set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
  289. rtp->postscan_func(&holdouts);
  290. /*
  291. * Each pass through the following loop scans the list of holdout
  292. * tasks, removing any that are no longer holdouts. When the list
  293. * is empty, we are done.
  294. */
  295. lastreport = jiffies;
  296. // Start off with initial wait and slowly back off to 1 HZ wait.
  297. fract = rtp->init_fract;
  298. if (fract > HZ)
  299. fract = HZ;
  300. for (;;) {
  301. bool firstreport;
  302. bool needreport;
  303. int rtst;
  304. if (list_empty(&holdouts))
  305. break;
  306. /* Slowly back off waiting for holdouts */
  307. set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
  308. schedule_timeout_idle(HZ/fract);
  309. if (fract > 1)
  310. fract--;
  311. rtst = READ_ONCE(rcu_task_stall_timeout);
  312. needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
  313. if (needreport)
  314. lastreport = jiffies;
  315. firstreport = true;
  316. WARN_ON(signal_pending(current));
  317. set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
  318. rtp->holdouts_func(&holdouts, needreport, &firstreport);
  319. }
  320. set_tasks_gp_state(rtp, RTGS_POST_GP);
  321. rtp->postgp_func(rtp);
  322. }
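/*
 * Worked example of the backoff above, assuming HZ=1000 and the classic
 * flavor's init_fract of 10 (set in rcu_spawn_tasks_kthread() below): the
 * successive waits are HZ/10, HZ/9, ..., HZ/2, HZ/1 jiffies, that is,
 * 100 ms, 111 ms, 125 ms, ..., 500 ms, 1000 ms, after which fract stays at
 * 1 and each further pass waits a full second until the holdout list empties.
 */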
  323. #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
  324. #ifdef CONFIG_TASKS_RCU
  325. ////////////////////////////////////////////////////////////////////////
  326. //
  327. // Simple variant of RCU whose quiescent states are voluntary context
  328. // switch, cond_resched_rcu_qs(), user-space execution, and idle.
  329. // As such, grace periods can take one good long time. There are no
  330. // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
  331. // because this implementation is intended to get the system into a safe
  332. // state for some of the manipulations involved in tracing and the like.
  333. // Finally, this implementation does not support high call_rcu_tasks()
  334. // rates from multiple CPUs. If this is required, per-CPU callback lists
  335. // will be needed.
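/*
 * Illustrative sketch of the usage model described above (my_trampoline()
 * and do_traced_things() are hypothetical).  A Tasks RCU "reader" is simply
 * any stretch of kernel code that does not pass through a quiescent state,
 * that is, does not voluntarily context switch, run in userspace, or idle:
 *
 *	// No rcu_read_lock()-style marker exists or is needed.  As long as
 *	// this code does not voluntarily block, the Tasks RCU grace period
 *	// waits for it, even if it is preempted partway through.
 *	void my_trampoline(void)
 *	{
 *		do_traced_things();
 *	}
 *
 * The matching update side invokes synchronize_rcu_tasks() or
 * call_rcu_tasks() before freeing the memory holding my_trampoline().
 */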
  336. /* Pre-grace-period preparation. */
  337. static void rcu_tasks_pregp_step(void)
  338. {
  339. /*
  340. * Wait for all pre-existing t->on_rq and t->nvcsw transitions
  341. * to complete. Invoking synchronize_rcu() suffices because all
  342. * these transitions occur with interrupts disabled. Without this
  343. * synchronize_rcu(), a read-side critical section that started
  344. * before the grace period might be incorrectly seen as having
  345. * started after the grace period.
  346. *
  347. * This synchronize_rcu() also dispenses with the need for a
  348. * memory barrier on the first store to t->rcu_tasks_holdout,
  349. * as it forces the store to happen after the beginning of the
  350. * grace period.
  351. */
  352. synchronize_rcu();
  353. }
  354. /* Per-task initial processing. */
  355. static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
  356. {
  357. if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
  358. get_task_struct(t);
  359. t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
  360. WRITE_ONCE(t->rcu_tasks_holdout, true);
  361. list_add(&t->rcu_tasks_holdout_list, hop);
  362. }
  363. }
  364. /* Processing between scanning the tasklist and draining the holdout list. */
  365. static void rcu_tasks_postscan(struct list_head *hop)
  366. {
  367. /*
  368. * Wait for tasks that are in the process of exiting. This
  369. * does only part of the job, ensuring that all tasks that were
  370. * previously exiting reach the point where they have disabled
  371. * preemption, allowing the later synchronize_rcu() to finish
  372. * the job.
  373. */
  374. synchronize_srcu(&tasks_rcu_exit_srcu);
  375. }
  376. /* See if tasks are still holding out, complain if so. */
  377. static void check_holdout_task(struct task_struct *t,
  378. bool needreport, bool *firstreport)
  379. {
  380. int cpu;
  381. if (!READ_ONCE(t->rcu_tasks_holdout) ||
  382. t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
  383. !READ_ONCE(t->on_rq) ||
  384. (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
  385. !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
  386. WRITE_ONCE(t->rcu_tasks_holdout, false);
  387. list_del_init(&t->rcu_tasks_holdout_list);
  388. put_task_struct(t);
  389. return;
  390. }
  391. rcu_request_urgent_qs_task(t);
  392. if (!needreport)
  393. return;
  394. if (*firstreport) {
  395. pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
  396. *firstreport = false;
  397. }
  398. cpu = task_cpu(t);
  399. pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
  400. t, ".I"[is_idle_task(t)],
  401. "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
  402. t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
  403. t->rcu_tasks_idle_cpu, cpu);
  404. sched_show_task(t);
  405. }
  406. /* Scan the holdout lists for tasks no longer holding out. */
  407. static void check_all_holdout_tasks(struct list_head *hop,
  408. bool needreport, bool *firstreport)
  409. {
  410. struct task_struct *t, *t1;
  411. list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
  412. check_holdout_task(t, needreport, firstreport);
  413. cond_resched();
  414. }
  415. }
  416. /* Finish off the Tasks-RCU grace period. */
  417. static void rcu_tasks_postgp(struct rcu_tasks *rtp)
  418. {
  419. /*
  420. * Because ->on_rq and ->nvcsw are not guaranteed to have full
  421. * memory barriers prior to them in the schedule() path, memory
  422. * reordering on other CPUs could cause their RCU-tasks read-side
  423. * critical sections to extend past the end of the grace period.
  424. * However, because these ->nvcsw updates are carried out with
  425. * interrupts disabled, we can use synchronize_rcu() to force the
  426. * needed ordering on all such CPUs.
  427. *
  428. * This synchronize_rcu() also confines all ->rcu_tasks_holdout
  429. * accesses to be within the grace period, avoiding the need for
  430. * memory barriers for ->rcu_tasks_holdout accesses.
  431. *
  432. * In addition, this synchronize_rcu() waits for exiting tasks
  433. * to complete their final preempt_disable() region of execution,
  434. * cleaning up after the synchronize_srcu() above.
  435. */
  436. synchronize_rcu();
  437. }
  438. void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
  439. DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
  440. /**
  441. * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
  442. * @rhp: structure to be used for queueing the RCU updates.
  443. * @func: actual callback function to be invoked after the grace period
  444. *
  445. * The callback function will be invoked some time after a full grace
  446. * period elapses, in other words after all currently executing RCU
  447. * read-side critical sections have completed. call_rcu_tasks() assumes
  448. * that the read-side critical sections end at a voluntary context
  449. * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
  450. * or transition to usermode execution. As such, there are no read-side
  451. * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
  452. * this primitive is intended to determine that all tasks have passed
  453. * through a safe state, not so much for data-structure synchronization.
  454. *
  455. * See the description of call_rcu() for more detailed information on
  456. * memory ordering guarantees.
  457. */
  458. void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
  459. {
  460. call_rcu_tasks_generic(rhp, func, &rcu_tasks);
  461. }
  462. EXPORT_SYMBOL_GPL(call_rcu_tasks);
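/*
 * Usage sketch for call_rcu_tasks() (struct my_tramp and my_tramp_free_cb()
 * are hypothetical, shown only for illustration): asynchronously free a
 * trampoline once no task can still be executing inside it.
 *
 *	struct my_tramp {
 *		void *text;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		vfree(tp->text);	// Safe: a full Tasks RCU GP has elapsed.
 *		kfree(tp);
 *	}
 *
 *	// After unhooking the trampoline from all call sites:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */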
  463. /**
  464. * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
  465. *
  466. * Control will return to the caller some time after a full rcu-tasks
  467. * grace period has elapsed, in other words after all currently
  468. * executing rcu-tasks read-side critical sections have completed. These
  469. * read-side critical sections are delimited by calls to schedule(),
  470. * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
  471. * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
  472. *
  473. * This is a very specialized primitive, intended only for a few uses in
  474. * tracing and other situations requiring manipulation of function
  475. * preambles and profiling hooks. The synchronize_rcu_tasks() function
  476. * is not (yet) intended for heavy use from multiple CPUs.
  477. *
  478. * See the description of synchronize_rcu() for more detailed information
  479. * on memory ordering guarantees.
  480. */
  481. void synchronize_rcu_tasks(void)
  482. {
  483. synchronize_rcu_tasks_generic(&rcu_tasks);
  484. }
  485. EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
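/*
 * Synchronous usage sketch (reusing the hypothetical struct my_tramp from
 * the call_rcu_tasks() example above; unhook_my_trampoline() is likewise
 * hypothetical): unhook the trampoline, wait for a full Tasks RCU grace
 * period, and only then free its memory.
 *
 *	unhook_my_trampoline(tp);	// No new tasks can enter it...
 *	synchronize_rcu_tasks();	// ...and all old ones have left it.
 *	vfree(tp->text);
 *	kfree(tp);
 */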
  486. /**
  487. * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
  488. *
  489. * Although the current implementation is guaranteed to wait, it is not
  490. * obligated to, for example, if there are no pending callbacks.
  491. */
  492. void rcu_barrier_tasks(void)
  493. {
  494. /* There is only one callback queue, so this is easy. ;-) */
  495. synchronize_rcu_tasks();
  496. }
  497. EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
  498. static int __init rcu_spawn_tasks_kthread(void)
  499. {
  500. rcu_tasks.gp_sleep = HZ / 10;
  501. rcu_tasks.init_fract = 10;
  502. rcu_tasks.pregp_func = rcu_tasks_pregp_step;
  503. rcu_tasks.pertask_func = rcu_tasks_pertask;
  504. rcu_tasks.postscan_func = rcu_tasks_postscan;
  505. rcu_tasks.holdouts_func = check_all_holdout_tasks;
  506. rcu_tasks.postgp_func = rcu_tasks_postgp;
  507. rcu_spawn_tasks_kthread_generic(&rcu_tasks);
  508. return 0;
  509. }
  510. #ifndef CONFIG_TINY_RCU
  511. static void show_rcu_tasks_classic_gp_kthread(void)
  512. {
  513. show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
  514. }
  515. #endif /* #ifndef CONFIG_TINY_RCU */
  516. /* Do the srcu_read_lock() for the above synchronize_srcu(). */
  517. void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
  518. {
  519. preempt_disable();
  520. current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
  521. preempt_enable();
  522. }
  523. /* Do the srcu_read_unlock() for the above synchronize_srcu(). */
  524. void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
  525. {
  526. struct task_struct *t = current;
  527. preempt_disable();
  528. __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
  529. preempt_enable();
  530. exit_tasks_rcu_finish_trace(t);
  531. }
  532. #else /* #ifdef CONFIG_TASKS_RCU */
  533. static inline void show_rcu_tasks_classic_gp_kthread(void) { }
  534. void exit_tasks_rcu_start(void) { }
  535. void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
  536. #endif /* #else #ifdef CONFIG_TASKS_RCU */
  537. #ifdef CONFIG_TASKS_RUDE_RCU
  538. ////////////////////////////////////////////////////////////////////////
  539. //
  540. // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
  541. // passing an empty function to schedule_on_each_cpu(). This approach
  542. // provides an asynchronous call_rcu_tasks_rude() API and batching
  543. // of concurrent calls to the synchronous synchronize_rcu_rude() API.
  544. // This sends IPIs far and wide and induces otherwise unnecessary context
  545. // switches on all online CPUs, whether idle or not.
  546. // Empty function to allow workqueues to force a context switch.
  547. static void rcu_tasks_be_rude(struct work_struct *work)
  548. {
  549. }
  550. // Wait for one rude RCU-tasks grace period.
  551. static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
  552. {
  553. rtp->n_ipis += cpumask_weight(cpu_online_mask);
  554. schedule_on_each_cpu(rcu_tasks_be_rude);
  555. }
  556. void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
  557. DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
  558. "RCU Tasks Rude");
  559. /**
  560. * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
  561. * @rhp: structure to be used for queueing the RCU updates.
  562. * @func: actual callback function to be invoked after the grace period
  563. *
  564. * The callback function will be invoked some time after a full grace
  565. * period elapses, in other words after all currently executing RCU
  566. * read-side critical sections have completed. call_rcu_tasks_rude()
  567. * assumes that the read-side critical sections end at context switch,
  568. * cond_resched_rcu_qs(), or transition to usermode execution. As such,
  569. * there are no read-side primitives analogous to rcu_read_lock() and
  570. * rcu_read_unlock() because this primitive is intended to determine
  571. * that all tasks have passed through a safe state, not so much for
  572. * data-structure synchronization.
  573. *
  574. * See the description of call_rcu() for more detailed information on
  575. * memory ordering guarantees.
  576. */
  577. void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
  578. {
  579. call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
  580. }
  581. EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
  582. /**
  583. * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
  584. *
  585. * Control will return to the caller some time after a rude rcu-tasks
  586. * grace period has elapsed, in other words after all currently
  587. * executing rcu-tasks read-side critical sections have completed. These
  588. * read-side critical sections are delimited by calls to schedule(),
  589. * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
  590. * anyway) cond_resched().
  591. *
  592. * This is a very specialized primitive, intended only for a few uses in
  593. * tracing and other situations requiring manipulation of function preambles
  594. * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
  595. * (yet) intended for heavy use from multiple CPUs.
  596. *
  597. * See the description of synchronize_rcu() for more detailed information
  598. * on memory ordering guarantees.
  599. */
  600. void synchronize_rcu_tasks_rude(void)
  601. {
  602. synchronize_rcu_tasks_generic(&rcu_tasks_rude);
  603. }
  604. EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
  605. /**
  606. * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
  607. *
  608. * Although the current implementation is guaranteed to wait, it is not
  609. * obligated to, for example, if there are no pending callbacks.
  610. */
  611. void rcu_barrier_tasks_rude(void)
  612. {
  613. /* There is only one callback queue, so this is easy. ;-) */
  614. synchronize_rcu_tasks_rude();
  615. }
  616. EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
  617. static int __init rcu_spawn_tasks_rude_kthread(void)
  618. {
  619. rcu_tasks_rude.gp_sleep = HZ / 10;
  620. rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
  621. return 0;
  622. }
  623. #ifndef CONFIG_TINY_RCU
  624. static void show_rcu_tasks_rude_gp_kthread(void)
  625. {
  626. show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
  627. }
  628. #endif /* #ifndef CONFIG_TINY_RCU */
  629. #else /* #ifdef CONFIG_TASKS_RUDE_RCU */
  630. static void show_rcu_tasks_rude_gp_kthread(void) {}
  631. #endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
  632. ////////////////////////////////////////////////////////////////////////
  633. //
  634. // Tracing variant of Tasks RCU. This variant is designed to be used
  635. // to protect tracing hooks, including those of BPF. This variant
  636. // therefore:
  637. //
  638. // 1. Has explicit read-side markers to allow finite grace periods
  639. // in the face of in-kernel loops for PREEMPT=n builds.
  640. //
  641. // 2. Protects code in the idle loop, exception entry/exit, and
  642. // CPU-hotplug code paths, similar to the capabilities of SRCU.
  643. //
  644. // 3. Avoids expensive read-side instructions, having overhead similar
  645. // to that of Preemptible RCU.
  646. //
  647. // There are of course downsides. The grace-period code can send IPIs to
  648. // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
  649. // It is necessary to scan the full tasklist, much as for Tasks RCU. There
  650. // is a single callback queue guarded by a single lock, again, much as for
  651. // Tasks RCU. If needed, these downsides can be at least partially remedied.
  652. //
  653. // Perhaps most important, this variant of RCU does not affect the vanilla
  654. // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
  655. // readers can operate from idle, offline, and exception entry/exit in no
  656. // way allows rcu_preempt and rcu_sched readers to also do so.
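/*
 * Read-side usage sketch (do_my_bpf_hook() is a hypothetical stand-in for a
 * tracing or BPF callback): unlike the other Tasks RCU flavors, this one has
 * explicit, low-cost read-side markers.
 *
 *	rcu_read_lock_trace();
 *	do_my_bpf_hook();	// May run in PREEMPT=n loops, idle, or entry/exit.
 *	rcu_read_unlock_trace();
 *
 * Updaters then use call_rcu_tasks_trace() or synchronize_rcu_tasks_trace()
 * before freeing anything such a reader might still be referencing.
 */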
  657. // The lockdep state must be outside of #ifdef to be useful.
  658. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  659. static struct lock_class_key rcu_lock_trace_key;
  660. struct lockdep_map rcu_trace_lock_map =
  661. STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
  662. EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
  663. #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  664. #ifdef CONFIG_TASKS_TRACE_RCU
  665. static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
  666. static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
  667. // Record outstanding IPIs to each CPU. No point in sending two...
  668. static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
  669. // The number of detections of task quiescent state relying on
  670. // heavyweight readers executing explicit memory barriers.
  671. static unsigned long n_heavy_reader_attempts;
  672. static unsigned long n_heavy_reader_updates;
  673. static unsigned long n_heavy_reader_ofl_updates;
  674. void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
  675. DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
  676. "RCU Tasks Trace");
  677. /*
  678. * This irq_work handler allows rcu_read_unlock_trace() to be invoked
  679. * while the scheduler locks are held.
  680. */
  681. static void rcu_read_unlock_iw(struct irq_work *iwp)
  682. {
  683. wake_up(&trc_wait);
  684. }
  685. static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
  686. /* If we are the last reader, wake up the grace-period kthread. */
  687. void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
  688. {
  689. int nq = t->trc_reader_special.b.need_qs;
  690. if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
  691. t->trc_reader_special.b.need_mb)
  692. smp_mb(); // Pairs with update-side barriers.
  693. // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
  694. if (nq)
  695. WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
  696. WRITE_ONCE(t->trc_reader_nesting, nesting);
  697. if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
  698. irq_work_queue(&rcu_tasks_trace_iw);
  699. }
  700. EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
  701. /* Add a task to the holdout list, if it is not already on the list. */
  702. static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
  703. {
  704. if (list_empty(&t->trc_holdout_list)) {
  705. get_task_struct(t);
  706. list_add(&t->trc_holdout_list, bhp);
  707. }
  708. }
  709. /* Remove a task from the holdout list, if it is in fact present. */
  710. static void trc_del_holdout(struct task_struct *t)
  711. {
  712. if (!list_empty(&t->trc_holdout_list)) {
  713. list_del_init(&t->trc_holdout_list);
  714. put_task_struct(t);
  715. }
  716. }
  717. /* IPI handler to check task state. */
  718. static void trc_read_check_handler(void *t_in)
  719. {
  720. struct task_struct *t = current;
  721. struct task_struct *texp = t_in;
  722. // If the task is no longer running on this CPU, leave.
  723. if (unlikely(texp != t)) {
  724. if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
  725. wake_up(&trc_wait);
  726. goto reset_ipi; // Already on holdout list, so will check later.
  727. }
  728. // If the task is not in a read-side critical section, and
  729. // if this is the last reader, awaken the grace-period kthread.
  730. if (likely(!t->trc_reader_nesting)) {
  731. if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
  732. wake_up(&trc_wait);
  733. // Mark as checked after decrement to avoid false
  734. // positives on the above WARN_ON_ONCE().
  735. WRITE_ONCE(t->trc_reader_checked, true);
  736. goto reset_ipi;
  737. }
  738. // If we are racing with an rcu_read_unlock_trace(), try again later.
  739. if (unlikely(t->trc_reader_nesting < 0)) {
  740. if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
  741. wake_up(&trc_wait);
  742. goto reset_ipi;
  743. }
  744. WRITE_ONCE(t->trc_reader_checked, true);
  745. // Get here if the task is in a read-side critical section. Set
  746. // its state so that it will awaken the grace-period kthread upon
  747. // exit from that critical section.
  748. WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
  749. WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
  750. reset_ipi:
  751. // Allow future IPIs to be sent on CPU and for task.
  752. // Also order this IPI handler against any later manipulations of
  753. // the intended task.
  754. smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
  755. smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
  756. }
  757. /* Callback function for scheduler to check locked-down task. */
  758. static bool trc_inspect_reader(struct task_struct *t, void *arg)
  759. {
  760. int cpu = task_cpu(t);
  761. bool in_qs = false;
  762. bool ofl = cpu_is_offline(cpu);
  763. if (task_curr(t)) {
  764. WARN_ON_ONCE(ofl && !is_idle_task(t));
  765. // If no chance of heavyweight readers, do it the hard way.
  766. if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
  767. return false;
  768. // If heavyweight readers are enabled on the remote task,
  769. // we can inspect its state even though it is currently running.
  770. // However, we cannot safely change its state.
  771. n_heavy_reader_attempts++;
  772. if (!ofl && // Check for "running" idle tasks on offline CPUs.
  773. !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
  774. return false; // No quiescent state, do it the hard way.
  775. n_heavy_reader_updates++;
  776. if (ofl)
  777. n_heavy_reader_ofl_updates++;
  778. in_qs = true;
  779. } else {
  780. in_qs = likely(!t->trc_reader_nesting);
  781. }
  782. // Mark as checked so that the grace-period kthread will
  783. // remove it from the holdout list.
  784. t->trc_reader_checked = true;
  785. if (in_qs)
  786. return true; // Already in quiescent state, done!!!
  787. // The task is in a read-side critical section, so set up its
  788. // state so that it will awaken the grace-period kthread upon exit
  789. // from that critical section.
  790. atomic_inc(&trc_n_readers_need_end); // One more to wait on.
  791. WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
  792. WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
  793. return true;
  794. }
  795. /* Attempt to extract the state for the specified task. */
  796. static void trc_wait_for_one_reader(struct task_struct *t,
  797. struct list_head *bhp)
  798. {
  799. int cpu;
  800. // If a previous IPI is still in flight, let it complete.
  801. if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
  802. return;
  803. // The current task had better be in a quiescent state.
  804. if (t == current) {
  805. t->trc_reader_checked = true;
  806. WARN_ON_ONCE(t->trc_reader_nesting);
  807. return;
  808. }
  809. // Attempt to nail down the task for inspection.
  810. get_task_struct(t);
  811. if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
  812. put_task_struct(t);
  813. return;
  814. }
  815. put_task_struct(t);
  816. // If currently running, send an IPI, either way, add to list.
  817. trc_add_holdout(t, bhp);
  818. if (task_curr(t) &&
  819. time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
  820. // The task is currently running, so try IPIing it.
  821. cpu = task_cpu(t);
  822. // If there is already an IPI outstanding, let it happen.
  823. if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
  824. return;
  825. atomic_inc(&trc_n_readers_need_end);
  826. per_cpu(trc_ipi_to_cpu, cpu) = true;
  827. t->trc_ipi_to_cpu = cpu;
  828. rcu_tasks_trace.n_ipis++;
  829. if (smp_call_function_single(cpu,
  830. trc_read_check_handler, t, 0)) {
  831. // Just in case there is some other reason for
  832. // failure than the target CPU being offline.
  833. rcu_tasks_trace.n_ipis_fails++;
  834. per_cpu(trc_ipi_to_cpu, cpu) = false;
  835. t->trc_ipi_to_cpu = -1;
  836. if (atomic_dec_and_test(&trc_n_readers_need_end)) {
  837. WARN_ON_ONCE(1);
  838. wake_up(&trc_wait);
  839. }
  840. }
  841. }
  842. }
  843. /* Initialize for a new RCU-tasks-trace grace period. */
  844. static void rcu_tasks_trace_pregp_step(void)
  845. {
  846. int cpu;
  847. // Allow for fast-acting IPIs.
  848. atomic_set(&trc_n_readers_need_end, 1);
  849. // There shouldn't be any old IPIs, but...
  850. for_each_possible_cpu(cpu)
  851. WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
  852. // Disable CPU hotplug across the tasklist scan.
  853. // This also waits for all readers in CPU-hotplug code paths.
  854. cpus_read_lock();
  855. }
  856. /* Do first-round processing for the specified task. */
  857. static void rcu_tasks_trace_pertask(struct task_struct *t,
  858. struct list_head *hop)
  859. {
  860. // During early boot when there is only the one boot CPU, there
  861. // is no idle task for the other CPUs. Just return.
  862. if (unlikely(t == NULL))
  863. return;
  864. WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
  865. WRITE_ONCE(t->trc_reader_checked, false);
  866. t->trc_ipi_to_cpu = -1;
  867. trc_wait_for_one_reader(t, hop);
  868. }
  869. /*
  870. * Do intermediate processing between task and holdout scans and
  871. * pick up the idle tasks.
  872. */
  873. static void rcu_tasks_trace_postscan(struct list_head *hop)
  874. {
  875. int cpu;
  876. for_each_possible_cpu(cpu)
  877. rcu_tasks_trace_pertask(idle_task(cpu), hop);
  878. // Re-enable CPU hotplug now that the tasklist scan has completed.
  879. cpus_read_unlock();
  880. // Wait for late-stage exiting tasks to finish exiting.
  881. // These might have passed the call to exit_tasks_rcu_finish().
  882. synchronize_rcu();
  883. // Any tasks that exit after this point will set ->trc_reader_checked.
  884. }
  885. /* Show the state of a task stalling the current RCU tasks trace GP. */
  886. static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
  887. {
  888. int cpu;
  889. if (*firstreport) {
  890. pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
  891. *firstreport = false;
  892. }
  893. // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
  894. cpu = task_cpu(t);
  895. pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
  896. t->pid,
  897. ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
  898. ".i"[is_idle_task(t)],
  899. ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
  900. t->trc_reader_nesting,
  901. " N"[!!t->trc_reader_special.b.need_qs],
  902. cpu);
  903. sched_show_task(t);
  904. }
  905. /* List stalled IPIs for RCU tasks trace. */
  906. static void show_stalled_ipi_trace(void)
  907. {
  908. int cpu;
  909. for_each_possible_cpu(cpu)
  910. if (per_cpu(trc_ipi_to_cpu, cpu))
  911. pr_alert("\tIPI outstanding to CPU %d\n", cpu);
  912. }
  913. /* Do one scan of the holdout list. */
  914. static void check_all_holdout_tasks_trace(struct list_head *hop,
  915. bool needreport, bool *firstreport)
  916. {
  917. struct task_struct *g, *t;
  918. // Disable CPU hotplug across the holdout list scan.
  919. cpus_read_lock();
  920. list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
  921. // If safe and needed, try to check the current task.
  922. if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
  923. !READ_ONCE(t->trc_reader_checked))
  924. trc_wait_for_one_reader(t, hop);
  925. // If check succeeded, remove this task from the list.
  926. if (READ_ONCE(t->trc_reader_checked))
  927. trc_del_holdout(t);
  928. else if (needreport)
  929. show_stalled_task_trace(t, firstreport);
  930. }
  931. // Re-enable CPU hotplug now that the holdout list scan has completed.
  932. cpus_read_unlock();
  933. if (needreport) {
  934. if (*firstreport)
  935. pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
  936. show_stalled_ipi_trace();
  937. }
  938. }
  939. /* Wait for grace period to complete and provide ordering. */
  940. static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
  941. {
  942. bool firstreport;
  943. struct task_struct *g, *t;
  944. LIST_HEAD(holdouts);
  945. long ret;
  946. // Remove the safety count.
  947. smp_mb__before_atomic(); // Order vs. earlier atomics
  948. atomic_dec(&trc_n_readers_need_end);
  949. smp_mb__after_atomic(); // Order vs. later atomics
  950. // Wait for readers.
  951. set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
  952. for (;;) {
  953. ret = wait_event_idle_exclusive_timeout(
  954. trc_wait,
  955. atomic_read(&trc_n_readers_need_end) == 0,
  956. READ_ONCE(rcu_task_stall_timeout));
  957. if (ret)
  958. break; // Count reached zero.
  959. // Stall warning time, so make a list of the offenders.
  960. rcu_read_lock();
  961. for_each_process_thread(g, t)
  962. if (READ_ONCE(t->trc_reader_special.b.need_qs))
  963. trc_add_holdout(t, &holdouts);
  964. rcu_read_unlock();
  965. firstreport = true;
  966. list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
  967. if (READ_ONCE(t->trc_reader_special.b.need_qs))
  968. show_stalled_task_trace(t, &firstreport);
  969. trc_del_holdout(t); // Release task_struct reference.
  970. }
  971. if (firstreport)
  972. pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
  973. show_stalled_ipi_trace();
  974. pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
  975. }
  976. smp_mb(); // Caller's code must be ordered after wakeup.
  977. // Pairs with pretty much every ordering primitive.
  978. }
  979. /* Report any needed quiescent state for this exiting task. */
  980. static void exit_tasks_rcu_finish_trace(struct task_struct *t)
  981. {
  982. WRITE_ONCE(t->trc_reader_checked, true);
  983. WARN_ON_ONCE(t->trc_reader_nesting);
  984. WRITE_ONCE(t->trc_reader_nesting, 0);
  985. if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
  986. rcu_read_unlock_trace_special(t, 0);
  987. }
  988. /**
  989. * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
  990. * @rhp: structure to be used for queueing the RCU updates.
  991. * @func: actual callback function to be invoked after the grace period
  992. *
  993. * The callback function will be invoked some time after a full grace
  994. * period elapses, in other words after all currently executing RCU
  995. * read-side critical sections have completed. call_rcu_tasks_trace()
  996. * assumes that the read-side critical sections end at context switch,
  997. * cond_resched_rcu_qs(), or transition to usermode execution. As such,
  998. * there are no read-side primitives analogous to rcu_read_lock() and
  999. * rcu_read_unlock() because this primitive is intended to determine
  1000. * that all tasks have passed through a safe state, not so much for
  1001. * data-structure synchronization.
  1002. *
  1003. * See the description of call_rcu() for more detailed information on
  1004. * memory ordering guarantees.
  1005. */
  1006. void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
  1007. {
  1008. call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
  1009. }
  1010. EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
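/*
 * Usage sketch for call_rcu_tasks_trace() (struct my_prog and
 * my_prog_free_cb() are hypothetical, loosely modeled on deferred teardown
 * of structures referenced from tracing hooks):
 *
 *	static void my_prog_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_prog *p = container_of(rhp, struct my_prog, rh);
 *
 *		kfree(p);	// No rcu_read_lock_trace() reader can still see p.
 *	}
 *
 *	// After removing every reference that readers could follow:
 *	call_rcu_tasks_trace(&p->rh, my_prog_free_cb);
 */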
  1011. /**
  1012. * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
  1013. *
  1014. * Control will return to the caller some time after a trace rcu-tasks
  1015. * grace period has elapsed, in other words after all currently executing
  1016. * rcu-tasks read-side critical sections have completed. These read-side
  1017. * critical sections are delimited by calls to rcu_read_lock_trace()
  1018. * and rcu_read_unlock_trace().
  1019. *
  1020. * This is a very specialized primitive, intended only for a few uses in
  1021. * tracing and other situations requiring manipulation of function preambles
  1022. * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
  1023. * (yet) intended for heavy use from multiple CPUs.
  1024. *
  1025. * See the description of synchronize_rcu() for more detailed information
  1026. * on memory ordering guarantees.
  1027. */
  1028. void synchronize_rcu_tasks_trace(void)
  1029. {
  1030. RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
  1031. synchronize_rcu_tasks_generic(&rcu_tasks_trace);
  1032. }
  1033. EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
  1034. /**
  1035. * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
  1036. *
  1037. * Although the current implementation is guaranteed to wait, it is not
  1038. * obligated to, for example, if there are no pending callbacks.
  1039. */
  1040. void rcu_barrier_tasks_trace(void)
  1041. {
  1042. /* There is only one callback queue, so this is easy. ;-) */
  1043. synchronize_rcu_tasks_trace();
  1044. }
  1045. EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
  1046. static int __init rcu_spawn_tasks_trace_kthread(void)
  1047. {
  1048. if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
  1049. rcu_tasks_trace.gp_sleep = HZ / 10;
  1050. rcu_tasks_trace.init_fract = 10;
  1051. } else {
  1052. rcu_tasks_trace.gp_sleep = HZ / 200;
  1053. if (rcu_tasks_trace.gp_sleep <= 0)
  1054. rcu_tasks_trace.gp_sleep = 1;
  1055. rcu_tasks_trace.init_fract = HZ / 5;
  1056. if (rcu_tasks_trace.init_fract <= 0)
  1057. rcu_tasks_trace.init_fract = 1;
  1058. }
  1059. rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
  1060. rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
  1061. rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
  1062. rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
  1063. rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
  1064. rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
  1065. return 0;
  1066. }
  1067. #ifndef CONFIG_TINY_RCU
  1068. static void show_rcu_tasks_trace_gp_kthread(void)
  1069. {
  1070. char buf[64];
  1071. sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
  1072. data_race(n_heavy_reader_ofl_updates),
  1073. data_race(n_heavy_reader_updates),
  1074. data_race(n_heavy_reader_attempts));
  1075. show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
  1076. }
  1077. #endif /* #ifndef CONFIG_TINY_RCU */
  1078. #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
  1079. static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
  1080. static inline void show_rcu_tasks_trace_gp_kthread(void) {}
  1081. #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
  1082. #ifndef CONFIG_TINY_RCU
  1083. void show_rcu_tasks_gp_kthreads(void)
  1084. {
  1085. show_rcu_tasks_classic_gp_kthread();
  1086. show_rcu_tasks_rude_gp_kthread();
  1087. show_rcu_tasks_trace_gp_kthread();
  1088. }
  1089. #endif /* #ifndef CONFIG_TINY_RCU */
  1090. void __init rcu_init_tasks_generic(void)
  1091. {
  1092. #ifdef CONFIG_TASKS_RCU
  1093. rcu_spawn_tasks_kthread();
  1094. #endif
  1095. #ifdef CONFIG_TASKS_RUDE_RCU
  1096. rcu_spawn_tasks_rude_kthread();
  1097. #endif
  1098. #ifdef CONFIG_TASKS_TRACE_RCU
  1099. rcu_spawn_tasks_trace_kthread();
  1100. #endif
  1101. }
  1102. #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
  1103. static inline void rcu_tasks_bootup_oddness(void) {}
  1104. void show_rcu_tasks_gp_kthreads(void) {}
  1105. #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */