- /* SPDX-License-Identifier: GPL-2.0+ */
- /*
- * Task-based RCU implementations.
- *
- * Copyright (C) 2020 Paul E. McKenney
- */
- #ifdef CONFIG_TASKS_RCU_GENERIC
- ////////////////////////////////////////////////////////////////////////
- //
- // Generic data structures.
- struct rcu_tasks;
- typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
- typedef void (*pregp_func_t)(void);
- typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
- typedef void (*postscan_func_t)(struct list_head *hop);
- typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
- typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
- /**
- * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
- * @cbs_head: Head of callback list.
- * @cbs_tail: Tail pointer for callback list.
- * @cbs_wq: Wait queue allowing new callbacks to get the kthread's attention.
- * @cbs_lock: Lock protecting callback list.
- * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
- * @gp_func: This flavor's grace-period-wait function.
- * @gp_state: Grace period's most recent state transition (debugging).
- * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
- * @init_fract: Initial backoff sleep interval.
- * @gp_jiffies: Time of last @gp_state transition.
- * @gp_start: Most recent grace-period start in jiffies.
- * @n_gps: Number of grace periods completed since boot.
- * @n_ipis: Number of IPIs sent to encourage grace periods to end.
- * @n_ipis_fails: Number of IPI-send failures.
- * @pregp_func: This flavor's pre-grace-period function (optional).
- * @pertask_func: This flavor's per-task scan function (optional).
- * @postscan_func: This flavor's post-task scan function (optional).
- * @holdouts_func: This flavor's holdout-list scan function (optional).
- * @postgp_func: This flavor's post-grace-period function (optional).
- * @call_func: This flavor's call_rcu()-equivalent function.
- * @name: This flavor's textual name.
- * @kname: This flavor's kthread name.
- */
- struct rcu_tasks {
- struct rcu_head *cbs_head;
- struct rcu_head **cbs_tail;
- struct wait_queue_head cbs_wq;
- raw_spinlock_t cbs_lock;
- int gp_state;
- int gp_sleep;
- int init_fract;
- unsigned long gp_jiffies;
- unsigned long gp_start;
- unsigned long n_gps;
- unsigned long n_ipis;
- unsigned long n_ipis_fails;
- struct task_struct *kthread_ptr;
- rcu_tasks_gp_func_t gp_func;
- pregp_func_t pregp_func;
- pertask_func_t pertask_func;
- postscan_func_t postscan_func;
- holdouts_func_t holdouts_func;
- postgp_func_t postgp_func;
- call_rcu_func_t call_func;
- char *name;
- char *kname;
- };
- #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
- static struct rcu_tasks rt_name = \
- { \
- .cbs_tail = &rt_name.cbs_head, \
- .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \
- .cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock), \
- .gp_func = gp, \
- .call_func = call, \
- .name = n, \
- .kname = #rt_name, \
- }
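- /*
-  * Illustrative sketch only (the "foo" flavor below is hypothetical and
-  * not defined in this file): a new flavor supplying its own
-  * grace-period-wait function would be instantiated as follows, with the
-  * remaining hook functions filled in at kthread-spawn time:
-  *
-  *	void call_rcu_tasks_foo(struct rcu_head *rhp, rcu_callback_t func);
-  *	DEFINE_RCU_TASKS(rcu_tasks_foo, rcu_tasks_foo_wait_gp,
-  *			 call_rcu_tasks_foo, "RCU Tasks Foo");
-  */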
- /* Track exiting tasks in order to allow them to be waited for. */
- DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
- /* Avoid IPIing CPUs early in the grace period. */
- #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
- static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
- module_param(rcu_task_ipi_delay, int, 0644);
- /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
- #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
- static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
- module_param(rcu_task_stall_timeout, int, 0644);
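- // Both values are in jiffies. Because this file is included into
- // kernel/rcu/update.c, these parameters appear under the "rcupdate."
- // prefix, for example on the kernel command line:
- //
- //	rcupdate.rcu_task_stall_timeout=<jiffies>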
- /* RCU tasks grace-period state for debugging. */
- #define RTGS_INIT 0
- #define RTGS_WAIT_WAIT_CBS 1
- #define RTGS_WAIT_GP 2
- #define RTGS_PRE_WAIT_GP 3
- #define RTGS_SCAN_TASKLIST 4
- #define RTGS_POST_SCAN_TASKLIST 5
- #define RTGS_WAIT_SCAN_HOLDOUTS 6
- #define RTGS_SCAN_HOLDOUTS 7
- #define RTGS_POST_GP 8
- #define RTGS_WAIT_READERS 9
- #define RTGS_INVOKE_CBS 10
- #define RTGS_WAIT_CBS 11
- #ifndef CONFIG_TINY_RCU
- static const char * const rcu_tasks_gp_state_names[] = {
- "RTGS_INIT",
- "RTGS_WAIT_WAIT_CBS",
- "RTGS_WAIT_GP",
- "RTGS_PRE_WAIT_GP",
- "RTGS_SCAN_TASKLIST",
- "RTGS_POST_SCAN_TASKLIST",
- "RTGS_WAIT_SCAN_HOLDOUTS",
- "RTGS_SCAN_HOLDOUTS",
- "RTGS_POST_GP",
- "RTGS_WAIT_READERS",
- "RTGS_INVOKE_CBS",
- "RTGS_WAIT_CBS",
- };
- #endif /* #ifndef CONFIG_TINY_RCU */
- ////////////////////////////////////////////////////////////////////////
- //
- // Generic code.
- /* Record grace-period phase and time. */
- static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
- {
- rtp->gp_state = newstate;
- rtp->gp_jiffies = jiffies;
- }
- #ifndef CONFIG_TINY_RCU
- /* Return state name. */
- static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
- {
- int i = data_race(rtp->gp_state); // Let KCSAN detect update races
- int j = READ_ONCE(i); // Prevent the compiler from reading twice
- if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
- return "???";
- return rcu_tasks_gp_state_names[j];
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- // Enqueue a callback for the specified flavor of Tasks RCU.
- static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
- struct rcu_tasks *rtp)
- {
- unsigned long flags;
- bool needwake;
- rhp->next = NULL;
- rhp->func = func;
- raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
- needwake = !rtp->cbs_head;
- WRITE_ONCE(*rtp->cbs_tail, rhp);
- rtp->cbs_tail = &rhp->next;
- raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
- /* We can't create the thread unless interrupts are enabled. */
- if (needwake && READ_ONCE(rtp->kthread_ptr))
- wake_up(&rtp->cbs_wq);
- }
- // Wait for a grace period for the specified flavor of Tasks RCU.
- static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
- {
- /* Complain if the scheduler has not started. */
- RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
- "synchronize_rcu_tasks called too soon");
- /* Wait for the grace period. */
- wait_rcu_gp(rtp->call_func);
- }
- /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
- static int __noreturn rcu_tasks_kthread(void *arg)
- {
- unsigned long flags;
- struct rcu_head *list;
- struct rcu_head *next;
- struct rcu_tasks *rtp = arg;
- /* Run on housekeeping CPUs by default. The sysadmin can move it if desired. */
- housekeeping_affine(current, HK_FLAG_RCU);
- WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
- /*
- * Each pass through the following loop makes one check for
- * newly arrived callbacks, and, if there are some, waits for
- * one RCU-tasks grace period and then invokes the callbacks.
- * This loop is terminated by the system going down. ;-)
- */
- for (;;) {
- set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
- /* Pick up any new callbacks. */
- raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
- smp_mb__after_spinlock(); // Order updates vs. GP.
- list = rtp->cbs_head;
- rtp->cbs_head = NULL;
- rtp->cbs_tail = &rtp->cbs_head;
- raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
- /* If there were none, wait a bit and start over. */
- if (!list) {
- wait_event_interruptible(rtp->cbs_wq,
- READ_ONCE(rtp->cbs_head));
- if (!rtp->cbs_head) {
- WARN_ON(signal_pending(current));
- set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
- schedule_timeout_idle(HZ/10);
- }
- continue;
- }
- // Wait for one grace period.
- set_tasks_gp_state(rtp, RTGS_WAIT_GP);
- rtp->gp_start = jiffies;
- rtp->gp_func(rtp);
- rtp->n_gps++;
- /* Invoke the callbacks. */
- set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
- while (list) {
- next = list->next;
- local_bh_disable();
- list->func(list);
- local_bh_enable();
- list = next;
- cond_resched();
- }
- /* Paranoid sleep to keep this from entering a tight loop */
- schedule_timeout_idle(rtp->gp_sleep);
- }
- }
- /* Spawn RCU-tasks grace-period kthread. */
- static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
- {
- struct task_struct *t;
- t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
- if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
- return;
- smp_mb(); /* Ensure others see full kthread. */
- }
- #ifndef CONFIG_TINY_RCU
- /*
- * Print any non-default Tasks RCU settings.
- */
- static void __init rcu_tasks_bootup_oddness(void)
- {
- #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
- if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
- pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
- #endif /* defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
- #ifdef CONFIG_TASKS_RCU
- pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
- #endif /* #ifdef CONFIG_TASKS_RCU */
- #ifdef CONFIG_TASKS_RUDE_RCU
- pr_info("\tRude variant of Tasks RCU enabled.\n");
- #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
- #ifdef CONFIG_TASKS_TRACE_RCU
- pr_info("\tTracing variant of Tasks RCU enabled.\n");
- #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- #ifndef CONFIG_TINY_RCU
- /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
- static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
- {
- pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
- rtp->kname,
- tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
- jiffies - data_race(rtp->gp_jiffies),
- data_race(rtp->n_gps),
- data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
- ".k"[!!data_race(rtp->kthread_ptr)],
- ".C"[!!data_race(rtp->cbs_head)],
- s);
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- static void exit_tasks_rcu_finish_trace(struct task_struct *t);
- #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
- ////////////////////////////////////////////////////////////////////////
- //
- // Shared code between task-list-scanning variants of Tasks RCU.
- /* Wait for one RCU-tasks grace period. */
- static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
- {
- struct task_struct *g, *t;
- unsigned long lastreport;
- LIST_HEAD(holdouts);
- int fract;
- set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
- rtp->pregp_func();
- /*
- * There were callbacks, so we need to wait for an RCU-tasks
- * grace period. Start off by scanning the task list for tasks
- * that are not already voluntarily blocked. Mark these tasks
- * and make a list of them in holdouts.
- */
- set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
- rcu_read_lock();
- for_each_process_thread(g, t)
- rtp->pertask_func(t, &holdouts);
- rcu_read_unlock();
- set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
- rtp->postscan_func(&holdouts);
- /*
- * Each pass through the following loop scans the list of holdout
- * tasks, removing any that are no longer holdouts. When the list
- * is empty, we are done.
- */
- lastreport = jiffies;
- // Start off with initial wait and slowly back off to 1 HZ wait.
- fract = rtp->init_fract;
- if (fract > HZ)
- fract = HZ;
- for (;;) {
- bool firstreport;
- bool needreport;
- int rtst;
- if (list_empty(&holdouts))
- break;
- /* Slowly back off waiting for holdouts */
- set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
- schedule_timeout_idle(HZ/fract);
- if (fract > 1)
- fract--;
- rtst = READ_ONCE(rcu_task_stall_timeout);
- needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
- if (needreport)
- lastreport = jiffies;
- firstreport = true;
- WARN_ON(signal_pending(current));
- set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
- rtp->holdouts_func(&holdouts, needreport, &firstreport);
- }
- set_tasks_gp_state(rtp, RTGS_POST_GP);
- rtp->postgp_func(rtp);
- }
- #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
- #ifdef CONFIG_TASKS_RCU
- ////////////////////////////////////////////////////////////////////////
- //
- // Simple variant of RCU whose quiescent states are voluntary context
- // switch, cond_resched_rcu_qs(), user-space execution, and idle.
- // As such, grace periods can take one good long time. There are no
- // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
- // because this implementation is intended to get the system into a safe
- // state for some of the manipulations involved in tracing and the like.
- // Finally, this implementation does not support high call_rcu_tasks()
- // rates from multiple CPUs. If this is required, per-CPU callback lists
- // will be needed.
- /* Pre-grace-period preparation. */
- static void rcu_tasks_pregp_step(void)
- {
- /*
- * Wait for all pre-existing t->on_rq and t->nvcsw transitions
- * to complete. Invoking synchronize_rcu() suffices because all
- * these transitions occur with interrupts disabled. Without this
- * synchronize_rcu(), a read-side critical section that started
- * before the grace period might be incorrectly seen as having
- * started after the grace period.
- *
- * This synchronize_rcu() also dispenses with the need for a
- * memory barrier on the first store to t->rcu_tasks_holdout,
- * as it forces the store to happen after the beginning of the
- * grace period.
- */
- synchronize_rcu();
- }
- /* Per-task initial processing. */
- static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
- {
- if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
- get_task_struct(t);
- t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
- WRITE_ONCE(t->rcu_tasks_holdout, true);
- list_add(&t->rcu_tasks_holdout_list, hop);
- }
- }
- /* Processing between scanning the tasklist and draining the holdout list. */
- static void rcu_tasks_postscan(struct list_head *hop)
- {
- /*
- * Wait for tasks that are in the process of exiting. This
- * does only part of the job, ensuring that all tasks that were
- * previously exiting reach the point where they have disabled
- * preemption, allowing the later synchronize_rcu() to finish
- * the job.
- */
- synchronize_srcu(&tasks_rcu_exit_srcu);
- }
- /* See if tasks are still holding out, complain if so. */
- static void check_holdout_task(struct task_struct *t,
- bool needreport, bool *firstreport)
- {
- int cpu;
- if (!READ_ONCE(t->rcu_tasks_holdout) ||
- t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
- !READ_ONCE(t->on_rq) ||
- (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
- !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
- WRITE_ONCE(t->rcu_tasks_holdout, false);
- list_del_init(&t->rcu_tasks_holdout_list);
- put_task_struct(t);
- return;
- }
- rcu_request_urgent_qs_task(t);
- if (!needreport)
- return;
- if (*firstreport) {
- pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
- *firstreport = false;
- }
- cpu = task_cpu(t);
- pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
- t, ".I"[is_idle_task(t)],
- "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
- t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
- t->rcu_tasks_idle_cpu, cpu);
- sched_show_task(t);
- }
- /* Scan the holdout lists for tasks no longer holding out. */
- static void check_all_holdout_tasks(struct list_head *hop,
- bool needreport, bool *firstreport)
- {
- struct task_struct *t, *t1;
- list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
- check_holdout_task(t, needreport, firstreport);
- cond_resched();
- }
- }
- /* Finish off the Tasks-RCU grace period. */
- static void rcu_tasks_postgp(struct rcu_tasks *rtp)
- {
- /*
- * Because ->on_rq and ->nvcsw are not guaranteed to have full
- * memory barriers prior to them in the schedule() path, memory
- * reordering on other CPUs could cause their RCU-tasks read-side
- * critical sections to extend past the end of the grace period.
- * However, because these ->nvcsw updates are carried out with
- * interrupts disabled, we can use synchronize_rcu() to force the
- * needed ordering on all such CPUs.
- *
- * This synchronize_rcu() also confines all ->rcu_tasks_holdout
- * accesses to be within the grace period, avoiding the need for
- * memory barriers for ->rcu_tasks_holdout accesses.
- *
- * In addition, this synchronize_rcu() waits for exiting tasks
- * to complete their final preempt_disable() region of execution,
- * cleaning up after the synchronize_srcu() above.
- */
- synchronize_rcu();
- }
- void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
- DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
- /**
- * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
- * @rhp: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks() assumes
- * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
- * or transition to usermode execution. As such, there are no read-side
- * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
- * this primitive is intended to determine that all tasks have passed
- * through a safe state, not so much for data-structure synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
- void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
- {
- call_rcu_tasks_generic(rhp, func, &rcu_tasks);
- }
- EXPORT_SYMBOL_GPL(call_rcu_tasks);
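- /*
-  * Usage sketch (hypothetical caller, for illustration only): free an
-  * old trampoline once no task can still be executing within it, that
-  * is, once every task has passed through a voluntary context switch,
-  * usermode execution, or idle:
-  *
-  *	struct my_tramp {			// Hypothetical structure.
-  *		struct rcu_head rh;
-  *		void *insns;
-  *	};
-  *
-  *	static void my_tramp_free_cb(struct rcu_head *rhp)
-  *	{
-  *		kfree(container_of(rhp, struct my_tramp, rh));
-  *	}
-  *
-  *	// After unhooking old_tramp from all call sites:
-  *	call_rcu_tasks(&old_tramp->rh, my_tramp_free_cb);
-  */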
- /**
- * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
- *
- * Control will return to the caller some time after a full rcu-tasks
- * grace period has elapsed, in other words after all currently
- * executing rcu-tasks read-side critical sections have completed. These
- * read-side critical sections are delimited by calls to schedule(),
- * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
- * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
- *
- * This is a very specialized primitive, intended only for a few uses in
- * tracing and other situations requiring manipulation of function
- * preambles and profiling hooks. The synchronize_rcu_tasks() function
- * is not (yet) intended for heavy use from multiple CPUs.
- *
- * See the description of synchronize_rcu() for more detailed information
- * on memory ordering guarantees.
- */
- void synchronize_rcu_tasks(void)
- {
- synchronize_rcu_tasks_generic(&rcu_tasks);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
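- /*
-  * Usage sketch (hypothetical, for illustration only): the synchronous
-  * form suits infrequent teardown paths that may block:
-  *
-  *	remove_my_tracing_hook(hook);	// Hypothetical unhook operation.
-  *	synchronize_rcu_tasks();	// Wait for each task to pass a QS.
-  *	kfree(hook->trampoline);	// No task can now be executing in it.
-  */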
- /**
- * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
- *
- * Although the current implementation is guaranteed to wait, it is not
- * obligated to, for example, if there are no pending callbacks.
- */
- void rcu_barrier_tasks(void)
- {
- /* There is only one callback queue, so this is easy. ;-) */
- synchronize_rcu_tasks();
- }
- EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
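- /*
-  * Usage sketch (hypothetical, for illustration only): a module exit
-  * path must wait for callbacks queued by call_rcu_tasks() to finish
-  * before the module text containing those callbacks goes away:
-  *
-  *	static void my_module_exit(void)
-  *	{
-  *		unhook_all_my_trampolines();	// Hypothetical helper.
-  *		rcu_barrier_tasks();		// Wait for queued callbacks.
-  *	}
-  */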
- static int __init rcu_spawn_tasks_kthread(void)
- {
- rcu_tasks.gp_sleep = HZ / 10;
- rcu_tasks.init_fract = 10;
- rcu_tasks.pregp_func = rcu_tasks_pregp_step;
- rcu_tasks.pertask_func = rcu_tasks_pertask;
- rcu_tasks.postscan_func = rcu_tasks_postscan;
- rcu_tasks.holdouts_func = check_all_holdout_tasks;
- rcu_tasks.postgp_func = rcu_tasks_postgp;
- rcu_spawn_tasks_kthread_generic(&rcu_tasks);
- return 0;
- }
- #ifndef CONFIG_TINY_RCU
- static void show_rcu_tasks_classic_gp_kthread(void)
- {
- show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- /* Do the srcu_read_lock() for the above synchronize_srcu(). */
- void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
- {
- preempt_disable();
- current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
- preempt_enable();
- }
- /* Do the srcu_read_unlock() for the above synchronize_srcu(). */
- void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
- {
- struct task_struct *t = current;
- preempt_disable();
- __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
- preempt_enable();
- exit_tasks_rcu_finish_trace(t);
- }
- #else /* #ifdef CONFIG_TASKS_RCU */
- static inline void show_rcu_tasks_classic_gp_kthread(void) { }
- void exit_tasks_rcu_start(void) { }
- void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
- #endif /* #else #ifdef CONFIG_TASKS_RCU */
- #ifdef CONFIG_TASKS_RUDE_RCU
- ////////////////////////////////////////////////////////////////////////
- //
- // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
- // passing an empty function to schedule_on_each_cpu(). This approach
- // provides an asynchronous call_rcu_tasks_rude() API and batching
- // of concurrent calls to the synchronous synchronize_rcu_rude() API.
- // This sends IPIs far and wide and induces otherwise unnecessary context
- // switches on all online CPUs, whether idle or not.
- // Empty function to allow workqueues to force a context switch.
- static void rcu_tasks_be_rude(struct work_struct *work)
- {
- }
- // Wait for one rude RCU-tasks grace period.
- static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
- {
- rtp->n_ipis += cpumask_weight(cpu_online_mask);
- schedule_on_each_cpu(rcu_tasks_be_rude);
- }
- void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
- DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
- "RCU Tasks Rude");
- /**
- * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
- * @rhp: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks_rude()
- * assumes that the read-side critical sections end at context switch,
- * cond_resched_rcu_qs(), or transition to usermode execution. As such,
- * there are no read-side primitives analogous to rcu_read_lock() and
- * rcu_read_unlock() because this primitive is intended to determine
- * that all tasks have passed through a safe state, not so much for
- * data-structure synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
- void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
- {
- call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
- }
- EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
- /**
- * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
- *
- * Control will return to the caller some time after a rude rcu-tasks
- * grace period has elapsed, in other words after all currently
- * executing rcu-tasks read-side critical sections have completed. These
- * read-side critical sections are delimited by calls to schedule(),
- * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
- * anyway) cond_resched().
- *
- * This is a very specialized primitive, intended only for a few uses in
- * tracing and other situations requiring manipulation of function preambles
- * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
- * (yet) intended for heavy use from multiple CPUs.
- *
- * See the description of synchronize_rcu() for more detailed information
- * on memory ordering guarantees.
- */
- void synchronize_rcu_tasks_rude(void)
- {
- synchronize_rcu_tasks_generic(&rcu_tasks_rude);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
- /**
- * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
- *
- * Although the current implementation is guaranteed to wait, it is not
- * obligated to, for example, if there are no pending callbacks.
- */
- void rcu_barrier_tasks_rude(void)
- {
- /* There is only one callback queue, so this is easy. ;-) */
- synchronize_rcu_tasks_rude();
- }
- EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
- static int __init rcu_spawn_tasks_rude_kthread(void)
- {
- rcu_tasks_rude.gp_sleep = HZ / 10;
- rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
- return 0;
- }
- #ifndef CONFIG_TINY_RCU
- static void show_rcu_tasks_rude_gp_kthread(void)
- {
- show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- #else /* #ifdef CONFIG_TASKS_RUDE_RCU */
- static void show_rcu_tasks_rude_gp_kthread(void) {}
- #endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
- ////////////////////////////////////////////////////////////////////////
- //
- // Tracing variant of Tasks RCU. This variant is designed to be used
- // to protect tracing hooks, including those of BPF. This variant
- // therefore:
- //
- // 1. Has explicit read-side markers to allow finite grace periods
- // in the face of in-kernel loops for PREEMPT=n builds.
- //
- // 2. Protects code in the idle loop, exception entry/exit, and
- // CPU-hotplug code paths, similar to the capabilities of SRCU.
- //
- // 3. Avoids expensive read-side instructions, having overhead similar
- // to that of Preemptible RCU.
- //
- // There are of course downsides. The grace-period code can send IPIs to
- // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
- // It is necessary to scan the full tasklist, much as for Tasks RCU. There
- // is a single callback queue guarded by a single lock, again, much as for
- // Tasks RCU. If needed, these downsides can be at least partially remedied.
- //
- // Perhaps most important, this variant of RCU does not affect the vanilla
- // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
- // readers can operate from idle, offline, and exception entry/exit in no
- // way allows rcu_preempt and rcu_sched readers to also do so.
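- //
- // Reader-side sketch (hypothetical caller, for illustration only):
- // unlike the other Tasks-RCU flavors, this one has explicit read-side
- // markers, usable even from the idle loop and exception entry/exit:
- //
- //	rcu_read_lock_trace();
- //	hook = rcu_dereference_raw(my_trace_hook);	// Hypothetical hook.
- //	if (hook)
- //		hook->func(hook->data);
- //	rcu_read_unlock_trace();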
- // The lockdep state must be outside of #ifdef to be useful.
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- static struct lock_class_key rcu_lock_trace_key;
- struct lockdep_map rcu_trace_lock_map =
- STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
- EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
- #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
- #ifdef CONFIG_TASKS_TRACE_RCU
- static atomic_t trc_n_readers_need_end; // Number of waited-for readers.
- static DECLARE_WAIT_QUEUE_HEAD(trc_wait); // List of holdout tasks.
- // Record outstanding IPIs to each CPU. No point in sending two...
- static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
- // The number of detections of task quiescent states relying on
- // heavyweight readers executing explicit memory barriers.
- static unsigned long n_heavy_reader_attempts;
- static unsigned long n_heavy_reader_updates;
- static unsigned long n_heavy_reader_ofl_updates;
- void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
- DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
- "RCU Tasks Trace");
- /*
- * This irq_work handler allows rcu_read_unlock_trace() to be invoked
- * while the scheduler locks are held.
- */
- static void rcu_read_unlock_iw(struct irq_work *iwp)
- {
- wake_up(&trc_wait);
- }
- static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
- /* If we are the last reader, wake up the grace-period kthread. */
- void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
- {
- int nq = t->trc_reader_special.b.need_qs;
- if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
- t->trc_reader_special.b.need_mb)
- smp_mb(); // Pairs with update-side barriers.
- // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
- if (nq)
- WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
- WRITE_ONCE(t->trc_reader_nesting, nesting);
- if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
- irq_work_queue(&rcu_tasks_trace_iw);
- }
- EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
- /* Add a task to the holdout list, if it is not already on the list. */
- static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
- {
- if (list_empty(&t->trc_holdout_list)) {
- get_task_struct(t);
- list_add(&t->trc_holdout_list, bhp);
- }
- }
- /* Remove a task from the holdout list, if it is in fact present. */
- static void trc_del_holdout(struct task_struct *t)
- {
- if (!list_empty(&t->trc_holdout_list)) {
- list_del_init(&t->trc_holdout_list);
- put_task_struct(t);
- }
- }
- /* IPI handler to check task state. */
- static void trc_read_check_handler(void *t_in)
- {
- struct task_struct *t = current;
- struct task_struct *texp = t_in;
- // If the task is no longer running on this CPU, leave.
- if (unlikely(texp != t)) {
- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
- wake_up(&trc_wait);
- goto reset_ipi; // Already on holdout list, so will check later.
- }
- // If the task is not in a read-side critical section, and
- // if this is the last reader, awaken the grace-period kthread.
- if (likely(!t->trc_reader_nesting)) {
- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
- wake_up(&trc_wait);
- // Mark as checked after decrement to avoid false
- // positives on the above WARN_ON_ONCE().
- WRITE_ONCE(t->trc_reader_checked, true);
- goto reset_ipi;
- }
- // If we are racing with an rcu_read_unlock_trace(), try again later.
- if (unlikely(t->trc_reader_nesting < 0)) {
- if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
- wake_up(&trc_wait);
- goto reset_ipi;
- }
- WRITE_ONCE(t->trc_reader_checked, true);
- // Get here if the task is in a read-side critical section. Set
- // its state so that it will awaken the grace-period kthread upon
- // exit from that critical section.
- WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
- WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
- reset_ipi:
- // Allow future IPIs to be sent on CPU and for task.
- // Also order this IPI handler against any later manipulations of
- // the intended task.
- smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
- smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
- }
- /* Callback function for scheduler to check locked-down task. */
- static bool trc_inspect_reader(struct task_struct *t, void *arg)
- {
- int cpu = task_cpu(t);
- bool in_qs = false;
- bool ofl = cpu_is_offline(cpu);
- if (task_curr(t)) {
- WARN_ON_ONCE(ofl && !is_idle_task(t));
- // If no chance of heavyweight readers, do it the hard way.
- if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
- return false;
- // If heavyweight readers are enabled on the remote task,
- // we can inspect its state even though it is currently running.
- // However, we cannot safely change its state.
- n_heavy_reader_attempts++;
- if (!ofl && // Check for "running" idle tasks on offline CPUs.
- !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
- return false; // No quiescent state, do it the hard way.
- n_heavy_reader_updates++;
- if (ofl)
- n_heavy_reader_ofl_updates++;
- in_qs = true;
- } else {
- in_qs = likely(!t->trc_reader_nesting);
- }
- // Mark as checked so that the grace-period kthread will
- // remove it from the holdout list.
- t->trc_reader_checked = true;
- if (in_qs)
- return true; // Already in quiescent state, done!!!
- // The task is in a read-side critical section, so set up its
- // state so that it will awaken the grace-period kthread upon exit
- // from that critical section.
- atomic_inc(&trc_n_readers_need_end); // One more to wait on.
- WARN_ON_ONCE(t->trc_reader_special.b.need_qs);
- WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
- return true;
- }
- /* Attempt to extract the state for the specified task. */
- static void trc_wait_for_one_reader(struct task_struct *t,
- struct list_head *bhp)
- {
- int cpu;
- // If a previous IPI is still in flight, let it complete.
- if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
- return;
- // The current task had better be in a quiescent state.
- if (t == current) {
- t->trc_reader_checked = true;
- WARN_ON_ONCE(t->trc_reader_nesting);
- return;
- }
- // Attempt to nail down the task for inspection.
- get_task_struct(t);
- if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
- put_task_struct(t);
- return;
- }
- put_task_struct(t);
- // If currently running, send an IPI, either way, add to list.
- trc_add_holdout(t, bhp);
- if (task_curr(t) &&
- time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
- // The task is currently running, so try IPIing it.
- cpu = task_cpu(t);
- // If there is already an IPI outstanding, let it happen.
- if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
- return;
- atomic_inc(&trc_n_readers_need_end);
- per_cpu(trc_ipi_to_cpu, cpu) = true;
- t->trc_ipi_to_cpu = cpu;
- rcu_tasks_trace.n_ipis++;
- if (smp_call_function_single(cpu,
- trc_read_check_handler, t, 0)) {
- // Just in case there is some other reason for
- // failure than the target CPU being offline.
- rcu_tasks_trace.n_ipis_fails++;
- per_cpu(trc_ipi_to_cpu, cpu) = false;
- t->trc_ipi_to_cpu = -1; // Allow future IPI attempts to this task.
- if (atomic_dec_and_test(&trc_n_readers_need_end)) {
- WARN_ON_ONCE(1);
- wake_up(&trc_wait);
- }
- }
- }
- }
- /* Initialize for a new RCU-tasks-trace grace period. */
- static void rcu_tasks_trace_pregp_step(void)
- {
- int cpu;
- // Allow for fast-acting IPIs.
- atomic_set(&trc_n_readers_need_end, 1);
- // There shouldn't be any old IPIs, but...
- for_each_possible_cpu(cpu)
- WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
- // Disable CPU hotplug across the tasklist scan.
- // This also waits for all readers in CPU-hotplug code paths.
- cpus_read_lock();
- }
- /* Do first-round processing for the specified task. */
- static void rcu_tasks_trace_pertask(struct task_struct *t,
- struct list_head *hop)
- {
- // During early boot when there is only the one boot CPU, there
- // is no idle task for the other CPUs. Just return.
- if (unlikely(t == NULL))
- return;
- WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
- WRITE_ONCE(t->trc_reader_checked, false);
- t->trc_ipi_to_cpu = -1;
- trc_wait_for_one_reader(t, hop);
- }
- /*
- * Do intermediate processing between task and holdout scans and
- * pick up the idle tasks.
- */
- static void rcu_tasks_trace_postscan(struct list_head *hop)
- {
- int cpu;
- for_each_possible_cpu(cpu)
- rcu_tasks_trace_pertask(idle_task(cpu), hop);
- // Re-enable CPU hotplug now that the tasklist scan has completed.
- cpus_read_unlock();
- // Wait for late-stage exiting tasks to finish exiting.
- // These might have passed the call to exit_tasks_rcu_finish().
- synchronize_rcu();
- // Any tasks that exit after this point will set ->trc_reader_checked.
- }
- /* Show the state of a task stalling the current RCU tasks trace GP. */
- static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
- {
- int cpu;
- if (*firstreport) {
- pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
- *firstreport = false;
- }
- // FIXME: This should attempt to use try_invoke_on_nonrunning_task().
- cpu = task_cpu(t);
- pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
- t->pid,
- ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
- ".i"[is_idle_task(t)],
- ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
- t->trc_reader_nesting,
- " N"[!!t->trc_reader_special.b.need_qs],
- cpu);
- sched_show_task(t);
- }
- /* List stalled IPIs for RCU tasks trace. */
- static void show_stalled_ipi_trace(void)
- {
- int cpu;
- for_each_possible_cpu(cpu)
- if (per_cpu(trc_ipi_to_cpu, cpu))
- pr_alert("\tIPI outstanding to CPU %d\n", cpu);
- }
- /* Do one scan of the holdout list. */
- static void check_all_holdout_tasks_trace(struct list_head *hop,
- bool needreport, bool *firstreport)
- {
- struct task_struct *g, *t;
- // Disable CPU hotplug across the holdout list scan.
- cpus_read_lock();
- list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
- // If safe and needed, try to check the current task.
- if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
- !READ_ONCE(t->trc_reader_checked))
- trc_wait_for_one_reader(t, hop);
- // If check succeeded, remove this task from the list.
- if (READ_ONCE(t->trc_reader_checked))
- trc_del_holdout(t);
- else if (needreport)
- show_stalled_task_trace(t, firstreport);
- }
- // Re-enable CPU hotplug now that the holdout list scan has completed.
- cpus_read_unlock();
- if (needreport) {
- if (*firstreport)
- pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
- show_stalled_ipi_trace();
- }
- }
- /* Wait for grace period to complete and provide ordering. */
- static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
- {
- bool firstreport;
- struct task_struct *g, *t;
- LIST_HEAD(holdouts);
- long ret;
- // Remove the safety count.
- smp_mb__before_atomic(); // Order vs. earlier atomics
- atomic_dec(&trc_n_readers_need_end);
- smp_mb__after_atomic(); // Order vs. later atomics
- // Wait for readers.
- set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
- for (;;) {
- ret = wait_event_idle_exclusive_timeout(
- trc_wait,
- atomic_read(&trc_n_readers_need_end) == 0,
- READ_ONCE(rcu_task_stall_timeout));
- if (ret)
- break; // Count reached zero.
- // Stall warning time, so make a list of the offenders.
- rcu_read_lock();
- for_each_process_thread(g, t)
- if (READ_ONCE(t->trc_reader_special.b.need_qs))
- trc_add_holdout(t, &holdouts);
- rcu_read_unlock();
- firstreport = true;
- list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
- if (READ_ONCE(t->trc_reader_special.b.need_qs))
- show_stalled_task_trace(t, &firstreport);
- trc_del_holdout(t); // Release task_struct reference.
- }
- if (firstreport)
- pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
- show_stalled_ipi_trace();
- pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
- }
- smp_mb(); // Caller's code must be ordered after wakeup.
- // Pairs with pretty much every ordering primitive.
- }
- /* Report any needed quiescent state for this exiting task. */
- static void exit_tasks_rcu_finish_trace(struct task_struct *t)
- {
- WRITE_ONCE(t->trc_reader_checked, true);
- WARN_ON_ONCE(t->trc_reader_nesting);
- WRITE_ONCE(t->trc_reader_nesting, 0);
- if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
- rcu_read_unlock_trace_special(t, 0);
- }
- /**
- * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
- * @rhp: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_tasks_trace()
- * assumes that the read-side critical sections end at context switch,
- * cond_resched_rcu_qs(), or transition to usermode execution. As such,
- * there are no read-side primitives analogous to rcu_read_lock() and
- * rcu_read_unlock() because this primitive is intended to determine
- * that all tasks have passed through a safe state, not so much for
- * data-structure synchronization.
- *
- * See the description of call_rcu() for more detailed information on
- * memory ordering guarantees.
- */
- void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
- {
- call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
- }
- EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
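- /*
-  * Updater-side sketch (hypothetical, for illustration only), pairing
-  * with the reader-side sketch near the top of this variant: publish a
-  * new hook, then free the old one only after all readers are done:
-  *
-  *	old = xchg(&my_trace_hook, new);	// Hypothetical hook pointer.
-  *	if (old)
-  *		call_rcu_tasks_trace(&old->rh, my_hook_free_cb);
-  */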
- /**
- * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
- *
- * Control will return to the caller some time after a trace rcu-tasks
- * grace period has elapsed, in other words after all currently executing
- * rcu-tasks read-side critical sections have completed. These read-side
- * critical sections are delimited by calls to rcu_read_lock_trace()
- * and rcu_read_unlock_trace().
- *
- * This is a very specialized primitive, intended only for a few uses in
- * tracing and other situations requiring manipulation of function preambles
- * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
- * (yet) intended for heavy use from multiple CPUs.
- *
- * See the description of synchronize_rcu() for more detailed information
- * on memory ordering guarantees.
- */
- void synchronize_rcu_tasks_trace(void)
- {
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
- synchronize_rcu_tasks_generic(&rcu_tasks_trace);
- }
- EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
- /**
- * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
- *
- * Although the current implementation is guaranteed to wait, it is not
- * obligated to, for example, if there are no pending callbacks.
- */
- void rcu_barrier_tasks_trace(void)
- {
- /* There is only one callback queue, so this is easy. ;-) */
- synchronize_rcu_tasks_trace();
- }
- EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
- static int __init rcu_spawn_tasks_trace_kthread(void)
- {
- if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
- rcu_tasks_trace.gp_sleep = HZ / 10;
- rcu_tasks_trace.init_fract = 10;
- } else {
- rcu_tasks_trace.gp_sleep = HZ / 200;
- if (rcu_tasks_trace.gp_sleep <= 0)
- rcu_tasks_trace.gp_sleep = 1;
- rcu_tasks_trace.init_fract = HZ / 5;
- if (rcu_tasks_trace.init_fract <= 0)
- rcu_tasks_trace.init_fract = 1;
- }
- rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
- rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
- rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
- rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
- rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
- rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
- return 0;
- }
- #ifndef CONFIG_TINY_RCU
- static void show_rcu_tasks_trace_gp_kthread(void)
- {
- char buf[64];
- snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
- data_race(n_heavy_reader_ofl_updates),
- data_race(n_heavy_reader_updates),
- data_race(n_heavy_reader_attempts));
- show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
- static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
- static inline void show_rcu_tasks_trace_gp_kthread(void) {}
- #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
- #ifndef CONFIG_TINY_RCU
- void show_rcu_tasks_gp_kthreads(void)
- {
- show_rcu_tasks_classic_gp_kthread();
- show_rcu_tasks_rude_gp_kthread();
- show_rcu_tasks_trace_gp_kthread();
- }
- #endif /* #ifndef CONFIG_TINY_RCU */
- void __init rcu_init_tasks_generic(void)
- {
- #ifdef CONFIG_TASKS_RCU
- rcu_spawn_tasks_kthread();
- #endif
- #ifdef CONFIG_TASKS_RUDE_RCU
- rcu_spawn_tasks_rude_kthread();
- #endif
- #ifdef CONFIG_TASKS_TRACE_RCU
- rcu_spawn_tasks_trace_kthread();
- #endif
- }
- #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
- static inline void rcu_tasks_bootup_oddness(void) {}
- void show_rcu_tasks_gp_kthreads(void) {}
- #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */