rcutorture.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Read-Copy Update module-based torture test facility
  4. *
  5. * Copyright (C) IBM Corporation, 2005, 2006
  6. *
  7. * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
  8. * Josh Triplett <josh@joshtriplett.org>
  9. *
  10. * See also: Documentation/RCU/torture.rst
  11. */
  12. #define pr_fmt(fmt) fmt
  13. #include <linux/types.h>
  14. #include <linux/kernel.h>
  15. #include <linux/init.h>
  16. #include <linux/module.h>
  17. #include <linux/kthread.h>
  18. #include <linux/err.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/smp.h>
  21. #include <linux/rcupdate_wait.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/sched/signal.h>
  24. #include <uapi/linux/sched/types.h>
  25. #include <linux/atomic.h>
  26. #include <linux/bitops.h>
  27. #include <linux/completion.h>
  28. #include <linux/moduleparam.h>
  29. #include <linux/percpu.h>
  30. #include <linux/notifier.h>
  31. #include <linux/reboot.h>
  32. #include <linux/freezer.h>
  33. #include <linux/cpu.h>
  34. #include <linux/delay.h>
  35. #include <linux/stat.h>
  36. #include <linux/srcu.h>
  37. #include <linux/slab.h>
  38. #include <linux/trace_clock.h>
  39. #include <asm/byteorder.h>
  40. #include <linux/torture.h>
  41. #include <linux/vmalloc.h>
  42. #include <linux/sched/debug.h>
  43. #include <linux/sched/sysctl.h>
  44. #include <linux/oom.h>
  45. #include <linux/tick.h>
  46. #include <linux/rcupdate_trace.h>
  47. #include "rcu.h"
  48. MODULE_LICENSE("GPL");
  49. MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
  50. /* Bits for ->extendables field, extendables param, and related definitions. */
  51. #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
  52. #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
  53. #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
  54. #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
  55. #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
  56. #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
  57. #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
  58. #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
  59. #define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
  60. #define RCUTORTURE_MAX_EXTEND \
  61. (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
  62. RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
  63. #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
  64. /* Must be power of two minus one. */
  65. #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
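/*
 * Worked example of the encoding above (illustrative only): an SRCU
 * reader whose srcu_read_lock() returned index 1, and that has also
 * disabled bh and preemption, would be represented as
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RCU |
 *	RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT  ==  0x125
 *
 * that is, the flavor's readlock index sits above RCUTORTURE_RDR_MASK
 * while the extension flags occupy the low-order bits.
 */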
  66. torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
  67. "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
  68. torture_param(int, fqs_duration, 0,
  69. "Duration of fqs bursts (us), 0 to disable");
  70. torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
  71. torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
  72. torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
  73. torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
  74. torture_param(int, fwd_progress_holdoff, 60,
  75. "Time between forward-progress tests (s)");
  76. torture_param(bool, fwd_progress_need_resched, 1,
  77. "Hide cond_resched() behind need_resched()");
  78. torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
  79. torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
  80. torture_param(bool, gp_normal, false,
  81. "Use normal (non-expedited) GP wait primitives");
  82. torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
  83. torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
  84. torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
  85. torture_param(int, n_barrier_cbs, 0,
  86. "# of callbacks/kthreads for barrier testing");
  87. torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
  88. torture_param(int, nreaders, -1, "Number of RCU reader threads");
  89. torture_param(int, object_debug, 0,
  90. "Enable debug-object double call_rcu() testing");
  91. torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
  92. torture_param(int, onoff_interval, 0,
  93. "Time between CPU hotplugs (jiffies), 0=disable");
  94. torture_param(int, read_exit_delay, 13,
  95. "Delay between read-then-exit episodes (s)");
  96. torture_param(int, read_exit_burst, 16,
  97. "# of read-then-exit bursts per episode, zero to disable");
  98. torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
  99. torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
  100. torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
  101. torture_param(int, stall_cpu_holdoff, 10,
  102. "Time to wait before starting stall (s).");
  103. torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
  104. torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
  105. torture_param(int, stall_gp_kthread, 0,
  106. "Grace-period kthread stall duration (s).");
  107. torture_param(int, stat_interval, 60,
  108. "Number of seconds between stats printk()s");
  109. torture_param(int, stutter, 5, "Number of seconds to run/halt test");
  110. torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
  111. torture_param(int, test_boost_duration, 4,
  112. "Duration of each boost test, seconds.");
  113. torture_param(int, test_boost_interval, 7,
  114. "Interval between boost tests, seconds.");
  115. torture_param(bool, test_no_idle_hz, true,
  116. "Test support for tickless idle CPUs");
  117. torture_param(int, verbose, 1,
  118. "Enable verbose debugging printk()s");
  119. static char *torture_type = "rcu";
  120. module_param(torture_type, charp, 0444);
  121. MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
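/*
 * Example invocations (illustrative only).  The parameters above may be
 * supplied at module-load time or, prefixed with "rcutorture.", on the
 * kernel command line, for example:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *	rcutorture.torture_type=tasks-tracing rcutorture.stall_cpu=5
 */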
  122. static int nrealreaders;
  123. static struct task_struct *writer_task;
  124. static struct task_struct **fakewriter_tasks;
  125. static struct task_struct **reader_tasks;
  126. static struct task_struct *stats_task;
  127. static struct task_struct *fqs_task;
  128. static struct task_struct *boost_tasks[NR_CPUS];
  129. static struct task_struct *stall_task;
  130. static struct task_struct *fwd_prog_task;
  131. static struct task_struct **barrier_cbs_tasks;
  132. static struct task_struct *barrier_task;
  133. static struct task_struct *read_exit_task;
  134. #define RCU_TORTURE_PIPE_LEN 10
  135. struct rcu_torture {
  136. struct rcu_head rtort_rcu;
  137. int rtort_pipe_count;
  138. struct list_head rtort_free;
  139. int rtort_mbtest;
  140. };
  141. static LIST_HEAD(rcu_torture_freelist);
  142. static struct rcu_torture __rcu *rcu_torture_current;
  143. static unsigned long rcu_torture_current_version;
  144. static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
  145. static DEFINE_SPINLOCK(rcu_torture_lock);
  146. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
  147. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
  148. static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
  149. static atomic_t n_rcu_torture_alloc;
  150. static atomic_t n_rcu_torture_alloc_fail;
  151. static atomic_t n_rcu_torture_free;
  152. static atomic_t n_rcu_torture_mberror;
  153. static atomic_t n_rcu_torture_error;
  154. static long n_rcu_torture_barrier_error;
  155. static long n_rcu_torture_boost_ktrerror;
  156. static long n_rcu_torture_boost_rterror;
  157. static long n_rcu_torture_boost_failure;
  158. static long n_rcu_torture_boosts;
  159. static atomic_long_t n_rcu_torture_timers;
  160. static long n_barrier_attempts;
  161. static long n_barrier_successes; /* did rcu_barrier test succeed? */
  162. static unsigned long n_read_exits;
  163. static struct list_head rcu_torture_removed;
  164. static unsigned long shutdown_jiffies;
  165. static unsigned long start_gp_seq;
  166. static int rcu_torture_writer_state;
  167. #define RTWS_FIXED_DELAY 0
  168. #define RTWS_DELAY 1
  169. #define RTWS_REPLACE 2
  170. #define RTWS_DEF_FREE 3
  171. #define RTWS_EXP_SYNC 4
  172. #define RTWS_COND_GET 5
  173. #define RTWS_COND_SYNC 6
  174. #define RTWS_SYNC 7
  175. #define RTWS_STUTTER 8
  176. #define RTWS_STOPPING 9
  177. static const char * const rcu_torture_writer_state_names[] = {
  178. "RTWS_FIXED_DELAY",
  179. "RTWS_DELAY",
  180. "RTWS_REPLACE",
  181. "RTWS_DEF_FREE",
  182. "RTWS_EXP_SYNC",
  183. "RTWS_COND_GET",
  184. "RTWS_COND_SYNC",
  185. "RTWS_SYNC",
  186. "RTWS_STUTTER",
  187. "RTWS_STOPPING",
  188. };
  189. /* Record reader segment types and duration for first failing read. */
  190. struct rt_read_seg {
  191. int rt_readstate;
  192. unsigned long rt_delay_jiffies;
  193. unsigned long rt_delay_ms;
  194. unsigned long rt_delay_us;
  195. bool rt_preempted;
  196. };
  197. static int err_segs_recorded;
  198. static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
  199. static int rt_read_nsegs;
  200. static const char *rcu_torture_writer_state_getname(void)
  201. {
  202. unsigned int i = READ_ONCE(rcu_torture_writer_state);
  203. if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
  204. return "???";
  205. return rcu_torture_writer_state_names[i];
  206. }
  207. #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
  208. #define rcu_can_boost() 1
  209. #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
  210. #define rcu_can_boost() 0
  211. #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
  212. #ifdef CONFIG_RCU_TRACE
  213. static u64 notrace rcu_trace_clock_local(void)
  214. {
  215. u64 ts = trace_clock_local();
  216. (void)do_div(ts, NSEC_PER_USEC);
  217. return ts;
  218. }
  219. #else /* #ifdef CONFIG_RCU_TRACE */
  220. static u64 notrace rcu_trace_clock_local(void)
  221. {
  222. return 0ULL;
  223. }
  224. #endif /* #else #ifdef CONFIG_RCU_TRACE */
  225. /*
  226. * Stop aggressive CPU-hog tests a bit before the end of the test in order
  227. * to avoid interfering with test shutdown.
  228. */
  229. static bool shutdown_time_arrived(void)
  230. {
  231. return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
  232. }
  233. static unsigned long boost_starttime; /* jiffies of next boost test start. */
  234. static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
  235. /* and boost task create/destroy. */
  236. static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
  237. static bool barrier_phase; /* Test phase. */
  238. static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
  239. static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
  240. static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
  241. static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
  242. /*
  243. * Allocate an element from the rcu_tortures pool.
  244. */
  245. static struct rcu_torture *
  246. rcu_torture_alloc(void)
  247. {
  248. struct list_head *p;
  249. spin_lock_bh(&rcu_torture_lock);
  250. if (list_empty(&rcu_torture_freelist)) {
  251. atomic_inc(&n_rcu_torture_alloc_fail);
  252. spin_unlock_bh(&rcu_torture_lock);
  253. return NULL;
  254. }
  255. atomic_inc(&n_rcu_torture_alloc);
  256. p = rcu_torture_freelist.next;
  257. list_del_init(p);
  258. spin_unlock_bh(&rcu_torture_lock);
  259. return container_of(p, struct rcu_torture, rtort_free);
  260. }
  261. /*
  262. * Free an element to the rcu_tortures pool.
  263. */
  264. static void
  265. rcu_torture_free(struct rcu_torture *p)
  266. {
  267. atomic_inc(&n_rcu_torture_free);
  268. spin_lock_bh(&rcu_torture_lock);
  269. list_add_tail(&p->rtort_free, &rcu_torture_freelist);
  270. spin_unlock_bh(&rcu_torture_lock);
  271. }
  272. /*
  273. * Operations vector for selecting different types of tests.
  274. */
  275. struct rcu_torture_ops {
  276. int ttype;
  277. void (*init)(void);
  278. void (*cleanup)(void);
  279. int (*readlock)(void);
  280. void (*read_delay)(struct torture_random_state *rrsp,
  281. struct rt_read_seg *rtrsp);
  282. void (*readunlock)(int idx);
  283. unsigned long (*get_gp_seq)(void);
  284. unsigned long (*gp_diff)(unsigned long new, unsigned long old);
  285. void (*deferred_free)(struct rcu_torture *p);
  286. void (*sync)(void);
  287. void (*exp_sync)(void);
  288. unsigned long (*get_state)(void);
  289. void (*cond_sync)(unsigned long oldstate);
  290. call_rcu_func_t call;
  291. void (*cb_barrier)(void);
  292. void (*fqs)(void);
  293. void (*stats)(void);
  294. int (*stall_dur)(void);
  295. int irq_capable;
  296. int can_boost;
  297. int extendables;
  298. int slow_gps;
  299. const char *name;
  300. };
  301. static struct rcu_torture_ops *cur_ops;
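/*
 * Note: cur_ops is pointed at one of the rcu_torture_ops structures
 * defined below; the selection is made at initialization time by
 * matching the torture_type module parameter against each structure's
 * ->name field.
 */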
  302. /*
  303. * Definitions for rcu torture testing.
  304. */
  305. static int rcu_torture_read_lock(void) __acquires(RCU)
  306. {
  307. rcu_read_lock();
  308. return 0;
  309. }
  310. static void
  311. rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
  312. {
  313. unsigned long started;
  314. unsigned long completed;
  315. const unsigned long shortdelay_us = 200;
  316. unsigned long longdelay_ms = 300;
  317. unsigned long long ts;
  318. /* We want a short delay sometimes to make a reader delay the grace
  319. * period, and we want a long delay occasionally to trigger
  320. * force_quiescent_state. */
  321. if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
  322. !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
  323. started = cur_ops->get_gp_seq();
  324. ts = rcu_trace_clock_local();
  325. if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
  326. longdelay_ms = 5; /* Avoid triggering BH limits. */
  327. mdelay(longdelay_ms);
  328. rtrsp->rt_delay_ms = longdelay_ms;
  329. completed = cur_ops->get_gp_seq();
  330. do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
  331. started, completed);
  332. }
  333. if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
  334. udelay(shortdelay_us);
  335. rtrsp->rt_delay_us = shortdelay_us;
  336. }
  337. if (!preempt_count() &&
  338. !(torture_random(rrsp) % (nrealreaders * 500))) {
  339. torture_preempt_schedule(); /* QS only if preemptible. */
  340. rtrsp->rt_preempted = true;
  341. }
  342. }
  343. static void rcu_torture_read_unlock(int idx) __releases(RCU)
  344. {
  345. rcu_read_unlock();
  346. }
  347. /*
  348. * Update callback in the pipe. This should be invoked after a grace period.
  349. */
  350. static bool
  351. rcu_torture_pipe_update_one(struct rcu_torture *rp)
  352. {
  353. int i;
  354. i = READ_ONCE(rp->rtort_pipe_count);
  355. if (i > RCU_TORTURE_PIPE_LEN)
  356. i = RCU_TORTURE_PIPE_LEN;
  357. atomic_inc(&rcu_torture_wcount[i]);
  358. WRITE_ONCE(rp->rtort_pipe_count, i + 1);
  359. if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
  360. rp->rtort_mbtest = 0;
  361. return true;
  362. }
  363. return false;
  364. }
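/*
 * Illustrative note on the pipeline above: an element's rtort_pipe_count
 * is bumped once when the writer replaces it and then once per
 * subsequent grace period, and the element is recycled once the count
 * reaches RCU_TORTURE_PIPE_LEN.  A reader that still holds the element
 * and observes a count greater than 1 has therefore seen a grace period
 * complete within its own read-side critical section, which is exactly
 * the failure rcutorture is designed to detect.
 */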
  365. /*
  366. * Update all callbacks in the pipe. Suitable for synchronous grace-period
  367. * primitives.
  368. */
  369. static void
  370. rcu_torture_pipe_update(struct rcu_torture *old_rp)
  371. {
  372. struct rcu_torture *rp;
  373. struct rcu_torture *rp1;
  374. if (old_rp)
  375. list_add(&old_rp->rtort_free, &rcu_torture_removed);
  376. list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
  377. if (rcu_torture_pipe_update_one(rp)) {
  378. list_del(&rp->rtort_free);
  379. rcu_torture_free(rp);
  380. }
  381. }
  382. }
  383. static void
  384. rcu_torture_cb(struct rcu_head *p)
  385. {
  386. struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
  387. if (torture_must_stop_irq()) {
  388. /* Test is ending, just drop callbacks on the floor. */
  389. /* The next initialization will pick up the pieces. */
  390. return;
  391. }
  392. if (rcu_torture_pipe_update_one(rp))
  393. rcu_torture_free(rp);
  394. else
  395. cur_ops->deferred_free(rp);
  396. }
  397. static unsigned long rcu_no_completed(void)
  398. {
  399. return 0;
  400. }
  401. static void rcu_torture_deferred_free(struct rcu_torture *p)
  402. {
  403. call_rcu(&p->rtort_rcu, rcu_torture_cb);
  404. }
  405. static void rcu_sync_torture_init(void)
  406. {
  407. INIT_LIST_HEAD(&rcu_torture_removed);
  408. }
  409. static struct rcu_torture_ops rcu_ops = {
  410. .ttype = RCU_FLAVOR,
  411. .init = rcu_sync_torture_init,
  412. .readlock = rcu_torture_read_lock,
  413. .read_delay = rcu_read_delay,
  414. .readunlock = rcu_torture_read_unlock,
  415. .get_gp_seq = rcu_get_gp_seq,
  416. .gp_diff = rcu_seq_diff,
  417. .deferred_free = rcu_torture_deferred_free,
  418. .sync = synchronize_rcu,
  419. .exp_sync = synchronize_rcu_expedited,
  420. .get_state = get_state_synchronize_rcu,
  421. .cond_sync = cond_synchronize_rcu,
  422. .call = call_rcu,
  423. .cb_barrier = rcu_barrier,
  424. .fqs = rcu_force_quiescent_state,
  425. .stats = NULL,
  426. .stall_dur = rcu_jiffies_till_stall_check,
  427. .irq_capable = 1,
  428. .can_boost = rcu_can_boost(),
  429. .extendables = RCUTORTURE_MAX_EXTEND,
  430. .name = "rcu"
  431. };
  432. /*
  433. * Don't even think about trying any of these in real life!!!
  434. * The names include "busted", and they really mean it!
  435. * The only purpose of these functions is to provide a buggy RCU
  436. * implementation to make sure that rcutorture correctly emits
  437. * buggy-RCU error messages.
  438. */
  439. static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
  440. {
  441. /* This is a deliberate bug for testing purposes only! */
  442. rcu_torture_cb(&p->rtort_rcu);
  443. }
  444. static void synchronize_rcu_busted(void)
  445. {
  446. /* This is a deliberate bug for testing purposes only! */
  447. }
  448. static void
  449. call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
  450. {
  451. /* This is a deliberate bug for testing purposes only! */
  452. func(head);
  453. }
  454. static struct rcu_torture_ops rcu_busted_ops = {
  455. .ttype = INVALID_RCU_FLAVOR,
  456. .init = rcu_sync_torture_init,
  457. .readlock = rcu_torture_read_lock,
  458. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  459. .readunlock = rcu_torture_read_unlock,
  460. .get_gp_seq = rcu_no_completed,
  461. .deferred_free = rcu_busted_torture_deferred_free,
  462. .sync = synchronize_rcu_busted,
  463. .exp_sync = synchronize_rcu_busted,
  464. .call = call_rcu_busted,
  465. .cb_barrier = NULL,
  466. .fqs = NULL,
  467. .stats = NULL,
  468. .irq_capable = 1,
  469. .name = "busted"
  470. };
  471. /*
  472. * Definitions for srcu torture testing.
  473. */
  474. DEFINE_STATIC_SRCU(srcu_ctl);
  475. static struct srcu_struct srcu_ctld;
  476. static struct srcu_struct *srcu_ctlp = &srcu_ctl;
  477. static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
  478. {
  479. return srcu_read_lock(srcu_ctlp);
  480. }
  481. static void
  482. srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
  483. {
  484. long delay;
  485. const long uspertick = 1000000 / HZ;
  486. const long longdelay = 10;
  487. /* We want there to be long-running readers, but not all the time. */
  488. delay = torture_random(rrsp) %
  489. (nrealreaders * 2 * longdelay * uspertick);
  490. if (!delay && in_task()) {
  491. schedule_timeout_interruptible(longdelay);
  492. rtrsp->rt_delay_jiffies = longdelay;
  493. } else {
  494. rcu_read_delay(rrsp, rtrsp);
  495. }
  496. }
  497. static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
  498. {
  499. srcu_read_unlock(srcu_ctlp, idx);
  500. }
  501. static unsigned long srcu_torture_completed(void)
  502. {
  503. return srcu_batches_completed(srcu_ctlp);
  504. }
  505. static void srcu_torture_deferred_free(struct rcu_torture *rp)
  506. {
  507. call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
  508. }
  509. static void srcu_torture_synchronize(void)
  510. {
  511. synchronize_srcu(srcu_ctlp);
  512. }
  513. static void srcu_torture_call(struct rcu_head *head,
  514. rcu_callback_t func)
  515. {
  516. call_srcu(srcu_ctlp, head, func);
  517. }
  518. static void srcu_torture_barrier(void)
  519. {
  520. srcu_barrier(srcu_ctlp);
  521. }
  522. static void srcu_torture_stats(void)
  523. {
  524. srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
  525. }
  526. static void srcu_torture_synchronize_expedited(void)
  527. {
  528. synchronize_srcu_expedited(srcu_ctlp);
  529. }
  530. static struct rcu_torture_ops srcu_ops = {
  531. .ttype = SRCU_FLAVOR,
  532. .init = rcu_sync_torture_init,
  533. .readlock = srcu_torture_read_lock,
  534. .read_delay = srcu_read_delay,
  535. .readunlock = srcu_torture_read_unlock,
  536. .get_gp_seq = srcu_torture_completed,
  537. .deferred_free = srcu_torture_deferred_free,
  538. .sync = srcu_torture_synchronize,
  539. .exp_sync = srcu_torture_synchronize_expedited,
  540. .call = srcu_torture_call,
  541. .cb_barrier = srcu_torture_barrier,
  542. .stats = srcu_torture_stats,
  543. .irq_capable = 1,
  544. .name = "srcu"
  545. };
  546. static void srcu_torture_init(void)
  547. {
  548. rcu_sync_torture_init();
  549. WARN_ON(init_srcu_struct(&srcu_ctld));
  550. srcu_ctlp = &srcu_ctld;
  551. }
  552. static void srcu_torture_cleanup(void)
  553. {
  554. cleanup_srcu_struct(&srcu_ctld);
  555. srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
  556. }
  557. /* As above, but dynamically allocated. */
  558. static struct rcu_torture_ops srcud_ops = {
  559. .ttype = SRCU_FLAVOR,
  560. .init = srcu_torture_init,
  561. .cleanup = srcu_torture_cleanup,
  562. .readlock = srcu_torture_read_lock,
  563. .read_delay = srcu_read_delay,
  564. .readunlock = srcu_torture_read_unlock,
  565. .get_gp_seq = srcu_torture_completed,
  566. .deferred_free = srcu_torture_deferred_free,
  567. .sync = srcu_torture_synchronize,
  568. .exp_sync = srcu_torture_synchronize_expedited,
  569. .call = srcu_torture_call,
  570. .cb_barrier = srcu_torture_barrier,
  571. .stats = srcu_torture_stats,
  572. .irq_capable = 1,
  573. .name = "srcud"
  574. };
  575. /* As above, but broken due to inappropriate reader extension. */
  576. static struct rcu_torture_ops busted_srcud_ops = {
  577. .ttype = SRCU_FLAVOR,
  578. .init = srcu_torture_init,
  579. .cleanup = srcu_torture_cleanup,
  580. .readlock = srcu_torture_read_lock,
  581. .read_delay = rcu_read_delay,
  582. .readunlock = srcu_torture_read_unlock,
  583. .get_gp_seq = srcu_torture_completed,
  584. .deferred_free = srcu_torture_deferred_free,
  585. .sync = srcu_torture_synchronize,
  586. .exp_sync = srcu_torture_synchronize_expedited,
  587. .call = srcu_torture_call,
  588. .cb_barrier = srcu_torture_barrier,
  589. .stats = srcu_torture_stats,
  590. .irq_capable = 1,
  591. .extendables = RCUTORTURE_MAX_EXTEND,
  592. .name = "busted_srcud"
  593. };
  594. /*
  595. * Definitions for RCU-tasks torture testing.
  596. */
  597. static int tasks_torture_read_lock(void)
  598. {
  599. return 0;
  600. }
  601. static void tasks_torture_read_unlock(int idx)
  602. {
  603. }
  604. static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
  605. {
  606. call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
  607. }
  608. static void synchronize_rcu_mult_test(void)
  609. {
  610. synchronize_rcu_mult(call_rcu_tasks, call_rcu);
  611. }
  612. static struct rcu_torture_ops tasks_ops = {
  613. .ttype = RCU_TASKS_FLAVOR,
  614. .init = rcu_sync_torture_init,
  615. .readlock = tasks_torture_read_lock,
  616. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  617. .readunlock = tasks_torture_read_unlock,
  618. .get_gp_seq = rcu_no_completed,
  619. .deferred_free = rcu_tasks_torture_deferred_free,
  620. .sync = synchronize_rcu_tasks,
  621. .exp_sync = synchronize_rcu_mult_test,
  622. .call = call_rcu_tasks,
  623. .cb_barrier = rcu_barrier_tasks,
  624. .fqs = NULL,
  625. .stats = NULL,
  626. .irq_capable = 1,
  627. .slow_gps = 1,
  628. .name = "tasks"
  629. };
  630. /*
  631. * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
  632. * This implementation does not necessarily work well with CPU hotplug.
  633. */
  634. static void synchronize_rcu_trivial(void)
  635. {
  636. int cpu;
  637. for_each_online_cpu(cpu) {
  638. rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
  639. WARN_ON_ONCE(raw_smp_processor_id() != cpu);
  640. }
  641. }
  642. static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
  643. {
  644. preempt_disable();
  645. return 0;
  646. }
  647. static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
  648. {
  649. preempt_enable();
  650. }
  651. static struct rcu_torture_ops trivial_ops = {
  652. .ttype = RCU_TRIVIAL_FLAVOR,
  653. .init = rcu_sync_torture_init,
  654. .readlock = rcu_torture_read_lock_trivial,
  655. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  656. .readunlock = rcu_torture_read_unlock_trivial,
  657. .get_gp_seq = rcu_no_completed,
  658. .sync = synchronize_rcu_trivial,
  659. .exp_sync = synchronize_rcu_trivial,
  660. .fqs = NULL,
  661. .stats = NULL,
  662. .irq_capable = 1,
  663. .name = "trivial"
  664. };
  665. /*
  666. * Definitions for rude RCU-tasks torture testing.
  667. */
  668. static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
  669. {
  670. call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
  671. }
  672. static struct rcu_torture_ops tasks_rude_ops = {
  673. .ttype = RCU_TASKS_RUDE_FLAVOR,
  674. .init = rcu_sync_torture_init,
  675. .readlock = rcu_torture_read_lock_trivial,
  676. .read_delay = rcu_read_delay, /* just reuse rcu's version. */
  677. .readunlock = rcu_torture_read_unlock_trivial,
  678. .get_gp_seq = rcu_no_completed,
  679. .deferred_free = rcu_tasks_rude_torture_deferred_free,
  680. .sync = synchronize_rcu_tasks_rude,
  681. .exp_sync = synchronize_rcu_tasks_rude,
  682. .call = call_rcu_tasks_rude,
  683. .cb_barrier = rcu_barrier_tasks_rude,
  684. .fqs = NULL,
  685. .stats = NULL,
  686. .irq_capable = 1,
  687. .name = "tasks-rude"
  688. };
  689. /*
  690. * Definitions for tracing RCU-tasks torture testing.
  691. */
  692. static int tasks_tracing_torture_read_lock(void)
  693. {
  694. rcu_read_lock_trace();
  695. return 0;
  696. }
  697. static void tasks_tracing_torture_read_unlock(int idx)
  698. {
  699. rcu_read_unlock_trace();
  700. }
  701. static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
  702. {
  703. call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
  704. }
  705. static struct rcu_torture_ops tasks_tracing_ops = {
  706. .ttype = RCU_TASKS_TRACING_FLAVOR,
  707. .init = rcu_sync_torture_init,
  708. .readlock = tasks_tracing_torture_read_lock,
  709. .read_delay = srcu_read_delay, /* just reuse srcu's version. */
  710. .readunlock = tasks_tracing_torture_read_unlock,
  711. .get_gp_seq = rcu_no_completed,
  712. .deferred_free = rcu_tasks_tracing_torture_deferred_free,
  713. .sync = synchronize_rcu_tasks_trace,
  714. .exp_sync = synchronize_rcu_tasks_trace,
  715. .call = call_rcu_tasks_trace,
  716. .cb_barrier = rcu_barrier_tasks_trace,
  717. .fqs = NULL,
  718. .stats = NULL,
  719. .irq_capable = 1,
  720. .slow_gps = 1,
  721. .name = "tasks-tracing"
  722. };
  723. static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
  724. {
  725. if (!cur_ops->gp_diff)
  726. return new - old;
  727. return cur_ops->gp_diff(new, old);
  728. }
  729. static bool __maybe_unused torturing_tasks(void)
  730. {
  731. return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
  732. }
  733. /*
  734. * RCU torture priority-boost testing. Runs one real-time thread per
  735. * CPU for moderate bursts, repeatedly registering RCU callbacks and
  736. * spinning waiting for them to be invoked. If a given callback takes
  737. * too long to be invoked, we assume that priority inversion has occurred.
  738. */
  739. struct rcu_boost_inflight {
  740. struct rcu_head rcu;
  741. int inflight;
  742. };
  743. static void rcu_torture_boost_cb(struct rcu_head *head)
  744. {
  745. struct rcu_boost_inflight *rbip =
  746. container_of(head, struct rcu_boost_inflight, rcu);
  747. /* Ensure RCU-core accesses precede clearing ->inflight */
  748. smp_store_release(&rbip->inflight, 0);
  749. }
  750. static int old_rt_runtime = -1;
  751. static void rcu_torture_disable_rt_throttle(void)
  752. {
  753. /*
  754. * Disable RT throttling so that rcutorture's boost threads don't get
  755. * throttled. Only possible if rcutorture is built-in; otherwise the
  756. * user should manually do this by setting the sched_rt_period_us and
  757. * sched_rt_runtime sysctls.
  758. */
  759. if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
  760. return;
  761. old_rt_runtime = sysctl_sched_rt_runtime;
  762. sysctl_sched_rt_runtime = -1;
  763. }
  764. static void rcu_torture_enable_rt_throttle(void)
  765. {
  766. if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
  767. return;
  768. sysctl_sched_rt_runtime = old_rt_runtime;
  769. old_rt_runtime = -1;
  770. }
  771. static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
  772. {
  773. if (end - start > test_boost_duration * HZ - HZ / 2) {
  774. VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
  775. n_rcu_torture_boost_failure++;
  776. return true; /* failed */
  777. }
  778. return false; /* passed */
  779. }
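/*
 * Worked example of the failure criterion above (illustrative only):
 * with the default test_boost_duration of 4 seconds and HZ=1000, a
 * callback that remains uninvoked for more than 4*HZ - HZ/2 = 3500
 * jiffies (3.5 seconds) is treated as evidence that priority boosting
 * failed to overcome priority inversion.
 */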
  780. static int rcu_torture_boost(void *arg)
  781. {
  782. unsigned long call_rcu_time;
  783. unsigned long endtime;
  784. unsigned long oldstarttime;
  785. struct rcu_boost_inflight rbi = { .inflight = 0 };
  786. VERBOSE_TOROUT_STRING("rcu_torture_boost started");
  787. /* Set real-time priority. */
  788. sched_set_fifo_low(current);
  789. init_rcu_head_on_stack(&rbi.rcu);
  790. /* Each pass through the following loop does one boost-test cycle. */
  791. do {
  792. /* Track whether the test has already failed in this test interval. */
  793. bool failed = false;
  794. /* Increment n_rcu_torture_boosts once per boost-test */
  795. while (!kthread_should_stop()) {
  796. if (mutex_trylock(&boost_mutex)) {
  797. n_rcu_torture_boosts++;
  798. mutex_unlock(&boost_mutex);
  799. break;
  800. }
  801. schedule_timeout_uninterruptible(1);
  802. }
  803. if (kthread_should_stop())
  804. goto checkwait;
  805. /* Wait for the next test interval. */
  806. oldstarttime = boost_starttime;
  807. while (time_before(jiffies, oldstarttime)) {
  808. schedule_timeout_interruptible(oldstarttime - jiffies);
  809. stutter_wait("rcu_torture_boost");
  810. if (torture_must_stop())
  811. goto checkwait;
  812. }
  813. /* Do one boost-test interval. */
  814. endtime = oldstarttime + test_boost_duration * HZ;
  815. call_rcu_time = jiffies;
  816. while (time_before(jiffies, endtime)) {
  817. /* If we don't have a callback in flight, post one. */
  818. if (!smp_load_acquire(&rbi.inflight)) {
  819. /* RCU core before ->inflight = 1. */
  820. smp_store_release(&rbi.inflight, 1);
  821. call_rcu(&rbi.rcu, rcu_torture_boost_cb);
  822. /* Check if the boost test failed */
  823. failed = failed ||
  824. rcu_torture_boost_failed(call_rcu_time,
  825. jiffies);
  826. call_rcu_time = jiffies;
  827. }
  828. stutter_wait("rcu_torture_boost");
  829. if (torture_must_stop())
  830. goto checkwait;
  831. }
  832. /*
  833. * If boosting never happened, then inflight will still be 1; in
  834. * that case the boost check in the loop above never ran, so do
  835. * one more check here.
  836. */
  837. if (!failed && smp_load_acquire(&rbi.inflight))
  838. rcu_torture_boost_failed(call_rcu_time, jiffies);
  839. /*
  840. * Set the start time of the next test interval.
  841. * Yes, this is vulnerable to long delays, but such
  842. * delays simply cause a false negative for the next
  843. * interval. Besides, we are running at RT priority,
  844. * so delays should be relatively rare.
  845. */
  846. while (oldstarttime == boost_starttime &&
  847. !kthread_should_stop()) {
  848. if (mutex_trylock(&boost_mutex)) {
  849. boost_starttime = jiffies +
  850. test_boost_interval * HZ;
  851. mutex_unlock(&boost_mutex);
  852. break;
  853. }
  854. schedule_timeout_uninterruptible(1);
  855. }
  856. /* Go do the stutter. */
  857. checkwait: stutter_wait("rcu_torture_boost");
  858. } while (!torture_must_stop());
  859. /* Clean up and exit. */
  860. while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
  861. torture_shutdown_absorb("rcu_torture_boost");
  862. schedule_timeout_uninterruptible(1);
  863. }
  864. destroy_rcu_head_on_stack(&rbi.rcu);
  865. torture_kthread_stopping("rcu_torture_boost");
  866. return 0;
  867. }
  868. /*
  869. * RCU torture force-quiescent-state kthread. Repeatedly induces
  870. * bursts of calls to force_quiescent_state(), increasing the probability
  871. * of occurrence of some important types of race conditions.
  872. */
  873. static int
  874. rcu_torture_fqs(void *arg)
  875. {
  876. unsigned long fqs_resume_time;
  877. int fqs_burst_remaining;
  878. VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
  879. do {
  880. fqs_resume_time = jiffies + fqs_stutter * HZ;
  881. while (time_before(jiffies, fqs_resume_time) &&
  882. !kthread_should_stop()) {
  883. schedule_timeout_interruptible(1);
  884. }
  885. fqs_burst_remaining = fqs_duration;
  886. while (fqs_burst_remaining > 0 &&
  887. !kthread_should_stop()) {
  888. cur_ops->fqs();
  889. udelay(fqs_holdoff);
  890. fqs_burst_remaining -= fqs_holdoff;
  891. }
  892. stutter_wait("rcu_torture_fqs");
  893. } while (!torture_must_stop());
  894. torture_kthread_stopping("rcu_torture_fqs");
  895. return 0;
  896. }
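/*
 * Illustrative arithmetic for the loop above: with, say, fqs_duration=100
 * and fqs_holdoff=10 (both in microseconds), each burst makes roughly
 * fqs_duration / fqs_holdoff = 10 calls to cur_ops->fqs(), and bursts
 * repeat every fqs_stutter (default 3) seconds.
 */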
  897. /*
  898. * RCU torture writer kthread. Repeatedly substitutes a new structure
  899. * for that pointed to by rcu_torture_current, freeing the old structure
  900. * after a series of grace periods (the "pipeline").
  901. */
  902. static int
  903. rcu_torture_writer(void *arg)
  904. {
  905. bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
  906. int expediting = 0;
  907. unsigned long gp_snap;
  908. bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
  909. bool gp_sync1 = gp_sync;
  910. int i;
  911. struct rcu_torture *rp;
  912. struct rcu_torture *old_rp;
  913. static DEFINE_TORTURE_RANDOM(rand);
  914. int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
  915. RTWS_COND_GET, RTWS_SYNC };
  916. int nsynctypes = 0;
  917. VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
  918. if (!can_expedite)
  919. pr_alert("%s" TORTURE_FLAG
  920. " GP expediting controlled from boot/sysfs for %s.\n",
  921. torture_type, cur_ops->name);
  922. /* Initialize synctype[] array. If none set, take default. */
  923. if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
  924. gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
  925. if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
  926. synctype[nsynctypes++] = RTWS_COND_GET;
  927. pr_info("%s: Testing conditional GPs.\n", __func__);
  928. } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
  929. pr_alert("%s: gp_cond without primitives.\n", __func__);
  930. }
  931. if (gp_exp1 && cur_ops->exp_sync) {
  932. synctype[nsynctypes++] = RTWS_EXP_SYNC;
  933. pr_info("%s: Testing expedited GPs.\n", __func__);
  934. } else if (gp_exp && !cur_ops->exp_sync) {
  935. pr_alert("%s: gp_exp without primitives.\n", __func__);
  936. }
  937. if (gp_normal1 && cur_ops->deferred_free) {
  938. synctype[nsynctypes++] = RTWS_DEF_FREE;
  939. pr_info("%s: Testing asynchronous GPs.\n", __func__);
  940. } else if (gp_normal && !cur_ops->deferred_free) {
  941. pr_alert("%s: gp_normal without primitives.\n", __func__);
  942. }
  943. if (gp_sync1 && cur_ops->sync) {
  944. synctype[nsynctypes++] = RTWS_SYNC;
  945. pr_info("%s: Testing normal GPs.\n", __func__);
  946. } else if (gp_sync && !cur_ops->sync) {
  947. pr_alert("%s: gp_sync without primitives.\n", __func__);
  948. }
  949. if (WARN_ONCE(nsynctypes == 0,
  950. "rcu_torture_writer: No update-side primitives.\n")) {
  951. /*
  952. * No update-side primitives, so don't try updating.
  953. * The resulting test won't be testing much, hence the
  954. * above WARN_ONCE().
  955. */
  956. rcu_torture_writer_state = RTWS_STOPPING;
  957. torture_kthread_stopping("rcu_torture_writer");
  958. }
  959. do {
  960. rcu_torture_writer_state = RTWS_FIXED_DELAY;
  961. schedule_timeout_uninterruptible(1);
  962. rp = rcu_torture_alloc();
  963. if (rp == NULL)
  964. continue;
  965. rp->rtort_pipe_count = 0;
  966. rcu_torture_writer_state = RTWS_DELAY;
  967. udelay(torture_random(&rand) & 0x3ff);
  968. rcu_torture_writer_state = RTWS_REPLACE;
  969. old_rp = rcu_dereference_check(rcu_torture_current,
  970. current == writer_task);
  971. rp->rtort_mbtest = 1;
  972. rcu_assign_pointer(rcu_torture_current, rp);
  973. smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
  974. if (old_rp) {
  975. i = old_rp->rtort_pipe_count;
  976. if (i > RCU_TORTURE_PIPE_LEN)
  977. i = RCU_TORTURE_PIPE_LEN;
  978. atomic_inc(&rcu_torture_wcount[i]);
  979. WRITE_ONCE(old_rp->rtort_pipe_count,
  980. old_rp->rtort_pipe_count + 1);
  981. switch (synctype[torture_random(&rand) % nsynctypes]) {
  982. case RTWS_DEF_FREE:
  983. rcu_torture_writer_state = RTWS_DEF_FREE;
  984. cur_ops->deferred_free(old_rp);
  985. break;
  986. case RTWS_EXP_SYNC:
  987. rcu_torture_writer_state = RTWS_EXP_SYNC;
  988. cur_ops->exp_sync();
  989. rcu_torture_pipe_update(old_rp);
  990. break;
  991. case RTWS_COND_GET:
  992. rcu_torture_writer_state = RTWS_COND_GET;
  993. gp_snap = cur_ops->get_state();
  994. i = torture_random(&rand) % 16;
  995. if (i != 0)
  996. schedule_timeout_interruptible(i);
  997. udelay(torture_random(&rand) % 1000);
  998. rcu_torture_writer_state = RTWS_COND_SYNC;
  999. cur_ops->cond_sync(gp_snap);
  1000. rcu_torture_pipe_update(old_rp);
  1001. break;
  1002. case RTWS_SYNC:
  1003. rcu_torture_writer_state = RTWS_SYNC;
  1004. cur_ops->sync();
  1005. rcu_torture_pipe_update(old_rp);
  1006. break;
  1007. default:
  1008. WARN_ON_ONCE(1);
  1009. break;
  1010. }
  1011. }
  1012. WRITE_ONCE(rcu_torture_current_version,
  1013. rcu_torture_current_version + 1);
  1014. /* Cycle through nesting levels of rcu_expedite_gp() calls. */
  1015. if (can_expedite &&
  1016. !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
  1017. WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
  1018. if (expediting >= 0)
  1019. rcu_expedite_gp();
  1020. else
  1021. rcu_unexpedite_gp();
  1022. if (++expediting > 3)
  1023. expediting = -expediting;
  1024. } else if (!can_expedite) { /* Disabled during boot, recheck. */
  1025. can_expedite = !rcu_gp_is_expedited() &&
  1026. !rcu_gp_is_normal();
  1027. }
  1028. rcu_torture_writer_state = RTWS_STUTTER;
  1029. if (stutter_wait("rcu_torture_writer") &&
  1030. !READ_ONCE(rcu_fwd_cb_nodelay) &&
  1031. !cur_ops->slow_gps &&
  1032. !torture_must_stop() &&
  1033. rcu_inkernel_boot_has_ended())
  1034. for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
  1035. if (list_empty(&rcu_tortures[i].rtort_free) &&
  1036. rcu_access_pointer(rcu_torture_current) !=
  1037. &rcu_tortures[i]) {
  1038. rcu_ftrace_dump(DUMP_ALL);
  1039. WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
  1040. }
  1041. } while (!torture_must_stop());
  1042. rcu_torture_current = NULL; // Let stats task know that we are done.
  1043. /* Reset expediting back to unexpedited. */
  1044. if (expediting > 0)
  1045. expediting = -expediting;
  1046. while (can_expedite && expediting++ < 0)
  1047. rcu_unexpedite_gp();
  1048. WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
  1049. if (!can_expedite)
  1050. pr_alert("%s" TORTURE_FLAG
  1051. " Dynamic grace-period expediting was disabled.\n",
  1052. torture_type);
  1053. rcu_torture_writer_state = RTWS_STOPPING;
  1054. torture_kthread_stopping("rcu_torture_writer");
  1055. return 0;
  1056. }
  1057. /*
  1058. * RCU torture fake writer kthread. Repeatedly calls sync, with a random
  1059. * delay between calls.
  1060. */
  1061. static int
  1062. rcu_torture_fakewriter(void *arg)
  1063. {
  1064. DEFINE_TORTURE_RANDOM(rand);
  1065. VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
  1066. set_user_nice(current, MAX_NICE);
  1067. do {
  1068. schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
  1069. udelay(torture_random(&rand) & 0x3ff);
  1070. if (cur_ops->cb_barrier != NULL &&
  1071. torture_random(&rand) % (nfakewriters * 8) == 0) {
  1072. cur_ops->cb_barrier();
  1073. } else if (gp_normal == gp_exp) {
  1074. if (cur_ops->sync && torture_random(&rand) & 0x80)
  1075. cur_ops->sync();
  1076. else if (cur_ops->exp_sync)
  1077. cur_ops->exp_sync();
  1078. } else if (gp_normal && cur_ops->sync) {
  1079. cur_ops->sync();
  1080. } else if (cur_ops->exp_sync) {
  1081. cur_ops->exp_sync();
  1082. }
  1083. stutter_wait("rcu_torture_fakewriter");
  1084. } while (!torture_must_stop());
  1085. torture_kthread_stopping("rcu_torture_fakewriter");
  1086. return 0;
  1087. }
  1088. static void rcu_torture_timer_cb(struct rcu_head *rhp)
  1089. {
  1090. kfree(rhp);
  1091. }
  1092. /*
  1093. * Do one extension of an RCU read-side critical section using the
  1094. * current reader state in readstate (set to zero for initial entry
  1095. * to extended critical section), set the new state as specified by
  1096. * newstate (set to zero for final exit from extended critical section),
  1097. * and random-number-generator state in trsp. If this is neither the
  1098. * beginning nor the end of the critical section and if there was actually a
  1099. * change, do a ->read_delay().
  1100. */
  1101. static void rcutorture_one_extend(int *readstate, int newstate,
  1102. struct torture_random_state *trsp,
  1103. struct rt_read_seg *rtrsp)
  1104. {
  1105. unsigned long flags;
  1106. int idxnew = -1;
  1107. int idxold = *readstate;
  1108. int statesnew = ~*readstate & newstate;
  1109. int statesold = *readstate & ~newstate;
  1110. WARN_ON_ONCE(idxold < 0);
  1111. WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
  1112. rtrsp->rt_readstate = newstate;
  1113. /* First, put new protection in place to avoid critical-section gap. */
  1114. if (statesnew & RCUTORTURE_RDR_BH)
  1115. local_bh_disable();
  1116. if (statesnew & RCUTORTURE_RDR_RBH)
  1117. rcu_read_lock_bh();
  1118. if (statesnew & RCUTORTURE_RDR_IRQ)
  1119. local_irq_disable();
  1120. if (statesnew & RCUTORTURE_RDR_PREEMPT)
  1121. preempt_disable();
  1122. if (statesnew & RCUTORTURE_RDR_SCHED)
  1123. rcu_read_lock_sched();
  1124. if (statesnew & RCUTORTURE_RDR_RCU)
  1125. idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
  1126. /*
  1127. * Next, remove old protection, in decreasing order of strength
  1128. * to avoid unlock paths that aren't safe in the stronger
  1129. * context. Namely: BH cannot be enabled with interrupts disabled.
  1130. * Additionally PREEMPT_RT requires that BH is enabled in preemptible
  1131. * context.
  1132. */
  1133. if (statesold & RCUTORTURE_RDR_IRQ)
  1134. local_irq_enable();
  1135. if (statesold & RCUTORTURE_RDR_PREEMPT)
  1136. preempt_enable();
  1137. if (statesold & RCUTORTURE_RDR_SCHED)
  1138. rcu_read_unlock_sched();
  1139. if (statesold & RCUTORTURE_RDR_BH)
  1140. local_bh_enable();
  1141. if (statesold & RCUTORTURE_RDR_RBH)
  1142. rcu_read_unlock_bh();
  1143. if (statesold & RCUTORTURE_RDR_RCU) {
  1144. bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
  1145. if (lockit)
  1146. raw_spin_lock_irqsave(&current->pi_lock, flags);
  1147. cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
  1148. if (lockit)
  1149. raw_spin_unlock_irqrestore(&current->pi_lock, flags);
  1150. }
  1151. /* Delay if neither beginning nor end and there was a change. */
  1152. if ((statesnew || statesold) && *readstate && newstate)
  1153. cur_ops->read_delay(trsp, rtrsp);
  1154. /* Update the reader state. */
  1155. if (idxnew == -1)
  1156. idxnew = idxold & ~RCUTORTURE_RDR_MASK;
  1157. WARN_ON_ONCE(idxnew < 0);
  1158. WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
  1159. *readstate = idxnew | newstate;
  1160. WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
  1161. WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
  1162. }
  1163. /* Return the biggest extendables mask given current RCU and boot parameters. */
  1164. static int rcutorture_extend_mask_max(void)
  1165. {
  1166. int mask;
  1167. WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
  1168. mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
  1169. mask = mask | RCUTORTURE_RDR_RCU;
  1170. return mask;
  1171. }
  1172. /* Return a random protection state mask, but with at least one bit set. */
  1173. static int
  1174. rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
  1175. {
  1176. int mask = rcutorture_extend_mask_max();
  1177. unsigned long randmask1 = torture_random(trsp) >> 8;
  1178. unsigned long randmask2 = randmask1 >> 3;
  1179. unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
  1180. unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
  1181. unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
  1182. WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
  1183. /* Mostly only one bit (need preemption!), sometimes lots of bits. */
  1184. if (!(randmask1 & 0x7))
  1185. mask = mask & randmask2;
  1186. else
  1187. mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
  1188. /*
  1189. * Can't enable bh w/irq disabled.
  1190. */
  1191. if (mask & RCUTORTURE_RDR_IRQ)
  1192. mask |= oldmask & bhs;
  1193. /*
  1194. * Ideally these sequences would be detected in debug builds
  1195. * (regardless of RT), but until then don't stop testing
  1196. * them on non-RT.
  1197. */
  1198. if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
  1199. /* Can't modify BH in atomic context */
  1200. if (oldmask & preempts_irq)
  1201. mask &= ~bhs;
  1202. if ((oldmask | mask) & preempts_irq)
  1203. mask |= oldmask & bhs;
  1204. }
  1205. return mask ?: RCUTORTURE_RDR_RCU;
  1206. }
  1207. /*
  1208. * Do a randomly selected number of extensions of an existing RCU read-side
  1209. * critical section.
  1210. */
  1211. static struct rt_read_seg *
  1212. rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
  1213. struct rt_read_seg *rtrsp)
  1214. {
  1215. int i;
  1216. int j;
  1217. int mask = rcutorture_extend_mask_max();
  1218. WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
  1219. if (!((mask - 1) & mask))
  1220. return rtrsp; /* Current RCU reader not extendable. */
  1221. /* Bias towards larger numbers of loops. */
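/*
* OR-ing in a shifted copy of the random value sets extra bits, which
* skews the masked result toward RCUTORTURE_RDR_MAX_LOOPS; the final
* "+ 1" guarantees at least one pass through the loop below.
*/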
  1222. i = (torture_random(trsp) >> 3);
  1223. i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
  1224. for (j = 0; j < i; j++) {
  1225. mask = rcutorture_extend_mask(*readstate, trsp);
  1226. rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
  1227. }
  1228. return &rtrsp[j];
  1229. }
  1230. /*
  1231. * Do one read-side critical section, returning false if there was
  1232. * no data to read. Can be invoked both from process context and
  1233. * from a timer handler.
  1234. */
  1235. static bool rcu_torture_one_read(struct torture_random_state *trsp)
  1236. {
  1237. int i;
  1238. unsigned long started;
  1239. unsigned long completed;
  1240. int newstate;
  1241. struct rcu_torture *p;
  1242. int pipe_count;
  1243. int readstate = 0;
  1244. struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
  1245. struct rt_read_seg *rtrsp = &rtseg[0];
  1246. struct rt_read_seg *rtrsp1;
  1247. unsigned long long ts;
  1248. WARN_ON_ONCE(!rcu_is_watching());
  1249. newstate = rcutorture_extend_mask(readstate, trsp);
  1250. rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
  1251. started = cur_ops->get_gp_seq();
  1252. ts = rcu_trace_clock_local();
  1253. p = rcu_dereference_check(rcu_torture_current,
  1254. rcu_read_lock_bh_held() ||
  1255. rcu_read_lock_sched_held() ||
  1256. srcu_read_lock_held(srcu_ctlp) ||
  1257. rcu_read_lock_trace_held() ||
  1258. torturing_tasks());
  1259. if (p == NULL) {
  1260. /* Wait for rcu_torture_writer to get underway */
  1261. rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
  1262. return false;
  1263. }
  1264. if (p->rtort_mbtest == 0)
  1265. atomic_inc(&n_rcu_torture_mberror);
  1266. rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
  1267. preempt_disable();
  1268. pipe_count = READ_ONCE(p->rtort_pipe_count);
  1269. if (pipe_count > RCU_TORTURE_PIPE_LEN) {
  1270. /* Should not happen, but... */
  1271. pipe_count = RCU_TORTURE_PIPE_LEN;
  1272. }
  1273. completed = cur_ops->get_gp_seq();
  1274. if (pipe_count > 1) {
  1275. do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
  1276. ts, started, completed);
  1277. rcu_ftrace_dump(DUMP_ALL);
  1278. }
  1279. __this_cpu_inc(rcu_torture_count[pipe_count]);
  1280. completed = rcutorture_seq_diff(completed, started);
  1281. if (completed > RCU_TORTURE_PIPE_LEN) {
  1282. /* Should not happen, but... */
  1283. completed = RCU_TORTURE_PIPE_LEN;
  1284. }
  1285. __this_cpu_inc(rcu_torture_batch[completed]);
  1286. preempt_enable();
  1287. rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
  1288. WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1289. // This next splat is expected behavior if the leakpointer module
1290. // parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
  1291. WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
  1292. /* If error or close call, record the sequence of reader protections. */
  1293. if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
  1294. i = 0;
  1295. for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
  1296. err_segs[i++] = *rtrsp1;
  1297. rt_read_nsegs = i;
  1298. }
  1299. return true;
  1300. }
  1301. static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
  1302. /*
  1303. * RCU torture reader from timer handler. Dereferences rcu_torture_current,
  1304. * incrementing the corresponding element of the pipeline array. The
1305. counter in the element should never be greater than 1; otherwise, the
  1306. * RCU implementation is broken.
  1307. */
  1308. static void rcu_torture_timer(struct timer_list *unused)
  1309. {
  1310. atomic_long_inc(&n_rcu_torture_timers);
  1311. (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
  1312. /* Test call_rcu() invocation from interrupt handler. */
  1313. if (cur_ops->call) {
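/*
* This runs in atomic (timer) context, so the allocation must not
* sleep; if it fails, the call_rcu() test is simply skipped this time.
*/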
  1314. struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
  1315. if (rhp)
  1316. cur_ops->call(rhp, rcu_torture_timer_cb);
  1317. }
  1318. }
  1319. /*
  1320. * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
  1321. * incrementing the corresponding element of the pipeline array. The
1322. counter in the element should never be greater than 1; otherwise, the
  1323. * RCU implementation is broken.
  1324. */
  1325. static int
  1326. rcu_torture_reader(void *arg)
  1327. {
  1328. unsigned long lastsleep = jiffies;
  1329. long myid = (long)arg;
  1330. int mynumonline = myid;
  1331. DEFINE_TORTURE_RANDOM(rand);
  1332. struct timer_list t;
  1333. VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
  1334. set_user_nice(current, MAX_NICE);
  1335. if (irqreader && cur_ops->irq_capable)
  1336. timer_setup_on_stack(&t, rcu_torture_timer, 0);
  1337. tick_dep_set_task(current, TICK_DEP_BIT_RCU);
  1338. do {
  1339. if (irqreader && cur_ops->irq_capable) {
  1340. if (!timer_pending(&t))
  1341. mod_timer(&t, jiffies + 1);
  1342. }
  1343. if (!rcu_torture_one_read(&rand) && !torture_must_stop())
  1344. schedule_timeout_interruptible(HZ);
  1345. if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
  1346. schedule_timeout_interruptible(1);
  1347. lastsleep = jiffies + 10;
  1348. }
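/*
* A reader whose index exceeds the current number of online CPUs parks
* here until enough CPUs come back online, which comes into play when
* CPU-hotplug (onoff) testing is running concurrently.
*/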
  1349. while (num_online_cpus() < mynumonline && !torture_must_stop())
  1350. schedule_timeout_interruptible(HZ / 5);
  1351. stutter_wait("rcu_torture_reader");
  1352. } while (!torture_must_stop());
  1353. if (irqreader && cur_ops->irq_capable) {
  1354. del_timer_sync(&t);
  1355. destroy_timer_on_stack(&t);
  1356. }
  1357. tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
  1358. torture_kthread_stopping("rcu_torture_reader");
  1359. return 0;
  1360. }
  1361. /*
  1362. * Print torture statistics. Caller must ensure that there is only
  1363. * one call to this function at a given time!!! This is normally
  1364. * accomplished by relying on the module system to only have one copy
  1365. * of the module loaded, and then by giving the rcu_torture_stats
1366. * kthread full control (or the init/cleanup functions when the
1367. * rcu_torture_stats kthread is not running).
  1368. */
  1369. static void
  1370. rcu_torture_stats_print(void)
  1371. {
  1372. int cpu;
  1373. int i;
  1374. long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  1375. long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  1376. struct rcu_torture *rtcp;
  1377. static unsigned long rtcv_snap = ULONG_MAX;
  1378. static bool splatted;
  1379. struct task_struct *wtp;
  1380. for_each_possible_cpu(cpu) {
  1381. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  1382. pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
  1383. batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
  1384. }
  1385. }
  1386. for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
  1387. if (pipesummary[i] != 0)
  1388. break;
  1389. }
  1390. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1391. rtcp = rcu_access_pointer(rcu_torture_current);
  1392. pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
  1393. rtcp,
  1394. rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
  1395. rcu_torture_current_version,
  1396. list_empty(&rcu_torture_freelist),
  1397. atomic_read(&n_rcu_torture_alloc),
  1398. atomic_read(&n_rcu_torture_alloc_fail),
  1399. atomic_read(&n_rcu_torture_free));
  1400. pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
  1401. atomic_read(&n_rcu_torture_mberror),
  1402. n_rcu_torture_barrier_error,
  1403. n_rcu_torture_boost_ktrerror,
  1404. n_rcu_torture_boost_rterror);
  1405. pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
  1406. n_rcu_torture_boost_failure,
  1407. n_rcu_torture_boosts,
  1408. atomic_long_read(&n_rcu_torture_timers));
  1409. torture_onoff_stats();
  1410. pr_cont("barrier: %ld/%ld:%ld ",
  1411. data_race(n_barrier_successes),
  1412. data_race(n_barrier_attempts),
  1413. data_race(n_rcu_torture_barrier_error));
  1414. pr_cont("read-exits: %ld\n", data_race(n_read_exits));
  1415. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1416. if (atomic_read(&n_rcu_torture_mberror) ||
  1417. n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
  1418. n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
  1419. i > 1) {
  1420. pr_cont("%s", "!!! ");
  1421. atomic_inc(&n_rcu_torture_error);
  1422. WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
  1423. WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
  1424. WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
  1425. WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
  1426. WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
  1427. WARN_ON_ONCE(i > 1); // Too-short grace period
  1428. }
  1429. pr_cont("Reader Pipe: ");
  1430. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  1431. pr_cont(" %ld", pipesummary[i]);
  1432. pr_cont("\n");
  1433. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1434. pr_cont("Reader Batch: ");
  1435. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  1436. pr_cont(" %ld", batchsummary[i]);
  1437. pr_cont("\n");
  1438. pr_alert("%s%s ", torture_type, TORTURE_FLAG);
  1439. pr_cont("Free-Block Circulation: ");
  1440. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  1441. pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
  1442. }
  1443. pr_cont("\n");
  1444. if (cur_ops->stats)
  1445. cur_ops->stats();
  1446. if (rtcv_snap == rcu_torture_current_version &&
  1447. rcu_access_pointer(rcu_torture_current) &&
  1448. !rcu_stall_is_suppressed()) {
  1449. int __maybe_unused flags = 0;
  1450. unsigned long __maybe_unused gp_seq = 0;
  1451. rcutorture_get_gp_data(cur_ops->ttype,
  1452. &flags, &gp_seq);
  1453. srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
  1454. &flags, &gp_seq);
  1455. wtp = READ_ONCE(writer_task);
  1456. pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
  1457. rcu_torture_writer_state_getname(),
  1458. rcu_torture_writer_state, gp_seq, flags,
  1459. wtp == NULL ? ~0UL : wtp->state,
  1460. wtp == NULL ? -1 : (int)task_cpu(wtp));
  1461. if (!splatted && wtp) {
  1462. sched_show_task(wtp);
  1463. splatted = true;
  1464. }
  1465. show_rcu_gp_kthreads();
  1466. rcu_ftrace_dump(DUMP_ALL);
  1467. }
  1468. rtcv_snap = rcu_torture_current_version;
  1469. }
  1470. /*
  1471. * Periodically prints torture statistics, if periodic statistics printing
  1472. * was specified via the stat_interval module parameter.
  1473. */
  1474. static int
  1475. rcu_torture_stats(void *arg)
  1476. {
  1477. VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
  1478. do {
  1479. schedule_timeout_interruptible(stat_interval * HZ);
  1480. rcu_torture_stats_print();
  1481. torture_shutdown_absorb("rcu_torture_stats");
  1482. } while (!torture_must_stop());
  1483. torture_kthread_stopping("rcu_torture_stats");
  1484. return 0;
  1485. }
  1486. static void
  1487. rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
  1488. {
  1489. pr_alert("%s" TORTURE_FLAG
  1490. "--- %s: nreaders=%d nfakewriters=%d "
  1491. "stat_interval=%d verbose=%d test_no_idle_hz=%d "
  1492. "shuffle_interval=%d stutter=%d irqreader=%d "
  1493. "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
  1494. "test_boost=%d/%d test_boost_interval=%d "
  1495. "test_boost_duration=%d shutdown_secs=%d "
  1496. "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
  1497. "stall_cpu_block=%d "
  1498. "n_barrier_cbs=%d "
  1499. "onoff_interval=%d onoff_holdoff=%d "
  1500. "read_exit_delay=%d read_exit_burst=%d\n",
  1501. torture_type, tag, nrealreaders, nfakewriters,
  1502. stat_interval, verbose, test_no_idle_hz, shuffle_interval,
  1503. stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
  1504. test_boost, cur_ops->can_boost,
  1505. test_boost_interval, test_boost_duration, shutdown_secs,
  1506. stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
  1507. stall_cpu_block,
  1508. n_barrier_cbs,
  1509. onoff_interval, onoff_holdoff,
  1510. read_exit_delay, read_exit_burst);
  1511. }
  1512. static int rcutorture_booster_cleanup(unsigned int cpu)
  1513. {
  1514. struct task_struct *t;
  1515. if (boost_tasks[cpu] == NULL)
  1516. return 0;
  1517. mutex_lock(&boost_mutex);
  1518. t = boost_tasks[cpu];
  1519. boost_tasks[cpu] = NULL;
  1520. rcu_torture_enable_rt_throttle();
  1521. mutex_unlock(&boost_mutex);
  1522. /* This must be outside of the mutex, otherwise deadlock! */
  1523. torture_stop_kthread(rcu_torture_boost, t);
  1524. return 0;
  1525. }
  1526. static int rcutorture_booster_init(unsigned int cpu)
  1527. {
  1528. int retval;
  1529. if (boost_tasks[cpu] != NULL)
  1530. return 0; /* Already created, nothing more to do. */
  1531. /* Don't allow time recalculation while creating a new task. */
  1532. mutex_lock(&boost_mutex);
  1533. rcu_torture_disable_rt_throttle();
  1534. VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
  1535. boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
  1536. cpu_to_node(cpu),
  1537. "rcu_torture_boost");
  1538. if (IS_ERR(boost_tasks[cpu])) {
  1539. retval = PTR_ERR(boost_tasks[cpu]);
  1540. VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
  1541. n_rcu_torture_boost_ktrerror++;
  1542. boost_tasks[cpu] = NULL;
  1543. mutex_unlock(&boost_mutex);
  1544. return retval;
  1545. }
  1546. kthread_bind(boost_tasks[cpu], cpu);
  1547. wake_up_process(boost_tasks[cpu]);
  1548. mutex_unlock(&boost_mutex);
  1549. return 0;
  1550. }
  1551. /*
  1552. * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  1553. * induces a CPU stall for the time specified by stall_cpu.
  1554. */
  1555. static int rcu_torture_stall(void *args)
  1556. {
  1557. int idx;
  1558. unsigned long stop_at;
  1559. VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
  1560. if (stall_cpu_holdoff > 0) {
  1561. VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
  1562. schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
  1563. VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
  1564. }
  1565. if (!kthread_should_stop() && stall_gp_kthread > 0) {
  1566. VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
  1567. rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
  1568. for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
  1569. if (kthread_should_stop())
  1570. break;
  1571. schedule_timeout_uninterruptible(HZ);
  1572. }
  1573. }
  1574. if (!kthread_should_stop() && stall_cpu > 0) {
  1575. VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
  1576. stop_at = ktime_get_seconds() + stall_cpu;
  1577. /* RCU CPU stall is expected behavior in following code. */
  1578. idx = cur_ops->readlock();
  1579. if (stall_cpu_irqsoff)
  1580. local_irq_disable();
  1581. else if (!stall_cpu_block)
  1582. preempt_disable();
  1583. pr_alert("rcu_torture_stall start on CPU %d.\n",
  1584. raw_smp_processor_id());
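/*
* Spin until stop_at, or, when stall_cpu_block is set, repeatedly
* block for one-second intervals inside this read-side critical
* section instead.
*/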
  1585. while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
  1586. stop_at))
  1587. if (stall_cpu_block)
  1588. schedule_timeout_uninterruptible(HZ);
  1589. if (stall_cpu_irqsoff)
  1590. local_irq_enable();
  1591. else if (!stall_cpu_block)
  1592. preempt_enable();
  1593. cur_ops->readunlock(idx);
  1594. }
  1595. pr_alert("rcu_torture_stall end.\n");
  1596. torture_shutdown_absorb("rcu_torture_stall");
  1597. while (!kthread_should_stop())
  1598. schedule_timeout_interruptible(10 * HZ);
  1599. return 0;
  1600. }
  1601. /* Spawn CPU-stall kthread, if stall_cpu specified. */
  1602. static int __init rcu_torture_stall_init(void)
  1603. {
  1604. if (stall_cpu <= 0 && stall_gp_kthread <= 0)
  1605. return 0;
  1606. return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
  1607. }
  1608. /* State structure for forward-progress self-propagating RCU callback. */
  1609. struct fwd_cb_state {
  1610. struct rcu_head rh;
  1611. int stop;
  1612. };
  1613. /*
  1614. * Forward-progress self-propagating RCU callback function. Because
  1615. * callbacks run from softirq, this function is an implicit RCU read-side
  1616. * critical section.
  1617. */
  1618. static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
  1619. {
  1620. struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
  1621. if (READ_ONCE(fcsp->stop)) {
  1622. WRITE_ONCE(fcsp->stop, 2);
  1623. return;
  1624. }
  1625. cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
  1626. }
  1627. /* State for continuous-flood RCU callbacks. */
  1628. struct rcu_fwd_cb {
  1629. struct rcu_head rh;
  1630. struct rcu_fwd_cb *rfc_next;
  1631. struct rcu_fwd *rfc_rfp;
  1632. int rfc_gps;
  1633. };
  1634. #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
  1635. #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
  1636. #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
  1637. #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
  1638. #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
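/* With the values above: 2 * 8 * HZ / (HZ / 10) = 160 buckets of 1/10 second each. */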
  1639. struct rcu_launder_hist {
  1640. long n_launders;
  1641. unsigned long launder_gp_seq;
  1642. };
  1643. struct rcu_fwd {
  1644. spinlock_t rcu_fwd_lock;
  1645. struct rcu_fwd_cb *rcu_fwd_cb_head;
  1646. struct rcu_fwd_cb **rcu_fwd_cb_tail;
  1647. long n_launders_cb;
  1648. unsigned long rcu_fwd_startat;
  1649. struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
  1650. unsigned long rcu_launder_gp_seq_start;
  1651. };
  1652. static DEFINE_MUTEX(rcu_fwd_mutex);
  1653. static struct rcu_fwd *rcu_fwds;
  1654. static bool rcu_fwd_emergency_stop;
  1655. static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
  1656. {
  1657. unsigned long gps;
  1658. unsigned long gps_old;
  1659. int i;
  1660. int j;
  1661. for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
  1662. if (rfp->n_launders_hist[i].n_launders > 0)
  1663. break;
  1664. pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
  1665. __func__, jiffies - rfp->rcu_fwd_startat);
  1666. gps_old = rfp->rcu_launder_gp_seq_start;
  1667. for (j = 0; j <= i; j++) {
  1668. gps = rfp->n_launders_hist[j].launder_gp_seq;
  1669. pr_cont(" %ds/%d: %ld:%ld",
  1670. j + 1, FWD_CBS_HIST_DIV,
  1671. rfp->n_launders_hist[j].n_launders,
  1672. rcutorture_seq_diff(gps, gps_old));
  1673. gps_old = gps;
  1674. }
  1675. pr_cont("\n");
  1676. }
  1677. /* Callback function for continuous-flood RCU callbacks. */
  1678. static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
  1679. {
  1680. unsigned long flags;
  1681. int i;
  1682. struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
  1683. struct rcu_fwd_cb **rfcpp;
  1684. struct rcu_fwd *rfp = rfcp->rfc_rfp;
  1685. rfcp->rfc_next = NULL;
  1686. rfcp->rfc_gps++;
  1687. spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
  1688. rfcpp = rfp->rcu_fwd_cb_tail;
  1689. rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
  1690. WRITE_ONCE(*rfcpp, rfcp);
  1691. WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
  1692. i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
  1693. if (i >= ARRAY_SIZE(rfp->n_launders_hist))
  1694. i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
  1695. rfp->n_launders_hist[i].n_launders++;
  1696. rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
  1697. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  1698. }
  1699. // Give the scheduler a chance, even on nohz_full CPUs.
  1700. static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
  1701. {
  1702. if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
  1703. // Real call_rcu() floods hit userspace, so emulate that.
  1704. if (need_resched() || (iter & 0xfff))
  1705. schedule();
  1706. return;
  1707. }
  1708. // No userspace emulation: CB invocation throttles call_rcu()
  1709. cond_resched();
  1710. }
  1711. /*
  1712. * Free all callbacks on the rcu_fwd_cb_head list, either because the
  1713. * test is over or because we hit an OOM event.
  1714. */
  1715. static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
  1716. {
  1717. unsigned long flags;
  1718. unsigned long freed = 0;
  1719. struct rcu_fwd_cb *rfcp;
  1720. for (;;) {
  1721. spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
  1722. rfcp = rfp->rcu_fwd_cb_head;
  1723. if (!rfcp) {
  1724. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  1725. break;
  1726. }
  1727. rfp->rcu_fwd_cb_head = rfcp->rfc_next;
  1728. if (!rfp->rcu_fwd_cb_head)
  1729. rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
  1730. spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
  1731. kfree(rfcp);
  1732. freed++;
  1733. rcu_torture_fwd_prog_cond_resched(freed);
  1734. if (tick_nohz_full_enabled()) {
  1735. local_irq_save(flags);
  1736. rcu_momentary_dyntick_idle();
  1737. local_irq_restore(flags);
  1738. }
  1739. }
  1740. return freed;
  1741. }
  1742. /* Carry out need_resched()/cond_resched() forward-progress testing. */
  1743. static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
  1744. int *tested, int *tested_tries)
  1745. {
  1746. unsigned long cver;
  1747. unsigned long dur;
  1748. struct fwd_cb_state fcs;
  1749. unsigned long gps;
  1750. int idx;
  1751. int sd;
  1752. int sd4;
  1753. bool selfpropcb = false;
  1754. unsigned long stopat;
  1755. static DEFINE_TORTURE_RANDOM(trs);
  1756. if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
  1757. init_rcu_head_on_stack(&fcs.rh);
  1758. selfpropcb = true;
  1759. }
  1760. /* Tight loop containing cond_resched(). */
  1761. WRITE_ONCE(rcu_fwd_cb_nodelay, true);
  1762. cur_ops->sync(); /* Later readers see above write. */
  1763. if (selfpropcb) {
  1764. WRITE_ONCE(fcs.stop, 0);
  1765. cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
  1766. }
  1767. cver = READ_ONCE(rcu_torture_current_version);
  1768. gps = cur_ops->get_gp_seq();
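/*
* Pick a duration between 1/fwd_progress_div of the RCU CPU stall
* timeout and the full timeout, so a healthy kernel should not emit a
* stall warning during the loop below.
*/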
  1769. sd = cur_ops->stall_dur() + 1;
  1770. sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
  1771. dur = sd4 + torture_random(&trs) % (sd - sd4);
  1772. WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
  1773. stopat = rfp->rcu_fwd_startat + dur;
  1774. while (time_before(jiffies, stopat) &&
  1775. !shutdown_time_arrived() &&
  1776. !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
  1777. idx = cur_ops->readlock();
  1778. udelay(10);
  1779. cur_ops->readunlock(idx);
  1780. if (!fwd_progress_need_resched || need_resched())
  1781. cond_resched();
  1782. }
  1783. (*tested_tries)++;
  1784. if (!time_before(jiffies, stopat) &&
  1785. !shutdown_time_arrived() &&
  1786. !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
  1787. (*tested)++;
  1788. cver = READ_ONCE(rcu_torture_current_version) - cver;
  1789. gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
  1790. WARN_ON(!cver && gps < 2);
  1791. pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
  1792. }
  1793. if (selfpropcb) {
  1794. WRITE_ONCE(fcs.stop, 1);
  1795. cur_ops->sync(); /* Wait for running CB to complete. */
  1796. cur_ops->cb_barrier(); /* Wait for queued callbacks. */
  1797. }
  1798. if (selfpropcb) {
  1799. WARN_ON(READ_ONCE(fcs.stop) != 2);
  1800. destroy_rcu_head_on_stack(&fcs.rh);
  1801. }
  1802. schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
  1803. WRITE_ONCE(rcu_fwd_cb_nodelay, false);
  1804. }
  1805. /* Carry out call_rcu() forward-progress testing. */
  1806. static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
  1807. {
  1808. unsigned long cver;
  1809. unsigned long flags;
  1810. unsigned long gps;
  1811. int i;
  1812. long n_launders;
  1813. long n_launders_cb_snap;
  1814. long n_launders_sa;
  1815. long n_max_cbs;
  1816. long n_max_gps;
  1817. struct rcu_fwd_cb *rfcp;
  1818. struct rcu_fwd_cb *rfcpn;
  1819. unsigned long stopat;
  1820. unsigned long stoppedat;
  1821. if (READ_ONCE(rcu_fwd_emergency_stop))
  1822. return; /* Get out of the way quickly, no GP wait! */
  1823. if (!cur_ops->call)
  1824. return; /* Can't do call_rcu() fwd prog without ->call. */
  1825. /* Loop continuously posting RCU callbacks. */
  1826. WRITE_ONCE(rcu_fwd_cb_nodelay, true);
  1827. cur_ops->sync(); /* Later readers see above write. */
  1828. WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
  1829. stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
  1830. n_launders = 0;
  1831. rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
  1832. n_launders_sa = 0;
  1833. n_max_cbs = 0;
  1834. n_max_gps = 0;
  1835. for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
  1836. rfp->n_launders_hist[i].n_launders = 0;
  1837. cver = READ_ONCE(rcu_torture_current_version);
  1838. gps = cur_ops->get_gp_seq();
  1839. rfp->rcu_launder_gp_seq_start = gps;
  1840. tick_dep_set_task(current, TICK_DEP_BIT_RCU);
  1841. while (time_before(jiffies, stopat) &&
  1842. !shutdown_time_arrived() &&
  1843. !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
  1844. rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
  1845. rfcpn = NULL;
  1846. if (rfcp)
  1847. rfcpn = READ_ONCE(rfcp->rfc_next);
  1848. if (rfcpn) {
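/*
* A callback that has been relaunched through at least
* MIN_FWD_CB_LAUNDERS grace periods counts as laundered; stop the
* flood once MIN_FWD_CBS_LAUNDERED laundered callbacks have been seen.
*/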
  1849. if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
  1850. ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
  1851. break;
  1852. rfp->rcu_fwd_cb_head = rfcpn;
  1853. n_launders++;
  1854. n_launders_sa++;
  1855. } else {
  1856. rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
  1857. if (WARN_ON_ONCE(!rfcp)) {
  1858. schedule_timeout_interruptible(1);
  1859. continue;
  1860. }
  1861. n_max_cbs++;
  1862. n_launders_sa = 0;
  1863. rfcp->rfc_gps = 0;
  1864. rfcp->rfc_rfp = rfp;
  1865. }
  1866. cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
  1867. rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
  1868. if (tick_nohz_full_enabled()) {
  1869. local_irq_save(flags);
  1870. rcu_momentary_dyntick_idle();
  1871. local_irq_restore(flags);
  1872. }
  1873. }
  1874. stoppedat = jiffies;
  1875. n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
  1876. cver = READ_ONCE(rcu_torture_current_version) - cver;
  1877. gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
  1878. cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
  1879. (void)rcu_torture_fwd_prog_cbfree(rfp);
  1880. if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
  1881. !shutdown_time_arrived()) {
  1882. WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
  1883. pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
  1884. __func__,
  1885. stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
  1886. n_launders + n_max_cbs - n_launders_cb_snap,
  1887. n_launders, n_launders_sa,
  1888. n_max_gps, n_max_cbs, cver, gps);
  1889. rcu_torture_fwd_cb_hist(rfp);
  1890. }
  1891. schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
  1892. tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
  1893. WRITE_ONCE(rcu_fwd_cb_nodelay, false);
  1894. }
  1895. /*
  1896. * OOM notifier, but this only prints diagnostic information for the
  1897. * current forward-progress test.
  1898. */
  1899. static int rcutorture_oom_notify(struct notifier_block *self,
  1900. unsigned long notused, void *nfreed)
  1901. {
  1902. struct rcu_fwd *rfp;
  1903. mutex_lock(&rcu_fwd_mutex);
  1904. rfp = rcu_fwds;
  1905. if (!rfp) {
  1906. mutex_unlock(&rcu_fwd_mutex);
  1907. return NOTIFY_OK;
  1908. }
  1909. WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
  1910. __func__);
  1911. rcu_torture_fwd_cb_hist(rfp);
  1912. rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
  1913. WRITE_ONCE(rcu_fwd_emergency_stop, true);
  1914. smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
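/*
* Alternate freeing the already-invoked callbacks with rcu_barrier(),
* giving callbacks still queued inside RCU a chance to be invoked and
* appended to the list before the next round of freeing.
*/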
  1915. pr_info("%s: Freed %lu RCU callbacks.\n",
  1916. __func__, rcu_torture_fwd_prog_cbfree(rfp));
  1917. rcu_barrier();
  1918. pr_info("%s: Freed %lu RCU callbacks.\n",
  1919. __func__, rcu_torture_fwd_prog_cbfree(rfp));
  1920. rcu_barrier();
  1921. pr_info("%s: Freed %lu RCU callbacks.\n",
  1922. __func__, rcu_torture_fwd_prog_cbfree(rfp));
  1923. smp_mb(); /* Frees before return to avoid redoing OOM. */
  1924. (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
  1925. pr_info("%s returning after OOM processing.\n", __func__);
  1926. mutex_unlock(&rcu_fwd_mutex);
  1927. return NOTIFY_OK;
  1928. }
  1929. static struct notifier_block rcutorture_oom_nb = {
  1930. .notifier_call = rcutorture_oom_notify
  1931. };
  1932. /* Carry out grace-period forward-progress testing. */
  1933. static int rcu_torture_fwd_prog(void *args)
  1934. {
  1935. struct rcu_fwd *rfp = args;
  1936. int tested = 0;
  1937. int tested_tries = 0;
  1938. VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
  1939. rcu_bind_current_to_nocb();
  1940. if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
  1941. set_user_nice(current, MAX_NICE);
  1942. do {
  1943. schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
  1944. WRITE_ONCE(rcu_fwd_emergency_stop, false);
  1945. if (!IS_ENABLED(CONFIG_TINY_RCU) ||
  1946. rcu_inkernel_boot_has_ended())
  1947. rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
  1948. if (rcu_inkernel_boot_has_ended())
  1949. rcu_torture_fwd_prog_cr(rfp);
  1950. /* Avoid slow periods, better to test when busy. */
  1951. stutter_wait("rcu_torture_fwd_prog");
  1952. } while (!torture_must_stop());
  1953. /* Short runs might not contain a valid forward-progress attempt. */
  1954. WARN_ON(!tested && tested_tries >= 5);
  1955. pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
  1956. torture_kthread_stopping("rcu_torture_fwd_prog");
  1957. return 0;
  1958. }
  1959. /* If forward-progress checking is requested and feasible, spawn the thread. */
  1960. static int __init rcu_torture_fwd_prog_init(void)
  1961. {
  1962. struct rcu_fwd *rfp;
  1963. if (!fwd_progress)
  1964. return 0; /* Not requested, so don't do it. */
  1965. if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
  1966. cur_ops == &rcu_busted_ops) {
  1967. VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
  1968. return 0;
  1969. }
  1970. if (stall_cpu > 0) {
  1971. VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
1972. if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
  1973. return -EINVAL; /* In module, can fail back to user. */
  1974. WARN_ON(1); /* Make sure rcutorture notices conflict. */
  1975. return 0;
  1976. }
  1977. if (fwd_progress_holdoff <= 0)
  1978. fwd_progress_holdoff = 1;
  1979. if (fwd_progress_div <= 0)
  1980. fwd_progress_div = 4;
  1981. rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
  1982. if (!rfp)
  1983. return -ENOMEM;
  1984. spin_lock_init(&rfp->rcu_fwd_lock);
  1985. rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
  1986. mutex_lock(&rcu_fwd_mutex);
  1987. rcu_fwds = rfp;
  1988. mutex_unlock(&rcu_fwd_mutex);
  1989. register_oom_notifier(&rcutorture_oom_nb);
  1990. return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
  1991. }
  1992. static void rcu_torture_fwd_prog_cleanup(void)
  1993. {
  1994. struct rcu_fwd *rfp;
  1995. torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
  1996. rfp = rcu_fwds;
  1997. mutex_lock(&rcu_fwd_mutex);
  1998. rcu_fwds = NULL;
  1999. mutex_unlock(&rcu_fwd_mutex);
  2000. unregister_oom_notifier(&rcutorture_oom_nb);
  2001. kfree(rfp);
  2002. }
  2003. /* Callback function for RCU barrier testing. */
  2004. static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
  2005. {
  2006. atomic_inc(&barrier_cbs_invoked);
  2007. }
  2008. /* IPI handler to get callback posted on desired CPU, if online. */
  2009. static void rcu_torture_barrier1cb(void *rcu_void)
  2010. {
  2011. struct rcu_head *rhp = rcu_void;
  2012. cur_ops->call(rhp, rcu_torture_barrier_cbf);
  2013. }
  2014. /* kthread function to register callbacks used to test RCU barriers. */
  2015. static int rcu_torture_barrier_cbs(void *arg)
  2016. {
  2017. long myid = (long)arg;
  2018. bool lastphase = false;
  2019. bool newphase;
  2020. struct rcu_head rcu;
  2021. init_rcu_head_on_stack(&rcu);
  2022. VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
  2023. set_user_nice(current, MAX_NICE);
  2024. do {
  2025. wait_event(barrier_cbs_wq[myid],
  2026. (newphase =
  2027. smp_load_acquire(&barrier_phase)) != lastphase ||
  2028. torture_must_stop());
  2029. lastphase = newphase;
  2030. if (torture_must_stop())
  2031. break;
  2032. /*
  2033. * The above smp_load_acquire() ensures barrier_phase load
  2034. * is ordered before the following ->call().
  2035. */
  2036. if (smp_call_function_single(myid, rcu_torture_barrier1cb,
  2037. &rcu, 1)) {
  2038. // IPI failed, so use direct call from current CPU.
  2039. cur_ops->call(&rcu, rcu_torture_barrier_cbf);
  2040. }
  2041. if (atomic_dec_and_test(&barrier_cbs_count))
  2042. wake_up(&barrier_wq);
  2043. } while (!torture_must_stop());
  2044. if (cur_ops->cb_barrier != NULL)
  2045. cur_ops->cb_barrier();
  2046. destroy_rcu_head_on_stack(&rcu);
  2047. torture_kthread_stopping("rcu_torture_barrier_cbs");
  2048. return 0;
  2049. }
  2050. /* kthread function to drive and coordinate RCU barrier testing. */
  2051. static int rcu_torture_barrier(void *arg)
  2052. {
  2053. int i;
  2054. VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
  2055. do {
  2056. atomic_set(&barrier_cbs_invoked, 0);
  2057. atomic_set(&barrier_cbs_count, n_barrier_cbs);
  2058. /* Ensure barrier_phase ordered after prior assignments. */
  2059. smp_store_release(&barrier_phase, !barrier_phase);
  2060. for (i = 0; i < n_barrier_cbs; i++)
  2061. wake_up(&barrier_cbs_wq[i]);
  2062. wait_event(barrier_wq,
  2063. atomic_read(&barrier_cbs_count) == 0 ||
  2064. torture_must_stop());
  2065. if (torture_must_stop())
  2066. break;
  2067. n_barrier_attempts++;
  2068. cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
  2069. if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
  2070. n_rcu_torture_barrier_error++;
  2071. pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
  2072. atomic_read(&barrier_cbs_invoked),
  2073. n_barrier_cbs);
  2074. WARN_ON(1);
  2075. // Wait manually for the remaining callbacks
  2076. i = 0;
  2077. do {
  2078. if (WARN_ON(i++ > HZ))
  2079. i = INT_MIN;
  2080. schedule_timeout_interruptible(1);
  2081. cur_ops->cb_barrier();
  2082. } while (atomic_read(&barrier_cbs_invoked) !=
  2083. n_barrier_cbs &&
  2084. !torture_must_stop());
  2085. smp_mb(); // Can't trust ordering if broken.
  2086. if (!torture_must_stop())
  2087. pr_err("Recovered: barrier_cbs_invoked = %d\n",
  2088. atomic_read(&barrier_cbs_invoked));
  2089. } else {
  2090. n_barrier_successes++;
  2091. }
  2092. schedule_timeout_interruptible(HZ / 10);
  2093. } while (!torture_must_stop());
  2094. torture_kthread_stopping("rcu_torture_barrier");
  2095. return 0;
  2096. }
  2097. /* Initialize RCU barrier testing. */
  2098. static int rcu_torture_barrier_init(void)
  2099. {
  2100. int i;
  2101. int ret;
  2102. if (n_barrier_cbs <= 0)
  2103. return 0;
  2104. if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
  2105. pr_alert("%s" TORTURE_FLAG
  2106. " Call or barrier ops missing for %s,\n",
  2107. torture_type, cur_ops->name);
  2108. pr_alert("%s" TORTURE_FLAG
  2109. " RCU barrier testing omitted from run.\n",
  2110. torture_type);
  2111. return 0;
  2112. }
  2113. atomic_set(&barrier_cbs_count, 0);
  2114. atomic_set(&barrier_cbs_invoked, 0);
  2115. barrier_cbs_tasks =
  2116. kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
  2117. GFP_KERNEL);
  2118. barrier_cbs_wq =
  2119. kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
  2120. if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
  2121. return -ENOMEM;
  2122. for (i = 0; i < n_barrier_cbs; i++) {
  2123. init_waitqueue_head(&barrier_cbs_wq[i]);
  2124. ret = torture_create_kthread(rcu_torture_barrier_cbs,
  2125. (void *)(long)i,
  2126. barrier_cbs_tasks[i]);
  2127. if (ret)
  2128. return ret;
  2129. }
  2130. return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
  2131. }
  2132. /* Clean up after RCU barrier testing. */
  2133. static void rcu_torture_barrier_cleanup(void)
  2134. {
  2135. int i;
  2136. torture_stop_kthread(rcu_torture_barrier, barrier_task);
  2137. if (barrier_cbs_tasks != NULL) {
  2138. for (i = 0; i < n_barrier_cbs; i++)
  2139. torture_stop_kthread(rcu_torture_barrier_cbs,
  2140. barrier_cbs_tasks[i]);
  2141. kfree(barrier_cbs_tasks);
  2142. barrier_cbs_tasks = NULL;
  2143. }
  2144. if (barrier_cbs_wq != NULL) {
  2145. kfree(barrier_cbs_wq);
  2146. barrier_cbs_wq = NULL;
  2147. }
  2148. }
  2149. static bool rcu_torture_can_boost(void)
  2150. {
  2151. static int boost_warn_once;
  2152. int prio;
  2153. if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
  2154. return false;
  2155. prio = rcu_get_gp_kthreads_prio();
  2156. if (!prio)
  2157. return false;
  2158. if (prio < 2) {
  2159. if (boost_warn_once == 1)
  2160. return false;
  2161. pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
  2162. boost_warn_once = 1;
  2163. return false;
  2164. }
  2165. return true;
  2166. }
  2167. static bool read_exit_child_stop;
  2168. static bool read_exit_child_stopped;
  2169. static wait_queue_head_t read_exit_wq;
  2170. // Child kthread which just does an rcutorture reader and exits.
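// The read happens only once kthread_stop() has been requested, so the
// read-side critical section completes immediately before the task exits.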
  2171. static int rcu_torture_read_exit_child(void *trsp_in)
  2172. {
  2173. struct torture_random_state *trsp = trsp_in;
  2174. set_user_nice(current, MAX_NICE);
  2175. // Minimize time between reading and exiting.
  2176. while (!kthread_should_stop())
  2177. schedule_timeout_uninterruptible(1);
  2178. (void)rcu_torture_one_read(trsp);
  2179. return 0;
  2180. }
  2181. // Parent kthread which creates and destroys read-exit child kthreads.
  2182. static int rcu_torture_read_exit(void *unused)
  2183. {
  2184. int count = 0;
  2185. bool errexit = false;
  2186. int i;
  2187. struct task_struct *tsp;
  2188. DEFINE_TORTURE_RANDOM(trs);
  2189. // Allocate and initialize.
  2190. set_user_nice(current, MAX_NICE);
  2191. VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
  2192. // Each pass through this loop does one read-exit episode.
  2193. do {
  2194. if (++count > read_exit_burst) {
  2195. VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
  2196. rcu_barrier(); // Wait for task_struct free, avoid OOM.
  2197. for (i = 0; i < read_exit_delay; i++) {
  2198. schedule_timeout_uninterruptible(HZ);
  2199. if (READ_ONCE(read_exit_child_stop))
  2200. break;
  2201. }
  2202. if (!READ_ONCE(read_exit_child_stop))
  2203. VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
  2204. count = 0;
  2205. }
  2206. if (READ_ONCE(read_exit_child_stop))
  2207. break;
  2208. // Spawn child.
  2209. tsp = kthread_run(rcu_torture_read_exit_child,
  2210. &trs, "%s",
  2211. "rcu_torture_read_exit_child");
  2212. if (IS_ERR(tsp)) {
  2213. VERBOSE_TOROUT_ERRSTRING("out of memory");
  2214. errexit = true;
  2215. tsp = NULL;
  2216. break;
  2217. }
  2218. cond_resched();
  2219. kthread_stop(tsp);
2220. n_read_exits++;
  2221. stutter_wait("rcu_torture_read_exit");
  2222. } while (!errexit && !READ_ONCE(read_exit_child_stop));
  2223. // Clean up and exit.
  2224. smp_store_release(&read_exit_child_stopped, true); // After reaping.
  2225. smp_mb(); // Store before wakeup.
  2226. wake_up(&read_exit_wq);
  2227. while (!torture_must_stop())
  2228. schedule_timeout_uninterruptible(1);
  2229. torture_kthread_stopping("rcu_torture_read_exit");
  2230. return 0;
  2231. }
  2232. static int rcu_torture_read_exit_init(void)
  2233. {
  2234. if (read_exit_burst <= 0)
  2235. return -EINVAL;
  2236. init_waitqueue_head(&read_exit_wq);
  2237. read_exit_child_stop = false;
  2238. read_exit_child_stopped = false;
  2239. return torture_create_kthread(rcu_torture_read_exit, NULL,
  2240. read_exit_task);
  2241. }
  2242. static void rcu_torture_read_exit_cleanup(void)
  2243. {
  2244. if (!read_exit_task)
  2245. return;
  2246. WRITE_ONCE(read_exit_child_stop, true);
  2247. smp_mb(); // Above write before wait.
  2248. wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2249. torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
  2250. }
  2251. static enum cpuhp_state rcutor_hp;
  2252. static void
  2253. rcu_torture_cleanup(void)
  2254. {
  2255. int firsttime;
  2256. int flags = 0;
  2257. unsigned long gp_seq = 0;
  2258. int i;
  2259. if (torture_cleanup_begin()) {
  2260. if (cur_ops->cb_barrier != NULL)
  2261. cur_ops->cb_barrier();
  2262. return;
  2263. }
  2264. if (!cur_ops) {
  2265. torture_cleanup_end();
  2266. return;
  2267. }
  2268. show_rcu_gp_kthreads();
  2269. rcu_torture_read_exit_cleanup();
  2270. rcu_torture_barrier_cleanup();
  2271. rcu_torture_fwd_prog_cleanup();
  2272. torture_stop_kthread(rcu_torture_stall, stall_task);
  2273. torture_stop_kthread(rcu_torture_writer, writer_task);
  2274. if (reader_tasks) {
  2275. for (i = 0; i < nrealreaders; i++)
  2276. torture_stop_kthread(rcu_torture_reader,
  2277. reader_tasks[i]);
  2278. kfree(reader_tasks);
  2279. }
  2280. if (fakewriter_tasks) {
  2281. for (i = 0; i < nfakewriters; i++) {
  2282. torture_stop_kthread(rcu_torture_fakewriter,
  2283. fakewriter_tasks[i]);
  2284. }
  2285. kfree(fakewriter_tasks);
  2286. fakewriter_tasks = NULL;
  2287. }
  2288. rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
  2289. srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
  2290. pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
  2291. cur_ops->name, (long)gp_seq, flags,
  2292. rcutorture_seq_diff(gp_seq, start_gp_seq));
  2293. torture_stop_kthread(rcu_torture_stats, stats_task);
  2294. torture_stop_kthread(rcu_torture_fqs, fqs_task);
  2295. if (rcu_torture_can_boost())
  2296. cpuhp_remove_state(rcutor_hp);
  2297. /*
  2298. * Wait for all RCU callbacks to fire, then do torture-type-specific
  2299. * cleanup operations.
  2300. */
  2301. if (cur_ops->cb_barrier != NULL)
  2302. cur_ops->cb_barrier();
  2303. if (cur_ops->cleanup != NULL)
  2304. cur_ops->cleanup();
  2305. rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
  2306. if (err_segs_recorded) {
  2307. pr_alert("Failure/close-call rcutorture reader segments:\n");
  2308. if (rt_read_nsegs == 0)
  2309. pr_alert("\t: No segments recorded!!!\n");
  2310. firsttime = 1;
  2311. for (i = 0; i < rt_read_nsegs; i++) {
  2312. pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
  2313. if (err_segs[i].rt_delay_jiffies != 0) {
  2314. pr_cont("%s%ldjiffies", firsttime ? "" : "+",
  2315. err_segs[i].rt_delay_jiffies);
  2316. firsttime = 0;
  2317. }
  2318. if (err_segs[i].rt_delay_ms != 0) {
  2319. pr_cont("%s%ldms", firsttime ? "" : "+",
  2320. err_segs[i].rt_delay_ms);
  2321. firsttime = 0;
  2322. }
  2323. if (err_segs[i].rt_delay_us != 0) {
  2324. pr_cont("%s%ldus", firsttime ? "" : "+",
  2325. err_segs[i].rt_delay_us);
  2326. firsttime = 0;
  2327. }
  2328. pr_cont("%s\n",
  2329. err_segs[i].rt_preempted ? "preempted" : "");
  2330. }
  2331. }
  2332. if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
  2333. rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
  2334. else if (torture_onoff_failures())
  2335. rcu_torture_print_module_parms(cur_ops,
  2336. "End of test: RCU_HOTPLUG");
  2337. else
  2338. rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
  2339. torture_cleanup_end();
  2340. }
  2341. #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
  2342. static void rcu_torture_leak_cb(struct rcu_head *rhp)
  2343. {
  2344. }
  2345. static void rcu_torture_err_cb(struct rcu_head *rhp)
  2346. {
  2347. /*
  2348. * This -might- happen due to race conditions, but is unlikely.
  2349. * The scenario that leads to this happening is that the
  2350. * first of the pair of duplicate callbacks is queued,
  2351. * someone else starts a grace period that includes that
  2352. * callback, then the second of the pair must wait for the
  2353. * next grace period. Unlikely, but can happen. If it
  2354. * does happen, the debug-objects subsystem won't have splatted.
  2355. */
  2356. pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
  2357. }
  2358. #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
  2359. /*
  2360. * Verify that double-free causes debug-objects to complain, but only
  2361. * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
  2362. * cannot be carried out.
  2363. */
  2364. static void rcu_test_debug_objects(void)
  2365. {
  2366. #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
  2367. struct rcu_head rh1;
  2368. struct rcu_head rh2;
  2369. init_rcu_head_on_stack(&rh1);
  2370. init_rcu_head_on_stack(&rh2);
  2371. pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
  2372. /* Try to queue the rh2 pair of callbacks for the same grace period. */
  2373. preempt_disable(); /* Prevent preemption from interrupting test. */
  2374. rcu_read_lock(); /* Make it impossible to finish a grace period. */
  2375. call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
  2376. local_irq_disable(); /* Make it harder to start a new grace period. */
  2377. call_rcu(&rh2, rcu_torture_leak_cb);
  2378. call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
  2379. local_irq_enable();
  2380. rcu_read_unlock();
  2381. preempt_enable();
  2382. /* Wait for them all to get done so we can safely return. */
  2383. rcu_barrier();
  2384. pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
  2385. destroy_rcu_head_on_stack(&rh1);
  2386. destroy_rcu_head_on_stack(&rh2);
  2387. #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
  2388. pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
  2389. #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
  2390. }
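/*
* Force a synchronous grace period on every 4096th invocation; this is
* the sync hook handed to torture_onoff_init() during module init below.
*/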
  2391. static void rcutorture_sync(void)
  2392. {
  2393. static unsigned long n;
  2394. if (cur_ops->sync && !(++n & 0xfff))
  2395. cur_ops->sync();
  2396. }
  2397. static int __init
  2398. rcu_torture_init(void)
  2399. {
  2400. long i;
  2401. int cpu;
  2402. int firsterr = 0;
  2403. int flags = 0;
  2404. unsigned long gp_seq = 0;
  2405. static struct rcu_torture_ops *torture_ops[] = {
  2406. &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
  2407. &busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
  2408. &tasks_tracing_ops, &trivial_ops,
  2409. };
  2410. if (!torture_init_begin(torture_type, verbose))
  2411. return -EBUSY;
  2412. /* Process args and tell the world that the torturer is on the job. */
  2413. for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
  2414. cur_ops = torture_ops[i];
  2415. if (strcmp(torture_type, cur_ops->name) == 0)
  2416. break;
  2417. }
  2418. if (i == ARRAY_SIZE(torture_ops)) {
  2419. pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
  2420. torture_type);
  2421. pr_alert("rcu-torture types:");
  2422. for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
  2423. pr_cont(" %s", torture_ops[i]->name);
  2424. pr_cont("\n");
  2425. WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
  2426. firsterr = -EINVAL;
  2427. cur_ops = NULL;
  2428. goto unwind;
  2429. }
  2430. if (cur_ops->fqs == NULL && fqs_duration != 0) {
  2431. pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
  2432. fqs_duration = 0;
  2433. }
  2434. if (cur_ops->init)
  2435. cur_ops->init();
  2436. if (nreaders >= 0) {
  2437. nrealreaders = nreaders;
  2438. } else {
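/*
* A negative nreaders scales with the machine: for example, nreaders
* of -1 yields num_online_cpus() - 1 readers, with a floor of one.
*/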
  2439. nrealreaders = num_online_cpus() - 2 - nreaders;
  2440. if (nrealreaders <= 0)
  2441. nrealreaders = 1;
  2442. }
  2443. rcu_torture_print_module_parms(cur_ops, "Start of test");
  2444. rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
  2445. srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
  2446. start_gp_seq = gp_seq;
  2447. pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
  2448. cur_ops->name, (long)gp_seq, flags);
  2449. /* Set up the freelist. */
  2450. INIT_LIST_HEAD(&rcu_torture_freelist);
  2451. for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
  2452. rcu_tortures[i].rtort_mbtest = 0;
  2453. list_add_tail(&rcu_tortures[i].rtort_free,
  2454. &rcu_torture_freelist);
  2455. }
  2456. /* Initialize the statistics so that each run gets its own numbers. */
  2457. rcu_torture_current = NULL;
  2458. rcu_torture_current_version = 0;
  2459. atomic_set(&n_rcu_torture_alloc, 0);
  2460. atomic_set(&n_rcu_torture_alloc_fail, 0);
  2461. atomic_set(&n_rcu_torture_free, 0);
  2462. atomic_set(&n_rcu_torture_mberror, 0);
  2463. atomic_set(&n_rcu_torture_error, 0);
  2464. n_rcu_torture_barrier_error = 0;
  2465. n_rcu_torture_boost_ktrerror = 0;
  2466. n_rcu_torture_boost_rterror = 0;
  2467. n_rcu_torture_boost_failure = 0;
  2468. n_rcu_torture_boosts = 0;
  2469. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  2470. atomic_set(&rcu_torture_wcount[i], 0);
  2471. for_each_possible_cpu(cpu) {
  2472. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  2473. per_cpu(rcu_torture_count, cpu)[i] = 0;
  2474. per_cpu(rcu_torture_batch, cpu)[i] = 0;
  2475. }
  2476. }
  2477. err_segs_recorded = 0;
  2478. rt_read_nsegs = 0;
  2479. /* Start up the kthreads. */
  2480. firsterr = torture_create_kthread(rcu_torture_writer, NULL,
  2481. writer_task);
  2482. if (firsterr)
  2483. goto unwind;
  2484. if (nfakewriters > 0) {
  2485. fakewriter_tasks = kcalloc(nfakewriters,
  2486. sizeof(fakewriter_tasks[0]),
  2487. GFP_KERNEL);
  2488. if (fakewriter_tasks == NULL) {
  2489. VERBOSE_TOROUT_ERRSTRING("out of memory");
  2490. firsterr = -ENOMEM;
  2491. goto unwind;
  2492. }
  2493. }
  2494. for (i = 0; i < nfakewriters; i++) {
  2495. firsterr = torture_create_kthread(rcu_torture_fakewriter,
  2496. NULL, fakewriter_tasks[i]);
  2497. if (firsterr)
  2498. goto unwind;
  2499. }
  2500. reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
  2501. GFP_KERNEL);
  2502. if (reader_tasks == NULL) {
  2503. VERBOSE_TOROUT_ERRSTRING("out of memory");
  2504. firsterr = -ENOMEM;
  2505. goto unwind;
  2506. }
  2507. for (i = 0; i < nrealreaders; i++) {
  2508. firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
  2509. reader_tasks[i]);
  2510. if (firsterr)
  2511. goto unwind;
  2512. }
  2513. if (stat_interval > 0) {
  2514. firsterr = torture_create_kthread(rcu_torture_stats, NULL,
  2515. stats_task);
  2516. if (firsterr)
  2517. goto unwind;
  2518. }
  2519. if (test_no_idle_hz && shuffle_interval > 0) {
  2520. firsterr = torture_shuffle_init(shuffle_interval * HZ);
  2521. if (firsterr)
  2522. goto unwind;
  2523. }
  2524. if (stutter < 0)
  2525. stutter = 0;
  2526. if (stutter) {
  2527. int t;
  2528. t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
  2529. firsterr = torture_stutter_init(stutter * HZ, t);
  2530. if (firsterr)
  2531. goto unwind;
  2532. }
  2533. if (fqs_duration < 0)
  2534. fqs_duration = 0;
  2535. if (fqs_duration) {
  2536. /* Create the fqs thread */
  2537. firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
  2538. fqs_task);
  2539. if (firsterr)
  2540. goto unwind;
  2541. }
  2542. if (test_boost_interval < 1)
  2543. test_boost_interval = 1;
  2544. if (test_boost_duration < 2)
  2545. test_boost_duration = 2;
  2546. if (rcu_torture_can_boost()) {
  2547. boost_starttime = jiffies + test_boost_interval * HZ;
  2548. firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
  2549. rcutorture_booster_init,
  2550. rcutorture_booster_cleanup);
  2551. if (firsterr < 0)
  2552. goto unwind;
  2553. rcutor_hp = firsterr;
  2554. }
  2555. shutdown_jiffies = jiffies + shutdown_secs * HZ;
  2556. firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
  2557. if (firsterr)
  2558. goto unwind;
  2559. firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
  2560. rcutorture_sync);
  2561. if (firsterr)
  2562. goto unwind;
  2563. firsterr = rcu_torture_stall_init();
  2564. if (firsterr)
  2565. goto unwind;
  2566. firsterr = rcu_torture_fwd_prog_init();
  2567. if (firsterr)
  2568. goto unwind;
  2569. firsterr = rcu_torture_barrier_init();
  2570. if (firsterr)
  2571. goto unwind;
  2572. firsterr = rcu_torture_read_exit_init();
  2573. if (firsterr)
  2574. goto unwind;
  2575. if (object_debug)
  2576. rcu_test_debug_objects();
  2577. torture_init_end();
  2578. return 0;
  2579. unwind:
  2580. torture_init_end();
  2581. rcu_torture_cleanup();
  2582. return firsterr;
  2583. }
  2584. module_init(rcu_torture_init);
  2585. module_exit(rcu_torture_cleanup);