printk_ringbuffer.c 67 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/kernel.h>
  3. #include <linux/irqflags.h>
  4. #include <linux/string.h>
  5. #include <linux/errno.h>
  6. #include <linux/bug.h>
  7. #include "printk_ringbuffer.h"
  8. /**
  9. * DOC: printk_ringbuffer overview
  10. *
  11. * Data Structure
  12. * --------------
  13. * The printk_ringbuffer is made up of 3 internal ringbuffers:
  14. *
  15. * desc_ring
  16. * A ring of descriptors and their meta data (such as sequence number,
  17. * timestamp, loglevel, etc.) as well as internal state information about
  18. * the record and logical positions specifying where in the other
  19. * ringbuffer the text strings are located.
  20. *
  21. * text_data_ring
  22. * A ring of data blocks. A data block consists of an unsigned long
  23. * integer (ID) that maps to a desc_ring index followed by the text
  24. * string of the record.
  25. *
  26. * The internal state information of a descriptor is the key element to allow
  27. * readers and writers to locklessly synchronize access to the data.
  28. *
  29. * Implementation
  30. * --------------
  31. *
  32. * Descriptor Ring
  33. * ~~~~~~~~~~~~~~~
  34. * The descriptor ring is an array of descriptors. A descriptor contains
  35. * essential meta data to track the data of a printk record using
  36. * blk_lpos structs pointing to associated text data blocks (see
  37. * "Data Rings" below). Each descriptor is assigned an ID that maps
  38. * directly to index values of the descriptor array and has a state. The ID
  39. * and the state are bitwise combined into a single descriptor field named
  40. * @state_var, allowing ID and state to be synchronously and atomically
  41. * updated.
  42. *
  43. * Descriptors have four states:
  44. *
  45. * reserved
  46. * A writer is modifying the record.
  47. *
  48. * committed
  49. * The record and all its data are written. A writer can reopen the
  50. * descriptor (transitioning it back to reserved), but in the committed
  51. * state the data is consistent.
  52. *
  53. * finalized
  54. * The record and all its data are complete and available for reading. A
  55. * writer cannot reopen the descriptor.
  56. *
  57. * reusable
  58. * The record exists, but its text and/or meta data may no longer be
  59. * available.
  60. *
  61. * Querying the @state_var of a record requires providing the ID of the
  62. * descriptor to query. This can yield a possible fifth (pseudo) state:
  63. *
  64. * miss
  65. * The descriptor being queried has an unexpected ID.
  66. *
  67. * The descriptor ring has a @tail_id that contains the ID of the oldest
  68. * descriptor and @head_id that contains the ID of the newest descriptor.
  69. *
  70. * When a new descriptor should be created (and the ring is full), the tail
  71. * descriptor is invalidated by first transitioning to the reusable state and
  72. * then invalidating all tail data blocks up to and including the data blocks
  73. * associated with the tail descriptor (for the text ring). Then
  74. * @tail_id is advanced, followed by advancing @head_id. And finally the
  75. * @state_var of the new descriptor is initialized to the new ID and reserved
  76. * state.
  77. *
  78. * The @tail_id can only be advanced if the new @tail_id would be in the
  79. * committed or reusable queried state. This makes it possible that a valid
  80. * sequence number of the tail is always available.
  81. *
  82. * Descriptor Finalization
  83. * ~~~~~~~~~~~~~~~~~~~~~~~
  84. * When a writer calls the commit function prb_commit(), record data is
  85. * fully stored and is consistent within the ringbuffer. However, a writer can
  86. * reopen that record, claiming exclusive access (as with prb_reserve()), and
  87. * modify that record. When finished, the writer must again commit the record.
  88. *
  89. * In order for a record to be made available to readers (and also become
  90. * recyclable for writers), it must be finalized. A finalized record cannot be
  91. * reopened and can never become "unfinalized". Record finalization can occur
  92. * in three different scenarios:
  93. *
  94. * 1) A writer can simultaneously commit and finalize its record by calling
  95. * prb_final_commit() instead of prb_commit().
  96. *
  97. * 2) When a new record is reserved and the previous record has been
  98. * committed via prb_commit(), that previous record is automatically
  99. * finalized.
  100. *
  101. * 3) When a record is committed via prb_commit() and a newer record
  102. * already exists, the record being committed is automatically finalized.
  103. *
  104. * Data Ring
  105. * ~~~~~~~~~
  106. * The text data ring is a byte array composed of data blocks. Data blocks are
  107. * referenced by blk_lpos structs that point to the logical position of the
  108. * beginning of a data block and the beginning of the next adjacent data
  109. * block. Logical positions are mapped directly to index values of the byte
  110. * array ringbuffer.
  111. *
  112. * Each data block consists of an ID followed by the writer data. The ID is
  113. * the identifier of a descriptor that is associated with the data block. A
  114. * given data block is considered valid if all of the following conditions
  115. * are met:
  116. *
  117. * 1) The descriptor associated with the data block is in the committed
  118. * or finalized queried state.
  119. *
  120. * 2) The blk_lpos struct within the descriptor associated with the data
  121. * block references back to the same data block.
  122. *
  123. * 3) The data block is within the head/tail logical position range.
  124. *
  125. * If the writer data of a data block would extend beyond the end of the
  126. * byte array, only the ID of the data block is stored at the logical
  127. * position and the full data block (ID and writer data) is stored at the
  128. * beginning of the byte array. The referencing blk_lpos will point to the
  129. * ID before the wrap and the next data block will be at the logical
  130. * position adjacent the full data block after the wrap.
  131. *
  132. * Data rings have a @tail_lpos that points to the beginning of the oldest
  133. * data block and a @head_lpos that points to the logical position of the
  134. * next (not yet existing) data block.
  135. *
  136. * When a new data block should be created (and the ring is full), tail data
  137. * blocks will first be invalidated by putting their associated descriptors
  138. * into the reusable state and then pushing the @tail_lpos forward beyond
  139. * them. Then the @head_lpos is pushed forward and is associated with a new
  140. * descriptor. If a data block is not valid, the @tail_lpos cannot be
  141. * advanced beyond it.
  142. *
  143. * Info Array
  144. * ~~~~~~~~~~
  145. * The general meta data of printk records are stored in printk_info structs,
  146. * stored in an array with the same number of elements as the descriptor ring.
  147. * Each info corresponds to the descriptor of the same index in the
  148. * descriptor ring. Info validity is confirmed by evaluating the corresponding
  149. * descriptor before and after loading the info.
  150. *
  151. * Usage
  152. * -----
  153. * Here are some simple examples demonstrating writers and readers. For the
  154. * examples a global ringbuffer (test_rb) is available (which is not the
  155. * actual ringbuffer used by printk)::
  156. *
  157. * DEFINE_PRINTKRB(test_rb, 15, 5);
  158. *
  159. * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
  160. * 1 MiB (2 ^ (15 + 5)) for text data.
  161. *
  162. * Sample writer code::
  163. *
  164. * const char *textstr = "message text";
  165. * struct prb_reserved_entry e;
  166. * struct printk_record r;
  167. *
  168. * // specify how much to allocate
  169. * prb_rec_init_wr(&r, strlen(textstr) + 1);
  170. *
  171. * if (prb_reserve(&e, &test_rb, &r)) {
  172. * snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
  173. *
  174. * r.info->text_len = strlen(textstr);
  175. * r.info->ts_nsec = local_clock();
  176. * r.info->caller_id = printk_caller_id();
  177. *
  178. * // commit and finalize the record
  179. * prb_final_commit(&e);
  180. * }
  181. *
  182. * Note that additional writer functions are available to extend a record
  183. * after it has been committed but not yet finalized. This can be done as
  184. * long as no new records have been reserved and the caller is the same.
  185. *
  186. * Sample writer code (record extending)::
  187. *
  188. * // alternate rest of previous example
  189. *
  190. * r.info->text_len = strlen(textstr);
  191. * r.info->ts_nsec = local_clock();
  192. * r.info->caller_id = printk_caller_id();
  193. *
  194. * // commit the record (but do not finalize yet)
  195. * prb_commit(&e);
  196. * }
  197. *
  198. * ...
  199. *
  200. * // specify additional 5 bytes text space to extend
  201. * prb_rec_init_wr(&r, 5);
  202. *
  203. * // try to extend, but only if it does not exceed 32 bytes
  204. * if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id()), 32) {
  205. * snprintf(&r.text_buf[r.info->text_len],
  206. * r.text_buf_size - r.info->text_len, "hello");
  207. *
  208. * r.info->text_len += 5;
  209. *
  210. * // commit and finalize the record
  211. * prb_final_commit(&e);
  212. * }
  213. *
  214. * Sample reader code::
  215. *
  216. * struct printk_info info;
  217. * struct printk_record r;
  218. * char text_buf[32];
  219. * u64 seq;
  220. *
  221. * prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
  222. *
  223. * prb_for_each_record(0, &test_rb, &seq, &r) {
  224. * if (info.seq != seq)
  225. * pr_warn("lost %llu records\n", info.seq - seq);
  226. *
  227. * if (info.text_len > r.text_buf_size) {
  228. * pr_warn("record %llu text truncated\n", info.seq);
  229. * text_buf[r.text_buf_size - 1] = 0;
  230. * }
  231. *
  232. * pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
  233. * &text_buf[0]);
  234. * }
  235. *
  236. * Note that additional less convenient reader functions are available to
  237. * allow complex record access.
  238. *
  239. * ABA Issues
  240. * ~~~~~~~~~~
  241. * To help avoid ABA issues, descriptors are referenced by IDs (array index
  242. * values combined with tagged bits counting array wraps) and data blocks are
  243. * referenced by logical positions (array index values combined with tagged
  244. * bits counting array wraps). However, on 32-bit systems the number of
  245. * tagged bits is relatively small such that an ABA incident is (at least
  246. * theoretically) possible. For example, if 4 million maximally sized (1KiB)
  247. * printk messages were to occur in NMI context on a 32-bit system, the
  248. * interrupted context would not be able to recognize that the 32-bit integer
  249. * completely wrapped and thus represents a different data block than the one
  250. * the interrupted context expects.
  251. *
  252. * To help combat this possibility, additional state checking is performed
  253. * (such as using cmpxchg() even though set() would suffice). These extra
  254. * checks are commented as such and will hopefully catch any ABA issue that
  255. * a 32-bit system might experience.
  256. *
  257. * Memory Barriers
  258. * ~~~~~~~~~~~~~~~
  259. * Multiple memory barriers are used. To simplify proving correctness and
  260. * generating litmus tests, lines of code related to memory barriers
  261. * (loads, stores, and the associated memory barriers) are labeled::
  262. *
  263. * LMM(function:letter)
  264. *
  265. * Comments reference the labels using only the "function:letter" part.
  266. *
  267. * The memory barrier pairs and their ordering are:
  268. *
  269. * desc_reserve:D / desc_reserve:B
  270. * push descriptor tail (id), then push descriptor head (id)
  271. *
  272. * desc_reserve:D / data_push_tail:B
  273. * push data tail (lpos), then set new descriptor reserved (state)
  274. *
  275. * desc_reserve:D / desc_push_tail:C
  276. * push descriptor tail (id), then set new descriptor reserved (state)
  277. *
  278. * desc_reserve:D / prb_first_seq:C
  279. * push descriptor tail (id), then set new descriptor reserved (state)
  280. *
  281. * desc_reserve:F / desc_read:D
  282. * set new descriptor id and reserved (state), then allow writer changes
  283. *
  284. * data_alloc:A (or data_realloc:A) / desc_read:D
  285. * set old descriptor reusable (state), then modify new data block area
  286. *
  287. * data_alloc:A (or data_realloc:A) / data_push_tail:B
  288. * push data tail (lpos), then modify new data block area
  289. *
  290. * _prb_commit:B / desc_read:B
  291. * store writer changes, then set new descriptor committed (state)
  292. *
  293. * desc_reopen_last:A / _prb_commit:B
  294. * set descriptor reserved (state), then read descriptor data
  295. *
  296. * _prb_commit:B / desc_reserve:D
  297. * set new descriptor committed (state), then check descriptor head (id)
  298. *
  299. * data_push_tail:D / data_push_tail:A
  300. * set descriptor reusable (state), then push data tail (lpos)
  301. *
  302. * desc_push_tail:B / desc_reserve:D
  303. * set descriptor reusable (state), then push descriptor tail (id)
  304. */
  305. #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
  306. #define DATA_SIZE_MASK(data_ring) (DATA_SIZE(data_ring) - 1)
  307. #define DESCS_COUNT(desc_ring) _DESCS_COUNT((desc_ring)->count_bits)
  308. #define DESCS_COUNT_MASK(desc_ring) (DESCS_COUNT(desc_ring) - 1)
  309. /* Determine the data array index from a logical position. */
  310. #define DATA_INDEX(data_ring, lpos) ((lpos) & DATA_SIZE_MASK(data_ring))
  311. /* Determine the desc array index from an ID or sequence number. */
  312. #define DESC_INDEX(desc_ring, n) ((n) & DESCS_COUNT_MASK(desc_ring))
  313. /* Determine how many times the data array has wrapped. */
  314. #define DATA_WRAPS(data_ring, lpos) ((lpos) >> (data_ring)->size_bits)
  315. /* Determine if a logical position refers to a data-less block. */
  316. #define LPOS_DATALESS(lpos) ((lpos) & 1UL)
  317. #define BLK_DATALESS(blk) (LPOS_DATALESS((blk)->begin) && \
  318. LPOS_DATALESS((blk)->next))
  319. /* Get the logical position at index 0 of the current wrap. */
  320. #define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
  321. ((lpos) & ~DATA_SIZE_MASK(data_ring))
  322. /* Get the ID for the same index of the previous wrap as the given ID. */
  323. #define DESC_ID_PREV_WRAP(desc_ring, id) \
  324. DESC_ID((id) - DESCS_COUNT(desc_ring))
  325. /*
  326. * A data block: mapped directly to the beginning of the data block area
  327. * specified as a logical position within the data ring.
  328. *
  329. * @id: the ID of the associated descriptor
  330. * @data: the writer data
  331. *
  332. * Note that the size of a data block is only known by its associated
  333. * descriptor.
  334. */
  335. struct prb_data_block {
  336. unsigned long id;
  337. char data[];
  338. };
  339. /*
  340. * Return the descriptor associated with @n. @n can be either a
  341. * descriptor ID or a sequence number.
  342. */
  343. static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
  344. {
  345. return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
  346. }
  347. /*
  348. * Return the printk_info associated with @n. @n can be either a
  349. * descriptor ID or a sequence number.
  350. */
  351. static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
  352. {
  353. return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
  354. }
  355. static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
  356. unsigned long begin_lpos)
  357. {
  358. return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
  359. }
  360. /*
  361. * Increase the data size to account for data block meta data plus any
  362. * padding so that the adjacent data block is aligned on the ID size.
  363. */
  364. static unsigned int to_blk_size(unsigned int size)
  365. {
  366. struct prb_data_block *db = NULL;
  367. size += sizeof(*db);
  368. size = ALIGN(size, sizeof(db->id));
  369. return size;
  370. }
  371. /*
  372. * Sanity checker for reserve size. The ringbuffer code assumes that a data
  373. * block does not exceed the maximum possible size that could fit within the
  374. * ringbuffer. This function provides that basic size check so that the
  375. * assumption is safe.
  376. */
  377. static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
  378. {
  379. struct prb_data_block *db = NULL;
  380. if (size == 0)
  381. return true;
  382. /*
  383. * Ensure the alignment padded size could possibly fit in the data
  384. * array. The largest possible data block must still leave room for
  385. * at least the ID of the next block.
  386. */
  387. size = to_blk_size(size);
  388. if (size > DATA_SIZE(data_ring) - sizeof(db->id))
  389. return false;
  390. return true;
  391. }
  392. /* Query the state of a descriptor. */
  393. static enum desc_state get_desc_state(unsigned long id,
  394. unsigned long state_val)
  395. {
  396. if (id != DESC_ID(state_val))
  397. return desc_miss;
  398. return DESC_STATE(state_val);
  399. }
  400. /*
  401. * Get a copy of a specified descriptor and return its queried state. If the
  402. * descriptor is in an inconsistent state (miss or reserved), the caller can
  403. * only expect the descriptor's @state_var field to be valid.
  404. *
  405. * The sequence number and caller_id can be optionally retrieved. Like all
  406. * non-state_var data, they are only valid if the descriptor is in a
  407. * consistent state.
  408. */
  409. static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
  410. unsigned long id, struct prb_desc *desc_out,
  411. u64 *seq_out, u32 *caller_id_out)
  412. {
  413. struct printk_info *info = to_info(desc_ring, id);
  414. struct prb_desc *desc = to_desc(desc_ring, id);
  415. atomic_long_t *state_var = &desc->state_var;
  416. enum desc_state d_state;
  417. unsigned long state_val;
  418. /* Check the descriptor state. */
  419. state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
  420. d_state = get_desc_state(id, state_val);
  421. if (d_state == desc_miss || d_state == desc_reserved) {
  422. /*
  423. * The descriptor is in an inconsistent state. Set at least
  424. * @state_var so that the caller can see the details of
  425. * the inconsistent state.
  426. */
  427. goto out;
  428. }
  429. /*
  430. * Guarantee the state is loaded before copying the descriptor
  431. * content. This avoids copying obsolete descriptor content that might
  432. * not apply to the descriptor state. This pairs with _prb_commit:B.
  433. *
  434. * Memory barrier involvement:
  435. *
  436. * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
  437. * from _prb_commit:A.
  438. *
  439. * Relies on:
  440. *
  441. * WMB from _prb_commit:A to _prb_commit:B
  442. * matching
  443. * RMB from desc_read:A to desc_read:C
  444. */
  445. smp_rmb(); /* LMM(desc_read:B) */
  446. /*
  447. * Copy the descriptor data. The data is not valid until the
  448. * state has been re-checked. A memcpy() for all of @desc
  449. * cannot be used because of the atomic_t @state_var field.
  450. */
  451. if (desc_out) {
  452. memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
  453. sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
  454. }
  455. if (seq_out)
  456. *seq_out = info->seq; /* also part of desc_read:C */
  457. if (caller_id_out)
  458. *caller_id_out = info->caller_id; /* also part of desc_read:C */
  459. /*
  460. * 1. Guarantee the descriptor content is loaded before re-checking
  461. * the state. This avoids reading an obsolete descriptor state
  462. * that may not apply to the copied content. This pairs with
  463. * desc_reserve:F.
  464. *
  465. * Memory barrier involvement:
  466. *
  467. * If desc_read:C reads from desc_reserve:G, then desc_read:E
  468. * reads from desc_reserve:F.
  469. *
  470. * Relies on:
  471. *
  472. * WMB from desc_reserve:F to desc_reserve:G
  473. * matching
  474. * RMB from desc_read:C to desc_read:E
  475. *
  476. * 2. Guarantee the record data is loaded before re-checking the
  477. * state. This avoids reading an obsolete descriptor state that may
  478. * not apply to the copied data. This pairs with data_alloc:A and
  479. * data_realloc:A.
  480. *
  481. * Memory barrier involvement:
  482. *
  483. * If copy_data:A reads from data_alloc:B, then desc_read:E
  484. * reads from desc_make_reusable:A.
  485. *
  486. * Relies on:
  487. *
  488. * MB from desc_make_reusable:A to data_alloc:B
  489. * matching
  490. * RMB from desc_read:C to desc_read:E
  491. *
  492. * Note: desc_make_reusable:A and data_alloc:B can be different
  493. * CPUs. However, the data_alloc:B CPU (which performs the
  494. * full memory barrier) must have previously seen
  495. * desc_make_reusable:A.
  496. */
  497. smp_rmb(); /* LMM(desc_read:D) */
  498. /*
  499. * The data has been copied. Return the current descriptor state,
  500. * which may have changed since the load above.
  501. */
  502. state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
  503. d_state = get_desc_state(id, state_val);
  504. out:
  505. if (desc_out)
  506. atomic_long_set(&desc_out->state_var, state_val);
  507. return d_state;
  508. }
  509. /*
  510. * Take a specified descriptor out of the finalized state by attempting
  511. * the transition from finalized to reusable. Either this context or some
  512. * other context will have been successful.
  513. */
  514. static void desc_make_reusable(struct prb_desc_ring *desc_ring,
  515. unsigned long id)
  516. {
  517. unsigned long val_finalized = DESC_SV(id, desc_finalized);
  518. unsigned long val_reusable = DESC_SV(id, desc_reusable);
  519. struct prb_desc *desc = to_desc(desc_ring, id);
  520. atomic_long_t *state_var = &desc->state_var;
  521. atomic_long_cmpxchg_relaxed(state_var, val_finalized,
  522. val_reusable); /* LMM(desc_make_reusable:A) */
  523. }
  524. /*
  525. * Given the text data ring, put the associated descriptor of each
  526. * data block from @lpos_begin until @lpos_end into the reusable state.
  527. *
  528. * If there is any problem making the associated descriptor reusable, either
  529. * the descriptor has not yet been finalized or another writer context has
  530. * already pushed the tail lpos past the problematic data block. Regardless,
  531. * on error the caller can re-load the tail lpos to determine the situation.
  532. */
  533. static bool data_make_reusable(struct printk_ringbuffer *rb,
  534. struct prb_data_ring *data_ring,
  535. unsigned long lpos_begin,
  536. unsigned long lpos_end,
  537. unsigned long *lpos_out)
  538. {
  539. struct prb_desc_ring *desc_ring = &rb->desc_ring;
  540. struct prb_data_block *blk;
  541. enum desc_state d_state;
  542. struct prb_desc desc;
  543. struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
  544. unsigned long id;
  545. /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
  546. while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
  547. blk = to_block(data_ring, lpos_begin);
  548. /*
  549. * Load the block ID from the data block. This is a data race
  550. * against a writer that may have newly reserved this data
  551. * area. If the loaded value matches a valid descriptor ID,
  552. * the blk_lpos of that descriptor will be checked to make
  553. * sure it points back to this data block. If the check fails,
  554. * the data area has been recycled by another writer.
  555. */
  556. id = blk->id; /* LMM(data_make_reusable:A) */
  557. d_state = desc_read(desc_ring, id, &desc,
  558. NULL, NULL); /* LMM(data_make_reusable:B) */
  559. switch (d_state) {
  560. case desc_miss:
  561. case desc_reserved:
  562. case desc_committed:
  563. return false;
  564. case desc_finalized:
  565. /*
  566. * This data block is invalid if the descriptor
  567. * does not point back to it.
  568. */
  569. if (blk_lpos->begin != lpos_begin)
  570. return false;
  571. desc_make_reusable(desc_ring, id);
  572. break;
  573. case desc_reusable:
  574. /*
  575. * This data block is invalid if the descriptor
  576. * does not point back to it.
  577. */
  578. if (blk_lpos->begin != lpos_begin)
  579. return false;
  580. break;
  581. }
  582. /* Advance @lpos_begin to the next data block. */
  583. lpos_begin = blk_lpos->next;
  584. }
  585. *lpos_out = lpos_begin;
  586. return true;
  587. }
  588. /*
  589. * Advance the data ring tail to at least @lpos. This function puts
  590. * descriptors into the reusable state if the tail is pushed beyond
  591. * their associated data block.
  592. */
  593. static bool data_push_tail(struct printk_ringbuffer *rb,
  594. struct prb_data_ring *data_ring,
  595. unsigned long lpos)
  596. {
  597. unsigned long tail_lpos_new;
  598. unsigned long tail_lpos;
  599. unsigned long next_lpos;
  600. /* If @lpos is from a data-less block, there is nothing to do. */
  601. if (LPOS_DATALESS(lpos))
  602. return true;
  603. /*
  604. * Any descriptor states that have transitioned to reusable due to the
  605. * data tail being pushed to this loaded value will be visible to this
  606. * CPU. This pairs with data_push_tail:D.
  607. *
  608. * Memory barrier involvement:
  609. *
  610. * If data_push_tail:A reads from data_push_tail:D, then this CPU can
  611. * see desc_make_reusable:A.
  612. *
  613. * Relies on:
  614. *
  615. * MB from desc_make_reusable:A to data_push_tail:D
  616. * matches
  617. * READFROM from data_push_tail:D to data_push_tail:A
  618. * thus
  619. * READFROM from desc_make_reusable:A to this CPU
  620. */
  621. tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
  622. /*
  623. * Loop until the tail lpos is at or beyond @lpos. This condition
  624. * may already be satisfied, resulting in no full memory barrier
  625. * from data_push_tail:D being performed. However, since this CPU
  626. * sees the new tail lpos, any descriptor states that transitioned to
  627. * the reusable state must already be visible.
  628. */
  629. while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
  630. /*
  631. * Make all descriptors reusable that are associated with
  632. * data blocks before @lpos.
  633. */
  634. if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,
  635. &next_lpos)) {
  636. /*
  637. * 1. Guarantee the block ID loaded in
  638. * data_make_reusable() is performed before
  639. * reloading the tail lpos. The failed
  640. * data_make_reusable() may be due to a newly
  641. * recycled data area causing the tail lpos to
  642. * have been previously pushed. This pairs with
  643. * data_alloc:A and data_realloc:A.
  644. *
  645. * Memory barrier involvement:
  646. *
  647. * If data_make_reusable:A reads from data_alloc:B,
  648. * then data_push_tail:C reads from
  649. * data_push_tail:D.
  650. *
  651. * Relies on:
  652. *
  653. * MB from data_push_tail:D to data_alloc:B
  654. * matching
  655. * RMB from data_make_reusable:A to
  656. * data_push_tail:C
  657. *
  658. * Note: data_push_tail:D and data_alloc:B can be
  659. * different CPUs. However, the data_alloc:B
  660. * CPU (which performs the full memory
  661. * barrier) must have previously seen
  662. * data_push_tail:D.
  663. *
  664. * 2. Guarantee the descriptor state loaded in
  665. * data_make_reusable() is performed before
  666. * reloading the tail lpos. The failed
  667. * data_make_reusable() may be due to a newly
  668. * recycled descriptor causing the tail lpos to
  669. * have been previously pushed. This pairs with
  670. * desc_reserve:D.
  671. *
  672. * Memory barrier involvement:
  673. *
  674. * If data_make_reusable:B reads from
  675. * desc_reserve:F, then data_push_tail:C reads
  676. * from data_push_tail:D.
  677. *
  678. * Relies on:
  679. *
  680. * MB from data_push_tail:D to desc_reserve:F
  681. * matching
  682. * RMB from data_make_reusable:B to
  683. * data_push_tail:C
  684. *
  685. * Note: data_push_tail:D and desc_reserve:F can
  686. * be different CPUs. However, the
  687. * desc_reserve:F CPU (which performs the
  688. * full memory barrier) must have previously
  689. * seen data_push_tail:D.
  690. */
  691. smp_rmb(); /* LMM(data_push_tail:B) */
  692. tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
  693. ); /* LMM(data_push_tail:C) */
  694. if (tail_lpos_new == tail_lpos)
  695. return false;
  696. /* Another CPU pushed the tail. Try again. */
  697. tail_lpos = tail_lpos_new;
  698. continue;
  699. }
  700. /*
  701. * Guarantee any descriptor states that have transitioned to
  702. * reusable are stored before pushing the tail lpos. A full
  703. * memory barrier is needed since other CPUs may have made
  704. * the descriptor states reusable. This pairs with
  705. * data_push_tail:A.
  706. */
  707. if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
  708. next_lpos)) { /* LMM(data_push_tail:D) */
  709. break;
  710. }
  711. }
  712. return true;
  713. }
  714. /*
  715. * Advance the desc ring tail. This function advances the tail by one
  716. * descriptor, thus invalidating the oldest descriptor. Before advancing
  717. * the tail, the tail descriptor is made reusable and all data blocks up to
  718. * and including the descriptor's data block are invalidated (i.e. the data
  719. * ring tail is pushed past the data block of the descriptor being made
  720. * reusable).
  721. */
  722. static bool desc_push_tail(struct printk_ringbuffer *rb,
  723. unsigned long tail_id)
  724. {
  725. struct prb_desc_ring *desc_ring = &rb->desc_ring;
  726. enum desc_state d_state;
  727. struct prb_desc desc;
  728. d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
  729. switch (d_state) {
  730. case desc_miss:
  731. /*
  732. * If the ID is exactly 1 wrap behind the expected, it is
  733. * in the process of being reserved by another writer and
  734. * must be considered reserved.
  735. */
  736. if (DESC_ID(atomic_long_read(&desc.state_var)) ==
  737. DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
  738. return false;
  739. }
  740. /*
  741. * The ID has changed. Another writer must have pushed the
  742. * tail and recycled the descriptor already. Success is
  743. * returned because the caller is only interested in the
  744. * specified tail being pushed, which it was.
  745. */
  746. return true;
  747. case desc_reserved:
  748. case desc_committed:
  749. return false;
  750. case desc_finalized:
  751. desc_make_reusable(desc_ring, tail_id);
  752. break;
  753. case desc_reusable:
  754. break;
  755. }
  756. /*
  757. * Data blocks must be invalidated before their associated
  758. * descriptor can be made available for recycling. Invalidating
  759. * them later is not possible because there is no way to trust
  760. * data blocks once their associated descriptor is gone.
  761. */
  762. if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))
  763. return false;
  764. /*
  765. * Check the next descriptor after @tail_id before pushing the tail
  766. * to it because the tail must always be in a finalized or reusable
  767. * state. The implementation of prb_first_seq() relies on this.
  768. *
  769. * A successful read implies that the next descriptor is less than or
  770. * equal to @head_id so there is no risk of pushing the tail past the
  771. * head.
  772. */
  773. d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
  774. NULL, NULL); /* LMM(desc_push_tail:A) */
  775. if (d_state == desc_finalized || d_state == desc_reusable) {
  776. /*
  777. * Guarantee any descriptor states that have transitioned to
  778. * reusable are stored before pushing the tail ID. This allows
  779. * verifying the recycled descriptor state. A full memory
  780. * barrier is needed since other CPUs may have made the
  781. * descriptor states reusable. This pairs with desc_reserve:D.
  782. */
  783. atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
  784. DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
  785. } else {
  786. /*
  787. * Guarantee the last state load from desc_read() is before
  788. * reloading @tail_id in order to see a new tail ID in the
  789. * case that the descriptor has been recycled. This pairs
  790. * with desc_reserve:D.
  791. *
  792. * Memory barrier involvement:
  793. *
  794. * If desc_push_tail:A reads from desc_reserve:F, then
  795. * desc_push_tail:D reads from desc_push_tail:B.
  796. *
  797. * Relies on:
  798. *
  799. * MB from desc_push_tail:B to desc_reserve:F
  800. * matching
  801. * RMB from desc_push_tail:A to desc_push_tail:D
  802. *
  803. * Note: desc_push_tail:B and desc_reserve:F can be different
  804. * CPUs. However, the desc_reserve:F CPU (which performs
  805. * the full memory barrier) must have previously seen
  806. * desc_push_tail:B.
  807. */
  808. smp_rmb(); /* LMM(desc_push_tail:C) */
  809. /*
  810. * Re-check the tail ID. The descriptor following @tail_id is
  811. * not in an allowed tail state. But if the tail has since
  812. * been moved by another CPU, then it does not matter.
  813. */
  814. if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
  815. return false;
  816. }
  817. return true;
  818. }
  819. /* Reserve a new descriptor, invalidating the oldest if necessary. */
  820. static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
  821. {
  822. struct prb_desc_ring *desc_ring = &rb->desc_ring;
  823. unsigned long prev_state_val;
  824. unsigned long id_prev_wrap;
  825. struct prb_desc *desc;
  826. unsigned long head_id;
  827. unsigned long id;
  828. head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
  829. do {
  830. id = DESC_ID(head_id + 1);
  831. id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
  832. /*
  833. * Guarantee the head ID is read before reading the tail ID.
  834. * Since the tail ID is updated before the head ID, this
  835. * guarantees that @id_prev_wrap is never ahead of the tail
  836. * ID. This pairs with desc_reserve:D.
  837. *
  838. * Memory barrier involvement:
  839. *
  840. * If desc_reserve:A reads from desc_reserve:D, then
  841. * desc_reserve:C reads from desc_push_tail:B.
  842. *
  843. * Relies on:
  844. *
  845. * MB from desc_push_tail:B to desc_reserve:D
  846. * matching
  847. * RMB from desc_reserve:A to desc_reserve:C
  848. *
  849. * Note: desc_push_tail:B and desc_reserve:D can be different
  850. * CPUs. However, the desc_reserve:D CPU (which performs
  851. * the full memory barrier) must have previously seen
  852. * desc_push_tail:B.
  853. */
  854. smp_rmb(); /* LMM(desc_reserve:B) */
  855. if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
  856. )) { /* LMM(desc_reserve:C) */
  857. /*
  858. * Make space for the new descriptor by
  859. * advancing the tail.
  860. */
  861. if (!desc_push_tail(rb, id_prev_wrap))
  862. return false;
  863. }
  864. /*
  865. * 1. Guarantee the tail ID is read before validating the
  866. * recycled descriptor state. A read memory barrier is
  867. * sufficient for this. This pairs with desc_push_tail:B.
  868. *
  869. * Memory barrier involvement:
  870. *
  871. * If desc_reserve:C reads from desc_push_tail:B, then
  872. * desc_reserve:E reads from desc_make_reusable:A.
  873. *
  874. * Relies on:
  875. *
  876. * MB from desc_make_reusable:A to desc_push_tail:B
  877. * matching
  878. * RMB from desc_reserve:C to desc_reserve:E
  879. *
  880. * Note: desc_make_reusable:A and desc_push_tail:B can be
  881. * different CPUs. However, the desc_push_tail:B CPU
  882. * (which performs the full memory barrier) must have
  883. * previously seen desc_make_reusable:A.
  884. *
  885. * 2. Guarantee the tail ID is stored before storing the head
  886. * ID. This pairs with desc_reserve:B.
  887. *
  888. * 3. Guarantee any data ring tail changes are stored before
  889. * recycling the descriptor. Data ring tail changes can
  890. * happen via desc_push_tail()->data_push_tail(). A full
  891. * memory barrier is needed since another CPU may have
  892. * pushed the data ring tails. This pairs with
  893. * data_push_tail:B.
  894. *
  895. * 4. Guarantee a new tail ID is stored before recycling the
  896. * descriptor. A full memory barrier is needed since
  897. * another CPU may have pushed the tail ID. This pairs
  898. * with desc_push_tail:C and this also pairs with
  899. * prb_first_seq:C.
  900. *
  901. * 5. Guarantee the head ID is stored before trying to
  902. * finalize the previous descriptor. This pairs with
  903. * _prb_commit:B.
  904. */
  905. } while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
  906. id)); /* LMM(desc_reserve:D) */
  907. desc = to_desc(desc_ring, id);
  908. /*
  909. * If the descriptor has been recycled, verify the old state val.
  910. * See "ABA Issues" about why this verification is performed.
  911. */
  912. prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
  913. if (prev_state_val &&
  914. get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
  915. WARN_ON_ONCE(1);
  916. return false;
  917. }
  918. /*
  919. * Assign the descriptor a new ID and set its state to reserved.
  920. * See "ABA Issues" about why cmpxchg() instead of set() is used.
  921. *
  922. * Guarantee the new descriptor ID and state is stored before making
  923. * any other changes. A write memory barrier is sufficient for this.
  924. * This pairs with desc_read:D.
  925. */
  926. if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
  927. DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
  928. WARN_ON_ONCE(1);
  929. return false;
  930. }
  931. /* Now data in @desc can be modified: LMM(desc_reserve:G) */
  932. *id_out = id;
  933. return true;
  934. }
  935. /* Determine the end of a data block. */
  936. static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
  937. unsigned long lpos, unsigned int size)
  938. {
  939. unsigned long begin_lpos;
  940. unsigned long next_lpos;
  941. begin_lpos = lpos;
  942. next_lpos = lpos + size;
  943. /* First check if the data block does not wrap. */
  944. if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
  945. return next_lpos;
  946. /* Wrapping data blocks store their data at the beginning. */
  947. return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
  948. }
  949. /*
  950. * Allocate a new data block, invalidating the oldest data block(s)
  951. * if necessary. This function also associates the data block with
  952. * a specified descriptor.
  953. */
  954. static char *data_alloc(struct printk_ringbuffer *rb,
  955. struct prb_data_ring *data_ring, unsigned int size,
  956. struct prb_data_blk_lpos *blk_lpos, unsigned long id)
  957. {
  958. struct prb_data_block *blk;
  959. unsigned long begin_lpos;
  960. unsigned long next_lpos;
  961. if (size == 0) {
  962. /* Specify a data-less block. */
  963. blk_lpos->begin = NO_LPOS;
  964. blk_lpos->next = NO_LPOS;
  965. return NULL;
  966. }
  967. size = to_blk_size(size);
  968. begin_lpos = atomic_long_read(&data_ring->head_lpos);
  969. do {
  970. next_lpos = get_next_lpos(data_ring, begin_lpos, size);
  971. if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring))) {
  972. /* Failed to allocate, specify a data-less block. */
  973. blk_lpos->begin = FAILED_LPOS;
  974. blk_lpos->next = FAILED_LPOS;
  975. return NULL;
  976. }
  977. /*
  978. * 1. Guarantee any descriptor states that have transitioned
  979. * to reusable are stored before modifying the newly
  980. * allocated data area. A full memory barrier is needed
  981. * since other CPUs may have made the descriptor states
  982. * reusable. See data_push_tail:A about why the reusable
  983. * states are visible. This pairs with desc_read:D.
  984. *
  985. * 2. Guarantee any updated tail lpos is stored before
  986. * modifying the newly allocated data area. Another CPU may
  987. * be in data_make_reusable() and is reading a block ID
  988. * from this area. data_make_reusable() can handle reading
  989. * a garbage block ID value, but then it must be able to
  990. * load a new tail lpos. A full memory barrier is needed
  991. * since other CPUs may have updated the tail lpos. This
  992. * pairs with data_push_tail:B.
  993. */
  994. } while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
  995. next_lpos)); /* LMM(data_alloc:A) */
  996. blk = to_block(data_ring, begin_lpos);
  997. blk->id = id; /* LMM(data_alloc:B) */
  998. if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
  999. /* Wrapping data blocks store their data at the beginning. */
  1000. blk = to_block(data_ring, 0);
  1001. /*
  1002. * Store the ID on the wrapped block for consistency.
  1003. * The printk_ringbuffer does not actually use it.
  1004. */
  1005. blk->id = id;
  1006. }
  1007. blk_lpos->begin = begin_lpos;
  1008. blk_lpos->next = next_lpos;
  1009. return &blk->data[0];
  1010. }
  1011. /*
  1012. * Try to resize an existing data block associated with the descriptor
  1013. * specified by @id. If the resized data block should become wrapped, it
  1014. * copies the old data to the new data block. If @size yields a data block
  1015. * with the same or less size, the data block is left as is.
  1016. *
  1017. * Fail if this is not the last allocated data block or if there is not
  1018. * enough space or it is not possible make enough space.
  1019. *
  1020. * Return a pointer to the beginning of the entire data buffer or NULL on
  1021. * failure.
  1022. */
  1023. static char *data_realloc(struct printk_ringbuffer *rb,
  1024. struct prb_data_ring *data_ring, unsigned int size,
  1025. struct prb_data_blk_lpos *blk_lpos, unsigned long id)
  1026. {
  1027. struct prb_data_block *blk;
  1028. unsigned long head_lpos;
  1029. unsigned long next_lpos;
  1030. bool wrapped;
  1031. /* Reallocation only works if @blk_lpos is the newest data block. */
  1032. head_lpos = atomic_long_read(&data_ring->head_lpos);
  1033. if (head_lpos != blk_lpos->next)
  1034. return NULL;
  1035. /* Keep track if @blk_lpos was a wrapping data block. */
  1036. wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
  1037. size = to_blk_size(size);
  1038. next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
  1039. /* If the data block does not increase, there is nothing to do. */
  1040. if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
  1041. if (wrapped)
  1042. blk = to_block(data_ring, 0);
  1043. else
  1044. blk = to_block(data_ring, blk_lpos->begin);
  1045. return &blk->data[0];
  1046. }
  1047. if (!data_push_tail(rb, data_ring, next_lpos - DATA_SIZE(data_ring)))
  1048. return NULL;
  1049. /* The memory barrier involvement is the same as data_alloc:A. */
  1050. if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
  1051. next_lpos)) { /* LMM(data_realloc:A) */
  1052. return NULL;
  1053. }
  1054. blk = to_block(data_ring, blk_lpos->begin);
  1055. if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
  1056. struct prb_data_block *old_blk = blk;
  1057. /* Wrapping data blocks store their data at the beginning. */
  1058. blk = to_block(data_ring, 0);
  1059. /*
  1060. * Store the ID on the wrapped block for consistency.
  1061. * The printk_ringbuffer does not actually use it.
  1062. */
  1063. blk->id = id;
  1064. if (!wrapped) {
  1065. /*
  1066. * Since the allocated space is now in the newly
  1067. * created wrapping data block, copy the content
  1068. * from the old data block.
  1069. */
  1070. memcpy(&blk->data[0], &old_blk->data[0],
  1071. (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
  1072. }
  1073. }
  1074. blk_lpos->next = next_lpos;
  1075. return &blk->data[0];
  1076. }
  1077. /* Return the number of bytes used by a data block. */
  1078. static unsigned int space_used(struct prb_data_ring *data_ring,
  1079. struct prb_data_blk_lpos *blk_lpos)
  1080. {
  1081. /* Data-less blocks take no space. */
  1082. if (BLK_DATALESS(blk_lpos))
  1083. return 0;
  1084. if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
  1085. /* Data block does not wrap. */
  1086. return (DATA_INDEX(data_ring, blk_lpos->next) -
  1087. DATA_INDEX(data_ring, blk_lpos->begin));
  1088. }
  1089. /*
  1090. * For wrapping data blocks, the trailing (wasted) space is
  1091. * also counted.
  1092. */
  1093. return (DATA_INDEX(data_ring, blk_lpos->next) +
  1094. DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
  1095. }
  1096. /*
  1097. * Given @blk_lpos, return a pointer to the writer data from the data block
  1098. * and calculate the size of the data part. A NULL pointer is returned if
  1099. * @blk_lpos specifies values that could never be legal.
  1100. *
  1101. * This function (used by readers) performs strict validation on the lpos
  1102. * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
  1103. * triggered if an internal error is detected.
  1104. */
  1105. static const char *get_data(struct prb_data_ring *data_ring,
  1106. struct prb_data_blk_lpos *blk_lpos,
  1107. unsigned int *data_size)
  1108. {
  1109. struct prb_data_block *db;
  1110. /* Data-less data block description. */
  1111. if (BLK_DATALESS(blk_lpos)) {
  1112. if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
  1113. *data_size = 0;
  1114. return "";
  1115. }
  1116. return NULL;
  1117. }
  1118. /* Regular data block: @begin less than @next and in same wrap. */
  1119. if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
  1120. blk_lpos->begin < blk_lpos->next) {
  1121. db = to_block(data_ring, blk_lpos->begin);
  1122. *data_size = blk_lpos->next - blk_lpos->begin;
  1123. /* Wrapping data block: @begin is one wrap behind @next. */
  1124. } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
  1125. DATA_WRAPS(data_ring, blk_lpos->next)) {
  1126. db = to_block(data_ring, 0);
  1127. *data_size = DATA_INDEX(data_ring, blk_lpos->next);
  1128. /* Illegal block description. */
  1129. } else {
  1130. WARN_ON_ONCE(1);
  1131. return NULL;
  1132. }
  1133. /* A valid data block will always be aligned to the ID size. */
  1134. if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
  1135. WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
  1136. return NULL;
  1137. }
  1138. /* A valid data block will always have at least an ID. */
  1139. if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
  1140. return NULL;
  1141. /* Subtract block ID space from size to reflect data size. */
  1142. *data_size -= sizeof(db->id);
  1143. return &db->data[0];
  1144. }
  1145. /*
  1146. * Attempt to transition the newest descriptor from committed back to reserved
  1147. * so that the record can be modified by a writer again. This is only possible
  1148. * if the descriptor is not yet finalized and the provided @caller_id matches.
  1149. */
  1150. static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
  1151. u32 caller_id, unsigned long *id_out)
  1152. {
  1153. unsigned long prev_state_val;
  1154. enum desc_state d_state;
  1155. struct prb_desc desc;
  1156. struct prb_desc *d;
  1157. unsigned long id;
  1158. u32 cid;
  1159. id = atomic_long_read(&desc_ring->head_id);
  1160. /*
  1161. * To reduce unnecessarily reopening, first check if the descriptor
  1162. * state and caller ID are correct.
  1163. */
  1164. d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
  1165. if (d_state != desc_committed || cid != caller_id)
  1166. return NULL;
  1167. d = to_desc(desc_ring, id);
  1168. prev_state_val = DESC_SV(id, desc_committed);
  1169. /*
  1170. * Guarantee the reserved state is stored before reading any
  1171. * record data. A full memory barrier is needed because @state_var
  1172. * modification is followed by reading. This pairs with _prb_commit:B.
  1173. *
  1174. * Memory barrier involvement:
  1175. *
  1176. * If desc_reopen_last:A reads from _prb_commit:B, then
  1177. * prb_reserve_in_last:A reads from _prb_commit:A.
  1178. *
  1179. * Relies on:
  1180. *
  1181. * WMB from _prb_commit:A to _prb_commit:B
  1182. * matching
  1183. * MB If desc_reopen_last:A to prb_reserve_in_last:A
  1184. */
  1185. if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
  1186. DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
  1187. return NULL;
  1188. }
  1189. *id_out = id;
  1190. return d;
  1191. }

/**
 * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
 *                         used by the newest record.
 *
 * @e:         The entry structure to setup.
 * @rb:        The ringbuffer to re-reserve and extend data in.
 * @r:         The record structure to allocate buffers for.
 * @caller_id: The caller ID of the caller (reserving writer).
 * @max_size:  Fail if the extended size would be greater than this.
 *
 * This is the public function available to writers to re-reserve and extend
 * data.
 *
 * The writer specifies the text size to extend (not the new total size) by
 * setting the @text_buf_size field of @r. To ensure proper initialization
 * of @r, prb_rec_init_wr() should be used.
 *
 * This function will fail if @caller_id does not match the caller ID of the
 * newest record. In that case the caller must reserve new data using
 * prb_reserve().
 *
 * Context: Any context. Disables local interrupts on success.
 * Return: true if text data could be extended, otherwise false.
 *
 * On success:
 *
 *   - @r->text_buf points to the beginning of the entire text buffer.
 *
 *   - @r->text_buf_size is set to the new total size of the buffer.
 *
 *   - @r->info is not touched so that @r->info->text_len could be used
 *     to append the text.
 *
 *   - prb_record_text_space() can be used on @e to query the new
 *     actually used space.
 *
 * Important: All @r->info fields will already be set with the current values
 *            for the record. I.e. @r->info->text_len will be less than
 *            @text_buf_size. Writers can use @r->info->text_len to know
 *            where concatenation begins and writers should update
 *            @r->info->text_len after concatenating.
 */
bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
			 struct printk_record *r, u32 caller_id, unsigned int max_size)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info;
	unsigned int data_size;
	struct prb_desc *d;
	unsigned long id;

	local_irq_save(e->irqflags);

	/* Transition the newest descriptor back to the reserved state. */
	d = desc_reopen_last(desc_ring, caller_id, &id);
	if (!d) {
		local_irq_restore(e->irqflags);
		goto fail_reopen;
	}

	/* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */

	info = to_info(desc_ring, id);

	/*
	 * Set the @e fields here so that prb_commit() can be used if
	 * anything fails from now on.
	 */
	e->rb = rb;
	e->id = id;

	/*
	 * desc_reopen_last() checked the caller_id, but there was no
	 * exclusive access at that point. The descriptor may have
	 * changed since then.
	 */
	if (caller_id != info->caller_id)
		goto fail;

	if (BLK_DATALESS(&d->text_blk_lpos)) {
		if (WARN_ON_ONCE(info->text_len != 0)) {
			pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
				     info->text_len);
			info->text_len = 0;
		}

		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
			goto fail;

		if (r->text_buf_size > max_size)
			goto fail;

		r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
					 &d->text_blk_lpos, id);
	} else {
		if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
			goto fail;

		/*
		 * Increase the buffer size to include the original size. If
		 * the meta data (@text_len) is not sane, use the full data
		 * block size.
		 */
		if (WARN_ON_ONCE(info->text_len > data_size)) {
			pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
				     info->text_len, data_size);
			info->text_len = data_size;
		}
		r->text_buf_size += info->text_len;

		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
			goto fail;

		if (r->text_buf_size > max_size)
			goto fail;

		r->text_buf = data_realloc(rb, &rb->text_data_ring, r->text_buf_size,
					   &d->text_blk_lpos, id);
	}
	if (r->text_buf_size && !r->text_buf)
		goto fail;

	r->info = info;

	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);

	return true;
fail:
	prb_commit(e);
	/* prb_commit() re-enabled interrupts. */
fail_reopen:
	/* Make it clear to the caller that the re-reserve failed. */
	memset(r, 0, sizeof(*r));
	return false;
}
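
/*
 * Illustrative sketch: how a writer might append " world" to its own newest
 * record. The helper name, caller ID and sizes are hypothetical; only the
 * public writer API above is assumed.
 */
static inline void prb_example_append(struct printk_ringbuffer *rb, u32 caller_id)
{
	struct prb_reserved_entry e;
	struct printk_record r;

	/* Request 6 additional bytes of text space (" world" + '\0'). */
	prb_rec_init_wr(&r, 7);

	/* Fails if the newest record belongs to another caller; use prb_reserve() then. */
	if (!prb_reserve_in_last(&e, rb, &r, caller_id, 100))
		return;

	/* @r.info->text_len marks where the existing text ends. */
	snprintf(&r.text_buf[r.info->text_len],
		 r.text_buf_size - r.info->text_len, " world");
	r.info->text_len += 6;

	prb_final_commit(&e);
}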

/*
 * Attempt to finalize a specified descriptor. If this fails, the descriptor
 * is either already final or it will finalize itself when the writer commits.
 */
static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
{
	unsigned long prev_state_val = DESC_SV(id, desc_committed);
	struct prb_desc *d = to_desc(desc_ring, id);

	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */

	/* Best effort to remember the last finalized @id. */
	atomic_long_set(&desc_ring->last_finalized_id, id);
}

/**
 * prb_reserve() - Reserve space in the ringbuffer.
 *
 * @e:  The entry structure to setup.
 * @rb: The ringbuffer to reserve data in.
 * @r:  The record structure to allocate buffers for.
 *
 * This is the public function available to writers to reserve data.
 *
 * The writer specifies the text size to reserve by setting the
 * @text_buf_size field of @r. To ensure proper initialization of @r,
 * prb_rec_init_wr() should be used.
 *
 * Context: Any context. Disables local interrupts on success.
 * Return: true if at least text data could be allocated, otherwise false.
 *
 * On success, the fields @info and @text_buf of @r will be set by this
 * function and should be filled in by the writer before committing. Also
 * on success, prb_record_text_space() can be used on @e to query the actual
 * space used for the text data block.
 *
 * Important: @info->text_len needs to be set correctly by the writer in
 *            order for data to be readable and/or extended. Its value
 *            is initialized to 0.
 */
bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
		 struct printk_record *r)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info;
	struct prb_desc *d;
	unsigned long id;
	u64 seq;

	if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
		goto fail;

	/*
	 * Descriptors in the reserved state act as blockers to all further
	 * reservations once the desc_ring has fully wrapped. Disable
	 * interrupts during the reserve/commit window in order to minimize
	 * the likelihood of this happening.
	 */
	local_irq_save(e->irqflags);

	if (!desc_reserve(rb, &id)) {
		/* Descriptor reservation failures are tracked. */
		atomic_long_inc(&rb->fail);
		local_irq_restore(e->irqflags);
		goto fail;
	}

	d = to_desc(desc_ring, id);
	info = to_info(desc_ring, id);

	/*
	 * All @info fields (except @seq) are cleared and must be filled in
	 * by the writer. Save @seq before clearing because it is used to
	 * determine the new sequence number.
	 */
	seq = info->seq;
	memset(info, 0, sizeof(*info));

	/*
	 * Set the @e fields here so that prb_commit() can be used if
	 * text data allocation fails.
	 */
	e->rb = rb;
	e->id = id;

	/*
	 * Initialize the sequence number if it has "never been set".
	 * Otherwise just increment it by a full wrap.
	 *
	 * @seq is considered "never been set" if it has a value of 0,
	 * _except_ for @infos[0], which was specially setup by the ringbuffer
	 * initializer and therefore is always considered as set.
	 *
	 * See the "Bootstrap" comment block in printk_ringbuffer.h for
	 * details about how the initializer bootstraps the descriptors.
	 */
	if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
		info->seq = DESC_INDEX(desc_ring, id);
	else
		info->seq = seq + DESCS_COUNT(desc_ring);

	/*
	 * New data is about to be reserved. Once that happens, previous
	 * descriptors are no longer able to be extended. Finalize the
	 * previous descriptor now so that it can be made available to
	 * readers. (For seq==0 there is no previous descriptor.)
	 */
	if (info->seq > 0)
		desc_make_final(desc_ring, DESC_ID(id - 1));

	r->text_buf = data_alloc(rb, &rb->text_data_ring, r->text_buf_size,
				 &d->text_blk_lpos, id);
	/* If text data allocation fails, a data-less record is committed. */
	if (r->text_buf_size && !r->text_buf) {
		prb_commit(e);
		/* prb_commit() re-enabled interrupts. */
		goto fail;
	}

	r->info = info;

	/* Record full text space used by record. */
	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);

	return true;
fail:
	/* Make it clear to the caller that the reserve failed. */
	memset(r, 0, sizeof(*r));
	return false;
}
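
/*
 * Illustrative sketch: a minimal writer that reserves, fills and commits one
 * record. The helper name and text size are hypothetical; only the public
 * writer API above is assumed.
 */
static inline void prb_example_write(struct printk_ringbuffer *rb)
{
	struct prb_reserved_entry e;
	struct printk_record r;

	/* Reserve space for "hello" plus the terminating '\0'. */
	prb_rec_init_wr(&r, 6);

	/* On failure the attempt is counted in @rb->fail. */
	if (!prb_reserve(&e, rb, &r))
		return;

	snprintf(&r.text_buf[0], r.text_buf_size, "hello");
	r.info->text_len = 5;

	/* Commit without finalizing so prb_reserve_in_last() could still extend it. */
	prb_commit(&e);
}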

/* Commit the data (possibly finalizing it) and restore interrupts. */
static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
	struct prb_desc *d = to_desc(desc_ring, e->id);
	unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);

	/* Now the writer has finished all writing: LMM(_prb_commit:A) */

	/*
	 * Set the descriptor as committed. See "ABA Issues" about why
	 * cmpxchg() instead of set() is used.
	 *
	 * 1. Guarantee all record data is stored before the descriptor state
	 *    is stored as committed. A write memory barrier is sufficient
	 *    for this. This pairs with desc_read:B and desc_reopen_last:A.
	 *
	 * 2. Guarantee the descriptor state is stored as committed before
	 *    re-checking the head ID in order to possibly finalize this
	 *    descriptor. This pairs with desc_reserve:D.
	 *
	 * Memory barrier involvement:
	 *
	 * If prb_commit:A reads from desc_reserve:D, then
	 * desc_make_final:A reads from _prb_commit:B.
	 *
	 * Relies on:
	 *
	 * MB _prb_commit:B to prb_commit:A
	 *    matching
	 * MB desc_reserve:D to desc_make_final:A
	 */
	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
			DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
		WARN_ON_ONCE(1);
	}

	/* Restore interrupts, the reserve/commit window is finished. */
	local_irq_restore(e->irqflags);
}

/**
 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
 *
 * @e: The entry containing the reserved data information.
 *
 * This is the public function available to writers to commit data.
 *
 * Note that the data is not yet available to readers until it is finalized.
 * Finalizing happens automatically when space for the next record is
 * reserved.
 *
 * See prb_final_commit() for a version of this function that finalizes
 * immediately.
 *
 * Context: Any context. Enables local interrupts.
 */
void prb_commit(struct prb_reserved_entry *e)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
	unsigned long head_id;

	_prb_commit(e, desc_committed);

	/*
	 * If this descriptor is no longer the head (i.e. a new record has
	 * been allocated), extending the data for this record is no longer
	 * allowed and therefore it must be finalized.
	 */
	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
	if (head_id != e->id)
		desc_make_final(desc_ring, e->id);
}

/**
 * prb_final_commit() - Commit and finalize (previously reserved) data to
 *                      the ringbuffer.
 *
 * @e: The entry containing the reserved data information.
 *
 * This is the public function available to writers to commit+finalize data.
 *
 * By finalizing, the data is made immediately available to readers.
 *
 * This function should only be used if there are no intentions of extending
 * this data using prb_reserve_in_last().
 *
 * Context: Any context. Enables local interrupts.
 */
void prb_final_commit(struct prb_reserved_entry *e)
{
	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;

	_prb_commit(e, desc_finalized);

	/* Best effort to remember the last finalized @id. */
	atomic_long_set(&desc_ring->last_finalized_id, e->id);
}
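
/*
 * Illustrative sketch: choosing between the two commit variants. The helper
 * name and @may_extend flag are hypothetical; if the record will never be
 * extended, finalizing immediately makes it visible to readers right away.
 */
static inline void prb_example_commit(struct prb_reserved_entry *e, bool may_extend)
{
	if (may_extend)
		prb_commit(e);		/* Leave room for prb_reserve_in_last(). */
	else
		prb_final_commit(e);	/* Make the record readable immediately. */
}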

/*
 * Count the number of lines in provided text. All text has at least 1 line
 * (even if @text_size is 0). Each '\n' processed is counted as an additional
 * line.
 */
static unsigned int count_lines(const char *text, unsigned int text_size)
{
	unsigned int next_size = text_size;
	unsigned int line_count = 1;
	const char *next = text;

	while (next_size) {
		next = memchr(next, '\n', next_size);
		if (!next)
			break;
		line_count++;
		next++;
		next_size = text_size - (next - text);
	}

	return line_count;
}
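
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the text
 * itself counts as one line and each '\n' adds one, so a trailing newline
 * yields an additional empty line.
 */
static inline void count_lines_example(void)
{
	/* "a\nb\n" is 4 bytes: one base line plus two '\n' characters. */
	WARN_ON_ONCE(count_lines("a\nb\n", 4) != 3);

	/* Even empty text counts as a single line. */
	WARN_ON_ONCE(count_lines("", 0) != 1);
}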

/*
 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
 * If @line_count is provided, count the number of lines in the data.
 *
 * This function (used by readers) performs strict validation on the data
 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
 * triggered if an internal error is detected.
 */
static bool copy_data(struct prb_data_ring *data_ring,
		      struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
		      unsigned int buf_size, unsigned int *line_count)
{
	unsigned int data_size;
	const char *data;

	/* Caller might not want any data. */
	if ((!buf || !buf_size) && !line_count)
		return true;

	data = get_data(data_ring, blk_lpos, &data_size);
	if (!data)
		return false;

	/*
	 * Actual cannot be less than expected. It can be more than expected
	 * because of the trailing alignment padding.
	 *
	 * Note that invalid @len values can occur because the caller loads
	 * the value during an allowed data race.
	 */
	if (data_size < (unsigned int)len)
		return false;

	/* Caller interested in the line count? */
	if (line_count)
		*line_count = count_lines(data, len);

	/* Caller interested in the data content? */
	if (!buf || !buf_size)
		return true;

	data_size = min_t(u16, buf_size, len);

	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */

	return true;
}

/*
 * This is an extended version of desc_read(). It gets a copy of a specified
 * descriptor. However, it also verifies that the record is finalized and has
 * the sequence number @seq. On success, 0 is returned.
 *
 * Error return values:
 * -EINVAL: A finalized record with sequence number @seq does not exist.
 * -ENOENT: A finalized record with sequence number @seq exists, but its data
 *          is not available. This is a valid record, so readers should
 *          continue with the next record.
 */
static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
				   unsigned long id, u64 seq,
				   struct prb_desc *desc_out)
{
	struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
	enum desc_state d_state;
	u64 s;

	d_state = desc_read(desc_ring, id, desc_out, &s, NULL);

	/*
	 * An unexpected @id (desc_miss) or @seq mismatch means the record
	 * does not exist. A descriptor in the reserved or committed state
	 * means the record does not yet exist for the reader.
	 */
	if (d_state == desc_miss ||
	    d_state == desc_reserved ||
	    d_state == desc_committed ||
	    s != seq) {
		return -EINVAL;
	}

	/*
	 * A descriptor in the reusable state may no longer have its data
	 * available; report it as existing but with lost data. Or the record
	 * may actually be a record with lost data.
	 */
	if (d_state == desc_reusable ||
	    (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
		return -ENOENT;
	}

	return 0;
}

/*
 * Copy the ringbuffer data from the record with @seq to the provided
 * @r buffer. On success, 0 is returned.
 *
 * See desc_read_finalized_seq() for error return values.
 */
static int prb_read(struct printk_ringbuffer *rb, u64 seq,
		    struct printk_record *r, unsigned int *line_count)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	struct printk_info *info = to_info(desc_ring, seq);
	struct prb_desc *rdesc = to_desc(desc_ring, seq);
	atomic_long_t *state_var = &rdesc->state_var;
	struct prb_desc desc;
	unsigned long id;
	int err;

	/* Extract the ID, used to specify the descriptor to read. */
	id = DESC_ID(atomic_long_read(state_var));

	/* Get a local copy of the correct descriptor (if available). */
	err = desc_read_finalized_seq(desc_ring, id, seq, &desc);

	/*
	 * If @r is NULL, the caller is only interested in the availability
	 * of the record.
	 */
	if (err || !r)
		return err;

	/* If requested, copy meta data. */
	if (r->info)
		memcpy(r->info, info, sizeof(*(r->info)));

	/* Copy text data. If it fails, this is a data-less record. */
	if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
		       r->text_buf, r->text_buf_size, line_count)) {
		return -ENOENT;
	}

	/* Ensure the record is still finalized and has the same @seq. */
	return desc_read_finalized_seq(desc_ring, id, seq, &desc);
}

/* Get the sequence number of the tail descriptor. */
static u64 prb_first_seq(struct printk_ringbuffer *rb)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	enum desc_state d_state;
	struct prb_desc desc;
	unsigned long id;
	u64 seq;

	for (;;) {
		id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */

		d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */

		/*
		 * This loop will not be infinite because the tail is
		 * _always_ in the finalized or reusable state.
		 */
		if (d_state == desc_finalized || d_state == desc_reusable)
			break;

		/*
		 * Guarantee the last state load from desc_read() is before
		 * reloading @tail_id in order to see a new tail in the case
		 * that the descriptor has been recycled. This pairs with
		 * desc_reserve:D.
		 *
		 * Memory barrier involvement:
		 *
		 * If prb_first_seq:B reads from desc_reserve:F, then
		 * prb_first_seq:A reads from desc_push_tail:B.
		 *
		 * Relies on:
		 *
		 * MB from desc_push_tail:B to desc_reserve:F
		 *    matching
		 * RMB prb_first_seq:B to prb_first_seq:A
		 */
		smp_rmb(); /* LMM(prb_first_seq:C) */
	}

	return seq;
}

/*
 * Non-blocking read of a record. Updates @seq to the last finalized record
 * (which may have no data available).
 *
 * See the description of prb_read_valid() and prb_read_valid_info()
 * for details.
 */
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
			    struct printk_record *r, unsigned int *line_count)
{
	u64 tail_seq;
	int err;

	while ((err = prb_read(rb, *seq, r, line_count))) {
		tail_seq = prb_first_seq(rb);

		if (*seq < tail_seq) {
			/*
			 * Behind the tail. Catch up and try again. This
			 * can happen for -ENOENT and -EINVAL cases.
			 */
			*seq = tail_seq;

		} else if (err == -ENOENT) {
			/* Record exists, but no data available. Skip. */
			(*seq)++;

		} else {
			/* Non-existent/non-finalized record. Must stop. */
			return false;
		}
	}

	return true;
}

/**
 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
 *                    the next available record.
 *
 * @rb:  The ringbuffer to read from.
 * @seq: The sequence number of the record to read.
 * @r:   A record data buffer to store the read record to.
 *
 * This is the public function available to readers to read a record.
 *
 * The reader provides the @info and @text_buf buffers of @r to be
 * filled in. Any of the buffer pointers can be set to NULL if the reader
 * is not interested in that data. To ensure proper initialization of @r,
 * prb_rec_init_rd() should be used.
 *
 * Context: Any context.
 * Return: true if a record was read, otherwise false.
 *
 * On success, the reader must check r->info->seq to see which record was
 * actually read. This allows the reader to detect dropped records.
 *
 * Failure means @seq refers to a not yet written record.
 */
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
		    struct printk_record *r)
{
	return _prb_read_valid(rb, &seq, r, NULL);
}
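
/*
 * Illustrative sketch: a minimal reader loop that walks all finalized
 * records. The helper name and local buffer size are hypothetical; gaps in
 * r.info->seq expose dropped records.
 */
static inline void prb_example_read_all(struct printk_ringbuffer *rb)
{
	struct printk_info info;
	struct printk_record r;
	char text[64];
	u64 seq = 0;

	prb_rec_init_rd(&r, &info, &text[0], sizeof(text));

	while (prb_read_valid(rb, seq, &r)) {
		if (seq != r.info->seq)
			pr_info("lost %llu records\n", r.info->seq - seq);

		pr_info("%llu: %.*s\n", r.info->seq, r.info->text_len, &text[0]);

		seq = r.info->seq + 1;
	}
}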

/**
 * prb_read_valid_info() - Non-blocking read of meta data for a requested
 *                         record or (if gone) the next available record.
 *
 * @rb:         The ringbuffer to read from.
 * @seq:        The sequence number of the record to read.
 * @info:       A buffer to store the read record meta data to.
 * @line_count: A buffer to store the number of lines in the record text.
 *
 * This is the public function available to readers to read only the
 * meta data of a record.
 *
 * The reader provides the @info, @line_count buffers to be filled in.
 * Either of the buffer pointers can be set to NULL if the reader is not
 * interested in that data.
 *
 * Context: Any context.
 * Return: true if a record's meta data was read, otherwise false.
 *
 * On success, the reader must check info->seq to see which record meta data
 * was actually read. This allows the reader to detect dropped records.
 *
 * Failure means @seq refers to a not yet written record.
 */
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
			 struct printk_info *info, unsigned int *line_count)
{
	struct printk_record r;

	prb_rec_init_rd(&r, info, NULL, 0);

	return _prb_read_valid(rb, &seq, &r, line_count);
}
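
/*
 * Illustrative sketch: querying only the meta data of a record, e.g. to size
 * a buffer before reading the text. The helper name is hypothetical; the
 * printed fields are real printk_info members.
 */
static inline void prb_example_peek(struct printk_ringbuffer *rb, u64 seq)
{
	struct printk_info info;
	unsigned int line_count;

	if (!prb_read_valid_info(rb, seq, &info, &line_count))
		return;

	pr_info("record %llu: %hu bytes of text, %u line(s)\n",
		info.seq, info.text_len, line_count);
}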

/**
 * prb_first_valid_seq() - Get the sequence number of the oldest available
 *                         record.
 *
 * @rb: The ringbuffer to get the sequence number from.
 *
 * This is the public function available to readers to see what the
 * first/oldest valid sequence number is.
 *
 * This provides readers a starting point to begin iterating the ringbuffer.
 *
 * Context: Any context.
 * Return: The sequence number of the first/oldest record or, if the
 *         ringbuffer is empty, 0 is returned.
 */
u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
{
	u64 seq = 0;

	if (!_prb_read_valid(rb, &seq, NULL, NULL))
		return 0;

	return seq;
}

/**
 * prb_next_seq() - Get the sequence number after the last available record.
 *
 * @rb: The ringbuffer to get the sequence number from.
 *
 * This is the public function available to readers to see what the next
 * newest sequence number available to readers will be.
 *
 * This provides readers a sequence number to jump to if all currently
 * available records should be skipped.
 *
 * Context: Any context.
 * Return: The sequence number of the next newest (not yet available) record
 *         for readers.
 */
u64 prb_next_seq(struct printk_ringbuffer *rb)
{
	struct prb_desc_ring *desc_ring = &rb->desc_ring;
	enum desc_state d_state;
	unsigned long id;
	u64 seq;

	/* Check if the cached @id still points to a valid @seq. */
	id = atomic_long_read(&desc_ring->last_finalized_id);
	d_state = desc_read(desc_ring, id, NULL, &seq, NULL);

	if (d_state == desc_finalized || d_state == desc_reusable) {
		/*
		 * Begin searching after the last finalized record.
		 *
		 * On 0, the search must begin at 0 because, due to hack#2
		 * of the bootstrapping phase, it is not known whether a
		 * record at index 0 exists.
		 */
		if (seq != 0)
			seq++;
	} else {
		/*
		 * The information about the last finalized sequence number
		 * has gone. It should happen only when there is a flood of
		 * new messages and the ringbuffer is rapidly recycled.
		 * Give up and start from the beginning.
		 */
		seq = 0;
	}

	/*
	 * The information about the last finalized @seq might be inaccurate.
	 * Search forward to find the current one.
	 */
	while (_prb_read_valid(rb, &seq, NULL, NULL))
		seq++;

	return seq;
}
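
/*
 * Illustrative sketch: bounding the currently readable records with the
 * oldest sequence number and the next one to be written. The helper name is
 * hypothetical; the result is only an upper bound because records can still
 * be recycled concurrently.
 */
static inline u64 prb_example_available(struct printk_ringbuffer *rb)
{
	u64 first = prb_first_valid_seq(rb);
	u64 next = prb_next_seq(rb);

	/*
	 * @next is the sequence number a reader would jump to in order to
	 * skip everything currently finalized, so the difference bounds how
	 * many records can still be read starting at @first.
	 */
	return next - first;
}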

/**
 * prb_init() - Initialize a ringbuffer to use provided external buffers.
 *
 * @rb:       The ringbuffer to initialize.
 * @text_buf: The data buffer for text data.
 * @textbits: The size of @text_buf as a power-of-2 value.
 * @descs:    The descriptor buffer for ringbuffer records.
 * @descbits: The count of @descs items as a power-of-2 value.
 * @infos:    The printk_info buffer for ringbuffer records.
 *
 * This is the public function available to writers to setup a ringbuffer
 * during runtime using provided buffers.
 *
 * This must match the initialization of DEFINE_PRINTKRB().
 *
 * Context: Any context.
 */
void prb_init(struct printk_ringbuffer *rb,
	      char *text_buf, unsigned int textbits,
	      struct prb_desc *descs, unsigned int descbits,
	      struct printk_info *infos)
{
	memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
	memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));

	rb->desc_ring.count_bits = descbits;
	rb->desc_ring.descs = descs;
	rb->desc_ring.infos = infos;
	atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
	atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
	atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));

	rb->text_data_ring.size_bits = textbits;
	rb->text_data_ring.data = text_buf;
	atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
	atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));

	atomic_long_set(&rb->fail, 0);

	atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;

	infos[0].seq = -(u64)_DESCS_COUNT(descbits);
	infos[_DESCS_COUNT(descbits) - 1].seq = 0;
}
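
/*
 * Illustrative sketch: setting up a small ringbuffer at runtime from
 * statically provided buffers. All names and sizes here are hypothetical;
 * the sizes (2^12 bytes of text, 2^5 descriptors) only need to be the
 * power-of-2 exponents that prb_init() expects.
 */
#define EXAMPLE_TEXT_BITS 12
#define EXAMPLE_DESC_BITS 5

static char example_text[1 << EXAMPLE_TEXT_BITS];
static struct prb_desc example_descs[1 << EXAMPLE_DESC_BITS];
static struct printk_info example_infos[1 << EXAMPLE_DESC_BITS];
static struct printk_ringbuffer example_rb;

static inline void prb_example_init(void)
{
	prb_init(&example_rb,
		 &example_text[0], EXAMPLE_TEXT_BITS,
		 &example_descs[0], EXAMPLE_DESC_BITS,
		 &example_infos[0]);
}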

/**
 * prb_record_text_space() - Query the full actual used ringbuffer space for
 *                           the text data of a reserved entry.
 *
 * @e: The successfully reserved entry to query.
 *
 * This is the public function available to writers to see how much actual
 * space is used in the ringbuffer to store the text data of the specified
 * entry.
 *
 * This function is only valid if @e has been successfully reserved using
 * prb_reserve().
 *
 * Context: Any context.
 * Return: The size in bytes used by the text data of the associated record.
 */
unsigned int prb_record_text_space(struct prb_reserved_entry *e)
{
	return e->text_space;
}