zone.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Provide a pstore intermediate backend, organized into kernel memory
 * allocated zones that are then mapped and flushed into a single
 * contiguous region on a storage backend of some kind (block, mtd, etc).
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/printk.h>
#include <linux/fs.h>
#include <linux/pstore_zone.h>
#include <linux/kdev_t.h>
#include <linux/device.h>
#include <linux/namei.h>
#include <linux/fcntl.h>
#include <linux/uio.h>
#include <linux/writeback.h>
#include "internal.h"

/**
 * struct psz_buffer - header of zone to flush to storage
 *
 * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
 * @datalen: length of data in @data
 * @start: offset into @data where the beginning of the stored bytes begin
 * @data: zone data.
 */
struct psz_buffer {
#define PSZ_SIG (0x43474244) /* DBGC */
        uint32_t sig;
        atomic_t datalen;
        atomic_t start;
        uint8_t data[];
};

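/*
 * On-storage layout of a zone (see psz_init_zone()): a struct psz_buffer
 * header at zone->off, immediately followed by buffer_size bytes of data.
 * @sig is PSZ_SIG xor'ed with the zone's pstore type, so a zone is only
 * accepted during recovery if its signature matches its expected type.
 */
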
/**
 * struct psz_kmsg_header - kmsg dump-specific header to flush to storage
 *
 * @magic: magic num for kmsg dump header
 * @time: kmsg dump trigger time
 * @compressed: whether compressed
 * @counter: kmsg dump counter
 * @reason: the kmsg dump reason (e.g. oops, panic, etc)
 * @data: pointer to log data
 *
 * This is a sub-header for a kmsg dump, trailing after &psz_buffer.
 */
struct psz_kmsg_header {
#define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
        uint32_t magic;
        struct timespec64 time;
        bool compressed;
        uint32_t counter;
        enum kmsg_dump_reason reason;
        uint8_t data[];
};

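/*
 * A stored kmsg dump record is therefore nested: a struct psz_buffer,
 * then a struct psz_kmsg_header at buffer->data, then the log text
 * itself (see psz_kmsg_write_record() and psz_kmsg_read()).
 */
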
/**
 * struct pstore_zone - single stored buffer
 *
 * @off: zone offset of storage
 * @type: front-end type for this zone
 * @name: front-end name for this zone
 * @buffer: pointer to data buffer managed by this zone
 * @oldbuf: pointer to old data buffer
 * @buffer_size: bytes in @buffer->data
 * @should_recover: whether this zone should recover from storage
 * @dirty: whether the data in @buffer is dirty
 *
 * zone structure in memory.
 */
struct pstore_zone {
        loff_t off;
        const char *name;
        enum pstore_type_id type;
        struct psz_buffer *buffer;
        struct psz_buffer *oldbuf;
        size_t buffer_size;
        bool should_recover;
        atomic_t dirty;
};

/**
 * struct psz_context - all about running state of pstore/zone
 *
 * @kpszs: kmsg dump storage zones
 * @ppsz: pmsg storage zone
 * @cpsz: console storage zone
 * @fpszs: ftrace storage zones
 * @kmsg_max_cnt: max count of @kpszs
 * @kmsg_read_cnt: counter of total read kmsg dumps
 * @kmsg_write_cnt: counter of total kmsg dump writes
 * @pmsg_read_cnt: counter of total pmsg zone reads
 * @console_read_cnt: counter of total console zone reads
 * @ftrace_max_cnt: max count of @fpszs
 * @ftrace_read_cnt: counter of ftrace zone reads
 * @oops_counter: counter of oops dumps
 * @panic_counter: counter of panic dumps
 * @recovered: whether finished recovering data from storage
 * @on_panic: whether panic is happening
 * @pstore_zone_info_lock: lock to @pstore_zone_info
 * @pstore_zone_info: information from backend
 * @pstore: structure for pstore
 */
struct psz_context {
        struct pstore_zone **kpszs;
        struct pstore_zone *ppsz;
        struct pstore_zone *cpsz;
        struct pstore_zone **fpszs;
        unsigned int kmsg_max_cnt;
        unsigned int kmsg_read_cnt;
        unsigned int kmsg_write_cnt;
        unsigned int pmsg_read_cnt;
        unsigned int console_read_cnt;
        unsigned int ftrace_max_cnt;
        unsigned int ftrace_read_cnt;
        /*
         * These counters are recalculated during recovery: they record
         * the number of oops/panic dumps across crashes, not since boot.
         */
        unsigned int oops_counter;
        unsigned int panic_counter;
        atomic_t recovered;
        atomic_t on_panic;
        /*
         * pstore_zone_info_lock protects this entire structure during calls
         * to register_pstore_zone()/unregister_pstore_zone().
         */
        struct mutex pstore_zone_info_lock;
        struct pstore_zone_info *pstore_zone_info;
        struct pstore_info pstore;
};

static struct psz_context pstore_zone_cxt;
static void psz_flush_all_dirty_zones(struct work_struct *);
static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones);

/**
 * enum psz_flush_mode - flush mode for psz_zone_write()
 *
 * @FLUSH_NONE: do not flush to storage but update data on memory
 * @FLUSH_PART: just flush part of data including meta data to storage
 * @FLUSH_META: just flush meta data of zone to storage
 * @FLUSH_ALL: flush all of zone
 */
enum psz_flush_mode {
        FLUSH_NONE = 0,
        FLUSH_PART,
        FLUSH_META,
        FLUSH_ALL,
};

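/*
 * Concretely, in psz_zone_write(): FLUSH_META writes only the struct
 * psz_buffer header at zone->off; FLUSH_PART writes the touched slice of
 * the data region and then falls through to write the header as well;
 * FLUSH_ALL writes the header plus the whole data region in one call.
 */
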
static inline int buffer_datalen(struct pstore_zone *zone)
{
        return atomic_read(&zone->buffer->datalen);
}

static inline int buffer_start(struct pstore_zone *zone)
{
        return atomic_read(&zone->buffer->start);
}

static inline bool is_on_panic(void)
{
        return atomic_read(&pstore_zone_cxt.on_panic);
}

static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
                size_t len, unsigned long off)
{
        if (!buf || !zone || !zone->buffer)
                return -EINVAL;
        if (off > zone->buffer_size)
                return -EINVAL;
        len = min_t(size_t, len, zone->buffer_size - off);
        memcpy(buf, zone->buffer->data + off, len);
        return len;
}

static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
                size_t len, unsigned long off)
{
        if (!buf || !zone || !zone->oldbuf)
                return -EINVAL;
        if (off > zone->buffer_size)
                return -EINVAL;
        len = min_t(size_t, len, zone->buffer_size - off);
        memcpy(buf, zone->oldbuf->data + off, len);
        return 0;
}

static int psz_zone_write(struct pstore_zone *zone,
                enum psz_flush_mode flush_mode, const char *buf,
                size_t len, unsigned long off)
{
        struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
        ssize_t wcnt = 0;
        ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos);
        size_t wlen;

        if (off > zone->buffer_size)
                return -EINVAL;

        wlen = min_t(size_t, len, zone->buffer_size - off);
        if (buf && wlen) {
                memcpy(zone->buffer->data + off, buf, wlen);
                atomic_set(&zone->buffer->datalen, wlen + off);
        }

        /* avoid damaging old records */
        if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
                goto dirty;

        writeop = is_on_panic() ? info->panic_write : info->write;
        if (!writeop)
                goto dirty;

        switch (flush_mode) {
        case FLUSH_NONE:
                if (unlikely(buf && wlen))
                        goto dirty;
                return 0;
        case FLUSH_PART:
                wcnt = writeop((const char *)zone->buffer->data + off, wlen,
                                zone->off + sizeof(*zone->buffer) + off);
                if (wcnt != wlen)
                        goto dirty;
                fallthrough;
        case FLUSH_META:
                wlen = sizeof(struct psz_buffer);
                wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
                if (wcnt != wlen)
                        goto dirty;
                break;
        case FLUSH_ALL:
                wlen = zone->buffer_size + sizeof(*zone->buffer);
                wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
                if (wcnt != wlen)
                        goto dirty;
                break;
        }

        return 0;
dirty:
        /* no need to mark dirty if going to try next zone */
        if (wcnt == -ENOMSG)
                return -ENOMSG;
        atomic_set(&zone->dirty, true);
        /* flush dirty zones nicely */
        if (wcnt == -EBUSY && !is_on_panic())
                schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
        return -EBUSY;
}

static int psz_flush_dirty_zone(struct pstore_zone *zone)
{
        int ret;

        if (unlikely(!zone))
                return -EINVAL;

        if (unlikely(!atomic_read(&pstore_zone_cxt.recovered)))
                return -EBUSY;

        if (!atomic_xchg(&zone->dirty, false))
                return 0;

        ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
        if (ret)
                atomic_set(&zone->dirty, true);
        return ret;
}

static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt)
{
        int i, ret;
        struct pstore_zone *zone;

        if (!zones)
                return -EINVAL;

        for (i = 0; i < cnt; i++) {
                zone = zones[i];
                if (!zone)
                        return -EINVAL;
                ret = psz_flush_dirty_zone(zone);
                if (ret)
                        return ret;
        }
        return 0;
}

static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new)
{
        const char *data = (const char *)old->buffer->data;
        int ret;

        ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
        if (ret) {
                atomic_set(&new->buffer->datalen, 0);
                atomic_set(&new->dirty, false);
                return ret;
        }
        atomic_set(&old->buffer->datalen, 0);
        return 0;
}

static void psz_flush_all_dirty_zones(struct work_struct *work)
{
        struct psz_context *cxt = &pstore_zone_cxt;
        int ret = 0;

        if (cxt->ppsz)
                ret |= psz_flush_dirty_zone(cxt->ppsz);
        if (cxt->cpsz)
                ret |= psz_flush_dirty_zone(cxt->cpsz);
        if (cxt->kpszs)
                ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
        if (cxt->fpszs)
                ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
        if (ret && cxt->pstore_zone_info)
                schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
}

static int psz_kmsg_recover_data(struct psz_context *cxt)
{
        struct pstore_zone_info *info = cxt->pstore_zone_info;
        struct pstore_zone *zone = NULL;
        struct psz_buffer *buf;
        unsigned long i;
        ssize_t rcnt;

        if (!info->read)
                return -EINVAL;

        for (i = 0; i < cxt->kmsg_max_cnt; i++) {
                zone = cxt->kpszs[i];
                if (unlikely(!zone))
                        return -EINVAL;
                if (atomic_read(&zone->dirty)) {
                        unsigned int wcnt = cxt->kmsg_write_cnt;
                        struct pstore_zone *new = cxt->kpszs[wcnt];
                        int ret;

                        ret = psz_move_zone(zone, new);
                        if (ret) {
                                pr_err("move zone from %lu to %d failed\n",
                                                i, wcnt);
                                return ret;
                        }
                        cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt;
                }
                if (!zone->should_recover)
                        continue;
                buf = zone->buffer;
                rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
                                zone->off);
                if (rcnt != zone->buffer_size + sizeof(*buf))
                        return (int)rcnt < 0 ? (int)rcnt : -EIO;
        }
        return 0;
}

static int psz_kmsg_recover_meta(struct psz_context *cxt)
{
        struct pstore_zone_info *info = cxt->pstore_zone_info;
        struct pstore_zone *zone;
        size_t rcnt, len;
        struct psz_buffer *buf;
        struct psz_kmsg_header *hdr;
        struct timespec64 time = { };
        unsigned long i;
        /*
         * Recovery may run during a panic, when kmalloc() cannot be
         * used, so use a local array instead.
         */
        char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};

        if (!info->read)
                return -EINVAL;

        len = sizeof(*buf) + sizeof(*hdr);
        buf = (struct psz_buffer *)buffer_header;
        for (i = 0; i < cxt->kmsg_max_cnt; i++) {
                zone = cxt->kpszs[i];
                if (unlikely(!zone))
                        return -EINVAL;

                rcnt = info->read((char *)buf, len, zone->off);
                if (rcnt == -ENOMSG) {
                        pr_debug("%s with id %lu may be broken, skip\n",
                                        zone->name, i);
                        continue;
                } else if (rcnt != len) {
                        pr_err("read %s with id %lu failed\n", zone->name, i);
                        return (int)rcnt < 0 ? (int)rcnt : -EIO;
                }

                if (buf->sig != zone->buffer->sig) {
                        pr_debug("no valid data in kmsg dump zone %lu\n", i);
                        continue;
                }

                if (zone->buffer_size < atomic_read(&buf->datalen)) {
                        pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n",
                                        zone->name, i, zone->off,
                                        zone->buffer_size);
                        continue;
                }

                hdr = (struct psz_kmsg_header *)buf->data;
                if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) {
                        pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n",
                                        zone->name, i, zone->off,
                                        zone->buffer_size);
                        continue;
                }

                /*
                 * Track the newest zone. Since zones are written one by
                 * one in a circle, the zone after the newest one must be
                 * the oldest or a still-unused one, so that is where
                 * writing resumes.
                 */
                if (hdr->time.tv_sec >= time.tv_sec) {
                        time.tv_sec = hdr->time.tv_sec;
                        cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt;
                }

                if (hdr->reason == KMSG_DUMP_OOPS)
                        cxt->oops_counter =
                                max(cxt->oops_counter, hdr->counter);
                else if (hdr->reason == KMSG_DUMP_PANIC)
                        cxt->panic_counter =
                                max(cxt->panic_counter, hdr->counter);

                if (!atomic_read(&buf->datalen)) {
                        pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
                                        zone->name, i, zone->off,
                                        zone->buffer_size,
                                        atomic_read(&buf->datalen));
                        continue;
                }

                if (!is_on_panic())
                        zone->should_recover = true;
                pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
                                zone->name, i, zone->off,
                                zone->buffer_size, atomic_read(&buf->datalen));
        }

        return 0;
}

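/*
 * kmsg recovery runs in two phases: psz_kmsg_recover_meta() scans every
 * zone header to rebuild the oops/panic counters and locate the next
 * write slot (e.g. with four zones and the newest dump in zone 2, writing
 * resumes at zone 3), then psz_kmsg_recover_data() reads back the full
 * contents of each zone marked should_recover.
 */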
static int psz_kmsg_recover(struct psz_context *cxt)
{
        int ret;

        if (!cxt->kpszs)
                return 0;

        ret = psz_kmsg_recover_meta(cxt);
        if (ret)
                goto recover_fail;

        ret = psz_kmsg_recover_data(cxt);
        if (ret)
                goto recover_fail;

        return 0;
recover_fail:
        pr_debug("psz_recover_kmsg failed\n");
        return ret;
}

static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
{
        struct pstore_zone_info *info = cxt->pstore_zone_info;
        struct psz_buffer *oldbuf, tmpbuf;
        int ret = 0;
        char *buf;
        ssize_t rcnt, len, start, off;

        if (!zone || zone->oldbuf)
                return 0;

        if (is_on_panic()) {
                /* save as much data as possible */
                psz_flush_dirty_zone(zone);
                return 0;
        }

        if (unlikely(!info->read))
                return -EINVAL;

        len = sizeof(struct psz_buffer);
        rcnt = info->read((char *)&tmpbuf, len, zone->off);
        if (rcnt != len) {
                pr_debug("read zone %s failed\n", zone->name);
                return (int)rcnt < 0 ? (int)rcnt : -EIO;
        }

        if (tmpbuf.sig != zone->buffer->sig) {
                pr_debug("no valid data in zone %s\n", zone->name);
                return 0;
        }

        if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
            zone->buffer_size < atomic_read(&tmpbuf.start)) {
                pr_info("found overtop zone: %s: off %lld, size %zu\n",
                                zone->name, zone->off, zone->buffer_size);
                /* just keep going */
                return 0;
        }

        if (!atomic_read(&tmpbuf.datalen)) {
                pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n",
                                zone->name, zone->off, zone->buffer_size,
                                atomic_read(&tmpbuf.datalen));
                return 0;
        }

        pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n",
                        zone->name, zone->off, zone->buffer_size,
                        atomic_read(&tmpbuf.datalen));

        len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
        oldbuf = kzalloc(len, GFP_KERNEL);
        if (!oldbuf)
                return -ENOMEM;

        memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf));
        buf = (char *)oldbuf + sizeof(*oldbuf);
        len = atomic_read(&oldbuf->datalen);
        start = atomic_read(&oldbuf->start);
        off = zone->off + sizeof(*oldbuf);

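        /*
         * The zone data is a ring buffer: @start is the next write
         * position, so bytes [start, len) are the oldest and [0, start)
         * the newest. For illustration, with len == 8 and start == 3 the
         * two reads below fetch bytes 3..7 and then 0..2, linearizing
         * the record oldest-first into oldbuf.
         */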
        /* read the older part of the data: [start, len) */
        rcnt = info->read(buf, len - start, off + start);
        if (rcnt != len - start) {
                pr_err("read zone %s failed\n", zone->name);
                ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
                goto free_oldbuf;
        }

        /* read the wrapped remainder: [0, start) */
        rcnt = info->read(buf + len - start, start, off);
        if (rcnt != start) {
                pr_err("read zone %s failed\n", zone->name);
                ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
                goto free_oldbuf;
        }

        zone->oldbuf = oldbuf;
        psz_flush_dirty_zone(zone);
        return 0;

free_oldbuf:
        kfree(oldbuf);
        return ret;
}

static int psz_recover_zones(struct psz_context *cxt,
                struct pstore_zone **zones, unsigned int cnt)
{
        int ret;
        unsigned int i;
        struct pstore_zone *zone;

        if (!zones)
                return 0;

        for (i = 0; i < cnt; i++) {
                zone = zones[i];
                if (unlikely(!zone))
                        continue;
                ret = psz_recover_zone(cxt, zone);
                if (ret)
                        goto recover_fail;
        }

        return 0;
recover_fail:
        pr_debug("recover %s[%u] failed\n", zone->name, i);
        return ret;
}

/**
 * psz_recovery() - recover data from storage
 * @cxt: the context of pstore/zone
 *
 * recovery means reading data back from storage after rebooting
 *
 * Return: 0 on success, others on failure.
 */
static inline int psz_recovery(struct psz_context *cxt)
{
        int ret;

        if (atomic_read(&cxt->recovered))
                return 0;

        ret = psz_kmsg_recover(cxt);
        if (ret)
                goto out;

        ret = psz_recover_zone(cxt, cxt->ppsz);
        if (ret)
                goto out;

        ret = psz_recover_zone(cxt, cxt->cpsz);
        if (ret)
                goto out;

        ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);
out:
        if (unlikely(ret))
                pr_err("recover failed\n");
        else {
                pr_debug("recover end!\n");
                atomic_set(&cxt->recovered, 1);
        }
        return ret;
}

static int psz_pstore_open(struct pstore_info *psi)
{
        struct psz_context *cxt = psi->data;

        cxt->kmsg_read_cnt = 0;
        cxt->pmsg_read_cnt = 0;
        cxt->console_read_cnt = 0;
        cxt->ftrace_read_cnt = 0;
        return 0;
}

static inline bool psz_old_ok(struct pstore_zone *zone)
{
        if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
                return true;
        return false;
}

static inline bool psz_ok(struct pstore_zone *zone)
{
        if (zone && zone->buffer && buffer_datalen(zone))
                return true;
        return false;
}

static inline int psz_kmsg_erase(struct psz_context *cxt,
                struct pstore_zone *zone, struct pstore_record *record)
{
        struct psz_buffer *buffer = zone->buffer;
        struct psz_kmsg_header *hdr =
                (struct psz_kmsg_header *)buffer->data;
        size_t size;

        if (unlikely(!psz_ok(zone)))
                return 0;

        /* this zone is already updated, no need to erase */
        if (record->count != hdr->counter)
                return 0;

        size = buffer_datalen(zone) + sizeof(*zone->buffer);
        atomic_set(&zone->buffer->datalen, 0);
        if (cxt->pstore_zone_info->erase)
                return cxt->pstore_zone_info->erase(size, zone->off);
        else
                return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
}

static inline int psz_record_erase(struct psz_context *cxt,
                struct pstore_zone *zone)
{
        if (unlikely(!psz_old_ok(zone)))
                return 0;

        kfree(zone->oldbuf);
        zone->oldbuf = NULL;
        /*
         * If there is new data in the zone buffer, the old data is
         * already invalid and there is no need to flush zeroes (an
         * erase) to the storage device.
         */
        if (!buffer_datalen(zone))
                return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
        psz_flush_dirty_zone(zone);
        return 0;
}

static int psz_pstore_erase(struct pstore_record *record)
{
        struct psz_context *cxt = record->psi->data;

        switch (record->type) {
        case PSTORE_TYPE_DMESG:
                if (record->id >= cxt->kmsg_max_cnt)
                        return -EINVAL;
                return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
        case PSTORE_TYPE_PMSG:
                return psz_record_erase(cxt, cxt->ppsz);
        case PSTORE_TYPE_CONSOLE:
                return psz_record_erase(cxt, cxt->cpsz);
        case PSTORE_TYPE_FTRACE:
                if (record->id >= cxt->ftrace_max_cnt)
                        return -EINVAL;
                return psz_record_erase(cxt, cxt->fpszs[record->id]);
        default:
                return -EINVAL;
        }
}

static void psz_write_kmsg_hdr(struct pstore_zone *zone,
                struct pstore_record *record)
{
        struct psz_context *cxt = record->psi->data;
        struct psz_buffer *buffer = zone->buffer;
        struct psz_kmsg_header *hdr =
                (struct psz_kmsg_header *)buffer->data;

        hdr->magic = PSTORE_KMSG_HEADER_MAGIC;
        hdr->compressed = record->compressed;
        hdr->time.tv_sec = record->time.tv_sec;
        hdr->time.tv_nsec = record->time.tv_nsec;
        hdr->reason = record->reason;
        if (hdr->reason == KMSG_DUMP_OOPS)
                hdr->counter = ++cxt->oops_counter;
        else if (hdr->reason == KMSG_DUMP_PANIC)
                hdr->counter = ++cxt->panic_counter;
        else
                hdr->counter = 0;
}

/*
 * In case a zone is broken, which can happen on MTD devices, try each
 * zone in turn, starting at cxt->kmsg_write_cnt.
 */
static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
                struct pstore_record *record)
{
        size_t size, hlen;
        struct pstore_zone *zone;
        unsigned int i;

        for (i = 0; i < cxt->kmsg_max_cnt; i++) {
                unsigned int zonenum, len;
                int ret;

                zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt;
                zone = cxt->kpszs[zonenum];
                if (unlikely(!zone))
                        return -ENOSPC;

                /* allocate a new buffer to avoid destroying old data */
                len = zone->buffer_size + sizeof(*zone->buffer);
                zone->oldbuf = zone->buffer;
                zone->buffer = kzalloc(len, GFP_KERNEL);
                if (!zone->buffer) {
                        zone->buffer = zone->oldbuf;
                        return -ENOMEM;
                }
                zone->buffer->sig = zone->oldbuf->sig;

                pr_debug("write %s to zone id %d\n", zone->name, zonenum);
                psz_write_kmsg_hdr(zone, record);
                hlen = sizeof(struct psz_kmsg_header);
                size = min_t(size_t, record->size, zone->buffer_size - hlen);
                ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
                if (likely(!ret || ret != -ENOMSG)) {
                        cxt->kmsg_write_cnt = zonenum + 1;
                        cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt;
                        /* no need to try next zone, free last zone buffer */
                        kfree(zone->oldbuf);
                        zone->oldbuf = NULL;
                        return ret;
                }

                pr_debug("zone %u may be broken, try next dmesg zone\n",
                                zonenum);
                kfree(zone->buffer);
                zone->buffer = zone->oldbuf;
                zone->oldbuf = NULL;
        }

        return -EBUSY;
}

static int notrace psz_kmsg_write(struct psz_context *cxt,
                struct pstore_record *record)
{
        int ret;

        /*
         * Explicitly only take the first part of any new crash.
         * If our buffer is larger than kmsg_bytes, this can never happen,
         * and if our buffer is smaller than kmsg_bytes, we don't want the
         * report split across multiple records.
         */
        if (record->part != 1)
                return -ENOSPC;

        if (!cxt->kpszs)
                return -ENOSPC;

        ret = psz_kmsg_write_record(cxt, record);
        if (!ret && is_on_panic()) {
                /* ensure all data is flushed to storage on panic */
                pr_debug("try to flush other dirty zones\n");
                psz_flush_all_dirty_zones(NULL);
        }

        /* always return 0: the record has been handled in the buffer */
        return 0;
}

static int notrace psz_record_write(struct pstore_zone *zone,
                struct pstore_record *record)
{
        size_t start, rem;
        bool is_full_data = false;
        char *buf;
        int cnt;

        if (!zone || !record)
                return -ENOSPC;

        if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
                is_full_data = true;

        cnt = record->size;
        buf = record->buf;
        if (unlikely(cnt > zone->buffer_size)) {
                buf += cnt - zone->buffer_size;
                cnt = zone->buffer_size;
        }

        start = buffer_start(zone);
        rem = zone->buffer_size - start;

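        /*
         * Wrap-around illustration: with buffer_size == 8, start == 6 and
         * cnt == 5, the branch below writes 2 bytes at offset 6, then the
         * remaining 3 bytes land at offset 0 and @start advances to 3.
         */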
        if (unlikely(rem < cnt)) {
                psz_zone_write(zone, FLUSH_PART, buf, rem, start);
                buf += rem;
                cnt -= rem;
                start = 0;
                is_full_data = true;
        }

        atomic_set(&zone->buffer->start, cnt + start);
        psz_zone_write(zone, FLUSH_PART, buf, cnt, start);

        /*
         * psz_zone_write() sets datalen to start + cnt, which is correct
         * as long as the actual data length is less than the buffer size.
         * Once the data length exceeds the buffer size, pmsg wraps around
         * to the beginning of the zone and start + cnt no longer reflects
         * the stored length, so reset datalen to the buffer size in that
         * case.
         */
        if (is_full_data) {
                atomic_set(&zone->buffer->datalen, zone->buffer_size);
                psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
        }
        return 0;
}

static int notrace psz_pstore_write(struct pstore_record *record)
{
        struct psz_context *cxt = record->psi->data;

        if (record->type == PSTORE_TYPE_DMESG &&
                        record->reason == KMSG_DUMP_PANIC)
                atomic_set(&cxt->on_panic, 1);

        /*
         * While panicking, write nothing but panic records. This covers
         * the case where panic_write itself prints a log message, which
         * would otherwise wake up the console backend.
         */
        if (is_on_panic() && record->type != PSTORE_TYPE_DMESG)
                return -EBUSY;

        switch (record->type) {
        case PSTORE_TYPE_DMESG:
                return psz_kmsg_write(cxt, record);
        case PSTORE_TYPE_CONSOLE:
                return psz_record_write(cxt->cpsz, record);
        case PSTORE_TYPE_PMSG:
                return psz_record_write(cxt->ppsz, record);
        case PSTORE_TYPE_FTRACE: {
                int zonenum = smp_processor_id();

                if (!cxt->fpszs)
                        return -ENOSPC;
                return psz_record_write(cxt->fpszs[zonenum], record);
        }
        default:
                return -EINVAL;
        }
}

static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt)
{
        struct pstore_zone *zone = NULL;

        while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) {
                zone = cxt->kpszs[cxt->kmsg_read_cnt++];
                if (psz_ok(zone))
                        return zone;
        }

        if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
                /*
                 * No psz_old_ok() check here: psz_ftrace_read() performs
                 * it while combining the zones, and it must traverse all
                 * zones in case some of them hold no data.
                 */
                return cxt->fpszs[cxt->ftrace_read_cnt++];

        if (cxt->pmsg_read_cnt == 0) {
                cxt->pmsg_read_cnt++;
                zone = cxt->ppsz;
                if (psz_old_ok(zone))
                        return zone;
        }

        if (cxt->console_read_cnt == 0) {
                cxt->console_read_cnt++;
                zone = cxt->cpsz;
                if (psz_old_ok(zone))
                        return zone;
        }

        return NULL;
}

static int psz_kmsg_read_hdr(struct pstore_zone *zone,
                struct pstore_record *record)
{
        struct psz_buffer *buffer = zone->buffer;
        struct psz_kmsg_header *hdr =
                (struct psz_kmsg_header *)buffer->data;

        if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC)
                return -EINVAL;
        record->compressed = hdr->compressed;
        record->time.tv_sec = hdr->time.tv_sec;
        record->time.tv_nsec = hdr->time.tv_nsec;
        record->reason = hdr->reason;
        record->count = hdr->counter;
        return 0;
}

static ssize_t psz_kmsg_read(struct pstore_zone *zone,
                struct pstore_record *record)
{
        ssize_t size, hlen = 0;

        size = buffer_datalen(zone);
        /* Clear and skip this kmsg dump record if it has no valid header */
        if (psz_kmsg_read_hdr(zone, record)) {
                atomic_set(&zone->buffer->datalen, 0);
                atomic_set(&zone->dirty, 0);
                return -ENOMSG;
        }
        size -= sizeof(struct psz_kmsg_header);

        if (!record->compressed) {
                char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
                                kmsg_dump_reason_str(record->reason),
                                record->count);
                if (!buf)
                        return -ENOMEM;
                hlen = strlen(buf);
                record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
                if (!record->buf) {
                        kfree(buf);
                        return -ENOMEM;
                }
        } else {
                record->buf = kmalloc(size, GFP_KERNEL);
                if (!record->buf)
                        return -ENOMEM;
        }

        size = psz_zone_read_buffer(zone, record->buf + hlen, size,
                        sizeof(struct psz_kmsg_header));
        if (unlikely(size < 0)) {
                kfree(record->buf);
                return -ENOMSG;
        }

        return size + hlen;
}

/* try to combine all ftrace zones */
static ssize_t psz_ftrace_read(struct pstore_zone *zone,
                struct pstore_record *record)
{
        struct psz_context *cxt;
        struct psz_buffer *buf;
        int ret;

        if (!zone || !record)
                return -ENOSPC;

        if (!psz_old_ok(zone))
                goto out;

        buf = (struct psz_buffer *)zone->oldbuf;
        if (!buf)
                return -ENOMSG;

        ret = pstore_ftrace_combine_log(&record->buf, &record->size,
                        (char *)buf->data, atomic_read(&buf->datalen));
        if (unlikely(ret))
                return ret;
out:
        cxt = record->psi->data;
        if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
                /* then, read next ftrace zone */
                return -ENOMSG;
        record->id = 0;
        return record->size ? record->size : -ENOMSG;
}

static ssize_t psz_record_read(struct pstore_zone *zone,
                struct pstore_record *record)
{
        size_t len;
        struct psz_buffer *buf;

        if (!zone || !record)
                return -ENOSPC;

        buf = (struct psz_buffer *)zone->oldbuf;
        if (!buf)
                return -ENOMSG;

        len = atomic_read(&buf->datalen);
        record->buf = kmalloc(len, GFP_KERNEL);
        if (!record->buf)
                return -ENOMEM;

        if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
                kfree(record->buf);
                return -ENOMSG;
        }

        return len;
}

static ssize_t psz_pstore_read(struct pstore_record *record)
{
        struct psz_context *cxt = record->psi->data;
        ssize_t (*readop)(struct pstore_zone *zone,
                        struct pstore_record *record);
        struct pstore_zone *zone;
        ssize_t ret;

        /* before reading, we must recover data from storage */
        ret = psz_recovery(cxt);
        if (ret)
                return ret;

next_zone:
        zone = psz_read_next_zone(cxt);
        if (!zone)
                return 0;

        record->type = zone->type;
        switch (record->type) {
        case PSTORE_TYPE_DMESG:
                readop = psz_kmsg_read;
                record->id = cxt->kmsg_read_cnt - 1;
                break;
        case PSTORE_TYPE_FTRACE:
                readop = psz_ftrace_read;
                break;
        case PSTORE_TYPE_CONSOLE:
        case PSTORE_TYPE_PMSG:
                readop = psz_record_read;
                break;
        default:
                goto next_zone;
        }

        ret = readop(zone, record);
        if (ret == -ENOMSG)
                goto next_zone;
        return ret;
}

static struct psz_context pstore_zone_cxt = {
        .pstore_zone_info_lock =
                __MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock),
        .recovered = ATOMIC_INIT(0),
        .on_panic = ATOMIC_INIT(0),
        .pstore = {
                .owner = THIS_MODULE,
                .open = psz_pstore_open,
                .read = psz_pstore_read,
                .write = psz_pstore_write,
                .erase = psz_pstore_erase,
        },
};

static void psz_free_zone(struct pstore_zone **pszone)
{
        struct pstore_zone *zone = *pszone;

        if (!zone)
                return;

        kfree(zone->buffer);
        kfree(zone);
        *pszone = NULL;
}

static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt)
{
        struct pstore_zone **zones = *pszones;

        if (!zones)
                return;

        while (*cnt > 0) {
                (*cnt)--;
                psz_free_zone(&(zones[*cnt]));
        }
        kfree(zones);
        *pszones = NULL;
}

static void psz_free_all_zones(struct psz_context *cxt)
{
        if (cxt->kpszs)
                psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
        if (cxt->ppsz)
                psz_free_zone(&cxt->ppsz);
        if (cxt->cpsz)
                psz_free_zone(&cxt->cpsz);
        if (cxt->fpszs)
                psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
}

static struct pstore_zone *psz_init_zone(enum pstore_type_id type,
                loff_t *off, size_t size)
{
        struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
        struct pstore_zone *zone;
        const char *name = pstore_type_to_name(type);

        if (!size)
                return NULL;

        if (*off + size > info->total_size) {
                pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n",
                        name, size, *off, info->total_size);
                return ERR_PTR(-ENOMEM);
        }

        zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
        if (!zone)
                return ERR_PTR(-ENOMEM);

        zone->buffer = kmalloc(size, GFP_KERNEL);
        if (!zone->buffer) {
                kfree(zone);
                return ERR_PTR(-ENOMEM);
        }
        memset(zone->buffer, 0xFF, size);
        zone->off = *off;
        zone->name = name;
        zone->type = type;
        zone->buffer_size = size - sizeof(struct psz_buffer);
        zone->buffer->sig = type ^ PSZ_SIG;
        zone->oldbuf = NULL;
        atomic_set(&zone->dirty, 0);
        atomic_set(&zone->buffer->datalen, 0);
        atomic_set(&zone->buffer->start, 0);

        *off += size;

        pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
                        zone->off, sizeof(*zone->buffer), zone->buffer_size);

        return zone;
}

static struct pstore_zone **psz_init_zones(enum pstore_type_id type,
                loff_t *off, size_t total_size, ssize_t record_size,
                unsigned int *cnt)
{
        struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
        struct pstore_zone **zones, *zone;
        const char *name = pstore_type_to_name(type);
        int c, i;

        *cnt = 0;
        if (!total_size || !record_size)
                return NULL;

        if (*off + total_size > info->total_size) {
                pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n",
                        name, total_size, *off, info->total_size);
                return ERR_PTR(-ENOMEM);
        }

        c = total_size / record_size;
        /* kcalloc() already zero-initializes the array */
        zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
        if (!zones) {
                pr_err("allocate for zones %s failed\n", name);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < c; i++) {
                zone = psz_init_zone(type, off, record_size);
                if (!zone || IS_ERR(zone)) {
                        pr_err("initialize zones %s failed\n", name);
                        psz_free_zones(&zones, &i);
                        return (void *)zone;
                }
                zones[i] = zone;
        }

        *cnt = c;
        return zones;
}

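/*
 * Carve the backend's contiguous region into zones, in a fixed order:
 * pmsg, console, per-CPU ftrace zones, then all remaining space as kmsg
 * dump zones of info->kmsg_size each.
 */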
static int psz_alloc_zones(struct psz_context *cxt)
{
        struct pstore_zone_info *info = cxt->pstore_zone_info;
        loff_t off = 0;
        int err;
        size_t off_size = 0;

        off_size += info->pmsg_size;
        cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
        if (IS_ERR(cxt->ppsz)) {
                err = PTR_ERR(cxt->ppsz);
                cxt->ppsz = NULL;
                goto free_out;
        }

        off_size += info->console_size;
        cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
                        info->console_size);
        if (IS_ERR(cxt->cpsz)) {
                err = PTR_ERR(cxt->cpsz);
                cxt->cpsz = NULL;
                goto free_out;
        }

        off_size += info->ftrace_size;
        cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
                        info->ftrace_size,
                        info->ftrace_size / nr_cpu_ids,
                        &cxt->ftrace_max_cnt);
        if (IS_ERR(cxt->fpszs)) {
                err = PTR_ERR(cxt->fpszs);
                cxt->fpszs = NULL;
                goto free_out;
        }

        cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
                        info->total_size - off_size,
                        info->kmsg_size, &cxt->kmsg_max_cnt);
        if (IS_ERR(cxt->kpszs)) {
                err = PTR_ERR(cxt->kpszs);
                cxt->kpszs = NULL;
                goto free_out;
        }

        return 0;
free_out:
        psz_free_all_zones(cxt);
        return err;
}

/**
 * register_pstore_zone() - register to pstore/zone
 *
 * @info: back-end driver information. See &struct pstore_zone_info.
 *
 * Only one back-end can be registered at a time.
 *
 * Return: 0 on success, others on failure.
 */
int register_pstore_zone(struct pstore_zone_info *info)
{
        int err = -EINVAL;
        struct psz_context *cxt = &pstore_zone_cxt;

        if (info->total_size < 4096) {
                pr_warn("total_size must be >= 4096\n");
                return -EINVAL;
        }

        if (!info->kmsg_size && !info->pmsg_size && !info->console_size &&
            !info->ftrace_size) {
                pr_warn("at least one record size must be non-zero\n");
                return -EINVAL;
        }

        if (!info->name || !info->name[0])
                return -EINVAL;

#define check_size(name, size) {                                        \
                if (info->name > 0 && info->name < (size)) {            \
                        pr_err(#name " must be at least %d\n", (size)); \
                        return -EINVAL;                                 \
                }                                                       \
                if (info->name & (size - 1)) {                          \
                        pr_err(#name " must be a multiple of %d\n",     \
                                        (size));                        \
                        return -EINVAL;                                 \
                }                                                       \
        }

        check_size(total_size, 4096);
        check_size(kmsg_size, SECTOR_SIZE);
        check_size(pmsg_size, SECTOR_SIZE);
        check_size(console_size, SECTOR_SIZE);
        check_size(ftrace_size, SECTOR_SIZE);

#undef check_size

        /*
         * Both @read and @write must be supplied: without @read, pstore
         * may fail to mount; without @write, record files cannot be
         * removed.
         */
        if (!info->read || !info->write) {
                pr_err("no valid general read/write interface\n");
                return -EINVAL;
        }

        mutex_lock(&cxt->pstore_zone_info_lock);
        if (cxt->pstore_zone_info) {
                pr_warn("'%s' already loaded: ignoring '%s'\n",
                                cxt->pstore_zone_info->name, info->name);
                mutex_unlock(&cxt->pstore_zone_info_lock);
                return -EBUSY;
        }
        cxt->pstore_zone_info = info;

        pr_debug("register %s with properties:\n", info->name);
        pr_debug("\ttotal size : %ld Bytes\n", info->total_size);
        pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size);
        pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size);
        pr_debug("\tconsole size : %ld Bytes\n", info->console_size);
        pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size);

        err = psz_alloc_zones(cxt);
        if (err) {
                pr_err("alloc zones failed\n");
                goto fail_out;
        }

        if (info->kmsg_size) {
                cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size -
                        sizeof(struct psz_kmsg_header);
                cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
                if (!cxt->pstore.buf) {
                        err = -ENOMEM;
                        goto fail_free;
                }
        }
        cxt->pstore.data = cxt;

        pr_info("registered %s as backend for", info->name);
        cxt->pstore.max_reason = info->max_reason;
        cxt->pstore.name = info->name;
        if (info->kmsg_size) {
                cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
                pr_cont(" kmsg(%s",
                        kmsg_dump_reason_str(cxt->pstore.max_reason));
                if (cxt->pstore_zone_info->panic_write)
                        pr_cont(",panic_write");
                pr_cont(")");
        }
        if (info->pmsg_size) {
                cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
                pr_cont(" pmsg");
        }
        if (info->console_size) {
                cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
                pr_cont(" console");
        }
        if (info->ftrace_size) {
                cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
                pr_cont(" ftrace");
        }
        pr_cont("\n");

        err = pstore_register(&cxt->pstore);
        if (err) {
                pr_err("registering with pstore failed\n");
                goto fail_free;
        }
        mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);

        return 0;

fail_free:
        kfree(cxt->pstore.buf);
        cxt->pstore.buf = NULL;
        cxt->pstore.bufsize = 0;
        psz_free_all_zones(cxt);
fail_out:
        pstore_zone_cxt.pstore_zone_info = NULL;
        mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
        return err;
}
EXPORT_SYMBOL_GPL(register_pstore_zone);

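/*
 * Illustrative registration sketch (not taken from this file): the
 * callback names and sizes below are hypothetical, and a real backend
 * (e.g. pstore/blk) supplies read/write routines for its storage device.
 *
 *	static struct pstore_zone_info my_info = {
 *		.owner      = THIS_MODULE,
 *		.name       = "my-backend",
 *		.total_size = 0x40000,		// must be >= 4096
 *		.kmsg_size  = 0x10000,		// multiple of SECTOR_SIZE
 *		.pmsg_size  = 0x1000,		// multiple of SECTOR_SIZE
 *		.max_reason = KMSG_DUMP_OOPS,
 *		.read       = my_read,		// hypothetical callback
 *		.write      = my_write,		// hypothetical callback
 *	};
 *
 *	err = register_pstore_zone(&my_info);
 */
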
/**
 * unregister_pstore_zone() - unregister from pstore/zone
 *
 * @info: back-end driver information. See &struct pstore_zone_info.
 */
void unregister_pstore_zone(struct pstore_zone_info *info)
{
        struct psz_context *cxt = &pstore_zone_cxt;

        mutex_lock(&cxt->pstore_zone_info_lock);
        if (!cxt->pstore_zone_info) {
                mutex_unlock(&cxt->pstore_zone_info_lock);
                return;
        }

        /* Stop incoming writes from pstore. */
        pstore_unregister(&cxt->pstore);

        /* Flush any pending writes. */
        psz_flush_all_dirty_zones(NULL);
        flush_delayed_work(&psz_cleaner);

        /* Clean up allocations. */
        kfree(cxt->pstore.buf);
        cxt->pstore.buf = NULL;
        cxt->pstore.bufsize = 0;
        cxt->pstore_zone_info = NULL;

        psz_free_all_zones(cxt);

        /* Clear counters and zone state. */
        cxt->oops_counter = 0;
        cxt->panic_counter = 0;
        atomic_set(&cxt->recovered, 0);
        atomic_set(&cxt->on_panic, 0);

        mutex_unlock(&cxt->pstore_zone_info_lock);
}
EXPORT_SYMBOL_GPL(unregister_pstore_zone);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_DESCRIPTION("Storage Manager for pstore/blk");