ram.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>

#include "internal.h"

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
		"memory type: 0=write-combined (default), 1=unbuffered, 2=cached");

static int ramoops_max_reason = -1;
module_param_named(max_reason, ramoops_max_reason, int, 0400);
MODULE_PARM_DESC(max_reason,
		 "maximum reason for kmsg dump (default 2: Oops and Panic) ");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ramoops_ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

static int ramoops_dump_oops = -1;
module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
MODULE_PARM_DESC(dump_oops,
		 "(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");

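/*
 * Illustrative example (not part of the driver) of configuring ramoops
 * purely through the module parameters above, e.g. on the kernel command
 * line. The address and sizes are placeholders and must match a RAM
 * region actually reserved on the platform:
 *
 *   ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000
 *   ramoops.record_size=0x4000 ramoops.console_size=0x4000 ramoops.ecc=1
 */
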
struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* _read_cnt members need to be cleared on ramoops_pstore_open() */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

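/*
 * Return the zone at @id if it has old data to read, filling in
 * @record->type and @record->id; return NULL otherwise.
 */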
static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so they are initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;

				size = pstore_ftrace_combine_log(
						&tmp_prz->old_log,
						&tmp_prz->old_log_size,
						prz_next->old_log,
						prz_next->old_log_size);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
		RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
		(time64_t)record->time.tv_sec,
		record->time.tv_nsec / 1000,
		record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}

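/*
 * For illustration, a header produced by the format above looks like
 * "====1609459200.123456-D\n" (the timestamp value here is just an
 * example); ramoops_read_kmsg_hdr() parses exactly this layout back out.
 */
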
static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone based on whether we're using per-cpu
		 * buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * We could filter on record->reason here if we wanted to (which
	 * would duplicate what happened before the "max_reason" setting
	 * was added), but that would defeat the purpose of a system
	 * changing printk.always_kmsg_dump, so instead log everything that
	 * the kmsg dumper sends us, since it should be doing the filtering
	 * based on the combination of printk.always_kmsg_dump and our
	 * requested "max_reason".
	 */

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values, in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner		= THIS_MODULE,
		.name		= "ramoops",
		.open		= ramoops_pstore_open,
		.read		= ramoops_pstore_read,
		.write		= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase		= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->max_ftrace_cnt = 0;
	}
}

static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(prz_ar[i]);
			}
			kfree(prz_ar);
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

/* Read a u32 from a dt property and make sure it's safe for an int. */
static int ramoops_parse_dt_u32(struct platform_device *pdev,
				const char *propname,
				u32 default_value, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret == -EINVAL) {
		/* field is missing, use default value. */
		val32 = default_value;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	/* Sanity check our results. */
	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}

static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct reserved_mem *rmem;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		rmem = of_reserved_mem_lookup(of_node);
		if (rmem) {
			pdata->mem_size = rmem->size;
			pdata->mem_address = rmem->base;
		} else {
			dev_err(&pdev->dev,
				"failed to locate DT /reserved-memory resource\n");
			return -EINVAL;
		}
	} else {
		pdata->mem_size = resource_size(res);
		pdata->mem_address = res->start;
	}

	/*
	 * Setting "unbuffered" is deprecated and will be ignored if
	 * "mem-type" is also specified.
	 */
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	/*
	 * Setting "no-dump-oops" is deprecated and will be ignored if
	 * "max-reason" is also specified.
	 */
	if (of_property_read_bool(of_node, "no-dump-oops"))
		pdata->max_reason = KMSG_DUMP_PANIC;
	else
		pdata->max_reason = KMSG_DUMP_OOPS;

#define parse_u32(name, field, default_value) {				\
		ret = ramoops_parse_dt_u32(pdev, name, default_value,	\
					    &value);			\
		if (ret < 0)						\
			return ret;					\
		field = value;						\
	}

	parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
	parse_u32("record-size", pdata->record_size, 0);
	parse_u32("console-size", pdata->console_size, 0);
	parse_u32("ftrace-size", pdata->ftrace_size, 0);
	parse_u32("pmsg-size", pdata->pmsg_size, 0);
	parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
	parse_u32("flags", pdata->flags, 0);
	parse_u32("max-reason", pdata->max_reason, pdata->max_reason);

#undef parse_u32

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node which isn't
	 * according to the current upstream bindings (though it was
	 * arguably acceptable under a prior version of the bindings).
	 * Let's make those old Chromebooks work by detecting that
	 * we're not a child of "reserved-memory" and mimicking the
	 * expected behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}

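/*
 * Sketch of a device tree node this parser would consume; the address and
 * sizes are illustrative only and follow the documented "ramoops"
 * reserved-memory binding:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		ramoops@8f000000 {
 *			compatible = "ramoops";
 *			reg = <0x8f000000 0x100000>;
 *			record-size = <0x4000>;
 *			console-size = <0x4000>;
 *		};
 *	};
 */
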
static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be "
			"non-zero\n");
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_out;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init_cprz;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init_fprz;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
				cxt->pmsg_size, 0);
	if (err)
		goto fail_init_mprz;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		cxt->pstore.max_reason = pdata->max_reason;
	}
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	ramoops_max_reason = pdata->max_reason;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
	persistent_ram_free(cxt->mprz);
fail_init_mprz:
fail_init_fprz:
	persistent_ram_free(cxt->cprz);
fail_init_cprz:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

static int ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	persistent_ram_free(cxt->mprz);
	persistent_ram_free(cxt->cprz);
	ramoops_free_przs(cxt);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove		= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	/* If "max_reason" is set, its value has priority over "dump_oops". */
	if (ramoops_max_reason >= 0)
		pdata.max_reason = ramoops_max_reason;
	/* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
	else if (ramoops_dump_oops != -1)
		pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
						     : KMSG_DUMP_PANIC;
	/* And if neither are explicitly set, use the default. */
	else
		pdata.max_reason = KMSG_DUMP_OOPS;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
	 * (using 1 byte for ECC isn't much of use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");