// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/compiler.h>
/*
 * These will be re-linked against their real values
 * during the second link stage.
 */
extern const unsigned long kallsyms_addresses[] __weak;
extern const int kallsyms_offsets[] __weak;
extern const u8 kallsyms_names[] __weak;

/*
 * Tell the compiler that the count isn't in the small data section if the arch
 * has one (e.g. FRV).
 */
extern const unsigned int kallsyms_num_syms
__section(".rodata") __attribute__((weak));

extern const unsigned long kallsyms_relative_base
__section(".rodata") __attribute__((weak));

extern const char kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;

extern const unsigned int kallsyms_markers[] __weak;
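
/*
 * Note on the tables above: they are generated at build time by
 * scripts/kallsyms.c (see the header comment).  kallsyms_names stores every
 * symbol name as a length byte followed by that many token codes;
 * kallsyms_token_index/kallsyms_token_table map each code back to a string
 * fragment.  kallsyms_markers records the offset into kallsyms_names of every
 * 256th symbol, so lookups by index do not have to walk the whole stream.
 */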

/*
 * Expand the compressed symbol data found at the given offset in the
 * compressed stream into the resulting uncompressed string.  If the
 * uncompressed string is too long (>= maxlen), it will be truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const char *tptr;
	const u8 *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len + 1;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}
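
/*
 * Illustrative (hypothetical) example of the layout decoded above: if an
 * entry in kallsyms_names held the bytes { 0x02, 0x05, 0x83 }, it is two
 * token codes long, and expansion concatenates the token_table strings for
 * codes 0x05 and 0x83.  The first character of the expanded text is the
 * symbol type and is skipped here (skipped_first), so only the name itself
 * lands in 'result'.
 */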

/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}

/*
 * Find the offset on the compressed stream given an index in the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++)
		name = name + (*name) + 1;

	return name - kallsyms_names;
}

static unsigned long kallsyms_sym_address(int idx)
{
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		return kallsyms_addresses[idx];

	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}
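
/*
 * Rationale (not from the original comments): with
 * CONFIG_KALLSYMS_BASE_RELATIVE the table stores 32-bit offsets
 * (kallsyms_offsets) from a single base instead of full unsigned-long
 * addresses, which roughly halves the table size on 64-bit kernels at the
 * cost of the small decode step above.
 */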

#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_LTO_CLANG_THIN)
/*
 * LLVM appends a hash to static function names when ThinLTO and CFI are
 * both enabled, which causes confusion and potentially breaks user space
 * tools, so we will strip the postfix from expanded symbol names.
 */
static inline char *cleanup_symbol_name(char *s)
{
	char *res = NULL;

	res = strrchr(s, '$');
	if (res)
		*res = '\0';

	return res;
}
#else
static inline char *cleanup_symbol_name(char *s) { return NULL; }
#endif

/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));

		if (strcmp(namebuf, name) == 0)
			return kallsyms_sym_address(i);

		if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0)
			return kallsyms_sym_address(i);
	}
	return module_kallsyms_lookup_name(name);
}
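
/*
 * Illustrative (hypothetical) caller of the lookup above -- note that this
 * is a linear scan over the whole name table, so it belongs on slow paths:
 *
 *	unsigned long addr = kallsyms_lookup_name("jiffies_64");
 *
 *	if (!addr)
 *		pr_warn("symbol not found\n");
 */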

int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
				      unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
	}
	return module_kallsyms_on_each_symbol(fn, data);
}

static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		BUG_ON(!kallsyms_addresses);
	else
		BUG_ON(!kallsyms_offsets);

	/* Do a binary search on the sorted kallsyms_addresses array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}
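
/*
 * Note: the value returned above is the index of the symbol containing addr
 * (the first of any aliases); callers such as kallsyms_lookup() feed it
 * through get_symbol_offset() and kallsyms_expand_symbol() to recover the
 * name.
 */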

/*
 * Lookup an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

/*
 * Lookup an address
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule even if
 *   it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	const char *ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;

		ret = namebuf;
		goto found;
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);

found:
	cleanup_symbol_name(namebuf);
	return ret;
}

int lookup_symbol_name(unsigned long addr, char *symname)
{
	int res;

	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		goto found;
	}
	/* See if it's in a module. */
	res = lookup_module_symbol_name(addr, symname);
	if (res)
		return res;

found:
	cleanup_symbol_name(symname);
	return 0;
}

int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	int res;

	name[0] = '\0';
	name[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, size, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       name, KSYM_NAME_LEN);
		modname[0] = '\0';
		goto found;
	}
	/* See if it's in a module. */
	res = lookup_module_symbol_attrs(addr, size, offset, modname, name);
	if (res)
		return res;

found:
	cleanup_symbol_name(name);
	return 0;
}

/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset)
{
	char *modname;
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
	if (!name)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
	if (modname)
		len += sprintf(buffer + len, " [%s]", modname);

	return len;
}
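
/*
 * Illustrative (hypothetical) results of the formatting above: with
 * add_offset set, the buffer ends up looking like
 * "some_function+0x10/0x200 [some_module]"; when the address cannot be
 * resolved at all, only the raw "0x..." value is stored.
 */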

/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was found,
 * just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, just saves
 * its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtraces and does the same thing as
 * sprint_symbol() but with a modified/decreased @address. If there is a
 * tail-call to a function marked "noreturn", gcc optimizes out the code after
 * the call, so the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1);
}

/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	loff_t pos_bpf_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};
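
/*
 * Iteration order (as implemented by update_iter()/update_iter_mod() below):
 * positions 0..kallsyms_num_syms-1 walk the core kernel symbols, and higher
 * positions fall through arch-specific, module, ftrace trampoline, BPF and
 * kprobe symbols in turn, with the pos_*_end fields marking where each of
 * those ranges stopped.
 */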

int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace
 * purposes. In that case "__builtin__ftrace" is used as a module name, even
 * though "__builtin__ftrace" is not a module.
 */
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	int ret;

	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			      &iter->value, &iter->type,
			      iter->name);
	if (ret < 0) {
		iter->pos_bpf_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * This uses "__builtin__kprobes" as a module name for symbols for pages
 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
 * module.
 */
static int get_ksymbol_kprobe(struct kallsym_iter *iter)
{
	strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
	iter->exported = 0;
	return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
				  &iter->value, &iter->type,
				  iter->name) < 0 ? 0 : 1;
}

/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
		iter->pos_bpf_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
	    get_ksymbol_bpf(iter))
		return 1;

	return get_ksymbol_kprobe(iter);
}

/* Returns false if pos at or past end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not on the desired position, reset to new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}
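
/*
 * Illustrative /proc/kallsyms lines produced by s_show() (the addresses here
 * are made up): "ffffffffc0123000 t some_fn\t[some_module]" for a module
 * symbol and "ffffffff81000000 T _text" for a core one.  When show_value is
 * false, value is NULL and "%px" prints an all-zero address instead.
 */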

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

static inline int kallsyms_for_perf(void)
{
#ifdef CONFIG_PERF_EVENTS
	extern int sysctl_perf_event_paranoid;
	if (sysctl_perf_event_paranoid <= 1)
		return 1;
#endif
	return 0;
}

/*
 * We show kallsyms information even to normal users if we've enabled
 * kernel profiling and are explicitly not paranoid (so kptr_restrict
 * is clear, and sysctl_perf_event_paranoid isn't set).
 *
 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
 * block even that).
 */
bool kallsyms_show_value(const struct cred *cred)
{
	switch (kptr_restrict) {
	case 0:
		if (kallsyms_for_perf())
			return true;
		fallthrough;
	case 1:
		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
				     CAP_OPT_NOAUDIT) == 0)
			return true;
		fallthrough;
	default:
		return false;
	}
}
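
/*
 * Summary of the fallthrough logic above: kptr_restrict == 0 shows values if
 * perf profiling is permissive or the opener has CAP_SYSLOG, kptr_restrict
 * == 1 requires CAP_SYSLOG, and any higher setting hides the values entirely.
 */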

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep the iterator in m->private, since the normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	/*
	 * Instead of checking this on every s_show() call, cache
	 * the result here at open time.
	 */
	iter->show_value = kallsyms_show_value(file->f_cred);
	return 0;
}

#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;

	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct proc_ops kallsyms_proc_ops = {
	.proc_open	= kallsyms_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
	return 0;
}
device_initcall(kallsyms_init);