trace_functions.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};
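/*
 * Allocate the ftrace_ops that an instance trace_array uses to hook into
 * the function tracer.  It is freed again in ftrace_free_ftrace_ops()
 * when the instance goes away.
 */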
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}
void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}
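/*
 * Tracer ->init callback: choose between the plain and the stack-tracing
 * per-function callback and start recording.  Only the global trace_array
 * may use the stack-tracing variant here.
 */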
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
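/*
 * Invoked for every traced function while the plain function tracer is
 * running.  The TRACE_FTRACE_START recursion bit blocks re-entry, and
 * preemption is disabled (notrace) around the buffer write so the
 * per-CPU data stays stable.
 */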
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);
 out:
	preempt_enable_notrace();
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
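/*
 * function_enabled is kept clear while the ftrace_ops is being
 * registered, so the callback records nothing until registration has
 * completed; it is set only once register_ftrace_function() returns.
 */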
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;
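/*
 * Handler for the func_stack_trace tracer option.  While the function
 * tracer is running, the ops is unregistered, its ->func swapped between
 * the stack and non-stack variants, and then registered again so the
 * change takes effect; when this tracer is not current there is nothing
 * to do here.
 */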
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
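/*
 * Everything below implements the function probe commands that can be
 * attached to individual functions through the tracefs set_ftrace_filter
 * file, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'schedule:stacktrace:5' > set_ftrace_filter
 *
 * An optional ":<count>" limits how many times the action fires.
 */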
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
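/*
 * Thin wrappers binding the shared counted helper above to the "on" and
 * "off" directions used by the traceon/traceoff probe commands.
 */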
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;
	} while (new_count != old_count);
}
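/*
 * Decrement the per-ip counter if one was set up.  Returns 1 when the
 * probe action should still fire (count remaining, or no counter at
 * all), 0 once the counter has been exhausted.
 */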
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
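/*
 * ->init/->free callbacks shared by the counted probes: a per-probe
 * ftrace_func_mapper holds the remaining count for each traced ip, and
 * the whole mapper is freed when ip is zero (probe removal).
 */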
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func	= ftrace_traceon_count,
	.print	= ftrace_traceon_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func	= ftrace_traceoff_count,
	.print	= ftrace_traceoff_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func	= ftrace_stacktrace_count,
	.print	= ftrace_stacktrace_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func	= ftrace_dump_probe,
	.print	= ftrace_dump_print,
	.init	= ftrace_count_init,
	.free	= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func	= ftrace_cpudump_probe,
	.print	= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func	= ftrace_stacktrace,
	.print	= ftrace_stacktrace_print,
};
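/*
 * Common parsing for all probe commands.  A leading '!' in the function
 * glob unregisters an existing probe; an optional ":<count>" after the
 * command name is parsed and handed to the probe as its init data.
 */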
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name	= "stacktrace",
	.func	= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name	= "dump",
	.func	= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name	= "cpudump",
	.func	= ftrace_cpudump_callback,
};
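/*
 * Register the probe commands, unwinding in reverse order if any
 * registration fails so that either all commands become available or
 * none do.
 */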
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}