fgraph.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static bool kill_ftrace_graph;
int ftrace_graph_active;

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}
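
/*
 * Note on the entry pushed above: it is consumed again on function exit.
 * The arch's return trampoline calls ftrace_return_to_handler(), which pops
 * the entry via ftrace_pop_return_trace() and resumes execution at the
 * saved 'ret'.
 */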

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	/*
	 * Skip graph tracing if the return location is served by a direct
	 * trampoline, since the call sequence and return addresses become
	 * unpredictable there.
	 * Ex: a BPF trampoline may call the original function and may skip
	 * frames depending on the type of BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
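
/*
 * Illustrative sketch (arch-specific, not part of this file): an
 * architecture's entry hook (e.g. a prepare_ftrace_return()-style function)
 * calls function_graph_enter() and, only on success, redirects the call
 * site's return address to the return_to_handler trampoline:
 *
 *	void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long return_hooker = (unsigned long)&return_to_handler;
 *
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *
 *		if (!function_graph_enter(*parent, ip, frame_pointer, parent))
 *			*parent = return_hooker;
 *	}
 */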

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * ftrace_graph_return() may still access the current ret_stack
	 * structure; we need to make sure the update of curr_ret_stack
	 * happens after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
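
/*
 * Illustrative sketch (the real trampoline is arch-specific assembly, not
 * part of this file): return_to_handler saves the traced function's return
 * values, calls ftrace_return_to_handler() with the current frame pointer,
 * then jumps to the address it returns. Roughly, in pseudo-C:
 *
 *	void return_to_handler(void)
 *	{
 *		unsigned long orig_ret;
 *
 *		save_return_regs();
 *		orig_ret = ftrace_return_to_handler(current_frame_pointer());
 *		restore_return_regs();
 *		jump_to(orig_ret);
 *	}
 *
 * save_return_regs(), restore_return_regs(), current_frame_pointer() and
 * jump_to() are placeholders for the arch-specific pieces.
 */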

/**
 * ftrace_graph_get_ret_stack - return an entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry on the shadow stack of the @task at the
 * call graph depth @idx, starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for that depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
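
/*
 * Illustrative usage sketch (hedged, not taken from this file): callers can
 * walk the shadow stack from the most recent entry outward until NULL is
 * returned, e.g.:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i++)))
 *		pr_info("traced call: %ps\n", (void *)ret_stack->func);
 */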

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
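
/*
 * Illustrative usage sketch (hedged, based on the description above rather
 * than any particular unwinder): the unwinder keeps a single state variable
 * across the whole walk and filters every candidate return address through
 * ftrace_graph_ret_addr():
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	for_each_frame(frame) {		// placeholder for the arch's stack walk
 *		addr = ftrace_graph_ret_addr(task, &graph_idx,
 *					     frame->ret_addr, frame->ret_addr_p);
 *		// 'addr' is now the original return address, even if the
 *		// on-stack value had been replaced with return_to_handler.
 *	}
 *
 * for_each_frame(), frame->ret_addr and frame->ret_addr_p are hypothetical
 * names standing in for the arch's own frame iteration.
 */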

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer requires testing the
 * function against the global ops, and not just trace any function
 * that any ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
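
/*
 * Illustrative usage sketch (hedged, not from this file): a client supplies
 * an entry and a return callback through struct fgraph_ops; only one tracer
 * may be registered at a time, as enforced above.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// non-zero: trace this call
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the time in the call
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 *
 * 'my_entry', 'my_return' and 'my_gops' are hypothetical names; the field
 * names match the gops->entryfunc/gops->retfunc accesses above.
 */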

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
out:
	mutex_unlock(&ftrace_lock);
}