/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
        defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
                          unsigned long *off, char **modname, char *sym)
{
        return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                           char *type, char *name,
                           char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
                                         char *type, char *name,
                                         char *module_name, int *exported)
{
        return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 * STUB   - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocated a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already registered
 *           for any of the functions that this ops will be registered for, then
 *           this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *           (internal ftrace only, should not be used by others)
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = BIT(0),
        FTRACE_OPS_FL_DYNAMIC                   = BIT(1),
        FTRACE_OPS_FL_SAVE_REGS                 = BIT(2),
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = BIT(3),
        FTRACE_OPS_FL_RECURSION_SAFE            = BIT(4),
        FTRACE_OPS_FL_STUB                      = BIT(5),
        FTRACE_OPS_FL_INITIALIZED               = BIT(6),
        FTRACE_OPS_FL_DELETED                   = BIT(7),
        FTRACE_OPS_FL_ADDING                    = BIT(8),
        FTRACE_OPS_FL_REMOVING                  = BIT(9),
        FTRACE_OPS_FL_MODIFYING                 = BIT(10),
        FTRACE_OPS_FL_ALLOC_TRAMP               = BIT(11),
        FTRACE_OPS_FL_IPMODIFY                  = BIT(12),
        FTRACE_OPS_FL_PID                       = BIT(13),
        FTRACE_OPS_FL_RCU                       = BIT(14),
        FTRACE_OPS_FL_TRACE_ARRAY               = BIT(15),
        FTRACE_OPS_FL_PERMANENT                 = BIT(16),
        FTRACE_OPS_FL_DIRECT                    = BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash __rcu        *notrace_hash;
        struct ftrace_hash __rcu        *filter_hash;
        struct mutex                    regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule a task on all CPUs to make
 * sure that there are no more users. Depending on the load of the
 * system, that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user
 * of the ftrace_ops must perform a schedule_on_each_cpu() before freeing
 * it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops __rcu         *next;
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash         old_hash;
        unsigned long                   trampoline;
        unsigned long                   trampoline_size;
        struct list_head                list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_check(list);           \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_check((op)->next)) &&   \
               unlikely((op) != &ftrace_list_end))
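
/*
 * Illustrative usage sketch (not from the original header): the two
 * macros above are meant to be paired like this:
 *
 *      do_for_each_ftrace_op(op, ftrace_ops_list) {
 *              op->func(ip, parent_ip, op, regs);
 *      } while_for_each_ftrace_op(op);
 */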

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
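
/*
 * Illustrative registration sketch (not part of the original header);
 * "my_callback" and "my_ops" are placeholder names:
 *
 *      static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                              struct ftrace_ops *op, struct pt_regs *regs)
 *      {
 *              ...
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_callback,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */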

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
        unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                struct dyn_ftrace *rec,
                                unsigned long old_addr,
                                unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
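
/*
 * Illustrative sketch (not from the original header): attaching a custom
 * trampoline "my_tramp" to the call site of a function "my_func". Both
 * names are placeholders:
 *
 *      err = register_ftrace_direct((unsigned long)my_func,
 *                                   (unsigned long)my_tramp);
 *      ...
 *      unregister_ftrace_direct((unsigned long)my_func,
 *                               (unsigned long)my_tramp);
 */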
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
        return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
                                       unsigned long old_addr, unsigned long new_addr)
{
        return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
        return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
                                              struct dyn_ftrace *rec,
                                              unsigned long old_addr,
                                              unsigned long new_addr)
{
        return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
                                                 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
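
/*
 * Illustrative x86-style sketch of the hook described above, based on the
 * comment; the real implementation lives in the arch code:
 *
 *      static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
 *                                                       unsigned long addr)
 *      {
 *              regs->orig_ax = addr;
 *      }
 */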

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
                       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
        /* Preemption or interrupts must be disabled */
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
        this_cpu_dec(disable_stack_tracer);
}
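
/*
 * Illustrative pairing (not from the original header), matching the
 * constraints documented above:
 *
 *      preempt_disable_notrace();
 *      stack_tracer_disable();
 *      ...
 *      stack_tracer_enable();
 *      preempt_enable_notrace();
 */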
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
        FTRACE_FL_DIRECT        = (1UL << 24),
        FTRACE_FL_DIRECT_EN     = (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT    23
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
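
/*
 * Illustrative filtering sketch (not from the original header): limit a
 * not-yet-registered ops to a single function by name. "my_ops" is a
 * placeholder:
 *
 *      ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *      register_ftrace_function(&my_ops);
 */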

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
        FTRACE_MAY_SLEEP                = (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 * IGNORE      - The function is already what we want it to be
 * MAKE_CALL   - Start tracing the function
 * MODIFY_CALL - Convert the call site from one call address to another
 *               (e.g. to start or stop saving regs)
 * MAKE_NOP    - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_PROBES   = (1 << 3),
        FTRACE_ITER_PROBE       = (1 << 4),
        FTRACE_ITER_MOD         = (1 << 5),
        FTRACE_ITER_ENABLED     = (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
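
/*
 * Illustrative usage sketch (not from the original header): walking every
 * known call-site record, as arch code does when patching:
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ...
 *      }
 */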

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif
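
/*
 * Illustrative arch-side sketch (not from the original header) of the
 * read/compare/write contract documented above; "expected_insn",
 * "nop_insn" and "arch_patch_insn" are placeholders, real
 * implementations are arch specific:
 *
 *      unsigned char old[MCOUNT_INSN_SIZE];
 *
 *      if (probe_kernel_read(old, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *              return -EFAULT;
 *      if (memcmp(old, expected_insn, MCOUNT_INSN_SIZE))
 *              return -EINVAL;
 *      if (arch_patch_insn(rec->ip, nop_insn))
 *              return -EPERM;
 *      return 0;
 */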

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);

#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use macros instead of inlines.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                                           size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;

        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
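
/*
 * Illustrative pairing (not from the original header):
 *
 *      int saved = __ftrace_enabled_save();
 *      ...
 *      __ftrace_enabled_restore(saved);
 */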

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION "__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        unsigned long long calltime;
        unsigned long long rettime;
        int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
        trace_func_graph_ent_t          entryfunc;
        trace_func_graph_ret_t          retfunc;
};
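
/*
 * Illustrative registration sketch (not from the original header);
 * "my_entry" and "my_return" are placeholder callbacks, and a nonzero
 * return from the entry handler means "trace this function":
 *
 *      static int my_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_return(struct ftrace_graph_ret *trace)
 *      {
 *              ...
 *      }
 *
 *      static struct fgraph_ops my_gops = {
 *              .entryfunc = my_entry,
 *              .retfunc = my_return,
 *      };
 *
 *      register_ftrace_graph(&my_gops);
 */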

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
                     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph             notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
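
/*
 * Illustrative pairing (not from the original header):
 *
 *      pause_graph_tracing();
 *      ...
 *      unpause_graph_tracing();
 */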

#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */