/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 *	MOV	X9, LR
 *	BL	<entry>
 *
 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
 * clobber.
 *
 * We save the callsite's context into a pt_regs before invoking any ftrace
 * callbacks. So that we can get a sensible backtrace, we create a stack record
 * for the callsite and the ftrace entry assembly. This is not sufficient for
 * reliable stacktrace: until we create the callsite stack record, its caller
 * is missing from the LR and existing chain of frame records.
 */
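/*
 * As a rough sketch (not verbatim compiler output; foo is a hypothetical
 * instrumented function), an enabled callsite therefore begins like:
 *
 *	foo:
 *		mov	x9, x30			// patched NOP: preserve the LR
 *		bl	ftrace_regs_caller	// patched NOP: enter ftrace
 *		stp	x29, x30, [sp, #-16]!	// regular prologue resumes here
 *		...
 */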
	.macro ftrace_regs_entry, allregs=0
	/* Make room for pt_regs, plus a callee frame */
	sub	sp, sp, #(S_FRAME_SIZE + 16)

	/* Save function arguments (and x9 for simplicity) */
	stp	x0, x1, [sp, #S_X0]
	stp	x2, x3, [sp, #S_X2]
	stp	x4, x5, [sp, #S_X4]
	stp	x6, x7, [sp, #S_X6]
	stp	x8, x9, [sp, #S_X8]

	/* Optionally save the callee-saved registers, always save the FP */
	.if \allregs == 1
	stp	x10, x11, [sp, #S_X10]
	stp	x12, x13, [sp, #S_X12]
	stp	x14, x15, [sp, #S_X14]
	stp	x16, x17, [sp, #S_X16]
	stp	x18, x19, [sp, #S_X18]
	stp	x20, x21, [sp, #S_X20]
	stp	x22, x23, [sp, #S_X22]
	stp	x24, x25, [sp, #S_X24]
	stp	x26, x27, [sp, #S_X26]
	stp	x28, x29, [sp, #S_X28]
	.else
	str	x29, [sp, #S_FP]
	.endif

	/* Save the callsite's SP and LR */
	add	x10, sp, #(S_FRAME_SIZE + 16)
	stp	x9, x10, [sp, #S_LR]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #S_PC]

	/* Create a frame record for the callsite above pt_regs */
	stp	x29, x9, [sp, #S_FRAME_SIZE]
	add	x29, sp, #S_FRAME_SIZE

	/* Create our frame record within pt_regs. */
	stp	x29, x30, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME
	.endm
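
/*
 * A sketch of the stack after ftrace_regs_entry runs (offsets are the
 * asm-offsets constants; the exact values come from struct pt_regs):
 *
 *	sp + S_FRAME_SIZE + 16:	callsite's original SP
 *	sp + S_FRAME_SIZE:	{ callsite's x29, callsite's LR (x9) }
 *	sp + S_STACKFRAME:	{ pointer to the record above, our x30 }
 *	sp + S_X0 ...:		the saved pt_regs proper
 *
 * x29 now points at the S_STACKFRAME record, so the two records chain up
 * through the callsite into its caller.
 */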

SYM_CODE_START(ftrace_regs_caller)
#ifdef BTI_C
	BTI_C
#endif
	ftrace_regs_entry	1
	b	ftrace_common
SYM_CODE_END(ftrace_regs_caller)

SYM_CODE_START(ftrace_caller)
#ifdef BTI_C
	BTI_C
#endif
	ftrace_regs_entry	0
	b	ftrace_common
SYM_CODE_END(ftrace_caller)
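
/*
 * ftrace_common then invokes the tracer with (roughly) the generic
 * ftrace_func_t C signature, sp doubling as the pt_regs pointer:
 *
 *	void tracer(unsigned long ip, unsigned long parent_ip,
 *		    struct ftrace_ops *op, struct pt_regs *regs);
 */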
SYM_CODE_START(ftrace_common)
	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	ldr_l	x2, function_trace_op		// op
	mov	x3, sp				// regs

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

/*
 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
 * to restore x0-x8, x29, and x30.
 */
ftrace_common_return:
	/* Restore function arguments */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x8, [sp, #S_X8]

	/* Restore the callsite's FP, LR, PC */
	ldr	x29, [sp, #S_FP]
	ldr	x30, [sp, #S_LR]
	ldr	x9, [sp, #S_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #S_FRAME_SIZE + 16

	ret	x9
SYM_CODE_END(ftrace_common)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
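/*
 * Set up the function_graph return hook for the callsite whose state we
 * just saved. prepare_ftrace_return() is a C helper whose prototype is
 * roughly (see arch/arm64/kernel/ftrace.c):
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer);
 *
 * ... it may rewrite *parent (the saved LR slot) so that the callsite
 * returns into return_to_handler() instead of its real caller.
 */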
SYM_CODE_START(ftrace_graph_caller)
	ldr	x0, [sp, #S_PC]
	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
	ldr	x2, [sp, #S_FRAME_SIZE]		// parent fp (callsite's FP)
	bl	prepare_ftrace_return
	b	ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
#endif

#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

/*
 * GCC with -pg puts the following code at the beginning of each function:
 *
 *	mov	x0, x30
 *	bl	_mcount
 *	[function's body ...]
 *
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
 *
 * Note that the x0 argument is not used here, because we can get the lr
 * (x30) of the instrumented function at any time by unwinding the call
 * stack, as long as the kernel is compiled without -fomit-frame-pointer
 * (i.e. with CONFIG_FRAME_POINTER, which is always enabled on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */
	.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
	.endm

	.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
	.endm

	.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
	.endm

	/* for instrumented function's parent */
	.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg]		// -> parent's fp
	.endm

	/* for instrumented function */
	.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
	.endm

	.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]		// _mcount()'s lr
	mcount_adjust_addr	\reg, \reg
	.endm

	.macro mcount_get_lr reg
	ldr	\reg, [x29]		// instrumented function's fp
	ldr	\reg, [\reg, #8]	// -> instrumented function's lr
	.endm

	.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]		// instrumented function's fp
	add	\reg, \reg, #8		// -> address of its saved lr
	.endm
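
/*
 * All of the above walk the frame records rooted at x29: after mcount_enter,
 * [x29] is the instrumented function's fp and [x29, #8] is _mcount()'s lr,
 * exactly as in the stack layout diagram above.
 */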

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(_mcount)
	mcount_enter

	ldr_l	x2, ftrace_trace_function
	adr	x0, ftrace_stub
	cmp	x0, x2			// if (ftrace_trace_function
	b.eq	skip_ftrace_call	//     != ftrace_stub) {

	mcount_get_pc	x0		//       function's pc
	mcount_get_lr	x1		//       function's lr (= parent's pc)
	blr	x2			//   (*ftrace_trace_function)(pc, lr);

skip_ftrace_call:			// }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr_l	x2, ftrace_graph_return
	cmp	x0, x2			// if ((ftrace_graph_return
	b.ne	ftrace_graph_caller	//      != ftrace_stub)

	ldr_l	x2, ftrace_graph_entry	//   || (ftrace_graph_entry
	adr_l	x0, ftrace_graph_entry_stub //  != ftrace_graph_entry_stub))
	cmp	x0, x2
	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
	mcount_exit
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with the -pg option, but all branch
 * instructions to _mcount() are replaced with NOPs at kernel startup, and
 * are later patched, on a per-function basis, from NOP to a branch to
 * ftrace_caller() when tracing is enabled (and back to NOP when disabled).
 */
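/*
 * A sketch of one callsite's two states (the instruction slot that
 * originally held "bl _mcount"):
 *
 *	nop			// tracing disabled (the boot-time default)
 *	bl	ftrace_caller	// patched in when tracing is enabled
 */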
SYM_FUNC_START(_mcount)
	ret
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		// function's pc
	mcount_get_lr	x1		// function's lr

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// with "b ftrace_graph_caller"
#endif

	mcount_exit
SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function, together with prepare_ftrace_return(), fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later, on its exit.
 */
SYM_FUNC_START(ftrace_graph_caller)
	mcount_get_pc		x0	// function's pc
	mcount_get_lr_addr	x1	// pointer to function's saved lr
	mcount_get_parent_fp	x2	// parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

SYM_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
SYM_CODE_START(return_to_handler)
	/* save return value regs */
	sub	sp, sp, #64
	stp	x0, x1, [sp]
	stp	x2, x3, [sp, #16]
	stp	x4, x5, [sp, #32]
	stp	x6, x7, [sp, #48]

	mov	x0, x29			// parent's fp
	bl	ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
	mov	x30, x0			// restore the original return address

	/* restore return value regs */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #16]
	ldp	x4, x5, [sp, #32]
	ldp	x6, x7, [sp, #48]
	add	sp, sp, #64

	ret
SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */