/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008, 2009 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#define MCOUNT_ENTER()			\
	mov.l	r4, @-r15;		\
	mov.l	r5, @-r15;		\
	mov.l	r6, @-r15;		\
	mov.l	r7, @-r15;		\
	sts.l	pr, @-r15;		\
					\
	mov.l	@(20,r15),r4;		\
	sts	pr, r5

#define MCOUNT_LEAVE()			\
	lds.l	@r15+, pr;		\
	mov.l	@r15+, r7;		\
	mov.l	@r15+, r6;		\
	mov.l	@r15+, r5;		\
	rts;				\
	 mov.l	@r15+, r4
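
/*
 * After MCOUNT_ENTER() the frame looks like this (4 bytes per slot,
 * r15 at the lowest address):
 *
 *	r15 + 20:	word on top of the caller's stack
 *	r15 + 16:	saved r4
 *	r15 + 12:	saved r5
 *	r15 +  8:	saved r6
 *	r15 +  4:	saved r7
 *	r15 +  0:	saved pr
 *
 * MCOUNT_ENTER() loads the word at r15 + 20 into r4 and the current
 * pr into r5, i.e. the first two argument registers of the SH C ABI,
 * ready for the tracer called via "jsr @r6" below.
 */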

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after __bss_stop) and anywhere in init_thread_union (init_stack).
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > __bss_stop then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* sp is below __bss_stop but past the end of	\
	 * the init stack (init_stack + THREAD_SIZE):	\
	 * not OK either. */				\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */
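
/*
 * For reference, STACK_CHECK() is roughly the following C, with "sp"
 * standing for r15 (an illustrative sketch only, not part of the
 * build):
 *
 *	if ((sp & (THREAD_SIZE - 1)) <= TI_SIZE + STACK_WARN)
 *		stack_panic();			// overflowed
 *	else if (sp > __bss_stop)
 *		;				// OK: above the bss
 *	else if (sp < init_thread_union ||
 *		 sp >= init_thread_union + THREAD_SIZE)
 *		stack_panic();			// not a valid stack
 */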

	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
	STACK_CHECK()

#ifndef CONFIG_FUNCTION_TRACER
	rts
	 nop
#else
	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
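/*
 * Dynamic ftrace: by default call ftrace_stub, which returns
 * immediately; the interesting patching happens at ftrace_caller /
 * ftrace_call further down.  The mcount_call label exports this load
 * so the arch ftrace code can find it.  (A simplified summary; the
 * patching mechanics live in arch/sh/kernel/ftrace.c.)
 */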
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
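	/*
	 * Static ftrace: call through the ftrace_trace_function
	 * pointer unless it still points at ftrace_stub, in which
	 * case no tracer is registered.  Roughly:
	 *
	 *	if (ftrace_trace_function == ftrace_stub)
	 *		goto skip_trace;
	 *	ftrace_trace_function(...);
	 */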
	mov.l	.Lftrace_trace_function, r6
	mov.l	@r6, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
#endif

	jsr	@r6
	 nop
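
/*
 * Function graph tracing: if either the return hook
 * (ftrace_graph_return) or the entry hook (ftrace_graph_entry) has
 * been registered, i.e. no longer points at its stub, divert into
 * ftrace_graph_caller; otherwise fall through to skip_trace.
 */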
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	mov.l	.Lftrace_graph_return, r6
	mov.l	@r6, r6
	mov.l	.Lftrace_stub, r7
	cmp/eq	r6, r7
	bt	1f

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

1:
	mov.l	.Lftrace_graph_entry, r6
	mov.l	@r6, r6
	mov.l	.Lftrace_graph_entry_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace

	mov.l	.Lftrace_graph_caller, r0
	jmp	@r0
	 nop

	.align 2
.Lftrace_graph_return:
	.long	ftrace_graph_return
.Lftrace_graph_entry:
	.long	ftrace_graph_entry
.Lftrace_graph_entry_stub:
	.long	ftrace_graph_entry_stub
.Lftrace_graph_caller:
	.long	ftrace_graph_caller
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	.globl	skip_trace
skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * NOTE: Do not move either ftrace_graph_call or ftrace_caller
 * as this will affect the calculation of GRAPH_INSN_OFFSET.
 */
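/*
 * GRAPH_INSN_OFFSET (from <asm/ftrace.h>) is a fixed byte offset
 * that the arch ftrace code uses to locate and rewrite the
 * .Lskip_trace literal when graph tracing is switched on or off,
 * hence the placement constraint above.  (An educated summary; the
 * exact arithmetic lives in arch/sh/kernel/ftrace.c.)
 */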
	.globl	ftrace_graph_call
ftrace_graph_call:
	mov.l	.Lskip_trace, r0
	jmp	@r0
	 nop

	.align 2
.Lskip_trace:
	.long	skip_trace
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
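
/*
 * With dynamic ftrace, instrumented call sites are patched to call
 * ftrace_caller rather than mcount once tracing is enabled, and the
 * literal loaded at the ftrace_call site below is rewritten from
 * ftrace_stub to the address of the active tracer.  (Simplified;
 * see arch/sh/kernel/ftrace.c.)
 */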
	.globl	ftrace_caller
ftrace_caller:
	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	bra	ftrace_graph_call
	 nop
#else
	MCOUNT_LEAVE()
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */

	.align 2

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl	ftrace_graph_caller
ftrace_graph_caller:
	mov.l	2f, r1
	jmp	@r1
	 nop
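	/*
	 * The jump above goes straight to skip_trace, which appears
	 * to be the "disabled" state; the body at 1: is presumably
	 * reached only after the runtime patches the literal at 2:.
	 * (An inference from the patching notes in this file.)
	 */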
1:
	/*
	 * MCOUNT_ENTER() pushed 5 registers onto the stack, so
	 * the stack address containing our return address is
	 * r15 + 20.
	 */
	mov	#20, r0
	add	r15, r0
	mov	r0, r4

	mov.l	.Lprepare_ftrace_return, r0
	jsr	@r0
	 nop

	MCOUNT_LEAVE()

	.align 2
2:	.long	skip_trace
.Lprepare_ftrace_return:
	.long	prepare_ftrace_return

	.globl	return_to_handler
return_to_handler:
	/*
	 * Save the return values.
	 */
	mov.l	r0, @-r15
	mov.l	r1, @-r15
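
	/*
	 * ftrace_return_to_handler() takes the caller's frame pointer
	 * as its one argument; this code doesn't track one, so pass 0.
	 * (Inferred from the generic ftrace graph API, not from this
	 * file.)
	 */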
	mov	#0, r4
	mov.l	.Lftrace_return_to_handler, r0
	jsr	@r0
	 nop

	/*
	 * The return value from ftrace_return_to_handler() is the
	 * real address that we should return to.
	 */
	lds	r0, pr
	mov.l	@r15+, r1
	rts
	 mov.l	@r15+, r0

	.align 2
.Lftrace_return_to_handler:
	.long	ftrace_return_to_handler
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_DEBUG
	.globl	stack_panic
stack_panic:
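	/*
	 * Equivalent to: dump_stack(); panic("Stack error");
	 * Note the second jsr carries the load of panic()'s format
	 * string argument (r4) in its delay slot, so it executes
	 * before the call lands.
	 */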
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4

	rts
	 nop

	.align 2
.L_init_thread_union:
	.long	init_thread_union
.L_ebss:
	.long	__bss_stop
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"

#endif /* CONFIG_STACK_DEBUG */