stacktrace.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

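/*
 * Bind the current stack pointer to a global register variable so the
 * unwinder can read it directly without inline assembly.
 */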
register unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

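/*
 * Layout of the frame record the prologue stores just below the frame
 * pointer: the caller's fp followed by the return address.
 */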
struct stackframe {
	unsigned long fp;
	unsigned long ra;
};

extern asmlinkage void ret_from_exception(void);

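/*
 * Walk the kernel stack using frame records, calling fn() for every return
 * address found.  The walk stops when fn() returns true, when a PC falls
 * outside kernel text, or when the frame pointer leaves the task's stack.
 */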
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, unsigned long, void *),
			     void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp = sp_in_global;

		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}
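
	/* Follow the chain of frame records up the stack. */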
	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, 0, arg)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
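		/*
		 * A leaf function that took the exception may have saved
		 * only fp (no ra), so the record is shifted by one word:
		 * the caller's fp sits in the ra slot and the return
		 * address must come from pt_regs instead.
		 */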
		if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = frame->fp;
			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
						   &frame->ra);
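			/*
			 * The return address points at the exception return
			 * path, so this frame holds the saved pt_regs: let
			 * the callback see them, then continue unwinding
			 * from the interrupted epc/s0.
			 */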
			if (pc == (unsigned long)ret_from_exception) {
				if (unlikely(!__kernel_text_address(pc) ||
					     fn(pc, sp, arg)))
					break;

				pc = ((struct pt_regs *)sp)->epc;
				fp = ((struct pt_regs *)sp)->s0;
			}
		}
	}
}

#else /* !CONFIG_FRAME_POINTER */

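/*
 * Without frame pointers we cannot follow call frames, so conservatively
 * scan the stack for values that look like kernel return addresses and
 * report each one to the callback.
 */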
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, unsigned long, void *),
			     void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, 0, arg)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

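/*
 * walk_stackframe() callback for show_stack(): print each address and,
 * when the unwinder hands us a pt_regs frame, dump the registers too.
 * Returning false keeps the walk going.
 */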
static bool print_trace_address(unsigned long pc, unsigned long regs, void *arg)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	if (regs)
		show_regs((struct pt_regs *)regs);
	return false;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
	pr_cont("End Trace.\n");
}

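/*
 * walk_stackframe() callback for get_wchan(): record the first address
 * outside the scheduler and stop the walk.
 */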
static bool save_wchan(unsigned long pc, unsigned long regs, void *arg)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;

		*p = pc;
		return true;
	}
	return false;
}

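/*
 * Return the address a blocked task is sleeping in (its "wait channel"),
 * or 0 if it cannot be determined safely.
 */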
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}

#ifdef CONFIG_STACKTRACE

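/*
 * Store one address in the stack_trace buffer, honouring the skip count.
 * Returning true ends the walk once the buffer is full.
 */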
static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
	struct stack_trace *trace = arg;

	if (unlikely(nosched && in_sched_functions(pc)))
		return false;
	if (unlikely(trace->skip > 0)) {
		trace->skip--;
		return false;
	}

	trace->entries[trace->nr_entries++] = pc;
	return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(unsigned long pc, unsigned long regs, void *arg)
{
	return __save_trace(pc, arg, false);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */