stacktrace.c

/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
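
/*
 * The arch_stack_walk*() functions below are the x86 back ends for the
 * generic stack trace code in kernel/stacktrace.c (CONFIG_ARCH_STACKWALK),
 * which drives them from helpers such as stack_trace_save(),
 * stack_trace_save_tsk_reliable() and stack_trace_save_user().
 */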

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
{
        struct unwind_state state;
        unsigned long addr;

        if (regs && !consume_entry(cookie, regs->ip))
                return;

        for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
             unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                if (!addr || !consume_entry(cookie, addr))
                        break;
        }
}
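
/*
 * Example (illustrative sketch, not taken from the original file): a minimal
 * stack_trace_consume_fn callback that records return addresses into a
 * fixed-size buffer, similar in spirit to what the generic code in
 * kernel/stacktrace.c does for stack_trace_save().  The names
 * example_trace_cookie and example_consume_entry are hypothetical.
 */
struct example_trace_cookie {
        unsigned long *store;   /* destination buffer */
        unsigned int size;      /* buffer capacity */
        unsigned int len;       /* entries recorded so far */
};

static bool example_consume_entry(void *cookie, unsigned long addr)
{
        struct example_trace_cookie *c = cookie;

        if (c->len >= c->size)
                return false;   /* buffer full: stop the walk */

        c->store[c->len++] = addr;
        return true;            /* keep walking */
}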

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                             void *cookie, struct task_struct *task)
{
        struct unwind_state state;
        struct pt_regs *regs;
        unsigned long addr;

        for (unwind_start(&state, task, NULL, NULL);
             !unwind_done(&state) && !unwind_error(&state);
             unwind_next_frame(&state)) {

                regs = unwind_get_entry_regs(&state, NULL);
                if (regs) {
                        /* Success path for user tasks */
                        if (user_mode(regs))
                                return 0;

                        /*
                         * Kernel mode registers on the stack indicate an
                         * in-kernel interrupt or exception (e.g., preemption
                         * or a page fault), which can make frame pointers
                         * unreliable.
                         */
                        if (IS_ENABLED(CONFIG_FRAME_POINTER))
                                return -EINVAL;
                }

                addr = unwind_get_return_address(&state);

                /*
                 * A NULL or invalid return address probably means there's some
                 * generated code which __kernel_text_address() doesn't know
                 * about.
                 */
                if (!addr)
                        return -EINVAL;

                if (!consume_entry(cookie, addr))
                        return -EINVAL;
        }

        /* Check for stack corruption */
        if (unwind_error(&state))
                return -EINVAL;

        return 0;
}
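
/*
 * Example (illustrative sketch, not taken from the original file): how a
 * caller, roughly in the manner of the generic stack_trace_save_tsk_reliable(),
 * might drive arch_stack_walk_reliable() and treat any error as "no reliable
 * trace".  example_save_reliable_trace() and the example_* helpers above are
 * hypothetical.
 */
static int example_save_reliable_trace(struct task_struct *task,
                                       unsigned long *store,
                                       unsigned int size)
{
        struct example_trace_cookie c = {
                .store = store,
                .size  = size,
        };
        int ret;

        /* Per the comment above: the task must be inactive or 'current'. */
        ret = arch_stack_walk_reliable(example_consume_entry, &c, task);
        if (ret)
                return ret;     /* unreliable stack: do not trust the entries */

        return c.len;           /* number of reliable entries captured */
}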

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * Mirrors the layout of a frame-pointer-based user stack frame: the caller's
 * saved frame pointer, immediately followed by the return address.
 */
struct stack_frame_user {
        const void __user *next_fp;
        unsigned long ret_addr;
};

static int
copy_stack_frame(const struct stack_frame_user __user *fp,
                 struct stack_frame_user *frame)
{
        int ret;

        if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
                return 0;

        ret = 1;

        /*
         * Read the frame with page faults disabled: if the user data is not
         * resident, __get_user() fails instead of sleeping to fault it in.
         */
        pagefault_disable();
        if (__get_user(frame->next_fp, &fp->next_fp) ||
            __get_user(frame->ret_addr, &fp->ret_addr))
                ret = 0;
        pagefault_enable();

        return ret;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs)
{
        const void __user *fp = (const void __user *)regs->bp;

        if (!consume_entry(cookie, regs->ip))
                return;

        while (1) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                /* Frame pointers should only ever point further up the stack. */
                if ((unsigned long)fp < regs->sp)
                        break;
                if (!frame.ret_addr)
                        break;
                if (!consume_entry(cookie, frame.ret_addr))
                        break;
                fp = frame.next_fp;
        }
}
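
/*
 * Example (illustrative sketch, not taken from the original file): capturing a
 * user-space stack trace for the current task from its saved user registers,
 * roughly what the generic stack_trace_save_user() path arranges.
 * example_save_user_trace() and the example_* helpers above are hypothetical.
 */
static unsigned int example_save_user_trace(unsigned long *store,
                                            unsigned int size)
{
        struct example_trace_cookie c = {
                .store = store,
                .size  = size,
        };
        struct pt_regs *regs = task_pt_regs(current);

        /* Only meaningful when the task has user-mode registers saved. */
        if (!user_mode(regs))
                return 0;

        arch_stack_walk_user(example_consume_entry, &c, regs);
        return c.len;
}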