// SPDX-License-Identifier: GPL-2.0-only
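
/*
 * uprobes.c - RISC-V architecture support for user-space probes
 * (uprobes): probed-instruction analysis, out-of-line (XOL)
 * single-stepping, in-kernel instruction simulation, and uretprobe
 * return-address hijacking.
 */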

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX
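
/*
 * Check whether an instruction slot holds the uprobe breakpoint. With
 * compressed instructions (CONFIG_RISCV_ISA_C) the breakpoint is the
 * 16-bit c.ebreak, so only the low halfword is compared.
 */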
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}
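
/*
 * Return the breakpoint address: on RISC-V the trap leaves the PC at
 * the probed instruction, so no adjustment is needed.
 */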
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
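
/*
 * Called when a uprobe is installed: record the probed instruction's
 * length (2 or 4 bytes) and decode it to decide whether it can be
 * single-stepped out of line (INSN_GOOD) or must be simulated in the
 * kernel (INSN_GOOD_NO_SLOT). Anything else is rejected.
 */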
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
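
/*
 * Prepare out-of-line single-stepping: save thread.bad_cause and set it
 * to the UPROBE_TRAP_NR sentinel so a trap taken during the step can be
 * detected, then point the PC at the XOL slot. SR_SPIE is cleared here
 * and set again in arch_uprobe_post_xol().
 */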
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	regs->status &= ~SR_SPIE;

	return 0;
}
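
/*
 * The single step completed: warn if a trap fired in between, restore
 * the saved exception cause, and resume at the instruction following
 * the probed address.
 */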
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	/* Restore the cause saved in arch_uprobe_pre_xol(). */
	current->thread.bad_cause = utask->autask.saved_cause;

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	regs->status |= SR_SPIE;

	return 0;
}
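
/*
 * If thread.bad_cause no longer holds the sentinel written by
 * arch_uprobe_pre_xol(), the task trapped while executing the XOL slot.
 */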
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}
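
/*
 * For instructions that cannot be stepped out of line, run the
 * simulation handler chosen by riscv_probe_decode_insn() instead and
 * report the step as already done.
 */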
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}
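
/*
 * Undo the effects of arch_uprobe_pre_xol() when the step is abandoned.
 */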
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Restore the cause saved in arch_uprobe_pre_xol(). */
	current->thread.bad_cause = utask->autask.saved_cause;

	/*
	 * Task has received a fatal signal, so reset back to probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	regs->status &= ~SR_SPIE;
}
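
/*
 * A hijacked return address is still "alive" while the stack has not
 * been unwound past the frame it was planted in; for a chained call
 * (RP_CHECK_CHAIN_CALL) the recorded stack pointer itself still counts.
 */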
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}
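
/*
 * Replace the return address in ra with the uretprobe trampoline and
 * hand the original back to the generic layer so it can be restored
 * when the probed function returns.
 */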
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;
	regs->ra = trampoline_vaddr;

	return ra;
}
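
/*
 * uprobe traps are dispatched directly through the breakpoint and
 * single-step handlers below, so there is nothing to do here.
 */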
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
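
/*
 * Called from the ebreak trap handler: let the generic uprobes code
 * handle a breakpoint hit in user space.
 */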
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}
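
/*
 * Called when the trailing ebreak in the XOL slot traps: the
 * out-of-line step has finished.
 */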
bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}
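
/*
 * Copy the probed instruction into the XOL slot and plant a 32-bit
 * ebreak (__BUG_INSN_32) right behind it, so the task traps back into
 * the kernel as soon as the stepped instruction retires.
 */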
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak after the opcode to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page(), but it needs a vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different, it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}