// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr = NULL;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	bool dot_appended = false;
	const char *c;
	ssize_t ret = 0;
	int len = 0;

	if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		c++;
		len = c - name;
		memcpy(dot_name, name, len);
	} else
		c = name;

	if (*c != '\0' && *c != '.') {
		dot_name[len++] = '.';
		dot_appended = true;
	}
	ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
	if (ret > 0)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

	/* Fallback to the original non-dot symbol lookup */
	if (!addr && dot_appended)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}
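
/*
 * Illustrative note (added; not in the original source): the lookup accepts
 * a bare symbol name as well as the <module:symbol> form, e.g. (symbol
 * names below are hypothetical examples):
 *
 *	kprobe_opcode_t *a = kprobe_lookup_name("kernel_clone", 0);
 *	kprobe_opcode_t *b = kprobe_lookup_name("ext4:ext4_file_open", 0);
 *
 * On ABIv1 the ".symbol" dot variant is tried first, so the returned
 * address is the function's code rather than its descriptor; on ABIv2 a
 * zero offset is adjusted to the local entry point (or to the ftrace
 * location, if one exists within the first 16 bytes).
 */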
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *prev;
	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
		   ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}
	preempt_disable();
	prev = get_kprobe(p->addr - 1);
	preempt_enable_no_resched();
	if (prev &&
	    ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}

	/*
	 * insn must be on a special executable page on ppc64. This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt.
	 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
		p->opcode = ppc_inst_val(insn);
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could play a part in
	 * whether the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}
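
/*
 * Rationale (added note, to the best of our understanding): on ABIv2 the
 * global entry point is followed by two TOC-setup instructions (8 bytes)
 * before the local entry point, so offsets up to 8 still land on "function
 * entry"; with KPROBES_ON_FTRACE the ftrace location may sit within the
 * first 16 bytes (see kprobe_lookup_name() above). Other ABIs have a
 * single entry point, hence only offset 0 qualifies.
 */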
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
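
/*
 * Example (a minimal sketch, not part of this file): a kretprobe user
 * relies on the link-register swap done above. The probed symbol is
 * arbitrary here:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returned %lx\n", regs->gpr[3]); // r3 = return value
 *		return 0;
 *	}
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= ret_handler,
 *	};
 *	// register_kretprobe(&rp); from module init
 */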
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %s\n", ppc_inst_as_str(insn));
		BUG();
	} else {
		/*
		 * If we haven't previously emulated this instruction, then it
		 * can't be boosted. Note it down so we don't try to do so again.
		 *
		 * If, however, we had emulated this instruction in the past,
		 * then this is just an error with the current run (for
		 * instance, exceptions due to a load/store). We return 0 so
		 * that this is now single-stepped, but continue to try
		 * emulating it in subsequent probe hits.
		 */
		if (unlikely(p->ainsn.boostable != 1))
			p->ainsn.boostable = -1;
	}

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
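
/*
 * Summary (added for clarity): p->ainsn.boostable acts as a small state
 * machine:
 *	 0 - emulation not yet attempted
 *	 1 - emulate_step() has succeeded at least once; keep emulating
 *	-1 - emulation is known to fail; always fall back to single-stepping
 */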
int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	if (!IS_ENABLED(CONFIG_BOOKE) &&
	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (!p) {
		unsigned int instr;

		if (get_kernel_nofault(instr, addr))
			goto no_kprobe;

		if (instr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			if (is_trap(instr))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		kprobe_opcode_t insn = *p->ainsn.insn;
		if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
			/* Turn off 'trace' bits */
			regs->msr &= ~MSR_SINGLESTEP;
			regs->msr |= kcb->kprobe_saved_msr;
			goto no_kprobe;
		}

		/*
		 * We have reentered the kprobe_handler(), since another probe
		 * was hit while within the handler. We here save the original
		 * kprobes variables and just single step on the instruction of
		 * the new probe without calling any user handlers.
		 */
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kprobes_inc_nmissed_count(p);
		kcb->kprobe_status = KPROBE_REENTER;
		if (p->ainsn.boostable >= 0) {
			ret = try_to_emulate(p, regs);

			if (ret > 0) {
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
		}
		prepare_singlestep(p, regs);
		return 1;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler changed execution path, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
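
/*
 * Example (illustrative sketch, not in the original file): the pre_handler
 * consulted above is supplied at registration time. A minimal user probing
 * an arbitrary symbol might look like:
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS, nip=%lx\n", p->addr, regs->nip);
 *		return 0;	// 0: fall through to emulation/single-step
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= pre,
 *	};
 *	// register_kprobe(&kp); from module init
 */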
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long orig_ret_address;

	orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
	/*
	 * We get here through one of two paths:
	 * 1. by taking a trap -> kprobe_handler() -> here
	 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
	 *
	 * When going back through (1), we need regs->nip to be setup properly
	 * as it is used to determine the return address from the trap.
	 * For (2), since nip is not honoured with optprobes, we instead setup
	 * the link register properly so that the subsequent 'blr' in
	 * kretprobe_trampoline jumps back to the right instruction.
	 *
	 * For nip, we should set the address to the previous instruction since
	 * we end up emulating it in kprobe_handler(), which increments the nip
	 * again.
	 */
	regs->nip = orig_ret_address - 4;
	regs->link = orig_ret_address;

	return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	int len;
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + len;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point nip back to the probe address and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
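
/*
 * Note (added): the user fault handler consulted above has the signature
 *
 *	int handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
 *
 * and returns non-zero to claim the fault; most users leave .fault_handler
 * NULL and rely on the exception-table fixup attempted afterwards.
 */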
unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
	if (!kernel_text_address((unsigned long)entry))
		return ppc_global_function_entry(entry);
	else
#endif
		return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
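
/*
 * Background (added note): on ABIv1 a C function pointer points at a
 * function descriptor (entry address, TOC pointer, environment) rather
 * than at code, so ppc_global_function_entry() is used to recover the
 * actual text address when the pointer is not already a kernel text
 * address.
 */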
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);