/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
 * Kernel Probes (KProbes)
 * include/linux/kprobes.h
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com> and Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#ifdef CONFIG_KPROBES

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001
#define KPROBE_HIT_SS		0x00000002
#define KPROBE_REENTER		0x00000004
#define KPROBE_HIT_SSDONE	0x00000008

#else /* CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
	int dummy;
};
#endif /* CONFIG_KPROBES */

struct kprobe;
struct pt_regs;
struct kretprobe;
struct kretprobe_instance;

typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
				       unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
				       int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
				    struct pt_regs *);
struct kprobe {
	struct hlist_node hlist;

	/* list of kprobes for multi-handler support */
	struct list_head list;

	/* count the number of times this probe was temporarily disarmed */
	unsigned long nmissed;

	/* location of the probe point */
	kprobe_opcode_t *addr;

	/* Allow user to indicate symbol name of the probe point */
	const char *symbol_name;

	/* Offset into the symbol */
	unsigned int offset;

	/* Called before addr is executed. */
	kprobe_pre_handler_t pre_handler;

	/* Called after addr is executed, unless... */
	kprobe_post_handler_t post_handler;

	/*
	 * ... called if executing addr causes a fault (e.g. a page fault).
	 * Return 1 if it handled the fault, otherwise the kernel will see it.
	 */
	kprobe_fault_handler_t fault_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

	/* copy of the original instruction */
	struct arch_specific_insn ainsn;

	/*
	 * Indicates various status flags.
	 * Protected by kprobe_mutex after this kprobe is registered.
	 */
	u32 flags;
};
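
/*
 * Illustrative sketch (not part of this header): a minimal module that
 * registers a kprobe by symbol name. The target symbol and all handler names
 * are placeholders; see samples/kprobes/ for the maintained examples.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %s+0x%x\n", p->symbol_name, p->offset);
 *		return 0;	// 0: let the probed instruction execute
 *	}
 *
 *	static struct kprobe my_kprobe = {
 *		.symbol_name	= "kernel_clone",	// hypothetical target
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_kprobe(&my_kprobe);	// 0 on success
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_kprobe(&my_kprobe);
 *	}
 *	module_init(my_init);
 *	module_exit(my_exit);
 *	MODULE_LICENSE("GPL");
 */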

/* Kprobe status flags */
#define KPROBE_FLAG_GONE	1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED	2 /* probe is temporarily disabled */
#define KPROBE_FLAG_OPTIMIZED	4 /*
				   * probe is really optimized.
				   * NOTE:
				   * this flag is only for optimized_kprobe.
				   */
#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */

/* Has this kprobe gone? */
static inline int kprobe_gone(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_GONE;
}

/* Is this kprobe disabled? */
static inline int kprobe_disabled(struct kprobe *p)
{
	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}

/* Is this kprobe really running the optimized path? */
static inline int kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}

/* Does this kprobe use ftrace? */
static inline int kprobe_ftrace(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_FTRACE;
}

/*
 * Function-return probe -
 * Note:
 * User needs to provide a handler function, and initialize maxactive.
 * maxactive - The maximum number of instances of the probed function that
 * can be active concurrently.
 * nmissed - tracks the number of times the probed function's return was
 * ignored, due to maxactive being too low.
 */
struct kretprobe {
	struct kprobe kp;
	kretprobe_handler_t handler;
	kretprobe_handler_t entry_handler;
	int maxactive;
	int nmissed;
	size_t data_size;
	struct hlist_head free_instances;
	raw_spinlock_t lock;
};

#define KRETPROBE_MAX_DATA_SIZE	4096

struct kretprobe_instance {
	union {
		struct hlist_node hlist;
		struct rcu_head rcu;
	};
	struct kretprobe *rp;
	kprobe_opcode_t *ret_addr;
	struct task_struct *task;
	void *fp;
	char data[];
};
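
/*
 * Illustrative sketch (not part of this header): a kretprobe that measures
 * how long the probed function ran, using data_size bytes of per-instance
 * storage (ri->data). All names are placeholders.
 *
 *	struct my_data {
 *		ktime_t entry_stamp;
 *	};
 *
 *	static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *
 *		data->entry_stamp = ktime_get();
 *		return 0;	// 0: track this instance; nonzero: skip it
 *	}
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *data = (struct my_data *)ri->data;
 *		s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));
 *
 *		pr_info("%s returned after %lld ns\n", ri->rp->kp.symbol_name, delta);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "kernel_clone",	// hypothetical target
 *		.entry_handler	= my_entry_handler,
 *		.handler	= my_ret_handler,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,	// raise this if rp->nmissed keeps growing
 *	};
 *
 * Register with register_kretprobe(&my_kretprobe) and tear down with
 * unregister_kretprobe(&my_kretprobe), declared further below.
 */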

struct kretprobe_blackpoint {
	const char *name;
	void *addr;
};

struct kprobe_blacklist_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
};

#ifdef CONFIG_KPROBES
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * For #ifdef avoidance:
 */
static inline int kprobes_built_in(void)
{
	return 1;
}

extern void kprobe_busy_begin(void);
extern void kprobe_busy_end(void);

#ifdef CONFIG_KRETPROBES
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				   struct pt_regs *regs);
extern int arch_trampoline_kprobe(struct kprobe *p);

/* If the trampoline handler is called from a kprobe, use this version */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
					     void *trampoline_address,
					     void *frame_pointer);

static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
					   void *trampoline_address,
					   void *frame_pointer)
{
	unsigned long ret;
	/*
	 * Set a dummy kprobe to avoid kretprobe recursion.
	 * Since a kretprobe never runs in a kprobe handler, no kprobe must
	 * be running at this point.
	 */
	kprobe_busy_begin();
	ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer);
	kprobe_busy_end();

	return ret;
}
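
/*
 * Illustrative sketch of the intended caller (arch-specific; all names are
 * placeholders): the architecture's return trampoline saves a struct pt_regs
 * and then invokes a C helper, which hands control to the kretprobe core and
 * receives back the original return address to resume at:
 *
 *	__used void *my_arch_trampoline_handler(struct pt_regs *regs)
 *	{
 *		// the frame-pointer argument must match whatever the
 *		// architecture recorded in arch_prepare_kretprobe()
 *		return (void *)kretprobe_trampoline_handler(regs,
 *				&my_arch_kretprobe_trampoline,
 *				(void *)kernel_stack_pointer(regs));
 *	}
 */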

#else /* CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
					  struct pt_regs *regs)
{
}
static inline int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */

extern struct kretprobe_blackpoint kretprobe_blacklist[];

#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
#else
static inline int init_test_probes(void)
{
	return 0;
}
#endif /* CONFIG_KPROBES_SANITY_TEST */

extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);

struct kprobe_insn_cache {
	struct mutex mutex;
	void *(*alloc)(void);	/* allocate insn page */
	void (*free)(void *);	/* free insn page */
	const char *sym;	/* symbol for insn pages */
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
extern void __free_insn_slot(struct kprobe_insn_cache *c,
			     kprobe_opcode_t *slot, int dirty);
/* sleep-less address checking routine */
extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
				unsigned long addr);

#define DEFINE_INSN_CACHE_OPS(__name)					\
extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
									\
static inline kprobe_opcode_t *get_##__name##_slot(void)		\
{									\
	return __get_insn_slot(&kprobe_##__name##_slots);		\
}									\
									\
static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{									\
	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
}									\
									\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);	\
}
#define KPROBE_INSN_PAGE_SYM		"kprobe_insn_page"
#define KPROBE_OPTINSN_PAGE_SYM		"kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym);
#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name)					\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return 0;							\
}
#endif

DEFINE_INSN_CACHE_OPS(insn);
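
/*
 * DEFINE_INSN_CACHE_OPS(insn) above expands to get_insn_slot(),
 * free_insn_slot() and is_kprobe_insn_slot(), all backed by the global
 * kprobe_insn_slots cache. Rough usage sketch, as seen from arch code that
 * single-steps a copy of the probed instruction (field names vary per arch;
 * illustrative only):
 *
 *	kprobe_opcode_t *slot = get_insn_slot();	// may sleep, can fail
 *	if (!slot)
 *		return -ENOMEM;
 *	// copy the displaced instruction into the executable slot here
 *	p->ainsn.insn = slot;
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);	// on unregister; dirty slots
 *						// are reclaimed later
 */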

#ifdef CONFIG_OPTPROBES
/*
 * Internal structure for direct jump optimized probe
 */
struct optimized_kprobe {
	struct kprobe kp;
	struct list_head list;	/* list for optimizing queue */
	struct arch_optimized_insn optinsn;
};

/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
					 struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
					unsigned long addr);

extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);

DEFINE_INSN_CACHE_OPS(optinsn);

#ifdef CONFIG_SYSCTL
extern int sysctl_kprobes_optimization;
extern int proc_kprobes_optimization_handler(struct ctl_table *table,
					     int write, void *buffer,
					     size_t *length, loff_t *ppos);
#endif
extern void wait_for_kprobe_optimizer(void);
#else
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct pt_regs *regs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif

int arch_check_ftrace_location(struct kprobe *p);

/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);

/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
	return (__this_cpu_read(current_kprobe));
}

static inline void reset_current_kprobe(void)
{
	__this_cpu_write(current_kprobe, NULL);
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
	return this_cpu_ptr(&kprobe_ctlblk);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
unsigned long arch_deref_entry_point(void *);

int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);

void kprobe_flush_task(struct task_struct *tk);
void kprobe_free_init_mem(void);

int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);

void dump_kprobe(struct kprobe *kp);

void *alloc_insn_page(void);
void free_insn_page(void *page);

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym);

int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
			    char *type, char *sym);
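
/*
 * Beyond the register/unregister pairs above, a probe can be toggled in place
 * once registered (illustrative sketch, reusing the hypothetical my_kprobe
 * from the earlier example):
 *
 *	disable_kprobe(&my_kprobe);	// stays registered, stops firing
 *	...
 *	enable_kprobe(&my_kprobe);	// arms it again
 *
 * register_kprobes()/unregister_kprobes() and the kretprobe counterparts take
 * an array of pointers plus a count for batch (un)registration.
 */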

#else /* !CONFIG_KPROBES: */

static inline int kprobes_built_in(void)
{
	return 0;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
	return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
	return NULL;
}
static inline int register_kprobe(struct kprobe *p)
{
	return -ENOSYS;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
	return -ENOSYS;
}
static inline void unregister_kprobe(struct kprobe *p)
{
}
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
static inline void kprobe_free_init_mem(void)
{
}
static inline int disable_kprobe(struct kprobe *kp)
{
	return -ENOSYS;
}
static inline int enable_kprobe(struct kprobe *kp)
{
	return -ENOSYS;
}

static inline bool within_kprobe_blacklist(unsigned long addr)
{
	return true;
}
static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
				     char *type, char *sym)
{
	return -ERANGE;
}
#endif /* CONFIG_KPROBES */

static inline int disable_kretprobe(struct kretprobe *rp)
{
	return disable_kprobe(&rp->kp);
}
static inline int enable_kretprobe(struct kretprobe *rp)
{
	return enable_kprobe(&rp->kp);
}

#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
	return false;
}
#endif
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
	return false;
}
#endif

/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
					      unsigned int trap)
{
	if (!kprobes_built_in())
		return false;
	if (user_mode(regs))
		return false;
	/*
	 * To be potentially processing a kprobe fault and to be allowed
	 * to call kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return false;
	if (!kprobe_running())
		return false;
	return kprobe_fault_handler(regs, trap);
}
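
/*
 * Illustrative sketch (not part of this header): architecture fault handlers
 * call this early, before taking any locks, so that a fault raised while a
 * probed instruction is being single-stepped is handed back to the kprobe
 * core instead of being treated as a normal page fault:
 *
 *	if (kprobe_page_fault(regs, trapnr))
 *		return;		// kprobes consumed the fault
 */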

#endif /* _LINUX_KPROBES_H */