// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void *)(entry) + SIZEOF_TRACE_ENTRY(is_return))
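/*
 * Record layout sketch (illustrative): a non-return entry carries one
 * vaddr slot (the probed instruction pointer) and a return entry carries
 * two (function entry address and return IP); the fetched argument data
 * written by store_trace_args() follows immediately after:
 *
 *	[ trace_entry | vaddr[0] (, vaddr[1]) | probe arg data ... ]
 */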
static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
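/*
 * Sketch of the data_loc convention used above (see make_data_loc() in
 * trace_probe.h): the u32 written back into *dest packs the string length
 * in the upper 16 bits and the offset of the string data relative to @base
 * in the lower 16 bits, so consumers can locate variable-length data
 * without a separate length field.
 */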
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}
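/*
 * Worked example (hypothetical numbers): if the probe sits at file offset
 * 0x4710 and the breakpoint fired at runtime address 0x7f0a12344710, the
 * mapping base is 0x7f0a12344710 - 0x4710 = 0x7f0a12340000; a file-offset
 * fetch argument is then resolved as base + file_offset in the probed task.
 */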
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}
static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
				(int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
				(int)(sizeof(void *) * 2), tu->offset,
				tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}
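/*
 * Note: the buffer above reconstructs "PATH:0xOFFSET" (plus "(0xREF)" when
 * a reference counter offset is set) in the same format trace_uprobe_show()
 * prints, so a deletion command that quotes the listed probe spec matches
 * the event it was copied from.
 */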
static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}
/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}
/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}
static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 in the command */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception, though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), the same uprobe may
 * take a new reference counter, as long as the new one does not conflict
 * with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}
/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}
/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 */
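/*
 * Illustrative commands (path, offset and event names are hypothetical),
 * written to the tracefs uprobe_events file created by init_uprobe_trace():
 *
 *	p:myprobe /bin/bash:0x4245c0			- probe at offset 0x4245c0
 *	r:myretprobe /bin/bash:0x4245c0 $retval		- return probe, fetch retval
 *	-:myprobe					- delete the event again
 */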
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Check if there is %return suffix */
	tmp = strchr(arg, '%');
	if (tmp) {
		if (!strcmp(tmp, "%return")) {
			*tmp = '\0';
			is_return = true;
		} else {
			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
			ret = -EINVAL;
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}
static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);
	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
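/*
 * Both file_operations above back tracefs files created in
 * init_uprobe_trace() at the bottom of this file: uprobe_events accepts
 * the probe definition commands parsed by trace_uprobe_create(), and
 * uprobe_profile reports the per-probe hit count (tu->nhit) accumulated
 * by uprobe_dispatcher().
 */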
struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
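/*
 * Note: each per-CPU buffer above is a single page, which is why
 * __uprobe_trace_func() below bails out (with a warning) when
 * tu->tp.size + dsize would exceed PAGE_SIZE.
 */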
static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}
/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);
static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}
static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}
static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}
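/*
 * In both helpers above, a true result tells the caller that the filter
 * state already covers (or still covers) the target mm, so the
 * uprobe_apply() walk in uprobe_perf_open()/uprobe_perf_close() below can
 * be skipped.
 */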
static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}
static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		u32 ret;

		preempt_disable();
		ret = trace_call_bpf(call, regs);
		preempt_enable();
		if (!ret)
			return;
	}

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	/* Round up so the record plus perf's u32 size header stays u64-aligned. */
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}
/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */
static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
}
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static struct trace_event_fields uprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = uprobe_event_define_fields },
	{}
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	call->event.funcs = &uprobe_funcs;
	call->class->fields_array = uprobe_fields_array;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}
#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif /* CONFIG_PERF_EVENTS */
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("uprobe_events", 0644, NULL,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, NULL,
				    NULL, &uprobe_profile_ops);

	return 0;
}

fs_initcall(init_uprobe_trace);