evlist.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  4. *
  5. * Parts came from builtin-{top,stat,record}.c, see those files for further
  6. * copyright notes.
  7. */
  8. #include <api/fs/fs.h>
  9. #include <errno.h>
  10. #include <inttypes.h>
  11. #include <poll.h>
  12. #include "cpumap.h"
  13. #include "util/mmap.h"
  14. #include "thread_map.h"
  15. #include "target.h"
  16. #include "evlist.h"
  17. #include "evsel.h"
  18. #include "debug.h"
  19. #include "units.h"
  20. #include <internal/lib.h> // page_size
  21. #include "affinity.h"
  22. #include "../perf.h"
  23. #include "asm/bug.h"
  24. #include "bpf-event.h"
  25. #include "util/string2.h"
  26. #include "util/perf_api_probe.h"
  27. #include <signal.h>
  28. #include <unistd.h>
  29. #include <sched.h>
  30. #include <stdlib.h>
  31. #include "parse-events.h"
  32. #include <subcmd/parse-options.h>
  33. #include <fcntl.h>
  34. #include <sys/ioctl.h>
  35. #include <sys/mman.h>
  36. #include <linux/bitops.h>
  37. #include <linux/hash.h>
  38. #include <linux/log2.h>
  39. #include <linux/err.h>
  40. #include <linux/string.h>
  41. #include <linux/zalloc.h>
  42. #include <perf/evlist.h>
  43. #include <perf/evsel.h>
  44. #include <perf/cpumap.h>
  45. #include <perf/mmap.h>
  46. #include <internal/xyarray.h>
  47. #ifdef LACKS_SIGQUEUE_PROTOTYPE
  48. int sigqueue(pid_t pid, int sig, const union sigval value);
  49. #endif
  50. #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
  51. #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
  52. void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
  53. struct perf_thread_map *threads)
  54. {
  55. perf_evlist__init(&evlist->core);
  56. perf_evlist__set_maps(&evlist->core, cpus, threads);
  57. evlist->workload.pid = -1;
  58. evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
  59. evlist->ctl_fd.fd = -1;
  60. evlist->ctl_fd.ack = -1;
  61. evlist->ctl_fd.pos = -1;
  62. }
  63. struct evlist *evlist__new(void)
  64. {
  65. struct evlist *evlist = zalloc(sizeof(*evlist));
  66. if (evlist != NULL)
  67. evlist__init(evlist, NULL, NULL);
  68. return evlist;
  69. }
  70. struct evlist *perf_evlist__new_default(void)
  71. {
  72. struct evlist *evlist = evlist__new();
  73. if (evlist && evlist__add_default(evlist)) {
  74. evlist__delete(evlist);
  75. evlist = NULL;
  76. }
  77. return evlist;
  78. }
  79. struct evlist *perf_evlist__new_dummy(void)
  80. {
  81. struct evlist *evlist = evlist__new();
  82. if (evlist && evlist__add_dummy(evlist)) {
  83. evlist__delete(evlist);
  84. evlist = NULL;
  85. }
  86. return evlist;
  87. }
  88. /**
  89. * perf_evlist__set_id_pos - set the positions of event ids.
  90. * @evlist: selected event list
  91. *
  92. * Events with compatible sample types all have the same id_pos
  93. * and is_pos. For convenience, put a copy on evlist.
  94. */
  95. void perf_evlist__set_id_pos(struct evlist *evlist)
  96. {
  97. struct evsel *first = evlist__first(evlist);
  98. evlist->id_pos = first->id_pos;
  99. evlist->is_pos = first->is_pos;
  100. }
  101. static void perf_evlist__update_id_pos(struct evlist *evlist)
  102. {
  103. struct evsel *evsel;
  104. evlist__for_each_entry(evlist, evsel)
  105. evsel__calc_id_pos(evsel);
  106. perf_evlist__set_id_pos(evlist);
  107. }
  108. static void evlist__purge(struct evlist *evlist)
  109. {
  110. struct evsel *pos, *n;
  111. evlist__for_each_entry_safe(evlist, n, pos) {
  112. list_del_init(&pos->core.node);
  113. pos->evlist = NULL;
  114. evsel__delete(pos);
  115. }
  116. evlist->core.nr_entries = 0;
  117. }
  118. void evlist__exit(struct evlist *evlist)
  119. {
  120. zfree(&evlist->mmap);
  121. zfree(&evlist->overwrite_mmap);
  122. perf_evlist__exit(&evlist->core);
  123. }
  124. void evlist__delete(struct evlist *evlist)
  125. {
  126. if (evlist == NULL)
  127. return;
  128. evlist__munmap(evlist);
  129. evlist__close(evlist);
  130. evlist__purge(evlist);
  131. evlist__exit(evlist);
  132. free(evlist);
  133. }
  134. void evlist__add(struct evlist *evlist, struct evsel *entry)
  135. {
  136. entry->evlist = evlist;
  137. entry->idx = evlist->core.nr_entries;
  138. entry->tracking = !entry->idx;
  139. perf_evlist__add(&evlist->core, &entry->core);
  140. if (evlist->core.nr_entries == 1)
  141. perf_evlist__set_id_pos(evlist);
  142. }
  143. void evlist__remove(struct evlist *evlist, struct evsel *evsel)
  144. {
  145. evsel->evlist = NULL;
  146. perf_evlist__remove(&evlist->core, &evsel->core);
  147. }
  148. void perf_evlist__splice_list_tail(struct evlist *evlist,
  149. struct list_head *list)
  150. {
  151. struct evsel *evsel, *temp;
  152. __evlist__for_each_entry_safe(list, temp, evsel) {
  153. list_del_init(&evsel->core.node);
  154. evlist__add(evlist, evsel);
  155. }
  156. }
  157. int __evlist__set_tracepoints_handlers(struct evlist *evlist,
  158. const struct evsel_str_handler *assocs, size_t nr_assocs)
  159. {
  160. struct evsel *evsel;
  161. size_t i;
  162. int err;
  163. for (i = 0; i < nr_assocs; i++) {
  164. // When adding a handler for an event that is not in this evlist, just ignore it.
  165. evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
  166. if (evsel == NULL)
  167. continue;
  168. err = -EEXIST;
  169. if (evsel->handler != NULL)
  170. goto out;
  171. evsel->handler = assocs[i].handler;
  172. }
  173. err = 0;
  174. out:
  175. return err;
  176. }
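/*
 * Note: in __perf_evlist__set_leader() below, the leader is taken to be the
 * first entry on the list and nr_members is derived from the idx distance
 * between the first and last entries, so this relies on the evsels on the
 * list carrying consecutive idx values; every entry then gets ->leader
 * pointed at that first evsel.
 */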
  177. void __perf_evlist__set_leader(struct list_head *list)
  178. {
  179. struct evsel *evsel, *leader;
  180. leader = list_entry(list->next, struct evsel, core.node);
  181. evsel = list_entry(list->prev, struct evsel, core.node);
  182. leader->core.nr_members = evsel->idx - leader->idx + 1;
  183. __evlist__for_each_entry(list, evsel) {
  184. evsel->leader = leader;
  185. }
  186. }
  187. void perf_evlist__set_leader(struct evlist *evlist)
  188. {
  189. if (evlist->core.nr_entries) {
  190. evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
  191. __perf_evlist__set_leader(&evlist->core.entries);
  192. }
  193. }
  194. int __evlist__add_default(struct evlist *evlist, bool precise)
  195. {
  196. struct evsel *evsel = evsel__new_cycles(precise);
  197. if (evsel == NULL)
  198. return -ENOMEM;
  199. evlist__add(evlist, evsel);
  200. return 0;
  201. }
  202. int evlist__add_dummy(struct evlist *evlist)
  203. {
  204. struct perf_event_attr attr = {
  205. .type = PERF_TYPE_SOFTWARE,
  206. .config = PERF_COUNT_SW_DUMMY,
  207. .size = sizeof(attr), /* to capture ABI version */
  208. };
  209. struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
  210. if (evsel == NULL)
  211. return -ENOMEM;
  212. evlist__add(evlist, evsel);
  213. return 0;
  214. }
  215. static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
  216. {
  217. struct evsel *evsel, *n;
  218. LIST_HEAD(head);
  219. size_t i;
  220. for (i = 0; i < nr_attrs; i++) {
  221. evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
  222. if (evsel == NULL)
  223. goto out_delete_partial_list;
  224. list_add_tail(&evsel->core.node, &head);
  225. }
  226. perf_evlist__splice_list_tail(evlist, &head);
  227. return 0;
  228. out_delete_partial_list:
  229. __evlist__for_each_entry_safe(&head, n, evsel)
  230. evsel__delete(evsel);
  231. return -1;
  232. }
  233. int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
  234. {
  235. size_t i;
  236. for (i = 0; i < nr_attrs; i++)
  237. event_attr_init(attrs + i);
  238. return evlist__add_attrs(evlist, attrs, nr_attrs);
  239. }
  240. struct evsel *
  241. perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
  242. {
  243. struct evsel *evsel;
  244. evlist__for_each_entry(evlist, evsel) {
  245. if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
  246. (int)evsel->core.attr.config == id)
  247. return evsel;
  248. }
  249. return NULL;
  250. }
  251. struct evsel *
  252. perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
  253. const char *name)
  254. {
  255. struct evsel *evsel;
  256. evlist__for_each_entry(evlist, evsel) {
  257. if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
  258. (strcmp(evsel->name, name) == 0))
  259. return evsel;
  260. }
  261. return NULL;
  262. }
  263. int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
  264. {
  265. struct evsel *evsel = evsel__newtp(sys, name);
  266. if (IS_ERR(evsel))
  267. return -1;
  268. evsel->handler = handler;
  269. evlist__add(evlist, evsel);
  270. return 0;
  271. }
  272. static int perf_evlist__nr_threads(struct evlist *evlist,
  273. struct evsel *evsel)
  274. {
  275. if (evsel->core.system_wide)
  276. return 1;
  277. else
  278. return perf_thread_map__nr(evlist->core.threads);
  279. }
  280. void evlist__cpu_iter_start(struct evlist *evlist)
  281. {
  282. struct evsel *pos;
  283. /*
  284. * Reset the per evsel cpu_iter. This is needed because
  285. * each evsel's cpumap may have a different index space,
  286. * and some operations need the index to modify
  287. * the FD xyarray (e.g. open, close)
  288. */
  289. evlist__for_each_entry(evlist, pos)
  290. pos->cpu_iter = 0;
  291. }
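/*
 * The per-evsel cpu_iter reset here is consumed by the evlist__for_each_cpu()
 * pattern used in evlist__enable()/evlist__disable() and evlist__close()
 * below, roughly:
 *
 *	evlist__for_each_cpu(evlist, i, cpu) {
 *		evlist__for_each_entry(evlist, pos) {
 *			if (evsel__cpu_iter_skip(pos, cpu))
 *				continue;
 *			... use pos->cpu_iter - 1 as this evsel's index for cpu ...
 *		}
 *	}
 */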
  292. bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
  293. {
  294. if (ev->cpu_iter >= ev->core.cpus->nr)
  295. return true;
  296. if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
  297. return true;
  298. return false;
  299. }
  300. bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
  301. {
  302. if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
  303. ev->cpu_iter++;
  304. return false;
  305. }
  306. return true;
  307. }
  308. void evlist__disable(struct evlist *evlist)
  309. {
  310. struct evsel *pos;
  311. struct affinity affinity;
  312. int cpu, i, imm = 0;
  313. bool has_imm = false;
  314. if (affinity__setup(&affinity) < 0)
  315. return;
  316. /* Disable 'immediate' events last */
  317. for (imm = 0; imm <= 1; imm++) {
  318. evlist__for_each_cpu(evlist, i, cpu) {
  319. affinity__set(&affinity, cpu);
  320. evlist__for_each_entry(evlist, pos) {
  321. if (evsel__cpu_iter_skip(pos, cpu))
  322. continue;
  323. if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
  324. continue;
  325. if (pos->immediate)
  326. has_imm = true;
  327. if (pos->immediate != imm)
  328. continue;
  329. evsel__disable_cpu(pos, pos->cpu_iter - 1);
  330. }
  331. }
  332. if (!has_imm)
  333. break;
  334. }
  335. affinity__cleanup(&affinity);
  336. evlist__for_each_entry(evlist, pos) {
  337. if (!evsel__is_group_leader(pos) || !pos->core.fd)
  338. continue;
  339. pos->disabled = true;
  340. }
  341. evlist->enabled = false;
  342. }
  343. void evlist__enable(struct evlist *evlist)
  344. {
  345. struct evsel *pos;
  346. struct affinity affinity;
  347. int cpu, i;
  348. if (affinity__setup(&affinity) < 0)
  349. return;
  350. evlist__for_each_cpu(evlist, i, cpu) {
  351. affinity__set(&affinity, cpu);
  352. evlist__for_each_entry(evlist, pos) {
  353. if (evsel__cpu_iter_skip(pos, cpu))
  354. continue;
  355. if (!evsel__is_group_leader(pos) || !pos->core.fd)
  356. continue;
  357. evsel__enable_cpu(pos, pos->cpu_iter - 1);
  358. }
  359. }
  360. affinity__cleanup(&affinity);
  361. evlist__for_each_entry(evlist, pos) {
  362. if (!evsel__is_group_leader(pos) || !pos->core.fd)
  363. continue;
  364. pos->disabled = false;
  365. }
  366. evlist->enabled = true;
  367. }
  368. void perf_evlist__toggle_enable(struct evlist *evlist)
  369. {
  370. (evlist->enabled ? evlist__disable : evlist__enable)(evlist);
  371. }
  372. static int perf_evlist__enable_event_cpu(struct evlist *evlist,
  373. struct evsel *evsel, int cpu)
  374. {
  375. int thread;
  376. int nr_threads = perf_evlist__nr_threads(evlist, evsel);
  377. if (!evsel->core.fd)
  378. return -EINVAL;
  379. for (thread = 0; thread < nr_threads; thread++) {
  380. int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
  381. if (err)
  382. return err;
  383. }
  384. return 0;
  385. }
  386. static int perf_evlist__enable_event_thread(struct evlist *evlist,
  387. struct evsel *evsel,
  388. int thread)
  389. {
  390. int cpu;
  391. int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
  392. if (!evsel->core.fd)
  393. return -EINVAL;
  394. for (cpu = 0; cpu < nr_cpus; cpu++) {
  395. int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
  396. if (err)
  397. return err;
  398. }
  399. return 0;
  400. }
  401. int perf_evlist__enable_event_idx(struct evlist *evlist,
  402. struct evsel *evsel, int idx)
  403. {
  404. bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
  405. if (per_cpu_mmaps)
  406. return perf_evlist__enable_event_cpu(evlist, evsel, idx);
  407. else
  408. return perf_evlist__enable_event_thread(evlist, evsel, idx);
  409. }
  410. int evlist__add_pollfd(struct evlist *evlist, int fd)
  411. {
  412. return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
  413. }
  414. int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
  415. {
  416. return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
  417. }
  418. #ifdef HAVE_EVENTFD_SUPPORT
  419. int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
  420. {
  421. return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
  422. fdarray_flag__nonfilterable);
  423. }
  424. #endif
  425. int evlist__poll(struct evlist *evlist, int timeout)
  426. {
  427. return perf_evlist__poll(&evlist->core, timeout);
  428. }
  429. struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
  430. {
  431. struct hlist_head *head;
  432. struct perf_sample_id *sid;
  433. int hash;
  434. hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
  435. head = &evlist->core.heads[hash];
  436. hlist_for_each_entry(sid, head, node)
  437. if (sid->id == id)
  438. return sid;
  439. return NULL;
  440. }
  441. struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
  442. {
  443. struct perf_sample_id *sid;
  444. if (evlist->core.nr_entries == 1 || !id)
  445. return evlist__first(evlist);
  446. sid = perf_evlist__id2sid(evlist, id);
  447. if (sid)
  448. return container_of(sid->evsel, struct evsel, core);
  449. if (!evlist__sample_id_all(evlist))
  450. return evlist__first(evlist);
  451. return NULL;
  452. }
  453. struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
  454. u64 id)
  455. {
  456. struct perf_sample_id *sid;
  457. if (!id)
  458. return NULL;
  459. sid = perf_evlist__id2sid(evlist, id);
  460. if (sid)
  461. return container_of(sid->evsel, struct evsel, core);
  462. return NULL;
  463. }
  464. static int perf_evlist__event2id(struct evlist *evlist,
  465. union perf_event *event, u64 *id)
  466. {
  467. const __u64 *array = event->sample.array;
  468. ssize_t n;
  469. n = (event->header.size - sizeof(event->header)) >> 3;
  470. if (event->header.type == PERF_RECORD_SAMPLE) {
  471. if (evlist->id_pos >= n)
  472. return -1;
  473. *id = array[evlist->id_pos];
  474. } else {
  475. if (evlist->is_pos > n)
  476. return -1;
  477. n -= evlist->is_pos;
  478. *id = array[n];
  479. }
  480. return 0;
  481. }
  482. struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
  483. union perf_event *event)
  484. {
  485. struct evsel *first = evlist__first(evlist);
  486. struct hlist_head *head;
  487. struct perf_sample_id *sid;
  488. int hash;
  489. u64 id;
  490. if (evlist->core.nr_entries == 1)
  491. return first;
  492. if (!first->core.attr.sample_id_all &&
  493. event->header.type != PERF_RECORD_SAMPLE)
  494. return first;
  495. if (perf_evlist__event2id(evlist, event, &id))
  496. return NULL;
  497. /* Synthesized events have an id of zero */
  498. if (!id)
  499. return first;
  500. hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
  501. head = &evlist->core.heads[hash];
  502. hlist_for_each_entry(sid, head, node) {
  503. if (sid->id == id)
  504. return container_of(sid->evsel, struct evsel, core);
  505. }
  506. return NULL;
  507. }
  508. static int perf_evlist__set_paused(struct evlist *evlist, bool value)
  509. {
  510. int i;
  511. if (!evlist->overwrite_mmap)
  512. return 0;
  513. for (i = 0; i < evlist->core.nr_mmaps; i++) {
  514. int fd = evlist->overwrite_mmap[i].core.fd;
  515. int err;
  516. if (fd < 0)
  517. continue;
  518. err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
  519. if (err)
  520. return err;
  521. }
  522. return 0;
  523. }
  524. static int perf_evlist__pause(struct evlist *evlist)
  525. {
  526. return perf_evlist__set_paused(evlist, true);
  527. }
  528. static int perf_evlist__resume(struct evlist *evlist)
  529. {
  530. return perf_evlist__set_paused(evlist, false);
  531. }
  532. static void evlist__munmap_nofree(struct evlist *evlist)
  533. {
  534. int i;
  535. if (evlist->mmap)
  536. for (i = 0; i < evlist->core.nr_mmaps; i++)
  537. perf_mmap__munmap(&evlist->mmap[i].core);
  538. if (evlist->overwrite_mmap)
  539. for (i = 0; i < evlist->core.nr_mmaps; i++)
  540. perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
  541. }
  542. void evlist__munmap(struct evlist *evlist)
  543. {
  544. evlist__munmap_nofree(evlist);
  545. zfree(&evlist->mmap);
  546. zfree(&evlist->overwrite_mmap);
  547. }
  548. static void perf_mmap__unmap_cb(struct perf_mmap *map)
  549. {
  550. struct mmap *m = container_of(map, struct mmap, core);
  551. mmap__munmap(m);
  552. }
  553. static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
  554. bool overwrite)
  555. {
  556. int i;
  557. struct mmap *map;
  558. map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
  559. if (!map)
  560. return NULL;
  561. for (i = 0; i < evlist->core.nr_mmaps; i++) {
  562. struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
  563. /*
  564. * When the perf_mmap() call is made we grab one refcount, plus
  565. * one extra to let perf_mmap__consume() get the last
  566. * events after all real references (perf_mmap__get()) are
  567. * dropped.
  568. *
  569. * Each PERF_EVENT_IOC_SET_OUTPUT that points to this mmap
  570. * also takes a reference on it via perf_mmap__get().
  571. */
  572. perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
  573. }
  574. return map;
  575. }
  576. static void
  577. perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
  578. struct perf_mmap_param *_mp,
  579. int idx, bool per_cpu)
  580. {
  581. struct evlist *evlist = container_of(_evlist, struct evlist, core);
  582. struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
  583. auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
  584. }
  585. static struct perf_mmap*
  586. perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
  587. {
  588. struct evlist *evlist = container_of(_evlist, struct evlist, core);
  589. struct mmap *maps;
  590. maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
  591. if (!maps) {
  592. maps = evlist__alloc_mmap(evlist, overwrite);
  593. if (!maps)
  594. return NULL;
  595. if (overwrite) {
  596. evlist->overwrite_mmap = maps;
  597. if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
  598. perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
  599. } else {
  600. evlist->mmap = maps;
  601. }
  602. }
  603. return &maps[idx].core;
  604. }
  605. static int
  606. perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
  607. int output, int cpu)
  608. {
  609. struct mmap *map = container_of(_map, struct mmap, core);
  610. struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
  611. return mmap__mmap(map, mp, output, cpu);
  612. }
  613. unsigned long perf_event_mlock_kb_in_pages(void)
  614. {
  615. unsigned long pages;
  616. int max;
  617. if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
  618. /*
  619. * Pick a once upon a time good value, i.e. things look
  620. * strange since we can't read a sysctl value, but let's not
  621. * die yet...
  622. */
  623. max = 512;
  624. } else {
  625. max -= (page_size / 1024);
  626. }
  627. pages = (max * 1024) / page_size;
  628. if (!is_power_of_2(pages))
  629. pages = rounddown_pow_of_two(pages);
  630. return pages;
  631. }
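/*
 * Worked example, assuming 4 KiB pages and the default
 * kernel.perf_event_mlock_kb of 516: max = 516 - 4 = 512 kB, so
 * pages = 512 * 1024 / 4096 = 128, which is already a power of two.
 */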
  632. size_t evlist__mmap_size(unsigned long pages)
  633. {
  634. if (pages == UINT_MAX)
  635. pages = perf_event_mlock_kb_in_pages();
  636. else if (!is_power_of_2(pages))
  637. return 0;
  638. return (pages + 1) * page_size;
  639. }
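/*
 * Example: with 128 data pages and 4 KiB pages this returns
 * (128 + 1) * 4096 = 528384 bytes, the extra page being the
 * control/header page of the ring buffer.
 */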
  640. static long parse_pages_arg(const char *str, unsigned long min,
  641. unsigned long max)
  642. {
  643. unsigned long pages, val;
  644. static struct parse_tag tags[] = {
  645. { .tag = 'B', .mult = 1 },
  646. { .tag = 'K', .mult = 1 << 10 },
  647. { .tag = 'M', .mult = 1 << 20 },
  648. { .tag = 'G', .mult = 1 << 30 },
  649. { .tag = 0 },
  650. };
  651. if (str == NULL)
  652. return -EINVAL;
  653. val = parse_tag_value(str, tags);
  654. if (val != (unsigned long) -1) {
  655. /* we got file size value */
  656. pages = PERF_ALIGN(val, page_size) / page_size;
  657. } else {
  658. /* we got pages count value */
  659. char *eptr;
  660. pages = strtoul(str, &eptr, 10);
  661. if (*eptr != '\0')
  662. return -EINVAL;
  663. }
  664. if (pages == 0 && min == 0) {
  665. /* leave number of pages at 0 */
  666. } else if (!is_power_of_2(pages)) {
  667. char buf[100];
  668. /* round pages up to next power of 2 */
  669. pages = roundup_pow_of_two(pages);
  670. if (!pages)
  671. return -EINVAL;
  672. unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
  673. pr_info("rounding mmap pages size to %s (%lu pages)\n",
  674. buf, pages);
  675. }
  676. if (pages > max)
  677. return -EINVAL;
  678. return pages;
  679. }
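/*
 * Examples: "512K" is treated as a size and, on a 4 KiB page system, gives
 * 512 KiB / 4 KiB = 128 pages; "100" is treated as a page count and is
 * rounded up to the next power of two (128), printing a message about the
 * rounding.
 */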
  680. int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
  681. {
  682. unsigned long max = UINT_MAX;
  683. long pages;
  684. if (max > SIZE_MAX / page_size)
  685. max = SIZE_MAX / page_size;
  686. pages = parse_pages_arg(str, 1, max);
  687. if (pages < 0) {
  688. pr_err("Invalid argument for --mmap_pages/-m\n");
  689. return -1;
  690. }
  691. *mmap_pages = pages;
  692. return 0;
  693. }
  694. int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
  695. int unset __maybe_unused)
  696. {
  697. return __perf_evlist__parse_mmap_pages(opt->value, str);
  698. }
  699. /**
  700. * evlist__mmap_ex - Create mmaps to receive events.
  701. * @evlist: list of events
  702. * @pages: map length in pages
  703. * @overwrite: overwrite older events?
  704. * @auxtrace_pages: auxtrace map length in pages
  705. * @auxtrace_overwrite: overwrite older auxtrace data?
  706. *
  707. * If @overwrite is %false the user needs to signal event consumption using
  708. * perf_mmap__write_tail(). Using evlist__mmap_read() does this
  709. * automatically.
  710. *
  711. * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
  712. * consumption using auxtrace_mmap__write_tail().
  713. *
  714. * Return: %0 on success, negative error code otherwise.
  715. */
  716. int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
  717. unsigned int auxtrace_pages,
  718. bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
  719. int comp_level)
  720. {
  721. /*
  722. * Delay setting mp.prot: set it before calling perf_mmap__mmap.
  723. * Its value is decided by evsel's write_backward.
  724. * So &mp should not be passed through a const pointer.
  725. */
  726. struct mmap_params mp = {
  727. .nr_cblocks = nr_cblocks,
  728. .affinity = affinity,
  729. .flush = flush,
  730. .comp_level = comp_level
  731. };
  732. struct perf_evlist_mmap_ops ops = {
  733. .idx = perf_evlist__mmap_cb_idx,
  734. .get = perf_evlist__mmap_cb_get,
  735. .mmap = perf_evlist__mmap_cb_mmap,
  736. };
  737. evlist->core.mmap_len = evlist__mmap_size(pages);
  738. pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
  739. auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
  740. auxtrace_pages, auxtrace_overwrite);
  741. return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
  742. }
  743. int evlist__mmap(struct evlist *evlist, unsigned int pages)
  744. {
  745. return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
  746. }
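/*
 * Typical call sequence (sketch only, error handling elided): after
 * evlist__open() succeeds, something like
 *
 *	err = evlist__mmap(evlist, mmap_pages);
 *	if (err < 0)
 *		goto out;
 *
 * where mmap_pages usually comes from perf_evlist__parse_mmap_pages(), and
 * events are then read from the evlist->mmap[] ring buffers.
 */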
  747. int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
  748. {
  749. bool all_threads = (target->per_thread && target->system_wide);
  750. struct perf_cpu_map *cpus;
  751. struct perf_thread_map *threads;
  752. /*
  753. * If both '-a' and '--per-thread' are passed to perf record, perf record
  754. * overrides '--per-thread': target->per_thread = false and
  755. * target->system_wide = true.
  756. *
  757. * If only '--per-thread' is passed to perf record,
  758. * target->per_thread = true and target->system_wide = false.
  759. *
  760. * So for perf record, target->per_thread && target->system_wide is always
  761. * false and thread_map__new_str doesn't call
  762. * thread_map__new_all_cpus, which preserves perf record's
  763. * current behavior.
  764. *
  765. * perf stat, however, allows target->per_thread and
  766. * target->system_wide to both be true, meaning collect system-wide
  767. * per-thread data. In that case thread_map__new_str calls
  768. * thread_map__new_all_cpus to enumerate all threads.
  769. */
  770. threads = thread_map__new_str(target->pid, target->tid, target->uid,
  771. all_threads);
  772. if (!threads)
  773. return -1;
  774. if (target__uses_dummy_map(target))
  775. cpus = perf_cpu_map__dummy_new();
  776. else
  777. cpus = perf_cpu_map__new(target->cpu_list);
  778. if (!cpus)
  779. goto out_delete_threads;
  780. evlist->core.has_user_cpus = !!target->cpu_list;
  781. perf_evlist__set_maps(&evlist->core, cpus, threads);
  782. /* as evlist now has references, put count here */
  783. perf_cpu_map__put(cpus);
  784. perf_thread_map__put(threads);
  785. return 0;
  786. out_delete_threads:
  787. perf_thread_map__put(threads);
  788. return -1;
  789. }
  790. void __perf_evlist__set_sample_bit(struct evlist *evlist,
  791. enum perf_event_sample_format bit)
  792. {
  793. struct evsel *evsel;
  794. evlist__for_each_entry(evlist, evsel)
  795. __evsel__set_sample_bit(evsel, bit);
  796. }
  797. void __perf_evlist__reset_sample_bit(struct evlist *evlist,
  798. enum perf_event_sample_format bit)
  799. {
  800. struct evsel *evsel;
  801. evlist__for_each_entry(evlist, evsel)
  802. __evsel__reset_sample_bit(evsel, bit);
  803. }
  804. int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
  805. {
  806. struct evsel *evsel;
  807. int err = 0;
  808. evlist__for_each_entry(evlist, evsel) {
  809. if (evsel->filter == NULL)
  810. continue;
  811. /*
  812. * Filters only work for tracepoint events, which don't have a cpu limit,
  813. * so evlist and evsel should always be the same here.
  814. */
  815. err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
  816. if (err) {
  817. *err_evsel = evsel;
  818. break;
  819. }
  820. }
  821. return err;
  822. }
  823. int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
  824. {
  825. struct evsel *evsel;
  826. int err = 0;
  827. if (filter == NULL)
  828. return -1;
  829. evlist__for_each_entry(evlist, evsel) {
  830. if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
  831. continue;
  832. err = evsel__set_filter(evsel, filter);
  833. if (err)
  834. break;
  835. }
  836. return err;
  837. }
  838. int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
  839. {
  840. struct evsel *evsel;
  841. int err = 0;
  842. if (filter == NULL)
  843. return -1;
  844. evlist__for_each_entry(evlist, evsel) {
  845. if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
  846. continue;
  847. err = evsel__append_tp_filter(evsel, filter);
  848. if (err)
  849. break;
  850. }
  851. return err;
  852. }
  853. char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
  854. {
  855. char *filter = NULL;
  856. size_t i;
  857. for (i = 0; i < npids; ++i) {
  858. if (i == 0) {
  859. if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
  860. return NULL;
  861. } else {
  862. char *tmp;
  863. if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
  864. goto out_free;
  865. free(filter);
  866. filter = tmp;
  867. }
  868. }
  869. return filter;
  870. out_free:
  871. free(filter);
  872. return NULL;
  873. }
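/*
 * Example: for npids = 2 and pids = { 123, 456 } the string built above is
 * "common_pid != 123 && common_pid != 456".
 */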
  874. int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
  875. {
  876. char *filter = asprintf__tp_filter_pids(npids, pids);
  877. int ret = perf_evlist__set_tp_filter(evlist, filter);
  878. free(filter);
  879. return ret;
  880. }
  881. int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
  882. {
  883. return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
  884. }
  885. int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
  886. {
  887. char *filter = asprintf__tp_filter_pids(npids, pids);
  888. int ret = perf_evlist__append_tp_filter(evlist, filter);
  889. free(filter);
  890. return ret;
  891. }
  892. int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
  893. {
  894. return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
  895. }
  896. bool evlist__valid_sample_type(struct evlist *evlist)
  897. {
  898. struct evsel *pos;
  899. if (evlist->core.nr_entries == 1)
  900. return true;
  901. if (evlist->id_pos < 0 || evlist->is_pos < 0)
  902. return false;
  903. evlist__for_each_entry(evlist, pos) {
  904. if (pos->id_pos != evlist->id_pos ||
  905. pos->is_pos != evlist->is_pos)
  906. return false;
  907. }
  908. return true;
  909. }
  910. u64 __evlist__combined_sample_type(struct evlist *evlist)
  911. {
  912. struct evsel *evsel;
  913. if (evlist->combined_sample_type)
  914. return evlist->combined_sample_type;
  915. evlist__for_each_entry(evlist, evsel)
  916. evlist->combined_sample_type |= evsel->core.attr.sample_type;
  917. return evlist->combined_sample_type;
  918. }
  919. u64 evlist__combined_sample_type(struct evlist *evlist)
  920. {
  921. evlist->combined_sample_type = 0;
  922. return __evlist__combined_sample_type(evlist);
  923. }
  924. u64 evlist__combined_branch_type(struct evlist *evlist)
  925. {
  926. struct evsel *evsel;
  927. u64 branch_type = 0;
  928. evlist__for_each_entry(evlist, evsel)
  929. branch_type |= evsel->core.attr.branch_sample_type;
  930. return branch_type;
  931. }
  932. bool perf_evlist__valid_read_format(struct evlist *evlist)
  933. {
  934. struct evsel *first = evlist__first(evlist), *pos = first;
  935. u64 read_format = first->core.attr.read_format;
  936. u64 sample_type = first->core.attr.sample_type;
  937. evlist__for_each_entry(evlist, pos) {
  938. if (read_format != pos->core.attr.read_format) {
  939. pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
  940. read_format, (u64)pos->core.attr.read_format);
  941. }
  942. }
  943. /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
  944. if ((sample_type & PERF_SAMPLE_READ) &&
  945. !(read_format & PERF_FORMAT_ID)) {
  946. return false;
  947. }
  948. return true;
  949. }
  950. u16 perf_evlist__id_hdr_size(struct evlist *evlist)
  951. {
  952. struct evsel *first = evlist__first(evlist);
  953. struct perf_sample *data;
  954. u64 sample_type;
  955. u16 size = 0;
  956. if (!first->core.attr.sample_id_all)
  957. goto out;
  958. sample_type = first->core.attr.sample_type;
  959. if (sample_type & PERF_SAMPLE_TID)
  960. size += sizeof(data->tid) * 2;
  961. if (sample_type & PERF_SAMPLE_TIME)
  962. size += sizeof(data->time);
  963. if (sample_type & PERF_SAMPLE_ID)
  964. size += sizeof(data->id);
  965. if (sample_type & PERF_SAMPLE_STREAM_ID)
  966. size += sizeof(data->stream_id);
  967. if (sample_type & PERF_SAMPLE_CPU)
  968. size += sizeof(data->cpu) * 2;
  969. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  970. size += sizeof(data->id);
  971. out:
  972. return size;
  973. }
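/*
 * Worked example, assuming the usual perf_sample field sizes (u32 pid/tid/cpu,
 * u64 time/id/stream_id): with sample_id_all set and a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER the trailer is
 * 8 + 8 + 8 = 24 bytes.
 */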
  974. bool evlist__valid_sample_id_all(struct evlist *evlist)
  975. {
  976. struct evsel *first = evlist__first(evlist), *pos = first;
  977. evlist__for_each_entry_continue(evlist, pos) {
  978. if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
  979. return false;
  980. }
  981. return true;
  982. }
  983. bool evlist__sample_id_all(struct evlist *evlist)
  984. {
  985. struct evsel *first = evlist__first(evlist);
  986. return first->core.attr.sample_id_all;
  987. }
  988. void perf_evlist__set_selected(struct evlist *evlist,
  989. struct evsel *evsel)
  990. {
  991. evlist->selected = evsel;
  992. }
  993. void evlist__close(struct evlist *evlist)
  994. {
  995. struct evsel *evsel;
  996. struct affinity affinity;
  997. int cpu, i;
  998. /*
  999. * With perf record core.cpus is usually NULL.
  1000. * Use the old method to handle this for now.
  1001. */
  1002. if (!evlist->core.cpus) {
  1003. evlist__for_each_entry_reverse(evlist, evsel)
  1004. evsel__close(evsel);
  1005. return;
  1006. }
  1007. if (affinity__setup(&affinity) < 0)
  1008. return;
  1009. evlist__for_each_cpu(evlist, i, cpu) {
  1010. affinity__set(&affinity, cpu);
  1011. evlist__for_each_entry_reverse(evlist, evsel) {
  1012. if (evsel__cpu_iter_skip(evsel, cpu))
  1013. continue;
  1014. perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
  1015. }
  1016. }
  1017. affinity__cleanup(&affinity);
  1018. evlist__for_each_entry_reverse(evlist, evsel) {
  1019. perf_evsel__free_fd(&evsel->core);
  1020. perf_evsel__free_id(&evsel->core);
  1021. }
  1022. }
  1023. static int perf_evlist__create_syswide_maps(struct evlist *evlist)
  1024. {
  1025. struct perf_cpu_map *cpus;
  1026. struct perf_thread_map *threads;
  1027. int err = -ENOMEM;
  1028. /*
  1029. * Try reading /sys/devices/system/cpu/online to get
  1030. * an all cpus map.
  1031. *
  1032. * FIXME: -ENOMEM is the best we can do here, the cpu_map
  1033. * code needs an overhaul to properly forward the
  1034. * error, and we may not want to do that fallback to a
  1035. * default cpu identity map :-\
  1036. */
  1037. cpus = perf_cpu_map__new(NULL);
  1038. if (!cpus)
  1039. goto out;
  1040. threads = perf_thread_map__new_dummy();
  1041. if (!threads)
  1042. goto out_put;
  1043. perf_evlist__set_maps(&evlist->core, cpus, threads);
  1044. perf_thread_map__put(threads);
  err = 0; /* maps are in place; don't fall through still reporting -ENOMEM */
  1045. out_put:
  1046. perf_cpu_map__put(cpus);
  1047. out:
  1048. return err;
  1049. }
  1050. int evlist__open(struct evlist *evlist)
  1051. {
  1052. struct evsel *evsel;
  1053. int err;
  1054. /*
  1055. * Default: one fd per CPU, all threads, aka systemwide
  1056. * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
  1057. */
  1058. if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
  1059. err = perf_evlist__create_syswide_maps(evlist);
  1060. if (err < 0)
  1061. goto out_err;
  1062. }
  1063. perf_evlist__update_id_pos(evlist);
  1064. evlist__for_each_entry(evlist, evsel) {
  1065. err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
  1066. if (err < 0)
  1067. goto out_err;
  1068. }
  1069. return 0;
  1070. out_err:
  1071. evlist__close(evlist);
  1072. errno = -err;
  1073. return err;
  1074. }
  1075. int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
  1076. const char *argv[], bool pipe_output,
  1077. void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
  1078. {
  1079. int child_ready_pipe[2], go_pipe[2];
  1080. char bf;
  1081. if (pipe(child_ready_pipe) < 0) {
  1082. perror("failed to create 'ready' pipe");
  1083. return -1;
  1084. }
  1085. if (pipe(go_pipe) < 0) {
  1086. perror("failed to create 'go' pipe");
  1087. goto out_close_ready_pipe;
  1088. }
  1089. evlist->workload.pid = fork();
  1090. if (evlist->workload.pid < 0) {
  1091. perror("failed to fork");
  1092. goto out_close_pipes;
  1093. }
  1094. if (!evlist->workload.pid) {
  1095. int ret;
  1096. if (pipe_output)
  1097. dup2(2, 1);
  1098. signal(SIGTERM, SIG_DFL);
  1099. close(child_ready_pipe[0]);
  1100. close(go_pipe[1]);
  1101. fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
  1102. /*
  1103. * Tell the parent we're ready to go
  1104. */
  1105. close(child_ready_pipe[1]);
  1106. /*
  1107. * Wait until the parent tells us to go.
  1108. */
  1109. ret = read(go_pipe[0], &bf, 1);
  1110. /*
  1111. * The parent will ask for the execvp() to be performed by
  1112. * writing exactly one byte, in workload.cork_fd, usually via
  1113. * perf_evlist__start_workload().
  1114. *
  1115. * For cancelling the workload without actually running it,
  1116. * the parent will just close workload.cork_fd, without writing
  1117. * anything, i.e. read will return zero and we just exit()
  1118. * here.
  1119. */
  1120. if (ret != 1) {
  1121. if (ret == -1)
  1122. perror("unable to read pipe");
  1123. exit(ret);
  1124. }
  1125. execvp(argv[0], (char **)argv);
  1126. if (exec_error) {
  1127. union sigval val;
  1128. val.sival_int = errno;
  1129. if (sigqueue(getppid(), SIGUSR1, val))
  1130. perror(argv[0]);
  1131. } else
  1132. perror(argv[0]);
  1133. exit(-1);
  1134. }
  1135. if (exec_error) {
  1136. struct sigaction act = {
  1137. .sa_flags = SA_SIGINFO,
  1138. .sa_sigaction = exec_error,
  1139. };
  1140. sigaction(SIGUSR1, &act, NULL);
  1141. }
  1142. if (target__none(target)) {
  1143. if (evlist->core.threads == NULL) {
  1144. fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
  1145. __func__, __LINE__);
  1146. goto out_close_pipes;
  1147. }
  1148. perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
  1149. }
  1150. close(child_ready_pipe[1]);
  1151. close(go_pipe[0]);
  1152. /*
  1153. * wait for child to settle
  1154. */
  1155. if (read(child_ready_pipe[0], &bf, 1) == -1) {
  1156. perror("unable to read pipe");
  1157. goto out_close_pipes;
  1158. }
  1159. fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
  1160. evlist->workload.cork_fd = go_pipe[1];
  1161. close(child_ready_pipe[0]);
  1162. return 0;
  1163. out_close_pipes:
  1164. close(go_pipe[0]);
  1165. close(go_pipe[1]);
  1166. out_close_ready_pipe:
  1167. close(child_ready_pipe[0]);
  1168. close(child_ready_pipe[1]);
  1169. return -1;
  1170. }
  1171. int perf_evlist__start_workload(struct evlist *evlist)
  1172. {
  1173. if (evlist->workload.cork_fd > 0) {
  1174. char bf = 0;
  1175. int ret;
  1176. /*
  1177. * Remove the cork, let it rip!
  1178. */
  1179. ret = write(evlist->workload.cork_fd, &bf, 1);
  1180. if (ret < 0)
  1181. perror("unable to write to pipe");
  1182. close(evlist->workload.cork_fd);
  1183. return ret;
  1184. }
  1185. return 0;
  1186. }
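/*
 * Sketch of how these two helpers are normally paired (roughly as in
 * builtin-record.c):
 *
 *	perf_evlist__prepare_workload(evlist, &opts->target, argv, ...);
 *	... open, mmap and enable the events ...
 *	perf_evlist__start_workload(evlist);	// writes one byte to cork_fd,
 *						// letting the child execvp()
 */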
  1187. int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
  1188. struct perf_sample *sample)
  1189. {
  1190. struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
  1191. if (!evsel)
  1192. return -EFAULT;
  1193. return evsel__parse_sample(evsel, event, sample);
  1194. }
  1195. int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
  1196. union perf_event *event,
  1197. u64 *timestamp)
  1198. {
  1199. struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
  1200. if (!evsel)
  1201. return -EFAULT;
  1202. return evsel__parse_sample_timestamp(evsel, event, timestamp);
  1203. }
  1204. int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
  1205. {
  1206. int printed, value;
  1207. char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
  1208. switch (err) {
  1209. case EACCES:
  1210. case EPERM:
  1211. printed = scnprintf(buf, size,
  1212. "Error:\t%s.\n"
  1213. "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
  1214. value = perf_event_paranoid();
  1215. printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
  1216. if (value >= 2) {
  1217. printed += scnprintf(buf + printed, size - printed,
  1218. "For your workloads it needs to be <= 1\nHint:\t");
  1219. }
  1220. printed += scnprintf(buf + printed, size - printed,
  1221. "For system wide tracing it needs to be set to -1.\n");
  1222. printed += scnprintf(buf + printed, size - printed,
  1223. "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
  1224. "Hint:\tThe current value is %d.", value);
  1225. break;
  1226. case EINVAL: {
  1227. struct evsel *first = evlist__first(evlist);
  1228. int max_freq;
  1229. if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
  1230. goto out_default;
  1231. if (first->core.attr.sample_freq < (u64)max_freq)
  1232. goto out_default;
  1233. printed = scnprintf(buf, size,
  1234. "Error:\t%s.\n"
  1235. "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
  1236. "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
  1237. emsg, max_freq, first->core.attr.sample_freq);
  1238. break;
  1239. }
  1240. default:
  1241. out_default:
  1242. scnprintf(buf, size, "%s", emsg);
  1243. break;
  1244. }
  1245. return 0;
  1246. }
  1247. int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
  1248. {
  1249. char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
  1250. int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
  1251. switch (err) {
  1252. case EPERM:
  1253. sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
  1254. printed += scnprintf(buf + printed, size - printed,
  1255. "Error:\t%s.\n"
  1256. "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
  1257. "Hint:\tTried using %zd kB.\n",
  1258. emsg, pages_max_per_user, pages_attempted);
  1259. if (pages_attempted >= pages_max_per_user) {
  1260. printed += scnprintf(buf + printed, size - printed,
  1261. "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
  1262. pages_max_per_user + pages_attempted);
  1263. }
  1264. printed += scnprintf(buf + printed, size - printed,
  1265. "Hint:\tTry using a smaller -m/--mmap-pages value.");
  1266. break;
  1267. default:
  1268. scnprintf(buf, size, "%s", emsg);
  1269. break;
  1270. }
  1271. return 0;
  1272. }
  1273. void perf_evlist__to_front(struct evlist *evlist,
  1274. struct evsel *move_evsel)
  1275. {
  1276. struct evsel *evsel, *n;
  1277. LIST_HEAD(move);
  1278. if (move_evsel == evlist__first(evlist))
  1279. return;
  1280. evlist__for_each_entry_safe(evlist, n, evsel) {
  1281. if (evsel->leader == move_evsel->leader)
  1282. list_move_tail(&evsel->core.node, &move);
  1283. }
  1284. list_splice(&move, &evlist->core.entries);
  1285. }
  1286. struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
  1287. {
  1288. struct evsel *evsel;
  1289. evlist__for_each_entry(evlist, evsel) {
  1290. if (evsel->tracking)
  1291. return evsel;
  1292. }
  1293. return evlist__first(evlist);
  1294. }
  1295. void perf_evlist__set_tracking_event(struct evlist *evlist,
  1296. struct evsel *tracking_evsel)
  1297. {
  1298. struct evsel *evsel;
  1299. if (tracking_evsel->tracking)
  1300. return;
  1301. evlist__for_each_entry(evlist, evsel) {
  1302. if (evsel != tracking_evsel)
  1303. evsel->tracking = false;
  1304. }
  1305. tracking_evsel->tracking = true;
  1306. }
  1307. struct evsel *
  1308. perf_evlist__find_evsel_by_str(struct evlist *evlist,
  1309. const char *str)
  1310. {
  1311. struct evsel *evsel;
  1312. evlist__for_each_entry(evlist, evsel) {
  1313. if (!evsel->name)
  1314. continue;
  1315. if (strcmp(str, evsel->name) == 0)
  1316. return evsel;
  1317. }
  1318. return NULL;
  1319. }
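/*
 * Legal bkw_mmap_state transitions handled below:
 *
 *	BKW_MMAP_NOTREADY     -> BKW_MMAP_RUNNING
 *	BKW_MMAP_RUNNING      -> BKW_MMAP_DATA_PENDING  (pauses overwrite mmaps)
 *	BKW_MMAP_DATA_PENDING -> BKW_MMAP_EMPTY
 *	BKW_MMAP_EMPTY        -> BKW_MMAP_RUNNING       (resumes overwrite mmaps)
 *
 * Any other requested transition is rejected and the state is left unchanged.
 */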
  1320. void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
  1321. enum bkw_mmap_state state)
  1322. {
  1323. enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
  1324. enum action {
  1325. NONE,
  1326. PAUSE,
  1327. RESUME,
  1328. } action = NONE;
  1329. if (!evlist->overwrite_mmap)
  1330. return;
  1331. switch (old_state) {
  1332. case BKW_MMAP_NOTREADY: {
  1333. if (state != BKW_MMAP_RUNNING)
  1334. goto state_err;
  1335. break;
  1336. }
  1337. case BKW_MMAP_RUNNING: {
  1338. if (state != BKW_MMAP_DATA_PENDING)
  1339. goto state_err;
  1340. action = PAUSE;
  1341. break;
  1342. }
  1343. case BKW_MMAP_DATA_PENDING: {
  1344. if (state != BKW_MMAP_EMPTY)
  1345. goto state_err;
  1346. break;
  1347. }
  1348. case BKW_MMAP_EMPTY: {
  1349. if (state != BKW_MMAP_RUNNING)
  1350. goto state_err;
  1351. action = RESUME;
  1352. break;
  1353. }
  1354. default:
  1355. WARN_ONCE(1, "Shouldn't get there\n");
  1356. }
  1357. evlist->bkw_mmap_state = state;
  1358. switch (action) {
  1359. case PAUSE:
  1360. perf_evlist__pause(evlist);
  1361. break;
  1362. case RESUME:
  1363. perf_evlist__resume(evlist);
  1364. break;
  1365. case NONE:
  1366. default:
  1367. break;
  1368. }
  1369. state_err:
  1370. return;
  1371. }
  1372. bool perf_evlist__exclude_kernel(struct evlist *evlist)
  1373. {
  1374. struct evsel *evsel;
  1375. evlist__for_each_entry(evlist, evsel) {
  1376. if (!evsel->core.attr.exclude_kernel)
  1377. return false;
  1378. }
  1379. return true;
  1380. }
  1381. /*
  1382. * Events in data file are not collected in groups, but we still want
  1383. * the group display. Set the artificial group and set the leader's
  1384. * forced_leader flag to notify the display code.
  1385. */
  1386. void perf_evlist__force_leader(struct evlist *evlist)
  1387. {
  1388. if (!evlist->nr_groups) {
  1389. struct evsel *leader = evlist__first(evlist);
  1390. perf_evlist__set_leader(evlist);
  1391. leader->forced_leader = true;
  1392. }
  1393. }
  1394. struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
  1395. struct evsel *evsel,
  1396. bool close)
  1397. {
  1398. struct evsel *c2, *leader;
  1399. bool is_open = true;
  1400. leader = evsel->leader;
  1401. pr_debug("Weak group for %s/%d failed\n",
  1402. leader->name, leader->core.nr_members);
  1403. /*
  1404. * for_each_group_member doesn't work here because it doesn't
  1405. * include the first entry.
  1406. */
  1407. evlist__for_each_entry(evsel_list, c2) {
  1408. if (c2 == evsel)
  1409. is_open = false;
  1410. if (c2->leader == leader) {
  1411. if (is_open && close)
  1412. perf_evsel__close(&c2->core);
  1413. c2->leader = c2;
  1414. c2->core.nr_members = 0;
  1415. /*
  1416. * Set this for all former members of the group
  1417. * to indicate they get reopened.
  1418. */
  1419. c2->reset_group = true;
  1420. }
  1421. }
  1422. return leader;
  1423. }
  1424. static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
  1425. {
  1426. char *s, *p;
  1427. int ret = 0, fd;
  1428. if (strncmp(str, "fifo:", 5))
  1429. return -EINVAL;
  1430. str += 5;
  1431. if (!*str || *str == ',')
  1432. return -EINVAL;
  1433. s = strdup(str);
  1434. if (!s)
  1435. return -ENOMEM;
  1436. p = strchr(s, ',');
  1437. if (p)
  1438. *p = '\0';
  1439. /*
  1440. * O_RDWR avoids POLLHUPs which is necessary to allow the other
  1441. * end of a FIFO to be repeatedly opened and closed.
  1442. */
  1443. fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
  1444. if (fd < 0) {
  1445. pr_err("Failed to open '%s'\n", s);
  1446. ret = -errno;
  1447. goto out_free;
  1448. }
  1449. *ctl_fd = fd;
  1450. *ctl_fd_close = true;
  1451. if (p && *++p) {
  1452. /* O_RDWR | O_NONBLOCK means the other end need not be open */
  1453. fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
  1454. if (fd < 0) {
  1455. pr_err("Failed to open '%s'\n", p);
  1456. ret = -errno;
  1457. goto out_free;
  1458. }
  1459. *ctl_fd_ack = fd;
  1460. }
  1461. out_free:
  1462. free(s);
  1463. return ret;
  1464. }
  1465. int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
  1466. {
  1467. char *comma = NULL, *endptr = NULL;
  1468. *ctl_fd_close = false;
  1469. if (strncmp(str, "fd:", 3))
  1470. return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);
  1471. *ctl_fd = strtoul(&str[3], &endptr, 0);
  1472. if (endptr == &str[3])
  1473. return -EINVAL;
  1474. comma = strchr(str, ',');
  1475. if (comma) {
  1476. if (endptr != comma)
  1477. return -EINVAL;
  1478. *ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
  1479. if (endptr == comma + 1 || *endptr != '\0')
  1480. return -EINVAL;
  1481. }
  1482. return 0;
  1483. }
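/*
 * Examples of control specifications accepted above, as passed to e.g.
 * 'perf record --control' or 'perf stat --control':
 *
 *	fd:10,11		ctl fd 10, optional ack fd 11 (already open)
 *	fifo:ctl.fifo,ack.fifo	ctl FIFO path, optional ack FIFO path
 */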
  1484. void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
  1485. {
  1486. if (*ctl_fd_close) {
  1487. *ctl_fd_close = false;
  1488. close(ctl_fd);
  1489. if (ctl_fd_ack >= 0)
  1490. close(ctl_fd_ack);
  1491. }
  1492. }
  1493. int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
  1494. {
  1495. if (fd == -1) {
  1496. pr_debug("Control descriptor is not initialized\n");
  1497. return 0;
  1498. }
  1499. evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
  1500. fdarray_flag__nonfilterable);
  1501. if (evlist->ctl_fd.pos < 0) {
  1502. evlist->ctl_fd.pos = -1;
  1503. pr_err("Failed to add ctl fd entry: %m\n");
  1504. return -1;
  1505. }
  1506. evlist->ctl_fd.fd = fd;
  1507. evlist->ctl_fd.ack = ack;
  1508. return 0;
  1509. }
  1510. bool evlist__ctlfd_initialized(struct evlist *evlist)
  1511. {
  1512. return evlist->ctl_fd.pos >= 0;
  1513. }
  1514. int evlist__finalize_ctlfd(struct evlist *evlist)
  1515. {
  1516. struct pollfd *entries = evlist->core.pollfd.entries;
  1517. if (!evlist__ctlfd_initialized(evlist))
  1518. return 0;
  1519. entries[evlist->ctl_fd.pos].fd = -1;
  1520. entries[evlist->ctl_fd.pos].events = 0;
  1521. entries[evlist->ctl_fd.pos].revents = 0;
  1522. evlist->ctl_fd.pos = -1;
  1523. evlist->ctl_fd.ack = -1;
  1524. evlist->ctl_fd.fd = -1;
  1525. return 0;
  1526. }
  1527. static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
  1528. char *cmd_data, size_t data_size)
  1529. {
  1530. int err;
  1531. char c;
  1532. size_t bytes_read = 0;
  1533. *cmd = EVLIST_CTL_CMD_UNSUPPORTED;
  1534. memset(cmd_data, 0, data_size);
  1535. data_size--;
  1536. do {
  1537. err = read(evlist->ctl_fd.fd, &c, 1);
  1538. if (err > 0) {
  1539. if (c == '\n' || c == '\0')
  1540. break;
  1541. cmd_data[bytes_read++] = c;
  1542. if (bytes_read == data_size)
  1543. break;
  1544. continue;
  1545. } else if (err == -1) {
  1546. if (errno == EINTR)
  1547. continue;
  1548. if (errno == EAGAIN || errno == EWOULDBLOCK)
  1549. err = 0;
  1550. else
  1551. pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
  1552. }
  1553. break;
  1554. } while (1);
  1555. pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
  1556. bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");
  1557. if (bytes_read > 0) {
  1558. if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
  1559. (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
  1560. *cmd = EVLIST_CTL_CMD_ENABLE;
  1561. } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
  1562. (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
  1563. *cmd = EVLIST_CTL_CMD_DISABLE;
  1564. } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
  1565. (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
  1566. *cmd = EVLIST_CTL_CMD_SNAPSHOT;
  1567. pr_debug("is snapshot\n");
  1568. }
  1569. }
  1570. return bytes_read ? (int)bytes_read : err;
  1571. }
  1572. int evlist__ctlfd_ack(struct evlist *evlist)
  1573. {
  1574. int err;
  1575. if (evlist->ctl_fd.ack == -1)
  1576. return 0;
  1577. err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
  1578. sizeof(EVLIST_CTL_CMD_ACK_TAG));
  1579. if (err == -1)
  1580. pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);
  1581. return err;
  1582. }
  1583. int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
  1584. {
  1585. int err = 0;
  1586. char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
  1587. int ctlfd_pos = evlist->ctl_fd.pos;
  1588. struct pollfd *entries = evlist->core.pollfd.entries;
  1589. if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
  1590. return 0;
  1591. if (entries[ctlfd_pos].revents & POLLIN) {
  1592. err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
  1593. EVLIST_CTL_CMD_MAX_LEN);
  1594. if (err > 0) {
  1595. switch (*cmd) {
  1596. case EVLIST_CTL_CMD_ENABLE:
  1597. evlist__enable(evlist);
  1598. break;
  1599. case EVLIST_CTL_CMD_DISABLE:
  1600. evlist__disable(evlist);
  1601. break;
  1602. case EVLIST_CTL_CMD_SNAPSHOT:
  1603. break;
  1604. case EVLIST_CTL_CMD_ACK:
  1605. case EVLIST_CTL_CMD_UNSUPPORTED:
  1606. default:
  1607. pr_debug("ctlfd: unsupported %d\n", *cmd);
  1608. break;
  1609. }
  1610. if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
  1611. *cmd == EVLIST_CTL_CMD_SNAPSHOT))
  1612. evlist__ctlfd_ack(evlist);
  1613. }
  1614. }
  1615. if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
  1616. evlist__finalize_ctlfd(evlist);
  1617. else
  1618. entries[ctlfd_pos].revents = 0;
  1619. return err;
  1620. }
  1621. struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
  1622. {
  1623. struct evsel *evsel;
  1624. evlist__for_each_entry(evlist, evsel) {
  1625. if (evsel->idx == idx)
  1626. return evsel;
  1627. }
  1628. return NULL;
  1629. }