synthetic-events.c

// SPDX-License-Identifier: GPL-2.0-only
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/cgroup.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
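
/*
 * proc_map_timeout is in milliseconds; perf_event__synthesize_mmap_events()
 * below converts it to nanoseconds before comparing it against rdclock()
 * deltas. It can be raised with the --proc-map-timeout record option.
 */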

int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}
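
/*
 * Synthesized events have no real sample attached, so the catch-all
 * synth_sample above deliberately uses -1 for the identifying fields and a
 * period of 1; only the cpumode bits are taken from the event itself.
 */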

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(bf, sizeof(bf), "/proc/%d/status", pid);

	fd = open(bf, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name = skip_spaces(name + 5);  /* strlen("Name:") */
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}
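
/*
 * Illustrative /proc/<pid>/status prefix that the parser above walks with
 * strstr(); exact field order and spacing can vary between kernel versions:
 *
 *	Name:	cat
 *	State:	R (running)
 *	Tgid:	41038
 *	Pid:	41038
 *	PPid:	1
 */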

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}

	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
				u32 *prot, u32 *flags, __u64 *offset,
				u32 *maj, u32 *min,
				__u64 *inode,
				ssize_t pathname_size, char *pathname)
{
	__u64 temp;
	int ch;
	char *start_pathname = pathname;

	if (io__get_hex(io, start) != '-')
		return false;
	if (io__get_hex(io, end) != ' ')
		return false;

	/* map protection and flags bits */
	*prot = 0;
	ch = io__get_char(io);
	if (ch == 'r')
		*prot |= PROT_READ;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'w')
		*prot |= PROT_WRITE;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'x')
		*prot |= PROT_EXEC;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 's')
		*flags = MAP_SHARED;
	else if (ch == 'p')
		*flags = MAP_PRIVATE;
	else
		return false;
	if (io__get_char(io) != ' ')
		return false;

	if (io__get_hex(io, offset) != ' ')
		return false;

	if (io__get_hex(io, &temp) != ':')
		return false;
	*maj = temp;
	if (io__get_hex(io, &temp) != ' ')
		return false;
	*min = temp;

	ch = io__get_dec(io, inode);
	if (ch != ' ') {
		*pathname = '\0';
		return ch == '\n';
	}
	do {
		ch = io__get_char(io);
	} while (ch == ' ');
	while (true) {
		if (ch < 0)
			return false;
		if (ch == '\0' || ch == '\n' ||
		    (pathname + 1 - start_pathname) >= pathname_size) {
			*pathname = '\0';
			return true;
		}
		*pathname++ = ch;
		ch = io__get_char(io);
	}
}
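
/*
 * Field-by-field anatomy of a typical /proc/<pid>/maps line as consumed
 * above (illustrative):
 *
 *	00400000-0040c000 r-xp 00000000 fd:01 41038   /bin/cat
 *	start   -end      prot offset   maj:min inode  pathname
 *
 * The fourth permission character ('p' or 's') selects MAP_PRIVATE or
 * MAP_SHARED, and lines with no pathname terminate right after the inode.
 */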

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	unsigned long long t;
	char bf[BUFSIZ];
	struct io io;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	io.fd = open(bf, O_RDONLY, 0);
	if (io.fd < 0) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}
	io__init(&io, io.fd, bf, sizeof(bf));

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (!io.eof) {
		static const char anonstr[] = "//anon";
		size_t size, aligned_size;

		/* ensure null termination since stack will be reused. */
		event->mmap2.filename[0] = '\0';

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		if (!read_proc_maps_line(&io,
					 &event->mmap2.start,
					 &event->mmap2.len,
					 &event->mmap2.prot,
					 &event->mmap2.flags,
					 &event->mmap2.pgoff,
					 &event->mmap2.maj,
					 &event->mmap2.min,
					 &event->mmap2.ino,
					 sizeof(event->mmap2.filename),
					 event->mmap2.filename))
			continue;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   machine->root_dir, pid, pid);
			truncation = true;
			goto out;
		}

		event->mmap2.ino_generation = 0;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if ((event->mmap2.prot & PROT_EXEC) == 0) {
			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(event->mmap2.filename, ""))
			strcpy(event->mmap2.filename, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
			     hugetlbfs_mnt_len)) {
			strcpy(event->mmap2.filename, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(event->mmap2.filename) + 1;
		aligned_size = PERF_ALIGN(size, sizeof(u64));
		/* the parser stored the end address in .len; turn it into a length */
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - aligned_size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
		       (aligned_size - size));
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	close(io.fd);
	return rc;
}

#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
					 union perf_event *event,
					 char *path, size_t mount_len,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
	size_t path_len = strlen(path) - mount_len + 1;
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;
	} handle;
	int mount_id;

	while (path_len % sizeof(u64))
		path[mount_len + path_len++] = '\0';

	memset(&event->cgroup, 0, event_size);

	event->cgroup.header.type = PERF_RECORD_CGROUP;
	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
		pr_debug("stat failed: %s\n", path);
		return -1;
	}

	event->cgroup.id = handle.cgroup_id;
	strncpy(event->cgroup.path, path + mount_len, path_len);
	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
		pr_debug("process synth event failed\n");
		return -1;
	}

	return 0;
}
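
/*
 * The kernel exposes a cgroup's 64-bit id as the file handle returned by
 * name_to_handle_at() on the cgroup directory, which is why the anonymous
 * struct above overlays a uint64_t on the handle payload; that same id is
 * what PERF_RECORD_CGROUP carries in event->cgroup.id.
 */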

static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
					union perf_event *event,
					char *path, size_t mount_len,
					perf_event__handler_t process,
					struct machine *machine)
{
	size_t pos = strlen(path);
	DIR *d;
	struct dirent *dent;
	int ret = 0;

	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
					  process, machine) < 0)
		return -1;

	d = opendir(path);
	if (d == NULL) {
		pr_debug("failed to open directory: %s\n", path);
		return -1;
	}

	while ((dent = readdir(d)) != NULL) {
		if (dent->d_type != DT_DIR)
			continue;
		if (!strcmp(dent->d_name, ".") ||
		    !strcmp(dent->d_name, ".."))
			continue;

		/* any sane path should be less than PATH_MAX */
		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
			continue;

		if (path[pos - 1] != '/')
			strcat(path, "/");
		strcat(path, dent->d_name);

		ret = perf_event__walk_cgroup_tree(tool, event, path,
						   mount_len, process, machine);
		if (ret < 0)
			break;

		path[pos] = '\0';
	}

	closedir(d);
	return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	union perf_event event;
	char cgrp_root[PATH_MAX];
	size_t mount_len;  /* length of mount point in the path */

	if (!tool || !tool->cgroup_events)
		return 0;

	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
		pr_debug("cannot find cgroup mount point\n");
		return -1;
	}

	mount_len = strlen(cgrp_root);
	/* make sure the path starts with a slash (after mount point) */
	strcat(cgrp_root, "/");

	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
					 process, machine) < 0)
		return -1;

	return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
				   perf_event__handler_t process __maybe_unused,
				   struct machine *machine __maybe_unused)
{
	return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	maps__for_each_entry(maps, pos) {
		size_t size;

		if (!__map__is_kmodule(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full, perf_event__handler_t process,
				      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_maps()
		 */
		if (pid == tgid &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
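
/*
 * In the "full" case above, each task in /proc/<pid>/task is described by a
 * FORK, NAMESPACES and COMM event, and the thread group leader additionally
 * gets MMAP2 events for its address space - roughly the stream the kernel
 * itself would have emitted had the process been traced from the start.
 */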

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       perf_thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return err;
}
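
/*
 * Worked example of the split above: with n = 10 /proc entries and
 * thread_nr = 4 workers, num_per_thread = 2 and m = 2, so workers 0 and 1
 * take 3 entries each (starts 0 and 3), base becomes 6, and workers 2 and 3
 * take 2 entries each (starts 6 and 8), covering all 10 dirents exactly once.
 */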

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (map == NULL)
		return -1;

	kmap = map__kmap(map);
	if (!kmap->ref_reloc_sym)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and after it is, keep this as a fallback for
	 * older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct perf_thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
		char *comm = perf_thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = perf_thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct perf_cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct perf_record_record_cpu_map *mask,
			    struct perf_cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct perf_cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct perf_cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}
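
/*
 * Worked example: for a map holding cpus {0, 1}, cpus_size() is
 * sizeof(struct cpu_map_entries) + 2 * sizeof(u16), while mask_size()
 * computes *max = 2 and needs BITS_TO_LONGS(2) = 1 long of bitmap, so the
 * dense array form is chosen below; with many contiguous cpus (say 256,
 * i.e. 512 bytes of array vs. 4 longs of bitmap on 64-bit) the mask
 * becomes the smaller encoding instead.
 */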

void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = perf_cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct perf_record_cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct perf_record_record_cpu_map' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct perf_record_cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct perf_record_cpu_map_data);
	*size = PERF_ALIGN(*size, sizeof(u64));
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
		break;
	default:
		break;
	}
}

static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
{
	/*
	 * Start from the header size only: cpu_map_data__alloc() adds
	 * sizeof(struct perf_record_cpu_map_data) plus the chosen payload.
	 */
	size_t size = sizeof(struct perf_event_header);
	struct perf_record_cpu_map *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct perf_cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct perf_record_cpu_map *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct perf_record_stat_config *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct perf_record_stat event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id     = id;
	event.cpu    = cpu;
	event.thread = thread;
	event.val    = count->val;
	event.ena    = count->ena;
	event.run    = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_record_stat_round event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
	size_t sz, result = sizeof(struct perf_record_sample);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CGROUP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_AUX) {
		result += sizeof(u64);
		result += sample->aux_sample.size;
	}

	return result;
}
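
/*
 * Example: for attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME the function above returns
 * sizeof(struct perf_record_sample) + 3 * sizeof(u64), matching the three
 * u64 slots that perf_event__synthesize_sample() below fills in the same
 * bit order.
 */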

int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
				  const struct perf_sample *sample)
{
	__u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	if (type & PERF_SAMPLE_CGROUP) {
		*array = sample->cgroup;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		sz = sample->aux_sample.size;
		*array++ = sz;
		memcpy(array, sample->aux_sample.data, sz);
		array = (void *)array + sz;
	}

	return 0;
}
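
/*
 * perf_event__synthesize_sample() is the write-side twin of the sample
 * parsing done in util/evsel.c: it must append fields in exactly the order
 * the PERF_SAMPLE_* bits are laid out on the ring buffer, and
 * perf_event__sample_event_size() above must account for every byte written
 * here, or readers will walk off the end of the record.
 */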

int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				    struct evlist *evlist, struct machine *machine)
{
	union perf_event *ev;
	struct evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->core.ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->core.id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct perf_thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}

int machine__synthesize_threads(struct machine *machine, struct target *target,
				struct perf_thread_map *threads, bool data_mmap,
				unsigned int nr_threads_synthesize)
{
	return __machine__synthesize_threads(machine, NULL, target, threads,
					     perf_event__process, data_mmap,
					     nr_threads_synthesize);
}

static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type	= type;
		ev->id		= id;
	}
	return ev;
}
  1420. int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
  1421. perf_event__handler_t process)
  1422. {
  1423. size_t size = strlen(evsel->unit);
  1424. struct perf_record_event_update *ev;
  1425. int err;
  1426. ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
  1427. if (ev == NULL)
  1428. return -ENOMEM;
  1429. strlcpy(ev->data, evsel->unit, size + 1);
  1430. err = process(tool, (union perf_event *)ev, NULL, NULL);
  1431. free(ev);
  1432. return err;
  1433. }
  1434. int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
  1435. perf_event__handler_t process)
  1436. {
  1437. struct perf_record_event_update *ev;
  1438. struct perf_record_event_update_scale *ev_data;
  1439. int err;
  1440. ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
  1441. if (ev == NULL)
  1442. return -ENOMEM;
  1443. ev_data = (struct perf_record_event_update_scale *)ev->data;
  1444. ev_data->scale = evsel->scale;
  1445. err = process(tool, (union perf_event *)ev, NULL, NULL);
  1446. free(ev);
  1447. return err;
  1448. }
  1449. int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
  1450. perf_event__handler_t process)
  1451. {
  1452. struct perf_record_event_update *ev;
  1453. size_t len = strlen(evsel->name);
  1454. int err;
  1455. ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
  1456. if (ev == NULL)
  1457. return -ENOMEM;
  1458. strlcpy(ev->data, evsel->name, len + 1);
  1459. err = process(tool, (union perf_event *)ev, NULL, NULL);
  1460. free(ev);
  1461. return err;
  1462. }
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
                                             perf_event__handler_t process)
{
        size_t size = sizeof(struct perf_record_event_update);
        struct perf_record_event_update *ev;
        int max, err;
        u16 type;

        if (!evsel->core.own_cpus)
                return 0;

        ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
        if (!ev)
                return -ENOMEM;

        ev->header.type = PERF_RECORD_EVENT_UPDATE;
        ev->header.size = (u16)size;
        ev->type = PERF_EVENT_UPDATE__CPUS;
        ev->id = evsel->core.id[0];

        cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
                                 evsel->core.own_cpus, type, max);

        err = process(tool, (union perf_event *)ev, NULL, NULL);
        free(ev);
        return err;
}
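
/* Emit one PERF_RECORD_HEADER_ATTR record per event in the evlist. */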
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
                                 perf_event__handler_t process)
{
        struct evsel *evsel;
        int err = 0;

        evlist__for_each_entry(evlist, evsel) {
                err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
                                                  evsel->core.id, process);
                if (err) {
                        pr_debug("failed to create perf header attribute\n");
                        return err;
                }
        }

        return err;
}
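
/* Helpers deciding which optional event updates are worth synthesizing. */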
static bool has_unit(struct evsel *evsel)
{
        return evsel->unit && *evsel->unit;
}

static bool has_scale(struct evsel *evsel)
{
        return evsel->scale != 1;
}
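
/*
 * Emit the metadata that PERF_RECORD_HEADER_ATTR does not carry - unit,
 * scale, cpu map and (for pipe mode) name - for every supported event.
 */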
int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
                                      perf_event__handler_t process, bool is_pipe)
{
        struct evsel *evsel;
        int err;

        /*
         * Synthesize extra event metadata not carried within
         * the attr event: unit, scale, name.
         */
        evlist__for_each_entry(evsel_list, evsel) {
                if (!evsel->supported)
                        continue;

                /*
                 * Synthesize unit and scale only if they are defined.
                 */
                if (has_unit(evsel)) {
                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel unit.\n");
                                return err;
                        }
                }

                if (has_scale(evsel)) {
                        err = perf_event__synthesize_event_update_scale(tool, evsel, process);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel scale.\n");
                                return err;
                        }
                }

                if (evsel->core.own_cpus) {
                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel cpus.\n");
                                return err;
                        }
                }

                /*
                 * Name is needed only for pipe output;
                 * perf.data carries event names.
                 */
                if (is_pipe) {
                        err = perf_event__synthesize_event_update_name(tool, evsel, process);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel name.\n");
                                return err;
                        }
                }
        }

        return 0;
}
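
/*
 * Emit a PERF_RECORD_HEADER_ATTR record: the perf_event_attr itself,
 * padded to a u64 boundary, followed by the @ids sample ids. The
 * header.size round-trip check catches records whose size would
 * overflow the u16 header field, in which case -E2BIG is returned.
 */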
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
                                u32 ids, u64 *id, perf_event__handler_t process)
{
        union perf_event *ev;
        size_t size;
        int err;

        size  = sizeof(struct perf_event_attr);
        size  = PERF_ALIGN(size, sizeof(u64));
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);

        ev = zalloc(size);
        if (ev == NULL)
                return -ENOMEM;

        ev->attr.attr = *attr;
        memcpy(ev->attr.id, id, ids * sizeof(u64));

        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
        ev->attr.header.size = (u16)size;

        if (ev->attr.header.size == size)
                err = process(tool, ev, NULL, NULL);
        else
                err = -E2BIG;

        free(ev);
        return err;
}
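
/*
 * Emit a PERF_RECORD_HEADER_TRACING_DATA record followed by the tracing
 * data itself; returns the u64-aligned number of payload bytes written,
 * or -1 on error.
 */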
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
                                        perf_event__handler_t process)
{
        union perf_event ev;
        struct tracing_data *tdata;
        ssize_t size = 0, aligned_size = 0, padding;
        struct feat_fd ff;

        /*
         * We are going to store the size of the data followed
         * by the data contents. Since the fd is a pipe, we cannot
         * seek back to store the size of the data once we know it.
         * Instead we:
         *
         * - write the tracing data to a temp file
         * - get/write the data size to the pipe
         * - write the tracing data from the temp file
         *   to the pipe
         */
        tdata = tracing_data_get(&evlist->core.entries, fd, true);
        if (!tdata)
                return -1;

        memset(&ev, 0, sizeof(ev));

        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
        size = tdata->size;
        aligned_size = PERF_ALIGN(size, sizeof(u64));
        padding = aligned_size - size;
        ev.tracing_data.header.size = sizeof(ev.tracing_data);
        ev.tracing_data.size = aligned_size;

        process(tool, &ev, NULL, NULL);

        /*
         * The put function will copy all the tracing data
         * stored in the temp file to the pipe.
         */
        tracing_data_put(tdata);

        ff = (struct feat_fd){ .fd = fd };
        if (write_padded(&ff, NULL, 0, padding))
                return -1;

        return aligned_size;
}
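
/*
 * Emit a PERF_RECORD_HEADER_BUILD_ID record for a DSO that was actually
 * hit by samples, pairing its build id with its long name (padded to
 * NAME_ALIGN).
 */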
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
                                    perf_event__handler_t process, struct machine *machine)
{
        union perf_event ev;
        size_t len;

        if (!pos->hit)
                return 0;

        memset(&ev, 0, sizeof(ev));

        len = pos->long_name_len + 1;
        len = PERF_ALIGN(len, NAME_ALIGN);
        memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
        ev.build_id.header.misc = misc;
        ev.build_id.pid = machine->pid;
        ev.build_id.header.size = sizeof(ev.build_id) + len;
        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

        return process(tool, &ev, NULL, machine);
}
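
/*
 * Emit the full set of records a stat session needs, in order: event
 * attrs (optionally), extra attr metadata, thread map, cpu map and the
 * stat config.
 */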
int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
                                       struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
        int err;

        if (attrs) {
                err = perf_event__synthesize_attrs(tool, evlist, process);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        return err;
                }
        }

        err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
        if (err < 0) {
                pr_err("Couldn't synthesize extra attrs.\n");
                return err;
        }

        err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize thread map.\n");
                return err;
        }

        err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
                return err;
        }

        err = perf_event__synthesize_stat_config(tool, config, process, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize config.\n");
                return err;
        }

        return 0;
}

extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
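
/*
 * Emit one PERF_RECORD_HEADER_FEATURE record per feature bit set in the
 * session header, rendering each feature body with its feat_ops write()
 * callback into an in-memory feat_fd, and finish with a
 * HEADER_LAST_FEATURE marker so the reader knows the stream is done.
 */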
int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
                                    struct evlist *evlist, perf_event__handler_t process)
{
        struct perf_header *header = &session->header;
        struct perf_record_header_feature *fe;
        struct feat_fd ff;
        size_t sz, sz_hdr;
        int feat, ret;

        sz_hdr = sizeof(fe->header);
        sz = sizeof(union perf_event);
        /* get a nice alignment */
        sz = PERF_ALIGN(sz, page_size);

        memset(&ff, 0, sizeof(ff));

        ff.buf = malloc(sz);
        if (!ff.buf)
                return -ENOMEM;

        ff.size = sz - sz_hdr;
        ff.ph = &session->header;

        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
                if (!feat_ops[feat].synthesize) {
                        pr_debug("No record header feature for header: %d\n", feat);
                        continue;
                }

                ff.offset = sizeof(*fe);

                ret = feat_ops[feat].write(&ff, evlist);
                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
                        pr_debug("Error writing feature\n");
                        continue;
                }

                /* ff.buf may have changed due to realloc in do_write() */
                fe = ff.buf;
                memset(fe, 0, sizeof(*fe));

                fe->feat_id = feat;
                fe->header.type = PERF_RECORD_HEADER_FEATURE;
                fe->header.size = ff.offset;

                ret = process(tool, ff.buf, NULL, NULL);
                if (ret) {
                        free(ff.buf);
                        return ret;
                }
        }

        /* Send HEADER_LAST_FEATURE mark. */
        fe = ff.buf;
        fe->feat_id = HEADER_LAST_FEATURE;
        fe->header.type = PERF_RECORD_HEADER_FEATURE;
        fe->header.size = sizeof(*fe);

        ret = process(tool, ff.buf, NULL, NULL);

        free(ff.buf);
        return ret;
}