builtin-record.c

// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "util/build-id.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/config.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/record.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
#include "util/cpu-set-sched.h"
#include "util/synthetic-events.h"
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
#include "util/pfm.h"
#include "util/clockid.h"
#include "asm/bug.h"
#include "perf.h"

#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
#include <pthread.h>
#include <unistd.h>
#include <sched.h>
#include <signal.h>
#ifdef HAVE_EVENTFD_SUPPORT
#include <sys/eventfd.h>
#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
#include <sys/time.h>
struct switch_output {
	bool		 enabled;
	bool		 signal;
	unsigned long	 size;
	unsigned long	 time;
	const char	*str;
	bool		 set;
	char		 **filenames;
	int		 num_files;
	int		 cur_file;
};
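
/*
 * This struct backs output rotation, requested as
 * "perf record --switch-output[=signal|<size>|<time>]" (see perf-record(1)):
 * 'signal' rotates on SIGUSR2, a size such as 10M rotates once that many
 * bytes have been written, and a time such as 1min rotates periodically.
 * num_files/cur_file implement the ring of kept files (the --switch-max-files
 * limit, per perf-record(1)).
 */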

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data	data;
	struct auxtrace_record	*itr;
	struct evlist	*evlist;
	struct perf_session	*session;
	struct evlist		*sb_evlist;
	pthread_t		thread_id;
	int			realtime_prio;
	bool			switch_output_event_set;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	bool			timestamp_boundary;
	struct switch_output	switch_output;
	unsigned long long	samples;
	struct mmap_cpu_mask	affinity_mask;
	unsigned long		output_max_size;	/* = 0: unlimited */
};

static volatile int done;
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);

static const char *affinity_tags[PERF_AFFINITY_MAX] = {
	"SYS", "NODE", "CPU"
};

static bool switch_output_signal(struct record *rec)
{
	return rec->switch_output.signal &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool switch_output_size(struct record *rec)
{
	return rec->switch_output.size &&
	       trigger_is_ready(&switch_output_trigger) &&
	       (rec->bytes_written >= rec->switch_output.size);
}

static bool switch_output_time(struct record *rec)
{
	return rec->switch_output.time &&
	       trigger_is_ready(&switch_output_trigger);
}

static bool record__output_max_size_exceeded(struct record *rec)
{
	return rec->output_max_size &&
	       (rec->bytes_written >= rec->output_max_size);
}

static int record__write(struct record *rec, struct mmap *map __maybe_unused,
			 void *bf, size_t size)
{
	struct perf_data_file *file = &rec->session->data->file;

	if (perf_data_file__write(file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;

	if (record__output_max_size_exceeded(rec) && !done) {
		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
				" stopping session ]\n",
				rec->bytes_written >> 10);
		done = 1;
	}

	if (switch_output_size(rec))
		trigger_hit(&switch_output_trigger);

	return 0;
}
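
/*
 * All synchronous output funnels through record__write(): bytes_written
 * feeds both the --max-size limit check above and the size-based
 * --switch-output trigger. AIO writes bypass this path and therefore bump
 * rec->bytes_written themselves, in record__aio_push() below.
 */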

static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size);

#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
			     void *buf, size_t size, off_t off)
{
	int rc;

	cblock->aio_fildes = trace_fd;
	cblock->aio_buf    = buf;
	cblock->aio_nbytes = size;
	cblock->aio_offset = off;
	cblock->aio_sigevent.sigev_notify = SIGEV_NONE;

	do {
		rc = aio_write(cblock);
		if (rc == 0) {
			break;
		} else if (errno != EAGAIN) {
			cblock->aio_fildes = -1;
			pr_err("failed to queue perf data, error: %m\n");
			break;
		}
	} while (1);

	return rc;
}

static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
{
	void *rem_buf;
	off_t rem_off;
	size_t rem_size;
	int rc, aio_errno;
	ssize_t aio_ret, written;

	aio_errno = aio_error(cblock);
	if (aio_errno == EINPROGRESS)
		return 0;

	written = aio_ret = aio_return(cblock);
	if (aio_ret < 0) {
		if (aio_errno != EINTR)
			pr_err("failed to write perf data, error: %m\n");
		written = 0;
	}

	rem_size = cblock->aio_nbytes - written;

	if (rem_size == 0) {
		cblock->aio_fildes = -1;
		/*
		 * md->refcount is incremented in record__aio_pushfn() for
		 * every aio write request started in record__aio_push() so
		 * decrement it because the request is now complete.
		 */
		perf_mmap__put(&md->core);
		rc = 1;
	} else {
		/*
		 * aio write request may require restart with the
		 * remainder if the kernel didn't write the whole
		 * chunk at once.
		 */
		rem_off = cblock->aio_offset + written;
		rem_buf = (void *)(cblock->aio_buf + written);
		record__aio_write(cblock, cblock->aio_fildes,
				  rem_buf, rem_size, rem_off);
		rc = 0;
	}

	return rc;
}

static int record__aio_sync(struct mmap *md, bool sync_all)
{
	struct aiocb **aiocb = md->aio.aiocb;
	struct aiocb *cblocks = md->aio.cblocks;
	struct timespec timeout = { 0, 1000 * 1000  * 1 }; /* 1ms */
	int i, do_suspend;

	do {
		do_suspend = 0;
		for (i = 0; i < md->aio.nr_cblocks; ++i) {
			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
				if (sync_all)
					aiocb[i] = NULL;
				else
					return i;
			} else {
				/*
				 * Started aio write is not complete yet
				 * so it has to be waited on before the
				 * next allocation.
				 */
				aiocb[i] = &cblocks[i];
				do_suspend = 1;
			}
		}
		if (!do_suspend)
			return -1;

		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
			if (!(errno == EAGAIN || errno == EINTR))
				pr_err("failed to sync perf data, error: %m\n");
		}
	} while (1);
}
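
/*
 * With sync_all == false, record__aio_sync() returns the index of the first
 * control block that is free for reuse; with sync_all == true it loops until
 * every in-flight request has completed. The 1ms aio_suspend() timeout keeps
 * the wait loop responsive to EAGAIN/EINTR wakeups.
 */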

struct record_aio {
	struct record	*rec;
	void		*data;
	size_t		size;
};

static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
{
	struct record_aio *aio = to;

	/*
	 * map->core.base data pointed to by buf is copied into free
	 * map->aio.data[] buffer to release space in the kernel buffer as
	 * fast as possible, calling perf_mmap__consume() from
	 * perf_mmap__push() function.
	 *
	 * That lets the kernel proceed with storing more profiling data into
	 * the kernel buffer earlier than other per-cpu kernel buffers are handled.
	 *
	 * Copying can be done in two steps in case the chunk of profiling data
	 * crosses the upper bound of the kernel buffer. In this case we first move
	 * part of data from map->start till the upper bound and then the remainder
	 * from the beginning of the kernel buffer till the end of the data chunk.
	 */
	if (record__comp_enabled(aio->rec)) {
		size = zstd_compress(aio->rec->session, aio->data + aio->size,
				     mmap__mmap_len(map) - aio->size,
				     buf, size);
	} else {
		memcpy(aio->data + aio->size, buf, size);
	}

	if (!aio->size) {
		/*
		 * Increment map->refcount to guard map->aio.data[] buffer
		 * from premature deallocation because map object can be
		 * released earlier than aio write request started on
		 * map->aio.data[] buffer is complete.
		 *
		 * perf_mmap__put() is done at record__aio_complete()
		 * after started aio request completion or at record__aio_push()
		 * if the request failed to start.
		 */
		perf_mmap__get(&map->core);
	}

	aio->size += size;

	return size;
}
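
/*
 * record__aio_push() below drains one ring buffer into the next free
 * aio.data[] slot and queues a single aio write for it: perf_mmap__push()
 * invokes record__aio_pushfn() once, or twice when the data wraps around the
 * ring, and the accumulated aio.size bytes are then written at offset *off.
 */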
static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
{
	int ret, idx;
	int trace_fd = rec->session->data->file.fd;
	struct record_aio aio = { .rec = rec, .size = 0 };

	/*
	 * Call record__aio_sync() to wait till map->aio.data[] buffer
	 * becomes available after previous aio write operation.
	 */
	idx = record__aio_sync(map, false);
	aio.data = map->aio.data[idx];

	ret = perf_mmap__push(map, &aio, record__aio_pushfn);
	if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
		return ret;

	rec->samples++;
	ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
	if (!ret) {
		*off += aio.size;
		rec->bytes_written += aio.size;
		if (switch_output_size(rec))
			trigger_hit(&switch_output_trigger);
	} else {
		/*
		 * Decrement map->refcount incremented in record__aio_pushfn()
		 * back if record__aio_write() operation failed to start, otherwise
		 * map->refcount is decremented in record__aio_complete() after
		 * aio write operation finishes successfully.
		 */
		perf_mmap__put(&map->core);
	}

	return ret;
}

static off_t record__aio_get_pos(int trace_fd)
{
	return lseek(trace_fd, 0, SEEK_CUR);
}

static void record__aio_set_pos(int trace_fd, off_t pos)
{
	lseek(trace_fd, pos, SEEK_SET);
}

static void record__aio_mmap_read_sync(struct record *rec)
{
	int i;
	struct evlist *evlist = rec->evlist;
	struct mmap *maps = evlist->mmap;

	if (!record__aio_enabled(rec))
		return;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *map = &maps[i];

		if (map->core.base)
			record__aio_sync(map, true);
	}
}

static int nr_cblocks_default = 1;
static int nr_cblocks_max = 4;

static int record__aio_parse(const struct option *opt,
			     const char *str,
			     int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset) {
		opts->nr_cblocks = 0;
	} else {
		if (str)
			opts->nr_cblocks = strtol(str, NULL, 0);
		if (!opts->nr_cblocks)
			opts->nr_cblocks = nr_cblocks_default;
	}

	return 0;
}
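
/*
 * This backs "perf record --aio[=n]" (see perf-record(1)): n is the number
 * of control blocks, i.e. concurrent aio writes, per mmap'ed ring. It
 * defaults to 1; nr_cblocks_max (4) bounds the value during later option
 * validation, outside this excerpt.
 */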

#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;

static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
			    off_t *off __maybe_unused)
{
	return -1;
}

static off_t record__aio_get_pos(int trace_fd __maybe_unused)
{
	return -1;
}

static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
{
}

static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
{
}
#endif

static int record__aio_enabled(struct record *rec)
{
	return rec->opts.nr_cblocks > 0;
}

#define MMAP_FLUSH_DEFAULT 1
static int record__mmap_flush_parse(const struct option *opt,
				    const char *str,
				    int unset)
{
	int flush_max;
	struct record_opts *opts = (struct record_opts *)opt->value;
	static struct parse_tag tags[] = {
			{ .tag  = 'B', .mult = 1       },
			{ .tag  = 'K', .mult = 1 << 10 },
			{ .tag  = 'M', .mult = 1 << 20 },
			{ .tag  = 'G', .mult = 1 << 30 },
			{ .tag  = 0 },
	};

	if (unset)
		return 0;

	if (str) {
		opts->mmap_flush = parse_tag_value(str, tags);
		if (opts->mmap_flush == (int)-1)
			opts->mmap_flush = strtol(str, NULL, 0);
	}

	if (!opts->mmap_flush)
		opts->mmap_flush = MMAP_FLUSH_DEFAULT;

	flush_max = evlist__mmap_size(opts->mmap_pages);
	flush_max /= 4;
	if (opts->mmap_flush > flush_max)
		opts->mmap_flush = flush_max;

	return 0;
}
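
/*
 * This backs "perf record --mmap-flush=n" (see perf-record(1)): the minimum
 * number of bytes that must accumulate in a ring buffer before it is flushed
 * to perf.data. Values accept B/K/M/G suffixes via parse_tag_value() and are
 * capped at a quarter of the mmap size, so a ring never has to fill
 * completely before it is drained.
 */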

#ifdef HAVE_ZSTD_SUPPORT
static unsigned int comp_level_default = 1;

static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = opt->value;

	if (unset) {
		opts->comp_level = 0;
	} else {
		if (str)
			opts->comp_level = strtol(str, NULL, 0);
		if (!opts->comp_level)
			opts->comp_level = comp_level_default;
	}

	return 0;
}
#endif
static unsigned int comp_level_max = 22;

static int record__comp_enabled(struct record *rec)
{
	return rec->opts.comp_level > 0;
}
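
/*
 * Compression is requested with "perf record -z[=level]" (see
 * perf-record(1)). Level 0 disables it, the default is 1 (fastest), and
 * comp_level_max mirrors zstd's maximum level of 22.
 */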

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, NULL, event, event->header.size);
}

static int process_locked_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	static pthread_mutex_t synth_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret;

	pthread_mutex_lock(&synth_lock);
	ret = process_synthesized_event(tool, event, sample, machine);
	pthread_mutex_unlock(&synth_lock);
	return ret;
}

static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
	struct record *rec = to;

	if (record__comp_enabled(rec)) {
		size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
		bf   = map->data;
	}

	rec->samples++;
	return record__write(rec, map, bf, size);
}

static volatile int signr = -1;
static volatile int child_finished;
#ifdef HAVE_EVENTFD_SUPPORT
static int done_fd = -1;
#endif

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
#ifdef HAVE_EVENTFD_SUPPORT
{
	u64 tmp = 1;
	/*
	 * It is possible for this signal handler to run after done is checked
	 * in the main loop, but before the perf counter fds are polled. If this
	 * happens, the poll() will continue to wait even though done is set,
	 * and will only break out if either another signal is received, or the
	 * counters are ready for read. To ensure the poll() doesn't sleep when
	 * done is set, use an eventfd (done_fd) to wake up the poll().
	 */
	if (write(done_fd, &tmp, sizeof(tmp)) < 0)
		pr_err("failed to signal wakeup fd, error: %m\n");
}
#endif // HAVE_EVENTFD_SUPPORT
}

static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    struct mmap *map,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data *data = &rec->data;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
		off_t file_offset;
		int fd = perf_data__fd(data);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, map, event, event->header.size);
	record__write(rec, map, data1, len1);
	if (len2)
		record__write(rec, map, data2, len2);
	record__write(rec, map, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct mmap *map)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
		struct mmap *map = &rec->evlist->mmap[i];

		if (!map->auxtrace_mmap.base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		trigger_error(&auxtrace_snapshot_trigger);
	} else {
		if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
			trigger_error(&auxtrace_snapshot_trigger);
		else
			trigger_ready(&auxtrace_snapshot_trigger);
	}
}

static int record__auxtrace_snapshot_exit(struct record *rec)
{
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return 0;

	if (!auxtrace_record__snapshot_started &&
	    auxtrace_record__snapshot_start(rec->itr))
		return -1;

	record__read_auxtrace_snapshot(rec, true);
	if (trigger_is_error(&auxtrace_snapshot_trigger))
		return -1;

	return 0;
}

static int record__auxtrace_init(struct record *rec)
{
	int err;

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
					    rec->opts.auxtrace_sample_opts);
	if (err)
		return err;

	return auxtrace_parse_filters(rec->evlist);
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct mmap *map __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
				    bool on_exit __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

static inline
int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
{
	return 0;
}

static int record__auxtrace_init(struct record *rec __maybe_unused)
{
	return 0;
}

#endif

static int record__config_text_poke(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/* Nothing to do if text poke is already configured */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.text_poke)
			return 0;
	}

	err = parse_events(evlist, "dummy:u", NULL);
	if (err)
		return err;

	evsel = evlist__last(evlist);

	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = 1;
	evsel->core.attr.text_poke = 1;
	evsel->core.attr.ksymbol = 1;

	evsel->core.system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	/* Text poke must be collected on all CPUs */
	perf_cpu_map__put(evsel->core.own_cpus);
	evsel->core.own_cpus = perf_cpu_map__new(NULL);
	perf_cpu_map__put(evsel->core.cpus);
	evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);

	evsel__set_sample_bit(evsel, TIME);

	return 0;
}

static bool record__kcore_readable(struct machine *machine)
{
	char kcore[PATH_MAX];
	int fd;

	scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);

	fd = open(kcore, O_RDONLY);
	if (fd < 0)
		return false;

	close(fd);

	return true;
}

static int record__kcore_copy(struct machine *machine, struct perf_data *data)
{
	char from_dir[PATH_MAX];
	char kcore_dir[PATH_MAX];
	int ret;

	snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);

	ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
	if (ret)
		return ret;

	return kcore_copy(from_dir, kcore_dir);
}

static int record__mmap_evlist(struct record *rec,
			       struct evlist *evlist)
{
	struct record_opts *opts = &rec->opts;
	bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
				  opts->auxtrace_sample_mode;
	char msg[512];

	if (opts->affinity != PERF_AFFINITY_SYS)
		cpu__setup_cpunode_map();

	if (evlist__mmap_ex(evlist, opts->mmap_pages,
				 opts->auxtrace_mmap_pages,
				 auxtrace_overwrite,
				 opts->nr_cblocks, opts->affinity,
				 opts->mmap_flush, opts->comp_level) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			return -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				str_error_r(errno, msg, sizeof(msg)));
			if (errno)
				return -errno;
			else
				return -EINVAL;
		}
	}
	return 0;
}

static int record__mmap(struct record *rec)
{
	return record__mmap_evlist(rec, rec->evlist);
}

static int record__open(struct record *rec)
{
	char msg[BUFSIZ];
	struct evsel *pos;
	struct evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	/*
	 * For initial_delay or system wide, we need to add a dummy event so
	 * that we can track PERF_RECORD_MMAP to cover the delay of waiting or
	 * event synthesis.
	 */
	if (opts->initial_delay || target__has_cpu(&opts->target)) {
		pos = perf_evlist__get_tracking_event(evlist);
		if (!evsel__is_dummy_event(pos)) {
			/* Set up dummy event. */
			if (evlist__add_dummy(evlist))
				return -ENOMEM;
			pos = evlist__last(evlist);
			perf_evlist__set_tracking_event(evlist, pos);
		}

		/*
		 * Enable the dummy event when the process is forked for
		 * initial_delay, immediately for system wide.
		 */
		if (opts->initial_delay && !pos->immediate)
			pos->core.attr.enable_on_exec = 1;
		else
			pos->immediate = 1;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, pos) {
try_again:
		if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
			if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}
			if ((errno == EINVAL || errno == EBADF) &&
			    pos->leader != pos &&
			    pos->weak_group) {
				pos = perf_evlist__reset_weak_group(evlist, pos, true);
				goto try_again;
			}
			rc = -errno;
			evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}

		pos->supported = true;
	}

	if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	rc = record__mmap(rec);
	if (rc)
		goto out;

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	if (rec->evlist->first_sample_time == 0)
		rec->evlist->first_sample_time = sample->time;

	rec->evlist->last_sample_time = sample->time;

	if (rec->buildid_all)
		return 0;

	rec->samples++;
	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_session *session = rec->session;

	if (perf_data__size(&rec->data) == 0)
		return 0;

	/*
	 * During this process, it'll load the kernel map and replace
	 * dso->long_name with the real pathname it found. In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than the build-id path (in the debug directory):
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSOs regardless of hits,
	 * so there is no need to process samples. But if timestamp_boundary
	 * is enabled, it still needs to walk all samples to get the
	 * timestamps of the first/last samples.
	 */
	if (rec->buildid_all && !rec->timestamp_boundary)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;

	/*
	 * As for the guest kernel, when processing the record & report
	 * subcommands we arrange the module mmap prior to the guest kernel
	 * mmap and trigger a preload of the dso, because by default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the first
	 * address falls in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
	if (rec->opts.affinity != PERF_AFFINITY_SYS &&
	    !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
			  rec->affinity_mask.nbits)) {
		bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
		bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
			  map->affinity_mask.bits, rec->affinity_mask.nbits);
		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
				  (cpu_set_t *)rec->affinity_mask.bits);
		if (verbose == 2)
			mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
	}
}

static size_t process_comp_header(void *record, size_t increment)
{
	struct perf_record_compressed *event = record;
	size_t size = sizeof(*event);

	if (increment) {
		event->header.size += increment;
		return increment;
	}

	event->header.type = PERF_RECORD_COMPRESSED;
	event->header.size = size;

	return size;
}

static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
			    void *src, size_t src_size)
{
	size_t compressed;
	size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;

	compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
						     max_record_size, process_comp_header);

	session->bytes_transferred += src_size;
	session->bytes_compressed  += compressed;

	return compressed;
}
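
/*
 * Each compressed chunk is framed as a PERF_RECORD_COMPRESSED event:
 * process_comp_header() first lays down the header for a new record, then is
 * called again with the produced payload size to grow header.size, so the
 * stream stays parseable record-by-record on the report side. The
 * transferred/compressed counters feed the compression-ratio summary printed
 * when recording finishes.
 */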

static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
				    bool overwrite, bool synch)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;
	struct mmap *maps;
	int trace_fd = rec->data.file.fd;
	off_t off = 0;

	if (!evlist)
		return 0;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
	if (!maps)
		return 0;

	if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
		return 0;

	if (record__aio_enabled(rec))
		off = record__aio_get_pos(trace_fd);

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		u64 flush = 0;
		struct mmap *map = &maps[i];

		if (map->core.base) {
			record__adjust_affinity(rec, map);
			if (synch) {
				flush = map->core.flush;
				map->core.flush = 1;
			}
			if (!record__aio_enabled(rec)) {
				if (perf_mmap__push(map, rec, record__pushfn) < 0) {
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			} else {
				if (record__aio_push(rec, map, &off) < 0) {
					record__aio_set_pos(trace_fd, off);
					if (synch)
						map->core.flush = flush;
					rc = -1;
					goto out;
				}
			}
			if (synch)
				map->core.flush = flush;
		}

		if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
		    !rec->opts.auxtrace_sample_mode &&
		    record__auxtrace_mmap_read(rec, map) != 0) {
			rc = -1;
			goto out;
		}
	}

	if (record__aio_enabled(rec))
		record__aio_set_pos(trace_fd, off);

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
	return rc;
}

static int record__mmap_read_all(struct record *rec, bool synch)
{
	int err;

	err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
	if (err)
		return err;

	return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
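
/*
 * One call to record__mmap_read_all() is one flush "round": first the
 * regular mmaps, then the backward/overwritable ones, finished off by a
 * PERF_RECORD_FINISHED_ROUND marker (written above only if any bytes moved)
 * that lets the report side order events across per-CPU buffers. With synch
 * set (end of session), map->core.flush is forced to 1 so even rings below
 * the --mmap-flush threshold are drained.
 */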

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->core.entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
		perf_header__clear_feat(&session->header, HEADER_CLOCKID);

	if (!rec->opts.use_clockid)
		perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);

	perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
	if (!record__comp_enabled(rec))
		perf_header__clear_feat(&session->header, HEADER_COMPRESSED);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data *data = &rec->data;
	int fd = perf_data__fd(data);

	if (data->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

static int record__synthesize_workload(struct record *rec, bool tail)
{
	int err;
	struct perf_thread_map *thread_map;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
	if (thread_map == NULL)
		return -1;

	err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
						 process_synthesized_event,
						 &rec->session->machines.host,
						 rec->opts.sample_address);
	perf_thread_map__put(thread_map);
	return err;
}

static int record__synthesize(struct record *rec, bool tail);

static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data *data = &rec->data;
	int fd, err;
	char *new_filename;

	/* Same Size:      "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	record__aio_mmap_read_sync(rec);

	record__synthesize(rec, true);
	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data__switch(data, timestamp,
				    rec->session->header.data_offset,
				    at_exit, &new_filename);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			data->path, timestamp);

	if (rec->switch_output.num_files) {
		int n = rec->switch_output.cur_file + 1;

		if (n >= rec->switch_output.num_files)
			n = 0;
		rec->switch_output.cur_file = n;
		if (rec->switch_output.filenames[n]) {
			remove(rec->switch_output.filenames[n]);
			zfree(&rec->switch_output.filenames[n]);
		}
		rec->switch_output.filenames[n] = new_filename;
	} else {
		free(new_filename);
	}

	/* Output tracking events */
	if (!at_exit) {
		record__synthesize(rec, false);

		/*
		 * In 'perf record --switch-output' without -a,
		 * record__synthesize() in record__switch_output() won't
		 * generate tracking events because there's no thread_map
		 * in evlist. This causes the newly created perf.data to
		 * lack map and comm information.
		 * Create a fake thread_map and directly call
		 * perf_event__synthesize_thread_map() for those events.
		 */
		if (target__none(&rec->opts.target))
			record__synthesize_workload(rec, false);
	}
	return fd;
}
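
/*
 * Rotation thus produces perf.data.<timestamp> files: tail events are
 * synthesized into the old file, the header is finalized, perf_data__switch()
 * moves it aside and opens a fresh file, and (unless we are exiting) tracking
 * events are re-synthesized so the new file is self-contained.
 */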

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);

static const struct perf_event_mmap_page *
perf_evlist__pick_pc(struct evlist *evlist)
{
	if (evlist) {
		if (evlist->mmap && evlist->mmap[0].core.base)
			return evlist->mmap[0].core.base;
		if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
			return evlist->overwrite_mmap[0].core.base;
	}
	return NULL;
}

static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
	const struct perf_event_mmap_page *pc;

	pc = perf_evlist__pick_pc(rec->evlist);
	if (pc)
		return pc;
	return NULL;
}

static int record__synthesize(struct record *rec, bool tail)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data *data = &rec->data;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data__fd(data);
	int err = 0;
	event_op f = process_synthesized_event;

	if (rec->opts.tail_synthesize != tail)
		return 0;

	if (data->is_pipe) {
		/*
		 * We need to synthesize events first, because some
		 * features work on top of them (on report side).
		 */
		err = perf_event__synthesize_attrs(tool, rec->evlist,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		err = perf_event__synthesize_features(tool, session, rec->evlist,
						      process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize features.\n");
			return err;
		}

		if (have_tracepoints(&rec->evlist->core.entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	/* Synthesize id_index before auxtrace_info */
	if (rec->opts.auxtrace_sample_mode) {
		err = perf_event__synthesize_id_index(tool,
						      process_synthesized_event,
						      session->evlist, machine);
		if (err)
			goto out;
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	if (!perf_evlist__exclude_kernel(rec->evlist)) {
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine);
		WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/kallsyms permission or run as root.\n");

		err = perf_event__synthesize_modules(tool, process_synthesized_event,
						     machine);
		WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
				   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
				   "Check /proc/modules permission or run as root.\n");
	}

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = perf_event__synthesize_extra_attr(&rec->tool,
						rec->evlist,
						process_synthesized_event,
						data->is_pipe);
	if (err)
		goto out;

	err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
						 process_synthesized_event,
						NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
						machine, opts);
	if (err < 0)
		pr_warning("Couldn't synthesize bpf events.\n");

	err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_warning("Couldn't synthesize cgroup events.\n");

	if (rec->opts.nr_threads_synthesize > 1) {
		perf_set_multithreaded();
		f = process_locked_synthesized_event;
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
					    f, opts->sample_address,
					    rec->opts.nr_threads_synthesize);

	if (rec->opts.nr_threads_synthesize > 1)
		perf_set_singlethreaded();

out:
	return err;
}

static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
{
	struct record *rec = data;
	pthread_kill(rec->thread_id, SIGUSR2);
	return 0;
}

static int record__setup_sb_evlist(struct record *rec)
{
	struct record_opts *opts = &rec->opts;

	if (rec->sb_evlist != NULL) {
		/*
		 * We get here if --switch-output-event populated the
		 * sb_evlist, so associate a callback that will send a SIGUSR2
		 * to the main thread.
		 */
		evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
		rec->thread_id = pthread_self();
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (!opts->no_bpf_event) {
		if (rec->sb_evlist == NULL) {
			rec->sb_evlist = evlist__new();

			if (rec->sb_evlist == NULL) {
				pr_err("Couldn't create side band evlist.\n");
				return -1;
			}
		}

		if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
			return -1;
		}
	}
#endif
	if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	return 0;
}

static int record__init_clock(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct timespec ref_clockid;
	struct timeval ref_tod;
	u64 ref;

	if (!rec->opts.use_clockid)
		return 0;

	if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
		session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;

	session->header.env.clock.clockid = rec->opts.clockid;

	if (gettimeofday(&ref_tod, NULL) != 0) {
		pr_err("gettimeofday failed, cannot set reference time.\n");
		return -1;
	}

	if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
		pr_err("clock_gettime failed, cannot set reference time.\n");
		return -1;
	}

	ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
	      (u64) ref_tod.tv_usec * NSEC_PER_USEC;

	session->header.env.clock.tod_ns = ref;

	ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
	      (u64) ref_clockid.tv_nsec;

	session->header.env.clock.clockid_ns = ref;
	return 0;
}
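
/*
 * Worked example of the reference pair computed above: a ref_tod of
 * { .tv_sec = 2, .tv_usec = 500 } yields
 *
 *	tod_ns = 2 * NSEC_PER_SEC + 500 * NSEC_PER_USEC = 2000500000
 *
 * while the matching clockid reading is already in nanoseconds, so it
 * is stored directly. Consumers of the perf.data header can then
 * correlate the selected clockid with wall-clock time from these two
 * values.
 */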

static void hit_auxtrace_snapshot_trigger(struct record *rec)
{
	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(rec->itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}
}

static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data *data = &rec->data;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;
	float ratio = 0;
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	signal(SIGSEGV, sigsegv_handler);

	if (rec->opts.record_namespaces)
		tool->namespace_events = true;

	if (rec->opts.record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		tool->cgroup_events = true;
#else
		pr_err("cgroup tracking is not supported\n");
		return -1;
#endif
	}

	if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
		signal(SIGUSR2, snapshot_sig_handler);
		if (rec->opts.auxtrace_snapshot_mode)
			trigger_on(&auxtrace_snapshot_trigger);
		if (rec->switch_output.enabled)
			trigger_on(&switch_output_trigger);
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(data, false, tool);
	if (IS_ERR(session)) {
		pr_err("Perf session creation failed.\n");
		return PTR_ERR(session);
	}

	fd = perf_data__fd(data);
	rec->session = session;

	if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
		pr_err("Compression initialization failed.\n");
		return -1;
	}
#ifdef HAVE_EVENTFD_SUPPORT
	done_fd = eventfd(0, EFD_NONBLOCK);
	if (done_fd < 0) {
		pr_err("Failed to create wakeup eventfd, error: %m\n");
		status = -1;
		goto out_delete_session;
	}
	err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
	if (err < 0) {
		pr_err("Failed to add wakeup eventfd to poll list\n");
		status = err;
		goto out_delete_session;
	}
#endif // HAVE_EVENTFD_SUPPORT

	session->header.env.comp_type  = PERF_COMP_ZSTD;
	session->header.env.comp_level = rec->opts.comp_level;

	if (rec->opts.kcore &&
	    !record__kcore_readable(&session->machines.host)) {
		pr_err("ERROR: kcore is not readable.\n");
		return -1;
	}

	if (record__init_clock(rec))
		return -1;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, data->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	/*
	 * If we have just a single event and are sending data
	 * through a pipe, we need to force the ids allocation,
	 * because we synthesize the event name through the pipe
	 * and need the id for that.
	 */
	if (data->is_pipe && rec->evlist->core.nr_entries == 1)
		rec->opts.sample_id = true;

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}
	session->header.env.comp_mmap_len = session->evlist->core.mmap_len;

	if (rec->opts.kcore) {
		err = record__kcore_copy(&session->machines.host, data);
		if (err) {
			pr_err("ERROR: Failed to copy kcore\n");
			goto out_child;
		}
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (data->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	err = -1;
	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		goto out_child;
	}

	err = record__setup_sb_evlist(rec);
	if (err)
		goto out_child;

	err = record__synthesize(rec, false);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		struct machine *machine = &session->machines.host;
		union perf_event *event;
		pid_t tgid;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		tgid = perf_event__synthesize_comm(tool, event,
						   rec->evlist->workload.pid,
						   process_synthesized_event,
						   machine);
		free(event);

		if (tgid == -1)
			goto out_child;

		event = malloc(sizeof(event->namespaces) +
			       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			       machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Synthesize NAMESPACES event for the command specified.
		 */
		perf_event__synthesize_namespaces(tool, event,
						  rec->evlist->workload.pid,
						  tgid, process_synthesized_event,
						  machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
		goto out_child;

	if (opts->initial_delay) {
		pr_info(EVLIST_DISABLED_MSG);
		if (opts->initial_delay > 0) {
			usleep(opts->initial_delay * USEC_PER_MSEC);
			evlist__enable(rec->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}
	}

	trigger_ready(&auxtrace_snapshot_trigger);
	trigger_ready(&switch_output_trigger);
	perf_hooks__invoke_record_start();
	for (;;) {
		unsigned long long hits = rec->samples;

		/*
		 * rec->evlist->bkw_mmap_state may be BKW_MMAP_EMPTY here:
		 * when done == true and hits != rec->samples in the previous
		 * round.
		 *
		 * perf_evlist__toggle_bkw_mmap ensures we never
		 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
		 */
		if (trigger_is_hit(&switch_output_trigger) || done || draining)
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);

		if (record__mmap_read_all(rec, false) < 0) {
			trigger_error(&auxtrace_snapshot_trigger);
			trigger_error(&switch_output_trigger);
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!trigger_is_error(&auxtrace_snapshot_trigger))
				record__read_auxtrace_snapshot(rec, false);
			if (trigger_is_error(&auxtrace_snapshot_trigger)) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (trigger_is_hit(&switch_output_trigger)) {
			/*
			 * If switch_output_trigger is hit, the data in
			 * the overwritable ring buffer should have been
			 * collected, so bkw_mmap_state should be set to
			 * BKW_MMAP_EMPTY.
			 *
			 * If SIGUSR2 is raised after or during
			 * record__mmap_read_all(), it didn't collect data
			 * from the overwritable ring buffer. Read again.
			 */
			if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
				continue;
			trigger_ready(&switch_output_trigger);

			/*
			 * Reenable events in overwrite ring buffer after
			 * record__mmap_read_all(): we should have collected
			 * data from it.
			 */
			perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);

			if (!quiet)
				fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
					waking);
			waking = 0;
			fd = record__switch_output(rec, false);
			if (fd < 0) {
				pr_err("Failed to switch to new file\n");
				trigger_error(&switch_output_trigger);
				err = fd;
				goto out_child;
			}

			/* re-arm the alarm */
			if (rec->switch_output.time)
				alarm(rec->switch_output.time);
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
			switch (cmd) {
			case EVLIST_CTL_CMD_ENABLE:
				pr_info(EVLIST_ENABLED_MSG);
				break;
			case EVLIST_CTL_CMD_DISABLE:
				pr_info(EVLIST_DISABLED_MSG);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
				hit_auxtrace_snapshot_trigger(rec);
				evlist__ctlfd_ack(rec->evlist);
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				break;
			}
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			trigger_off(&auxtrace_snapshot_trigger);
			evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	trigger_off(&auxtrace_snapshot_trigger);
	trigger_off(&switch_output_trigger);

	if (opts->auxtrace_snapshot_on_exit)
		record__auxtrace_snapshot_exit(rec);

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));

		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	if (target__none(&rec->opts.target))
		record__synthesize_workload(rec, true);

out_child:
	evlist__finalize_ctlfd(rec->evlist);
	record__mmap_read_all(rec, true);
	record__aio_mmap_read_sync(rec);

	if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
		ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
		session->header.env.comp_ratio = ratio + 0.5;
	}

	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	record__synthesize(rec, true);
	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	perf_hooks__invoke_record_end();

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
			perf_data__size(data) / 1024.0 / 1024.0,
			data->path, postfix, samples);
		if (ratio) {
			fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
					rec->session->bytes_transferred / 1024.0 / 1024.0,
					ratio);
		}
		fprintf(stderr, " ]\n");
	}

out_delete_session:
#ifdef HAVE_EVENTFD_SUPPORT
	if (done_fd >= 0)
		close(done_fd);
#endif
	zstd_fini(&session->zstd_data);
	perf_session__delete(session);

	if (!opts->no_bpf_event)
		perf_evlist__stop_sb_thread(rec->sb_evlist);
	return status;
}
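
/*
 * For reference, a typical invocation that exercises the loop above
 * (an assumed example, built from options defined later in this file):
 *
 *	perf record -F 4000 -g -- sleep 1
 *
 * cmd_record() parses the options and hands the remaining "sleep 1"
 * argv to __cmd_record(), so forks == true and the workload
 * synthesize/start path is taken.
 */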

static void callchain_debug(struct callchain_param *callchain)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain->record_mode]);

	if (callchain->record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain->dump_size);
}

int record_opts__parse_callchain(struct record_opts *record,
				 struct callchain_param *callchain,
				 const char *arg, bool unset)
{
	int ret;
	callchain->enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain->record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, callchain);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain->record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug(callchain);
	}

	return ret;
}
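
/*
 * Example: --call-graph dwarf,8192 should make
 * parse_callchain_record_opt() set record_mode = CALLCHAIN_DWARF and
 * dump_size = 8192 (assuming the usual "record_mode[,record_size]"
 * syntax from the option help below), after which the code above also
 * enables sample_address for the DWARF unwinder.
 */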

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = true;

	if (callchain->record_mode == CALLCHAIN_NONE)
		callchain->record_mode = CALLCHAIN_FP;

	callchain_debug(callchain);
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
#ifdef HAVE_AIO_SUPPORT
	if (!strcmp(var, "record.aio")) {
		rec->opts.nr_cblocks = strtol(value, NULL, 0);
		if (!rec->opts.nr_cblocks)
			rec->opts.nr_cblocks = nr_cblocks_default;
	}
#endif

	return 0;
}
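
/*
 * Example ~/.perfconfig stanza handled by the function above:
 *
 *	[record]
 *		build-id = no-cache
 *		call-graph = dwarf
 *		aio = 2
 *
 * "record.build-id = no-cache" sets rec->no_buildid_cache,
 * "record.call-graph" is rewritten to "call-graph.record-mode" and
 * passed on to perf_default_config(), and "record.aio" takes effect
 * only when built with HAVE_AIO_SUPPORT.
 */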

static int record__parse_affinity(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;

	if (unset || !str)
		return 0;

	if (!strcasecmp(str, "node"))
		opts->affinity = PERF_AFFINITY_NODE;
	else if (!strcasecmp(str, "cpu"))
		opts->affinity = PERF_AFFINITY_CPU;

	return 0;
}

static int parse_output_max_size(const struct option *opt,
				 const char *str, int unset)
{
	unsigned long *s = (unsigned long *)opt->value;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	unsigned long val;

	if (unset) {
		*s = 0;
		return 0;
	}

	val = parse_tag_value(str, tags_size);
	if (val != (unsigned long) -1) {
		*s = val;
		return 0;
	}

	return -1;
}
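
/*
 * Example: --max-size=200M parses via the 'M' tag (mult = 1 << 20) to
 * 200 * 1048576 = 209715200 bytes, while an unrecognized suffix makes
 * parse_tag_value() return (unsigned long) -1 and the option fails.
 */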

static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}
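
/*
 * Example: -m 512,64 is split at the comma, giving
 * opts->mmap_pages = 512 for the data mmaps and
 * opts->auxtrace_mmap_pages = 64 for the AUX area tracing mmaps;
 * plain -m 512 leaves the auxtrace size untouched.
 */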

static int parse_control_option(const struct option *opt,
				const char *str,
				int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
}
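
/*
 * Example: --control fd:10,11 should yield ctl_fd = 10 and
 * ctl_fd_ack = 11 (assuming evlist__parse_control() follows the
 * "fd:ctl-fd[,ack-fd]" syntax documented in the option help below),
 * letting an outside process write 'enable'/'disable' commands into
 * descriptor 10 and read acks from descriptor 11.
 */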

static void switch_output_size_warn(struct record *rec)
{
	u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
	struct switch_output *s = &rec->switch_output;

	wakeup_size /= 2;

	if (s->size < wakeup_size) {
		char buf[100];

		unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
		pr_warning("WARNING: switch-output data size lower than "
			   "wakeup kernel buffer size (%s); "
			   "expect bigger perf.data sizes\n", buf);
	}
}

static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag  = 's', .mult = 1        },
		{ .tag  = 'm', .mult = 60       },
		{ .tag  = 'h', .mult = 60*60    },
		{ .tag  = 'd', .mult = 60*60*24 },
		{ .tag  = 0 },
	};
	unsigned long val;

	/*
	 * If we're using --switch-output-event, we imply
	 * --switch-output=signal, since we'll send a SIGUSR2 from the side
	 * band thread to its parent.
	 */
	if (rec->switch_output_event_set)
		goto do_signal;

	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
do_signal:
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled              = true;

	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}
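
/*
 * Examples for the parsing above: --switch-output=signal arms the
 * SIGUSR2 path, --switch-output=100M sets a 100 MiB size threshold via
 * tags_size, and --switch-output=10s sets s->time = 10 via tags_time,
 * which __cmd_record() re-arms with alarm() after every switch. All
 * three imply timestamped output file names.
 */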

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
				  struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need to add
	 * them twice.
	 */
	if (!(event->header.misc & PERF_RECORD_MISC_USER))
		return 0;
	return perf_event__process_mmap(tool, event, sample, machine);
}

static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
				   struct perf_sample *sample, struct machine *machine)
{
	/*
	 * We already have the kernel maps, put in place via
	 * perf_session__create_kernel_maps(), so there is no need to add
	 * them twice.
	 */
	if (!(event->header.misc & PERF_RECORD_MISC_USER))
		return 0;
	return perf_event__process_mmap2(tool, event, sample, machine);
}

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
 * because we need access to it in record__exit(), which is called after
 * cmd_record() exits; but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.mmap_flush          = MMAP_FLUSH_DEFAULT,
		.nr_threads_synthesize = 1,
		.ctl_fd              = -1,
		.ctl_fd_ack          = -1,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.namespaces	= perf_event__process_namespaces,
		.mmap		= build_id__process_mmap,
		.mmap2		= build_id__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

static bool dry_run;

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to using the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
static struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
	OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
		    "Fail if the specified frequency can't be used"),
	OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
		     "profile at this frequency",
		     record__parse_freq),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
		     "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
		     record__mmap_flush_parse),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
			"Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_INTEGER('D', "delay", &record.opts.initial_delay,
		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
	OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '-I?' to list register names", parse_intr_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
			    "sample selected machine registers on interrupt,"
			    " use '--user-regs=?' to list register names", parse_user_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
			  "opts", "sample AUX area", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
		    "Record cgroup events"),
	OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
			&record.opts.record_switch_events_set,
			"Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
		    "collect kernel callchains"),
	OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
		    "collect user callchains"),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
		    "Record timestamp boundary (time of first/last samples)"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			      &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
			      "Switch output when receiving SIGUSR2 (signal) or crossing a size or time threshold",
			      "signal"),
	OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
			 "switch output event selector. use 'perf list' to list available events",
			 parse_events_option_new_evlist),
	OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
		    "Limit number of switch output generated files"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
	OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
			    &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
			    record__aio_parse),
#endif
	OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
		     "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
		     record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
	OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
			    "n", "Compress records using the specified level (default: 1 - fastest compression, 22 - greatest compression)",
			    record__parse_comp_level),
#endif
	OPT_CALLBACK(0, "max-size", &record.output_max_size,
		     "size", "Limit the maximum size of the output file", parse_output_max_size),
	OPT_UINTEGER(0, "num-thread-synthesize",
		     &record.opts.nr_threads_synthesize,
		     "number of threads to run for event synthesis"),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
		     "libpfm4 event selector. use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
		     "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
		     "\t\t\t  'snapshot': AUX area tracing snapshot).\n"
		     "\t\t\t  Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
		     "\t\t\t  Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
		     parse_control_option),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	setlocale(LC_ALL, "");

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->opts.affinity = PERF_AFFINITY_SYS;

	rec->evlist = evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	err = perf_config(perf_record_config, rec);
	if (err)
		return err;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (quiet)
		perf_quiet_option();

	/* Make system wide (-a) the default target. */
	if (!argc && target__none(&rec->opts.target))
		rec->opts.target.system_wide = true;

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}

	if (rec->opts.kcore)
		rec->data.is_dir = true;

	if (rec->opts.comp_level != 0) {
		pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
		rec->no_buildid = true;
	}

	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (switch_output_setup(rec)) {
		parse_options_usage(record_usage, record_options, "switch-output", 0);
		err = -EINVAL;
		goto out_opts;
	}

	if (rec->switch_output.time) {
		signal(SIGALRM, alarm_sig_handler);
		alarm(rec->switch_output.time);
	}

	if (rec->switch_output.num_files) {
		rec->switch_output.filenames = calloc(rec->switch_output.num_files,
						      sizeof(char *));
		if (!rec->switch_output.filenames) {
			err = -EINVAL;
			goto out_opts;
		}
	}

	/*
	 * Allow aliases to facilitate the lookup of symbols for address
	 * filters. Refer to auxtrace_parse_filters().
	 */
	symbol_conf.allow_aliases = true;

	symbol__init(NULL);

	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
		rec->affinity_mask.nbits = cpu__max_cpu();
		rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
		if (!rec->affinity_mask.bits) {
			pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
			err = -ENOMEM;
			goto out_opts;
		}
		pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
	}

	err = record__auxtrace_init(rec);
	if (err)
		goto out;

	if (dry_run)
		goto out;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n",
			 errbuf);
		goto out;
	}

	err = -ENOMEM;

	if (rec->no_buildid_cache || rec->no_buildid) {
		disable_buildid_cache();
	} else if (rec->switch_output.enabled) {
		/*
		 * In 'perf record --switch-output', disable buildid
		 * generation by default to reduce data file switching
		 * overhead. Still generate buildids if they are required
		 * explicitly using
		 *
		 *  perf record --switch-output --no-no-buildid \
		 *              --no-no-buildid-cache
		 *
		 * The following code is equivalent to:
		 *
		 *  if ((rec->no_buildid || !rec->no_buildid_set) &&
		 *      (rec->no_buildid_cache || !rec->no_buildid_cache_set))
		 *          disable_buildid_cache();
		 */
		bool disable = true;

		if (rec->no_buildid_set && !rec->no_buildid)
			disable = false;
		if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
			disable = false;
		if (disable) {
			rec->no_buildid = true;
			rec->no_buildid_cache = true;
			disable_buildid_cache();
		}
	}

	if (record.opts.overwrite)
		record.opts.tail_synthesize = true;

	if (rec->evlist->core.nr_entries == 0 &&
	    __evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out;
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out;

	/*
	 * We take all buildids when the file contains AUX area tracing data
	 * because we do not decode the trace, since decoding it would take
	 * too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (rec->opts.text_poke) {
		err = record__config_text_poke(rec->evlist);
		if (err) {
			pr_err("record__config_text_poke failed, error %d\n", err);
			goto out;
		}
	}

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out;
	}

	if (rec->opts.nr_cblocks > nr_cblocks_max)
		rec->opts.nr_cblocks = nr_cblocks_max;
	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);

	pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
	pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);

	if (rec->opts.comp_level > comp_level_max)
		rec->opts.comp_level = comp_level_max;
	pr_debug("comp level: %d\n", rec->opts.comp_level);

	err = __cmd_record(&record, argc, argv);
out:
	bitmap_free(rec->affinity_mask.bits);
	evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
out_opts:
	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
	return err;
}

static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	hit_auxtrace_snapshot_trigger(rec);

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}

static void alarm_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (switch_output_time(rec))
		trigger_hit(&switch_output_trigger);
}