builtin-timechart.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * builtin-timechart.c - make an svg timechart of system activity
  4. *
  5. * (C) Copyright 2009 Intel Corporation
  6. *
  7. * Authors:
  8. * Arjan van de Ven <arjan@linux.intel.com>
  9. */
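/*
 * Typical usage (a rough sketch; see the perf-timechart documentation
 * for the full option list):
 *
 *   perf timechart record -- <workload>   # writes perf.data
 *   perf timechart -o output.svg          # renders the chart
 */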
  10. #include <errno.h>
  11. #include <inttypes.h>
  12. #include "builtin.h"
  13. #include "util/color.h"
  14. #include <linux/list.h>
  15. #include "util/evlist.h" // for struct evsel_str_handler
  16. #include "util/evsel.h"
  17. #include <linux/kernel.h>
  18. #include <linux/rbtree.h>
  19. #include <linux/time64.h>
  20. #include <linux/zalloc.h>
  21. #include "util/symbol.h"
  22. #include "util/thread.h"
  23. #include "util/callchain.h"
  24. #include "perf.h"
  25. #include "util/header.h"
  26. #include <subcmd/pager.h>
  27. #include <subcmd/parse-options.h>
  28. #include "util/parse-events.h"
  29. #include "util/event.h"
  30. #include "util/session.h"
  31. #include "util/svghelper.h"
  32. #include "util/tool.h"
  33. #include "util/data.h"
  34. #include "util/debug.h"
  35. #include <linux/err.h>
  36. #ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
  37. FILE *open_memstream(char **ptr, size_t *sizeloc);
  38. #endif
  39. #define SUPPORT_OLD_POWER_EVENTS 1
  40. #define PWR_EVENT_EXIT -1
  41. struct per_pid;
  42. struct power_event;
  43. struct wake_event;
  44. struct timechart {
  45. struct perf_tool tool;
  46. struct per_pid *all_data;
  47. struct power_event *power_events;
  48. struct wake_event *wake_events;
  49. int proc_num;
  50. unsigned int numcpus;
  51. u64 min_freq, /* Lowest CPU frequency seen */
  52. max_freq, /* Highest CPU frequency seen */
  53. turbo_frequency,
  54. first_time, last_time;
  55. bool power_only,
  56. tasks_only,
  57. with_backtrace,
  58. topology;
  59. bool force;
  60. /* IO related settings */
  61. bool io_only,
  62. skip_eagain;
  63. u64 io_events;
  64. u64 min_time,
  65. merge_dist;
  66. };
  67. struct per_pidcomm;
  68. struct cpu_sample;
  69. struct io_sample;
  70. /*
  71. * Data structure layout:
  72. * We keep a list of "pid"s, matching the kernel's notion of a task struct.
  73. * Each "pid" entry has a list of "comm"s.
  74. * This is because we want to track different programs differently, while
  75. * exec reuses the original pid (by design).
  76. * Each comm has a list of samples that will be used to draw the
  77. * final graph.
  78. */
  79. struct per_pid {
  80. struct per_pid *next;
  81. int pid;
  82. int ppid;
  83. u64 start_time;
  84. u64 end_time;
  85. u64 total_time;
  86. u64 total_bytes;
  87. int display;
  88. struct per_pidcomm *all;
  89. struct per_pidcomm *current;
  90. };
  91. struct per_pidcomm {
  92. struct per_pidcomm *next;
  93. u64 start_time;
  94. u64 end_time;
  95. u64 total_time;
  96. u64 max_bytes;
  97. u64 total_bytes;
  98. int Y;
  99. int display;
  100. long state;
  101. u64 state_since;
  102. char *comm;
  103. struct cpu_sample *samples;
  104. struct io_sample *io_samples;
  105. };
  106. struct sample_wrapper {
  107. struct sample_wrapper *next;
  108. u64 timestamp;
  109. unsigned char data[];
  110. };
  111. #define TYPE_NONE 0
  112. #define TYPE_RUNNING 1
  113. #define TYPE_WAITING 2
  114. #define TYPE_BLOCKED 3
  115. struct cpu_sample {
  116. struct cpu_sample *next;
  117. u64 start_time;
  118. u64 end_time;
  119. int type;
  120. int cpu;
  121. const char *backtrace;
  122. };
  123. enum {
  124. IOTYPE_READ,
  125. IOTYPE_WRITE,
  126. IOTYPE_SYNC,
  127. IOTYPE_TX,
  128. IOTYPE_RX,
  129. IOTYPE_POLL,
  130. };
  131. struct io_sample {
  132. struct io_sample *next;
  133. u64 start_time;
  134. u64 end_time;
  135. u64 bytes;
  136. int type;
  137. int fd;
  138. int err;
  139. int merges;
  140. };
  141. #define CSTATE 1
  142. #define PSTATE 2
  143. struct power_event {
  144. struct power_event *next;
  145. int type;
  146. int state;
  147. u64 start_time;
  148. u64 end_time;
  149. int cpu;
  150. };
  151. struct wake_event {
  152. struct wake_event *next;
  153. int waker;
  154. int wakee;
  155. u64 time;
  156. const char *backtrace;
  157. };
  158. struct process_filter {
  159. char *name;
  160. int pid;
  161. struct process_filter *next;
  162. };
  163. static struct process_filter *process_filter;
  164. static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
  165. {
  166. struct per_pid *cursor = tchart->all_data;
  167. while (cursor) {
  168. if (cursor->pid == pid)
  169. return cursor;
  170. cursor = cursor->next;
  171. }
  172. cursor = zalloc(sizeof(*cursor));
  173. assert(cursor != NULL);
  174. cursor->pid = pid;
  175. cursor->next = tchart->all_data;
  176. tchart->all_data = cursor;
  177. return cursor;
  178. }
  179. static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
  180. {
  181. struct per_pid *p;
  182. struct per_pidcomm *c;
  183. p = find_create_pid(tchart, pid);
  184. c = p->all;
  185. while (c) {
  186. if (c->comm && strcmp(c->comm, comm) == 0) {
  187. p->current = c;
  188. return;
  189. }
  190. if (!c->comm) {
  191. c->comm = strdup(comm);
  192. p->current = c;
  193. return;
  194. }
  195. c = c->next;
  196. }
  197. c = zalloc(sizeof(*c));
  198. assert(c != NULL);
  199. c->comm = strdup(comm);
  200. p->current = c;
  201. c->next = p->all;
  202. p->all = c;
  203. }
  204. static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
  205. {
  206. struct per_pid *p, *pp;
  207. p = find_create_pid(tchart, pid);
  208. pp = find_create_pid(tchart, ppid);
  209. p->ppid = ppid;
  210. if (pp->current && pp->current->comm && !p->current)
  211. pid_set_comm(tchart, pid, pp->current->comm);
  212. p->start_time = timestamp;
  213. if (p->current && !p->current->start_time) {
  214. p->current->start_time = timestamp;
  215. p->current->state_since = timestamp;
  216. }
  217. }
  218. static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
  219. {
  220. struct per_pid *p;
  221. p = find_create_pid(tchart, pid);
  222. p->end_time = timestamp;
  223. if (p->current)
  224. p->current->end_time = timestamp;
  225. }
  226. static void pid_put_sample(struct timechart *tchart, int pid, int type,
  227. unsigned int cpu, u64 start, u64 end,
  228. const char *backtrace)
  229. {
  230. struct per_pid *p;
  231. struct per_pidcomm *c;
  232. struct cpu_sample *sample;
  233. p = find_create_pid(tchart, pid);
  234. c = p->current;
  235. if (!c) {
  236. c = zalloc(sizeof(*c));
  237. assert(c != NULL);
  238. p->current = c;
  239. c->next = p->all;
  240. p->all = c;
  241. }
  242. sample = zalloc(sizeof(*sample));
  243. assert(sample != NULL);
  244. sample->start_time = start;
  245. sample->end_time = end;
  246. sample->type = type;
  247. sample->next = c->samples;
  248. sample->cpu = cpu;
  249. sample->backtrace = backtrace;
  250. c->samples = sample;
  251. if (sample->type == TYPE_RUNNING && end > start && start > 0) {
  252. c->total_time += (end-start);
  253. p->total_time += (end-start);
  254. }
  255. if (c->start_time == 0 || c->start_time > start)
  256. c->start_time = start;
  257. if (p->start_time == 0 || p->start_time > start)
  258. p->start_time = start;
  259. }
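/*
 * Per-CPU bookkeeping for the C-state / P-state interval that is
 * currently open; c_state_end() and p_state_change() convert these
 * into power_event entries for the chart.
 */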
  260. #define MAX_CPUS 4096
  261. static u64 cpus_cstate_start_times[MAX_CPUS];
  262. static int cpus_cstate_state[MAX_CPUS];
  263. static u64 cpus_pstate_start_times[MAX_CPUS];
  264. static u64 cpus_pstate_state[MAX_CPUS];
  265. static int process_comm_event(struct perf_tool *tool,
  266. union perf_event *event,
  267. struct perf_sample *sample __maybe_unused,
  268. struct machine *machine __maybe_unused)
  269. {
  270. struct timechart *tchart = container_of(tool, struct timechart, tool);
  271. pid_set_comm(tchart, event->comm.tid, event->comm.comm);
  272. return 0;
  273. }
  274. static int process_fork_event(struct perf_tool *tool,
  275. union perf_event *event,
  276. struct perf_sample *sample __maybe_unused,
  277. struct machine *machine __maybe_unused)
  278. {
  279. struct timechart *tchart = container_of(tool, struct timechart, tool);
  280. pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
  281. return 0;
  282. }
  283. static int process_exit_event(struct perf_tool *tool,
  284. union perf_event *event,
  285. struct perf_sample *sample __maybe_unused,
  286. struct machine *machine __maybe_unused)
  287. {
  288. struct timechart *tchart = container_of(tool, struct timechart, tool);
  289. pid_exit(tchart, event->fork.pid, event->fork.time);
  290. return 0;
  291. }
  292. #ifdef SUPPORT_OLD_POWER_EVENTS
  293. static int use_old_power_events;
  294. #endif
  295. static void c_state_start(int cpu, u64 timestamp, int state)
  296. {
  297. cpus_cstate_start_times[cpu] = timestamp;
  298. cpus_cstate_state[cpu] = state;
  299. }
  300. static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
  301. {
  302. struct power_event *pwr = zalloc(sizeof(*pwr));
  303. if (!pwr)
  304. return;
  305. pwr->state = cpus_cstate_state[cpu];
  306. pwr->start_time = cpus_cstate_start_times[cpu];
  307. pwr->end_time = timestamp;
  308. pwr->cpu = cpu;
  309. pwr->type = CSTATE;
  310. pwr->next = tchart->power_events;
  311. tchart->power_events = pwr;
  312. }
  313. static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
  314. {
  315. struct power_event *pwr;
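/* The cpu_frequency tracepoint reports kHz, so values above ~8 GHz are presumably corrupt. */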
  316. if (new_freq > 8000000) /* detect invalid data */
  317. return;
  318. pwr = zalloc(sizeof(*pwr));
  319. if (!pwr)
  320. return;
  321. pwr->state = cpus_pstate_state[cpu];
  322. pwr->start_time = cpus_pstate_start_times[cpu];
  323. pwr->end_time = timestamp;
  324. pwr->cpu = cpu;
  325. pwr->type = PSTATE;
  326. pwr->next = tchart->power_events;
  327. if (!pwr->start_time)
  328. pwr->start_time = tchart->first_time;
  329. tchart->power_events = pwr;
  330. cpus_pstate_state[cpu] = new_freq;
  331. cpus_pstate_start_times[cpu] = timestamp;
  332. if ((u64)new_freq > tchart->max_freq)
  333. tchart->max_freq = new_freq;
  334. if (new_freq < tchart->min_freq || tchart->min_freq == 0)
  335. tchart->min_freq = new_freq;
  336. if (new_freq == tchart->max_freq - 1000)
  337. tchart->turbo_frequency = tchart->max_freq;
  338. }
  339. static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
  340. int waker, int wakee, u8 flags, const char *backtrace)
  341. {
  342. struct per_pid *p;
  343. struct wake_event *we = zalloc(sizeof(*we));
  344. if (!we)
  345. return;
  346. we->time = timestamp;
  347. we->waker = waker;
  348. we->backtrace = backtrace;
  349. if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
  350. we->waker = -1;
  351. we->wakee = wakee;
  352. we->next = tchart->wake_events;
  353. tchart->wake_events = we;
  354. p = find_create_pid(tchart, we->wakee);
  355. if (p && p->current && p->current->state == TYPE_NONE) {
  356. p->current->state_since = timestamp;
  357. p->current->state = TYPE_WAITING;
  358. }
  359. if (p && p->current && p->current->state == TYPE_BLOCKED) {
  360. pid_put_sample(tchart, p->pid, p->current->state, cpu,
  361. p->current->state_since, timestamp, NULL);
  362. p->current->state_since = timestamp;
  363. p->current->state = TYPE_WAITING;
  364. }
  365. }
  366. static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
  367. int prev_pid, int next_pid, u64 prev_state,
  368. const char *backtrace)
  369. {
  370. struct per_pid *p = NULL, *prev_p;
  371. prev_p = find_create_pid(tchart, prev_pid);
  372. p = find_create_pid(tchart, next_pid);
  373. if (prev_p->current && prev_p->current->state != TYPE_NONE)
  374. pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
  375. prev_p->current->state_since, timestamp,
  376. backtrace);
  377. if (p && p->current) {
  378. if (p->current->state != TYPE_NONE)
  379. pid_put_sample(tchart, next_pid, p->current->state, cpu,
  380. p->current->state_since, timestamp,
  381. backtrace);
  382. p->current->state_since = timestamp;
  383. p->current->state = TYPE_RUNNING;
  384. }
  385. if (prev_p->current) {
  386. prev_p->current->state = TYPE_NONE;
  387. prev_p->current->state_since = timestamp;
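/*
 * prev_state carries the kernel task-state bits: 0 means the task was
 * still runnable (preempted, hence "waiting"), while bit 1 (value 2,
 * TASK_UNINTERRUPTIBLE) means it blocked in the kernel.
 */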
  388. if (prev_state & 2)
  389. prev_p->current->state = TYPE_BLOCKED;
  390. if (prev_state == 0)
  391. prev_p->current->state = TYPE_WAITING;
  392. }
  393. }
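/*
 * Render the sample's callchain into a heap-allocated string via
 * open_memstream(), one resolved frame per line; the result is kept as
 * the backtrace attached to cpu samples and wakeup events.
 */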
  394. static const char *cat_backtrace(union perf_event *event,
  395. struct perf_sample *sample,
  396. struct machine *machine)
  397. {
  398. struct addr_location al;
  399. unsigned int i;
  400. char *p = NULL;
  401. size_t p_len;
  402. u8 cpumode = PERF_RECORD_MISC_USER;
  403. struct addr_location tal;
  404. struct ip_callchain *chain = sample->callchain;
  405. FILE *f = open_memstream(&p, &p_len);
  406. if (!f) {
  407. perror("open_memstream error");
  408. return NULL;
  409. }
  410. if (!chain)
  411. goto exit;
  412. if (machine__resolve(machine, &al, sample) < 0) {
  413. fprintf(stderr, "problem processing %d event, skipping it.\n",
  414. event->header.type);
  415. goto exit;
  416. }
  417. for (i = 0; i < chain->nr; i++) {
  418. u64 ip;
  419. if (callchain_param.order == ORDER_CALLEE)
  420. ip = chain->ips[i];
  421. else
  422. ip = chain->ips[chain->nr - i - 1];
  423. if (ip >= PERF_CONTEXT_MAX) {
  424. switch (ip) {
  425. case PERF_CONTEXT_HV:
  426. cpumode = PERF_RECORD_MISC_HYPERVISOR;
  427. break;
  428. case PERF_CONTEXT_KERNEL:
  429. cpumode = PERF_RECORD_MISC_KERNEL;
  430. break;
  431. case PERF_CONTEXT_USER:
  432. cpumode = PERF_RECORD_MISC_USER;
  433. break;
  434. default:
  435. pr_debug("invalid callchain context: "
  436. "%"PRId64"\n", (s64) ip);
  437. /*
  438. * It seems the callchain is corrupted.
  439. * Discard all.
  440. */
  441. zfree(&p);
  442. goto exit_put;
  443. }
  444. continue;
  445. }
  446. tal.filtered = 0;
  447. if (thread__find_symbol(al.thread, cpumode, ip, &tal))
  448. fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
  449. else
  450. fprintf(f, "..... %016" PRIx64 "\n", ip);
  451. }
  452. exit_put:
  453. addr_location__put(&al);
  454. exit:
  455. fclose(f);
  456. return p;
  457. }
  458. typedef int (*tracepoint_handler)(struct timechart *tchart,
  459. struct evsel *evsel,
  460. struct perf_sample *sample,
  461. const char *backtrace);
  462. static int process_sample_event(struct perf_tool *tool,
  463. union perf_event *event,
  464. struct perf_sample *sample,
  465. struct evsel *evsel,
  466. struct machine *machine)
  467. {
  468. struct timechart *tchart = container_of(tool, struct timechart, tool);
  469. if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
  470. if (!tchart->first_time || tchart->first_time > sample->time)
  471. tchart->first_time = sample->time;
  472. if (tchart->last_time < sample->time)
  473. tchart->last_time = sample->time;
  474. }
  475. if (evsel->handler != NULL) {
  476. tracepoint_handler f = evsel->handler;
  477. return f(tchart, evsel, sample,
  478. cat_backtrace(event, sample, machine));
  479. }
  480. return 0;
  481. }
  482. static int
  483. process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
  484. struct evsel *evsel,
  485. struct perf_sample *sample,
  486. const char *backtrace __maybe_unused)
  487. {
  488. u32 state = evsel__intval(evsel, sample, "state");
  489. u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
  490. if (state == (u32)PWR_EVENT_EXIT)
  491. c_state_end(tchart, cpu_id, sample->time);
  492. else
  493. c_state_start(cpu_id, sample->time, state);
  494. return 0;
  495. }
  496. static int
  497. process_sample_cpu_frequency(struct timechart *tchart,
  498. struct evsel *evsel,
  499. struct perf_sample *sample,
  500. const char *backtrace __maybe_unused)
  501. {
  502. u32 state = evsel__intval(evsel, sample, "state");
  503. u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
  504. p_state_change(tchart, cpu_id, sample->time, state);
  505. return 0;
  506. }
  507. static int
  508. process_sample_sched_wakeup(struct timechart *tchart,
  509. struct evsel *evsel,
  510. struct perf_sample *sample,
  511. const char *backtrace)
  512. {
  513. u8 flags = evsel__intval(evsel, sample, "common_flags");
  514. int waker = evsel__intval(evsel, sample, "common_pid");
  515. int wakee = evsel__intval(evsel, sample, "pid");
  516. sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
  517. return 0;
  518. }
  519. static int
  520. process_sample_sched_switch(struct timechart *tchart,
  521. struct evsel *evsel,
  522. struct perf_sample *sample,
  523. const char *backtrace)
  524. {
  525. int prev_pid = evsel__intval(evsel, sample, "prev_pid");
  526. int next_pid = evsel__intval(evsel, sample, "next_pid");
  527. u64 prev_state = evsel__intval(evsel, sample, "prev_state");
  528. sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
  529. prev_state, backtrace);
  530. return 0;
  531. }
  532. #ifdef SUPPORT_OLD_POWER_EVENTS
  533. static int
  534. process_sample_power_start(struct timechart *tchart __maybe_unused,
  535. struct evsel *evsel,
  536. struct perf_sample *sample,
  537. const char *backtrace __maybe_unused)
  538. {
  539. u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
  540. u64 value = evsel__intval(evsel, sample, "value");
  541. c_state_start(cpu_id, sample->time, value);
  542. return 0;
  543. }
  544. static int
  545. process_sample_power_end(struct timechart *tchart,
  546. struct evsel *evsel __maybe_unused,
  547. struct perf_sample *sample,
  548. const char *backtrace __maybe_unused)
  549. {
  550. c_state_end(tchart, sample->cpu, sample->time);
  551. return 0;
  552. }
  553. static int
  554. process_sample_power_frequency(struct timechart *tchart,
  555. struct evsel *evsel,
  556. struct perf_sample *sample,
  557. const char *backtrace __maybe_unused)
  558. {
  559. u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
  560. u64 value = evsel__intval(evsel, sample, "value");
  561. p_state_change(tchart, cpu_id, sample->time, value);
  562. return 0;
  563. }
  564. #endif /* SUPPORT_OLD_POWER_EVENTS */
  565. /*
  566. * After the last sample we need to wrap up the current C/P state
  567. * and close out each CPU for these.
  568. */
  569. static void end_sample_processing(struct timechart *tchart)
  570. {
  571. u64 cpu;
  572. struct power_event *pwr;
  573. for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
  574. /* C state */
  575. #if 0
  576. pwr = zalloc(sizeof(*pwr));
  577. if (!pwr)
  578. return;
  579. pwr->state = cpus_cstate_state[cpu];
  580. pwr->start_time = cpus_cstate_start_times[cpu];
  581. pwr->end_time = tchart->last_time;
  582. pwr->cpu = cpu;
  583. pwr->type = CSTATE;
  584. pwr->next = tchart->power_events;
  585. tchart->power_events = pwr;
  586. #endif
  587. /* P state */
  588. pwr = zalloc(sizeof(*pwr));
  589. if (!pwr)
  590. return;
  591. pwr->state = cpus_pstate_state[cpu];
  592. pwr->start_time = cpus_pstate_start_times[cpu];
  593. pwr->end_time = tchart->last_time;
  594. pwr->cpu = cpu;
  595. pwr->type = PSTATE;
  596. pwr->next = tchart->power_events;
  597. if (!pwr->start_time)
  598. pwr->start_time = tchart->first_time;
  599. if (!pwr->state)
  600. pwr->state = tchart->min_freq;
  601. tchart->power_events = pwr;
  602. }
  603. }
  604. static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
  605. u64 start, int fd)
  606. {
  607. struct per_pid *p = find_create_pid(tchart, pid);
  608. struct per_pidcomm *c = p->current;
  609. struct io_sample *sample;
  610. struct io_sample *prev;
  611. if (!c) {
  612. c = zalloc(sizeof(*c));
  613. if (!c)
  614. return -ENOMEM;
  615. p->current = c;
  616. c->next = p->all;
  617. p->all = c;
  618. }
  619. prev = c->io_samples;
  620. if (prev && prev->start_time && !prev->end_time) {
  621. pr_warning("Skip invalid start event: "
  622. "previous event already started!\n");
  623. /* Remove the previous event that was started;
  624. * we are not sure we will ever get an end for it. */
  625. c->io_samples = prev->next;
  626. free(prev);
  627. return 0;
  628. }
  629. sample = zalloc(sizeof(*sample));
  630. if (!sample)
  631. return -ENOMEM;
  632. sample->start_time = start;
  633. sample->type = type;
  634. sample->fd = fd;
  635. sample->next = c->io_samples;
  636. c->io_samples = sample;
  637. if (c->start_time == 0 || c->start_time > start)
  638. c->start_time = start;
  639. return 0;
  640. }
  641. static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
  642. u64 end, long ret)
  643. {
  644. struct per_pid *p = find_create_pid(tchart, pid);
  645. struct per_pidcomm *c = p->current;
  646. struct io_sample *sample, *prev;
  647. if (!c) {
  648. pr_warning("Invalid pidcomm!\n");
  649. return -1;
  650. }
  651. sample = c->io_samples;
  652. if (!sample) /* skip partially captured events */
  653. return 0;
  654. if (sample->end_time) {
  655. pr_warning("Skip invalid end event: "
  656. "previous event already ended!\n");
  657. return 0;
  658. }
  659. if (sample->type != type) {
  660. pr_warning("Skip invalid end event: invalid event type!\n");
  661. return 0;
  662. }
  663. sample->end_time = end;
  664. prev = sample->next;
  665. /* we want to be able to see small and fast transfers, so make them
  666. * at least min_time long, but don't overlap them */
  667. if (sample->end_time - sample->start_time < tchart->min_time)
  668. sample->end_time = sample->start_time + tchart->min_time;
  669. if (prev && sample->start_time < prev->end_time) {
  670. if (prev->err) /* try to make errors more visible */
  671. sample->start_time = prev->end_time;
  672. else
  673. prev->end_time = sample->start_time;
  674. }
  675. if (ret < 0) {
  676. sample->err = ret;
  677. } else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
  678. type == IOTYPE_TX || type == IOTYPE_RX) {
  679. if ((u64)ret > c->max_bytes)
  680. c->max_bytes = ret;
  681. c->total_bytes += ret;
  682. p->total_bytes += ret;
  683. sample->bytes = ret;
  684. }
  685. /* merge two requests to make svg smaller and render-friendly */
  686. if (prev &&
  687. prev->type == sample->type &&
  688. prev->err == sample->err &&
  689. prev->fd == sample->fd &&
  690. prev->end_time + tchart->merge_dist >= sample->start_time) {
  691. sample->bytes += prev->bytes;
  692. sample->merges += prev->merges + 1;
  693. sample->start_time = prev->start_time;
  694. sample->next = prev->next;
  695. free(prev);
  696. if (!sample->err && sample->bytes > c->max_bytes)
  697. c->max_bytes = sample->bytes;
  698. }
  699. tchart->io_events++;
  700. return 0;
  701. }
  702. static int
  703. process_enter_read(struct timechart *tchart,
  704. struct evsel *evsel,
  705. struct perf_sample *sample)
  706. {
  707. long fd = evsel__intval(evsel, sample, "fd");
  708. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
  709. sample->time, fd);
  710. }
  711. static int
  712. process_exit_read(struct timechart *tchart,
  713. struct evsel *evsel,
  714. struct perf_sample *sample)
  715. {
  716. long ret = evsel__intval(evsel, sample, "ret");
  717. return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
  718. sample->time, ret);
  719. }
  720. static int
  721. process_enter_write(struct timechart *tchart,
  722. struct evsel *evsel,
  723. struct perf_sample *sample)
  724. {
  725. long fd = evsel__intval(evsel, sample, "fd");
  726. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
  727. sample->time, fd);
  728. }
  729. static int
  730. process_exit_write(struct timechart *tchart,
  731. struct evsel *evsel,
  732. struct perf_sample *sample)
  733. {
  734. long ret = evsel__intval(evsel, sample, "ret");
  735. return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
  736. sample->time, ret);
  737. }
  738. static int
  739. process_enter_sync(struct timechart *tchart,
  740. struct evsel *evsel,
  741. struct perf_sample *sample)
  742. {
  743. long fd = evsel__intval(evsel, sample, "fd");
  744. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
  745. sample->time, fd);
  746. }
  747. static int
  748. process_exit_sync(struct timechart *tchart,
  749. struct evsel *evsel,
  750. struct perf_sample *sample)
  751. {
  752. long ret = evsel__intval(evsel, sample, "ret");
  753. return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
  754. sample->time, ret);
  755. }
  756. static int
  757. process_enter_tx(struct timechart *tchart,
  758. struct evsel *evsel,
  759. struct perf_sample *sample)
  760. {
  761. long fd = evsel__intval(evsel, sample, "fd");
  762. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
  763. sample->time, fd);
  764. }
  765. static int
  766. process_exit_tx(struct timechart *tchart,
  767. struct evsel *evsel,
  768. struct perf_sample *sample)
  769. {
  770. long ret = evsel__intval(evsel, sample, "ret");
  771. return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
  772. sample->time, ret);
  773. }
  774. static int
  775. process_enter_rx(struct timechart *tchart,
  776. struct evsel *evsel,
  777. struct perf_sample *sample)
  778. {
  779. long fd = evsel__intval(evsel, sample, "fd");
  780. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
  781. sample->time, fd);
  782. }
  783. static int
  784. process_exit_rx(struct timechart *tchart,
  785. struct evsel *evsel,
  786. struct perf_sample *sample)
  787. {
  788. long ret = evsel__intval(evsel, sample, "ret");
  789. return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
  790. sample->time, ret);
  791. }
  792. static int
  793. process_enter_poll(struct timechart *tchart,
  794. struct evsel *evsel,
  795. struct perf_sample *sample)
  796. {
  797. long fd = evsel__intval(evsel, sample, "fd");
  798. return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
  799. sample->time, fd);
  800. }
  801. static int
  802. process_exit_poll(struct timechart *tchart,
  803. struct evsel *evsel,
  804. struct perf_sample *sample)
  805. {
  806. long ret = evsel__intval(evsel, sample, "ret");
  807. return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
  808. sample->time, ret);
  809. }
  810. /*
  811. * Sort the pid data structure
  812. */
  813. static void sort_pids(struct timechart *tchart)
  814. {
  815. struct per_pid *new_list, *p, *cursor, *prev;
  816. /* sort by ppid first, then by pid, lowest to highest */
  817. new_list = NULL;
  818. while (tchart->all_data) {
  819. p = tchart->all_data;
  820. tchart->all_data = p->next;
  821. p->next = NULL;
  822. if (new_list == NULL) {
  823. new_list = p;
  824. p->next = NULL;
  825. continue;
  826. }
  827. prev = NULL;
  828. cursor = new_list;
  829. while (cursor) {
  830. if (cursor->ppid > p->ppid ||
  831. (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
  832. /* must insert before */
  833. if (prev) {
  834. p->next = prev->next;
  835. prev->next = p;
  836. cursor = NULL;
  837. continue;
  838. } else {
  839. p->next = new_list;
  840. new_list = p;
  841. cursor = NULL;
  842. continue;
  843. }
  844. }
  845. prev = cursor;
  846. cursor = cursor->next;
  847. if (!cursor)
  848. prev->next = p;
  849. }
  850. }
  851. tchart->all_data = new_list;
  852. }
  853. static void draw_c_p_states(struct timechart *tchart)
  854. {
  855. struct power_event *pwr;
  856. pwr = tchart->power_events;
  857. /*
  858. * two pass drawing so that the P state bars are on top of the C state blocks
  859. */
  860. while (pwr) {
  861. if (pwr->type == CSTATE)
  862. svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
  863. pwr = pwr->next;
  864. }
  865. pwr = tchart->power_events;
  866. while (pwr) {
  867. if (pwr->type == PSTATE) {
  868. if (!pwr->state)
  869. pwr->state = tchart->min_freq;
  870. svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
  871. }
  872. pwr = pwr->next;
  873. }
  874. }
  875. static void draw_wakeups(struct timechart *tchart)
  876. {
  877. struct wake_event *we;
  878. struct per_pid *p;
  879. struct per_pidcomm *c;
  880. we = tchart->wake_events;
  881. while (we) {
  882. int from = 0, to = 0;
  883. char *task_from = NULL, *task_to = NULL;
  884. /* locate the column of the waker and wakee */
  885. p = tchart->all_data;
  886. while (p) {
  887. if (p->pid == we->waker || p->pid == we->wakee) {
  888. c = p->all;
  889. while (c) {
  890. if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
  891. if (p->pid == we->waker && !from) {
  892. from = c->Y;
  893. task_from = strdup(c->comm);
  894. }
  895. if (p->pid == we->wakee && !to) {
  896. to = c->Y;
  897. task_to = strdup(c->comm);
  898. }
  899. }
  900. c = c->next;
  901. }
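/* Fallback: no comm spanned the wakeup time, so take any comm of this pid. */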
  902. c = p->all;
  903. while (c) {
  904. if (p->pid == we->waker && !from) {
  905. from = c->Y;
  906. task_from = strdup(c->comm);
  907. }
  908. if (p->pid == we->wakee && !to) {
  909. to = c->Y;
  910. task_to = strdup(c->comm);
  911. }
  912. c = c->next;
  913. }
  914. }
  915. p = p->next;
  916. }
  917. if (!task_from) {
  918. task_from = malloc(40);
  919. sprintf(task_from, "[%i]", we->waker);
  920. }
  921. if (!task_to) {
  922. task_to = malloc(40);
  923. sprintf(task_to, "[%i]", we->wakee);
  924. }
  925. if (we->waker == -1)
  926. svg_interrupt(we->time, to, we->backtrace);
  927. else if (from && to && abs(from - to) == 1)
  928. svg_wakeline(we->time, from, to, we->backtrace);
  929. else
  930. svg_partial_wakeline(we->time, from, task_from, to,
  931. task_to, we->backtrace);
  932. we = we->next;
  933. free(task_from);
  934. free(task_to);
  935. }
  936. }
  937. static void draw_cpu_usage(struct timechart *tchart)
  938. {
  939. struct per_pid *p;
  940. struct per_pidcomm *c;
  941. struct cpu_sample *sample;
  942. p = tchart->all_data;
  943. while (p) {
  944. c = p->all;
  945. while (c) {
  946. sample = c->samples;
  947. while (sample) {
  948. if (sample->type == TYPE_RUNNING) {
  949. svg_process(sample->cpu,
  950. sample->start_time,
  951. sample->end_time,
  952. p->pid,
  953. c->comm,
  954. sample->backtrace);
  955. }
  956. sample = sample->next;
  957. }
  958. c = c->next;
  959. }
  960. p = p->next;
  961. }
  962. }
  963. static void draw_io_bars(struct timechart *tchart)
  964. {
  965. const char *suf;
  966. double bytes;
  967. char comm[256];
  968. struct per_pid *p;
  969. struct per_pidcomm *c;
  970. struct io_sample *sample;
  971. int Y = 1;
  972. p = tchart->all_data;
  973. while (p) {
  974. c = p->all;
  975. while (c) {
  976. if (!c->display) {
  977. c->Y = 0;
  978. c = c->next;
  979. continue;
  980. }
  981. svg_box(Y, c->start_time, c->end_time, "process3");
  982. sample = c->io_samples;
  983. for (sample = c->io_samples; sample; sample = sample->next) {
  984. double h = (double)sample->bytes / c->max_bytes;
  985. if (tchart->skip_eagain &&
  986. sample->err == -EAGAIN)
  987. continue;
  988. if (sample->err)
  989. h = 1;
  990. if (sample->type == IOTYPE_SYNC)
  991. svg_fbox(Y,
  992. sample->start_time,
  993. sample->end_time,
  994. 1,
  995. sample->err ? "error" : "sync",
  996. sample->fd,
  997. sample->err,
  998. sample->merges);
  999. else if (sample->type == IOTYPE_POLL)
  1000. svg_fbox(Y,
  1001. sample->start_time,
  1002. sample->end_time,
  1003. 1,
  1004. sample->err ? "error" : "poll",
  1005. sample->fd,
  1006. sample->err,
  1007. sample->merges);
  1008. else if (sample->type == IOTYPE_READ)
  1009. svg_ubox(Y,
  1010. sample->start_time,
  1011. sample->end_time,
  1012. h,
  1013. sample->err ? "error" : "disk",
  1014. sample->fd,
  1015. sample->err,
  1016. sample->merges);
  1017. else if (sample->type == IOTYPE_WRITE)
  1018. svg_lbox(Y,
  1019. sample->start_time,
  1020. sample->end_time,
  1021. h,
  1022. sample->err ? "error" : "disk",
  1023. sample->fd,
  1024. sample->err,
  1025. sample->merges);
  1026. else if (sample->type == IOTYPE_RX)
  1027. svg_ubox(Y,
  1028. sample->start_time,
  1029. sample->end_time,
  1030. h,
  1031. sample->err ? "error" : "net",
  1032. sample->fd,
  1033. sample->err,
  1034. sample->merges);
  1035. else if (sample->type == IOTYPE_TX)
  1036. svg_lbox(Y,
  1037. sample->start_time,
  1038. sample->end_time,
  1039. h,
  1040. sample->err ? "error" : "net",
  1041. sample->fd,
  1042. sample->err,
  1043. sample->merges);
  1044. }
  1045. suf = "";
  1046. bytes = c->total_bytes;
  1047. if (bytes > 1024) {
  1048. bytes = bytes / 1024;
  1049. suf = "K";
  1050. }
  1051. if (bytes > 1024) {
  1052. bytes = bytes / 1024;
  1053. suf = "M";
  1054. }
  1055. if (bytes > 1024) {
  1056. bytes = bytes / 1024;
  1057. suf = "G";
  1058. }
  1059. sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
  1060. svg_text(Y, c->start_time, comm);
  1061. c->Y = Y;
  1062. Y++;
  1063. c = c->next;
  1064. }
  1065. p = p->next;
  1066. }
  1067. }
  1068. static void draw_process_bars(struct timechart *tchart)
  1069. {
  1070. struct per_pid *p;
  1071. struct per_pidcomm *c;
  1072. struct cpu_sample *sample;
  1073. int Y = 0;
  1074. Y = 2 * tchart->numcpus + 2;
  1075. p = tchart->all_data;
  1076. while (p) {
  1077. c = p->all;
  1078. while (c) {
  1079. if (!c->display) {
  1080. c->Y = 0;
  1081. c = c->next;
  1082. continue;
  1083. }
  1084. svg_box(Y, c->start_time, c->end_time, "process");
  1085. sample = c->samples;
  1086. while (sample) {
  1087. if (sample->type == TYPE_RUNNING)
  1088. svg_running(Y, sample->cpu,
  1089. sample->start_time,
  1090. sample->end_time,
  1091. sample->backtrace);
  1092. if (sample->type == TYPE_BLOCKED)
  1093. svg_blocked(Y, sample->cpu,
  1094. sample->start_time,
  1095. sample->end_time,
  1096. sample->backtrace);
  1097. if (sample->type == TYPE_WAITING)
  1098. svg_waiting(Y, sample->cpu,
  1099. sample->start_time,
  1100. sample->end_time,
  1101. sample->backtrace);
  1102. sample = sample->next;
  1103. }
  1104. if (c->comm) {
  1105. char comm[256];
  1106. if (c->total_time > 5000000000) /* 5 seconds */
  1107. sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
  1108. else
  1109. sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
  1110. svg_text(Y, c->start_time, comm);
  1111. }
  1112. c->Y = Y;
  1113. Y++;
  1114. c = c->next;
  1115. }
  1116. p = p->next;
  1117. }
  1118. }
  1119. static void add_process_filter(const char *string)
  1120. {
  1121. int pid = strtoull(string, NULL, 10);
  1122. struct process_filter *filt = malloc(sizeof(*filt));
  1123. if (!filt)
  1124. return;
  1125. filt->name = strdup(string);
  1126. filt->pid = pid;
  1127. filt->next = process_filter;
  1128. process_filter = filt;
  1129. }
  1130. static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
  1131. {
  1132. struct process_filter *filt;
  1133. if (!process_filter)
  1134. return 1;
  1135. filt = process_filter;
  1136. while (filt) {
  1137. if (filt->pid && p->pid == filt->pid)
  1138. return 1;
  1139. if (strcmp(filt->name, c->comm) == 0)
  1140. return 1;
  1141. filt = filt->next;
  1142. }
  1143. return 0;
  1144. }
  1145. static int determine_display_tasks_filtered(struct timechart *tchart)
  1146. {
  1147. struct per_pid *p;
  1148. struct per_pidcomm *c;
  1149. int count = 0;
  1150. p = tchart->all_data;
  1151. while (p) {
  1152. p->display = 0;
  1153. if (p->start_time == 1)
  1154. p->start_time = tchart->first_time;
  1155. /* no exit marker, task kept running to the end */
  1156. if (p->end_time == 0)
  1157. p->end_time = tchart->last_time;
  1158. c = p->all;
  1159. while (c) {
  1160. c->display = 0;
  1161. if (c->start_time == 1)
  1162. c->start_time = tchart->first_time;
  1163. if (passes_filter(p, c)) {
  1164. c->display = 1;
  1165. p->display = 1;
  1166. count++;
  1167. }
  1168. if (c->end_time == 0)
  1169. c->end_time = tchart->last_time;
  1170. c = c->next;
  1171. }
  1172. p = p->next;
  1173. }
  1174. return count;
  1175. }
  1176. static int determine_display_tasks(struct timechart *tchart, u64 threshold)
  1177. {
  1178. struct per_pid *p;
  1179. struct per_pidcomm *c;
  1180. int count = 0;
  1181. p = tchart->all_data;
  1182. while (p) {
  1183. p->display = 0;
  1184. if (p->start_time == 1)
  1185. p->start_time = tchart->first_time;
  1186. /* no exit marker, task kept running to the end */
  1187. if (p->end_time == 0)
  1188. p->end_time = tchart->last_time;
  1189. if (p->total_time >= threshold)
  1190. p->display = 1;
  1191. c = p->all;
  1192. while (c) {
  1193. c->display = 0;
  1194. if (c->start_time == 1)
  1195. c->start_time = tchart->first_time;
  1196. if (c->total_time >= threshold) {
  1197. c->display = 1;
  1198. count++;
  1199. }
  1200. if (c->end_time == 0)
  1201. c->end_time = tchart->last_time;
  1202. c = c->next;
  1203. }
  1204. p = p->next;
  1205. }
  1206. return count;
  1207. }
  1208. static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
  1209. {
  1210. struct per_pid *p;
  1211. struct per_pidcomm *c;
  1212. int count = 0;
  1213. p = timechart->all_data;
  1214. while (p) {
  1215. /* no exit marker, task kept running to the end */
  1216. if (p->end_time == 0)
  1217. p->end_time = timechart->last_time;
  1218. c = p->all;
  1219. while (c) {
  1220. c->display = 0;
  1221. if (c->total_bytes >= threshold) {
  1222. c->display = 1;
  1223. count++;
  1224. }
  1225. if (c->end_time == 0)
  1226. c->end_time = timechart->last_time;
  1227. c = c->next;
  1228. }
  1229. p = p->next;
  1230. }
  1231. return count;
  1232. }
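/* Initial display thresholds: 1 MiB of I/O traffic or 10 ms (in ns) of CPU time. */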
  1233. #define BYTES_THRESH (1 * 1024 * 1024)
  1234. #define TIME_THRESH 10000000
  1235. static void write_svg_file(struct timechart *tchart, const char *filename)
  1236. {
  1237. u64 i;
  1238. int count;
  1239. int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
  1240. if (tchart->power_only)
  1241. tchart->proc_num = 0;
  1242. /* We'd like to show at least proc_num tasks;
  1243. * be less picky if we have fewer */
  1244. do {
  1245. if (process_filter)
  1246. count = determine_display_tasks_filtered(tchart);
  1247. else if (tchart->io_events)
  1248. count = determine_display_io_tasks(tchart, thresh);
  1249. else
  1250. count = determine_display_tasks(tchart, thresh);
  1251. thresh /= 10;
  1252. } while (!process_filter && thresh && count < tchart->proc_num);
  1253. if (!tchart->proc_num)
  1254. count = 0;
  1255. if (tchart->io_events) {
  1256. open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
  1257. svg_time_grid(0.5);
  1258. svg_io_legenda();
  1259. draw_io_bars(tchart);
  1260. } else {
  1261. open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
  1262. svg_time_grid(0);
  1263. svg_legenda();
  1264. for (i = 0; i < tchart->numcpus; i++)
  1265. svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
  1266. draw_cpu_usage(tchart);
  1267. if (tchart->proc_num)
  1268. draw_process_bars(tchart);
  1269. if (!tchart->tasks_only)
  1270. draw_c_p_states(tchart);
  1271. if (tchart->proc_num)
  1272. draw_wakeups(tchart);
  1273. }
  1274. svg_close();
  1275. }
  1276. static int process_header(struct perf_file_section *section __maybe_unused,
  1277. struct perf_header *ph,
  1278. int feat,
  1279. int fd __maybe_unused,
  1280. void *data)
  1281. {
  1282. struct timechart *tchart = data;
  1283. switch (feat) {
  1284. case HEADER_NRCPUS:
  1285. tchart->numcpus = ph->env.nr_cpus_avail;
  1286. break;
  1287. case HEADER_CPU_TOPOLOGY:
  1288. if (!tchart->topology)
  1289. break;
  1290. if (svg_build_topology_map(&ph->env))
  1291. fprintf(stderr, "problem building topology\n");
  1292. break;
  1293. default:
  1294. break;
  1295. }
  1296. return 0;
  1297. }
  1298. static int __cmd_timechart(struct timechart *tchart, const char *output_name)
  1299. {
  1300. const struct evsel_str_handler power_tracepoints[] = {
  1301. { "power:cpu_idle", process_sample_cpu_idle },
  1302. { "power:cpu_frequency", process_sample_cpu_frequency },
  1303. { "sched:sched_wakeup", process_sample_sched_wakeup },
  1304. { "sched:sched_switch", process_sample_sched_switch },
  1305. #ifdef SUPPORT_OLD_POWER_EVENTS
  1306. { "power:power_start", process_sample_power_start },
  1307. { "power:power_end", process_sample_power_end },
  1308. { "power:power_frequency", process_sample_power_frequency },
  1309. #endif
  1310. { "syscalls:sys_enter_read", process_enter_read },
  1311. { "syscalls:sys_enter_pread64", process_enter_read },
  1312. { "syscalls:sys_enter_readv", process_enter_read },
  1313. { "syscalls:sys_enter_preadv", process_enter_read },
  1314. { "syscalls:sys_enter_write", process_enter_write },
  1315. { "syscalls:sys_enter_pwrite64", process_enter_write },
  1316. { "syscalls:sys_enter_writev", process_enter_write },
  1317. { "syscalls:sys_enter_pwritev", process_enter_write },
  1318. { "syscalls:sys_enter_sync", process_enter_sync },
  1319. { "syscalls:sys_enter_sync_file_range", process_enter_sync },
  1320. { "syscalls:sys_enter_fsync", process_enter_sync },
  1321. { "syscalls:sys_enter_msync", process_enter_sync },
  1322. { "syscalls:sys_enter_recvfrom", process_enter_rx },
  1323. { "syscalls:sys_enter_recvmmsg", process_enter_rx },
  1324. { "syscalls:sys_enter_recvmsg", process_enter_rx },
  1325. { "syscalls:sys_enter_sendto", process_enter_tx },
  1326. { "syscalls:sys_enter_sendmsg", process_enter_tx },
  1327. { "syscalls:sys_enter_sendmmsg", process_enter_tx },
  1328. { "syscalls:sys_enter_epoll_pwait", process_enter_poll },
  1329. { "syscalls:sys_enter_epoll_wait", process_enter_poll },
  1330. { "syscalls:sys_enter_poll", process_enter_poll },
  1331. { "syscalls:sys_enter_ppoll", process_enter_poll },
  1332. { "syscalls:sys_enter_pselect6", process_enter_poll },
  1333. { "syscalls:sys_enter_select", process_enter_poll },
  1334. { "syscalls:sys_exit_read", process_exit_read },
  1335. { "syscalls:sys_exit_pread64", process_exit_read },
  1336. { "syscalls:sys_exit_readv", process_exit_read },
  1337. { "syscalls:sys_exit_preadv", process_exit_read },
  1338. { "syscalls:sys_exit_write", process_exit_write },
  1339. { "syscalls:sys_exit_pwrite64", process_exit_write },
  1340. { "syscalls:sys_exit_writev", process_exit_write },
  1341. { "syscalls:sys_exit_pwritev", process_exit_write },
  1342. { "syscalls:sys_exit_sync", process_exit_sync },
  1343. { "syscalls:sys_exit_sync_file_range", process_exit_sync },
  1344. { "syscalls:sys_exit_fsync", process_exit_sync },
  1345. { "syscalls:sys_exit_msync", process_exit_sync },
  1346. { "syscalls:sys_exit_recvfrom", process_exit_rx },
  1347. { "syscalls:sys_exit_recvmmsg", process_exit_rx },
  1348. { "syscalls:sys_exit_recvmsg", process_exit_rx },
  1349. { "syscalls:sys_exit_sendto", process_exit_tx },
  1350. { "syscalls:sys_exit_sendmsg", process_exit_tx },
  1351. { "syscalls:sys_exit_sendmmsg", process_exit_tx },
  1352. { "syscalls:sys_exit_epoll_pwait", process_exit_poll },
  1353. { "syscalls:sys_exit_epoll_wait", process_exit_poll },
  1354. { "syscalls:sys_exit_poll", process_exit_poll },
  1355. { "syscalls:sys_exit_ppoll", process_exit_poll },
  1356. { "syscalls:sys_exit_pselect6", process_exit_poll },
  1357. { "syscalls:sys_exit_select", process_exit_poll },
  1358. };
  1359. struct perf_data data = {
  1360. .path = input_name,
  1361. .mode = PERF_DATA_MODE_READ,
  1362. .force = tchart->force,
  1363. };
  1364. struct perf_session *session = perf_session__new(&data, false,
  1365. &tchart->tool);
  1366. int ret = -EINVAL;
  1367. if (IS_ERR(session))
  1368. return PTR_ERR(session);
  1369. symbol__init(&session->header.env);
  1370. (void)perf_header__process_sections(&session->header,
  1371. perf_data__fd(session->data),
  1372. tchart,
  1373. process_header);
  1374. if (!perf_session__has_traces(session, "timechart record"))
  1375. goto out_delete;
  1376. if (perf_session__set_tracepoints_handlers(session,
  1377. power_tracepoints)) {
  1378. pr_err("Initializing session tracepoint handlers failed\n");
  1379. goto out_delete;
  1380. }
  1381. ret = perf_session__process_events(session);
  1382. if (ret)
  1383. goto out_delete;
  1384. end_sample_processing(tchart);
  1385. sort_pids(tchart);
  1386. write_svg_file(tchart, output_name);
  1387. pr_info("Written %2.1f seconds of trace to %s.\n",
  1388. (tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
  1389. out_delete:
  1390. perf_session__delete(session);
  1391. return ret;
  1392. }
  1393. static int timechart__io_record(int argc, const char **argv)
  1394. {
  1395. unsigned int rec_argc, i;
  1396. const char **rec_argv;
  1397. const char **p;
  1398. char *filter = NULL;
  1399. const char * const common_args[] = {
  1400. "record", "-a", "-R", "-c", "1",
  1401. };
  1402. unsigned int common_args_nr = ARRAY_SIZE(common_args);
  1403. const char * const disk_events[] = {
  1404. "syscalls:sys_enter_read",
  1405. "syscalls:sys_enter_pread64",
  1406. "syscalls:sys_enter_readv",
  1407. "syscalls:sys_enter_preadv",
  1408. "syscalls:sys_enter_write",
  1409. "syscalls:sys_enter_pwrite64",
  1410. "syscalls:sys_enter_writev",
  1411. "syscalls:sys_enter_pwritev",
  1412. "syscalls:sys_enter_sync",
  1413. "syscalls:sys_enter_sync_file_range",
  1414. "syscalls:sys_enter_fsync",
  1415. "syscalls:sys_enter_msync",
  1416. "syscalls:sys_exit_read",
  1417. "syscalls:sys_exit_pread64",
  1418. "syscalls:sys_exit_readv",
  1419. "syscalls:sys_exit_preadv",
  1420. "syscalls:sys_exit_write",
  1421. "syscalls:sys_exit_pwrite64",
  1422. "syscalls:sys_exit_writev",
  1423. "syscalls:sys_exit_pwritev",
  1424. "syscalls:sys_exit_sync",
  1425. "syscalls:sys_exit_sync_file_range",
  1426. "syscalls:sys_exit_fsync",
  1427. "syscalls:sys_exit_msync",
  1428. };
  1429. unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
  1430. const char * const net_events[] = {
  1431. "syscalls:sys_enter_recvfrom",
  1432. "syscalls:sys_enter_recvmmsg",
  1433. "syscalls:sys_enter_recvmsg",
  1434. "syscalls:sys_enter_sendto",
  1435. "syscalls:sys_enter_sendmsg",
  1436. "syscalls:sys_enter_sendmmsg",
  1437. "syscalls:sys_exit_recvfrom",
  1438. "syscalls:sys_exit_recvmmsg",
  1439. "syscalls:sys_exit_recvmsg",
  1440. "syscalls:sys_exit_sendto",
  1441. "syscalls:sys_exit_sendmsg",
  1442. "syscalls:sys_exit_sendmmsg",
  1443. };
  1444. unsigned int net_events_nr = ARRAY_SIZE(net_events);
  1445. const char * const poll_events[] = {
  1446. "syscalls:sys_enter_epoll_pwait",
  1447. "syscalls:sys_enter_epoll_wait",
  1448. "syscalls:sys_enter_poll",
  1449. "syscalls:sys_enter_ppoll",
  1450. "syscalls:sys_enter_pselect6",
  1451. "syscalls:sys_enter_select",
  1452. "syscalls:sys_exit_epoll_pwait",
  1453. "syscalls:sys_exit_epoll_wait",
  1454. "syscalls:sys_exit_poll",
  1455. "syscalls:sys_exit_ppoll",
  1456. "syscalls:sys_exit_pselect6",
  1457. "syscalls:sys_exit_select",
  1458. };
  1459. unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
  1460. rec_argc = common_args_nr +
  1461. disk_events_nr * 4 +
  1462. net_events_nr * 4 +
  1463. poll_events_nr * 4 +
  1464. argc;
  1465. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1466. if (rec_argv == NULL)
  1467. return -ENOMEM;
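/* Filter out perf's own pid so the recorder does not chart its own I/O. */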
  1468. if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
  1469. free(rec_argv);
  1470. return -ENOMEM;
  1471. }
  1472. p = rec_argv;
  1473. for (i = 0; i < common_args_nr; i++)
  1474. *p++ = strdup(common_args[i]);
  1475. for (i = 0; i < disk_events_nr; i++) {
  1476. if (!is_valid_tracepoint(disk_events[i])) {
  1477. rec_argc -= 4;
  1478. continue;
  1479. }
  1480. *p++ = "-e";
  1481. *p++ = strdup(disk_events[i]);
  1482. *p++ = "--filter";
  1483. *p++ = filter;
  1484. }
  1485. for (i = 0; i < net_events_nr; i++) {
  1486. if (!is_valid_tracepoint(net_events[i])) {
  1487. rec_argc -= 4;
  1488. continue;
  1489. }
  1490. *p++ = "-e";
  1491. *p++ = strdup(net_events[i]);
  1492. *p++ = "--filter";
  1493. *p++ = filter;
  1494. }
  1495. for (i = 0; i < poll_events_nr; i++) {
  1496. if (!is_valid_tracepoint(poll_events[i])) {
  1497. rec_argc -= 4;
  1498. continue;
  1499. }
  1500. *p++ = "-e";
  1501. *p++ = strdup(poll_events[i]);
  1502. *p++ = "--filter";
  1503. *p++ = filter;
  1504. }
  1505. for (i = 0; i < (unsigned int)argc; i++)
  1506. *p++ = argv[i];
  1507. return cmd_record(rec_argc, rec_argv);
  1508. }
  1509. static int timechart__record(struct timechart *tchart, int argc, const char **argv)
  1510. {
  1511. unsigned int rec_argc, i, j;
  1512. const char **rec_argv;
  1513. const char **p;
  1514. unsigned int record_elems;
  1515. const char * const common_args[] = {
  1516. "record", "-a", "-R", "-c", "1",
  1517. };
  1518. unsigned int common_args_nr = ARRAY_SIZE(common_args);
  1519. const char * const backtrace_args[] = {
  1520. "-g",
  1521. };
  1522. unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
  1523. const char * const power_args[] = {
  1524. "-e", "power:cpu_frequency",
  1525. "-e", "power:cpu_idle",
  1526. };
  1527. unsigned int power_args_nr = ARRAY_SIZE(power_args);
  1528. const char * const old_power_args[] = {
  1529. #ifdef SUPPORT_OLD_POWER_EVENTS
  1530. "-e", "power:power_start",
  1531. "-e", "power:power_end",
  1532. "-e", "power:power_frequency",
  1533. #endif
  1534. };
  1535. unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
  1536. const char * const tasks_args[] = {
  1537. "-e", "sched:sched_wakeup",
  1538. "-e", "sched:sched_switch",
  1539. };
  1540. unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
  1541. #ifdef SUPPORT_OLD_POWER_EVENTS
  1542. if (!is_valid_tracepoint("power:cpu_idle") &&
  1543. is_valid_tracepoint("power:power_start")) {
  1544. use_old_power_events = 1;
  1545. power_args_nr = 0;
  1546. } else {
  1547. old_power_args_nr = 0;
  1548. }
  1549. #endif
  1550. if (tchart->power_only)
  1551. tasks_args_nr = 0;
  1552. if (tchart->tasks_only) {
  1553. power_args_nr = 0;
  1554. old_power_args_nr = 0;
  1555. }
  1556. if (!tchart->with_backtrace)
  1557. backtrace_args_no = 0;
  1558. record_elems = common_args_nr + tasks_args_nr +
  1559. power_args_nr + old_power_args_nr + backtrace_args_no;
  1560. rec_argc = record_elems + argc;
  1561. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1562. if (rec_argv == NULL)
  1563. return -ENOMEM;
  1564. p = rec_argv;
  1565. for (i = 0; i < common_args_nr; i++)
  1566. *p++ = strdup(common_args[i]);
  1567. for (i = 0; i < backtrace_args_no; i++)
  1568. *p++ = strdup(backtrace_args[i]);
  1569. for (i = 0; i < tasks_args_nr; i++)
  1570. *p++ = strdup(tasks_args[i]);
  1571. for (i = 0; i < power_args_nr; i++)
  1572. *p++ = strdup(power_args[i]);
  1573. for (i = 0; i < old_power_args_nr; i++)
  1574. *p++ = strdup(old_power_args[i]);
  1575. for (j = 0; j < (unsigned int)argc; j++)
  1576. *p++ = argv[j];
  1577. return cmd_record(rec_argc, rec_argv);
  1578. }
  1579. static int
  1580. parse_process(const struct option *opt __maybe_unused, const char *arg,
  1581. int __maybe_unused unset)
  1582. {
  1583. if (arg)
  1584. add_process_filter(arg);
  1585. return 0;
  1586. }
  1587. static int
  1588. parse_highlight(const struct option *opt __maybe_unused, const char *arg,
  1589. int __maybe_unused unset)
  1590. {
  1591. unsigned long duration = strtoul(arg, NULL, 0);
  1592. if (svg_highlight || svg_highlight_name)
  1593. return -1;
  1594. if (duration)
  1595. svg_highlight = duration;
  1596. else
  1597. svg_highlight_name = strdup(arg);
  1598. return 0;
  1599. }
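/* Parse a time option such as "10ms" or "500us"; a bare number is taken as nanoseconds. */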
  1600. static int
  1601. parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
  1602. {
  1603. char unit = 'n';
  1604. u64 *value = opt->value;
  1605. if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
  1606. switch (unit) {
  1607. case 'm':
  1608. *value *= NSEC_PER_MSEC;
  1609. break;
  1610. case 'u':
  1611. *value *= NSEC_PER_USEC;
  1612. break;
  1613. case 'n':
  1614. break;
  1615. default:
  1616. return -1;
  1617. }
  1618. }
  1619. return 0;
  1620. }
  1621. int cmd_timechart(int argc, const char **argv)
  1622. {
  1623. struct timechart tchart = {
  1624. .tool = {
  1625. .comm = process_comm_event,
  1626. .fork = process_fork_event,
  1627. .exit = process_exit_event,
  1628. .sample = process_sample_event,
  1629. .ordered_events = true,
  1630. },
  1631. .proc_num = 15,
  1632. .min_time = NSEC_PER_MSEC,
  1633. .merge_dist = 1000,
  1634. };
  1635. const char *output_name = "output.svg";
  1636. const struct option timechart_common_options[] = {
  1637. OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
  1638. OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
  1639. OPT_END()
  1640. };
  1641. const struct option timechart_options[] = {
  1642. OPT_STRING('i', "input", &input_name, "file", "input file name"),
  1643. OPT_STRING('o', "output", &output_name, "file", "output file name"),
  1644. OPT_INTEGER('w', "width", &svg_page_width, "page width"),
  1645. OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
  1646. "highlight tasks. Pass duration in ns or process name.",
  1647. parse_highlight),
  1648. OPT_CALLBACK('p', "process", NULL, "process",
  1649. "process selector. Pass a pid or process name.",
  1650. parse_process),
  1651. OPT_CALLBACK(0, "symfs", NULL, "directory",
  1652. "Look for files with symbols relative to this directory",
  1653. symbol__config_symfs),
  1654. OPT_INTEGER('n', "proc-num", &tchart.proc_num,
  1655. "min. number of tasks to print"),
  1656. OPT_BOOLEAN('t', "topology", &tchart.topology,
  1657. "sort CPUs according to topology"),
  1658. OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
  1659. "skip EAGAIN errors"),
  1660. OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
  1661. "all IO faster than min-time will visually appear longer",
  1662. parse_time),
  1663. OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
  1664. "merge events that are merge-dist us apart",
  1665. parse_time),
  1666. OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
  1667. OPT_PARENT(timechart_common_options),
  1668. };
  1669. const char * const timechart_subcommands[] = { "record", NULL };
  1670. const char *timechart_usage[] = {
  1671. "perf timechart [<options>] {record}",
  1672. NULL
  1673. };
  1674. const struct option timechart_record_options[] = {
  1675. OPT_BOOLEAN('I', "io-only", &tchart.io_only,
  1676. "record only IO data"),
  1677. OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
  1678. OPT_PARENT(timechart_common_options),
  1679. };
  1680. const char * const timechart_record_usage[] = {
  1681. "perf timechart record [<options>]",
  1682. NULL
  1683. };
  1684. argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
  1685. timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
  1686. if (tchart.power_only && tchart.tasks_only) {
  1687. pr_err("-P and -T options cannot be used at the same time.\n");
  1688. return -1;
  1689. }
  1690. if (argc && !strncmp(argv[0], "rec", 3)) {
  1691. argc = parse_options(argc, argv, timechart_record_options,
  1692. timechart_record_usage,
  1693. PARSE_OPT_STOP_AT_NON_OPTION);
  1694. if (tchart.power_only && tchart.tasks_only) {
  1695. pr_err("-P and -T options cannot be used at the same time.\n");
  1696. return -1;
  1697. }
  1698. if (tchart.io_only)
  1699. return timechart__io_record(argc, argv);
  1700. else
  1701. return timechart__record(&tchart, argc, argv);
  1702. } else if (argc)
  1703. usage_with_options(timechart_usage, timechart_options);
  1704. setup_pager();
  1705. return __cmd_timechart(&tchart, output_name);
  1706. }