hist.c

// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

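/*
 * Format a single field of a hist entry, either as a percentage of the
 * total period or as a raw value. For event groups, also print one
 * column per group member, zero-filling members that recorded no
 * samples so the columns stay aligned.
 */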
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill trailing group members which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size as that is where the caller
	 * expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}

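/*
 * Entry point for plain fields: trims the column width so the leading
 * space (and the '%' sign for percentages) still fits, unless a field
 * separator is in use, in which case alignment does not matter and a
 * minimal width of 1 is passed down.
 */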
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;

		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

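/*
 * Collect the field values of all group members of entries 'a' and 'b'
 * into two arrays indexed by group index, so the entries can later be
 * compared member by member. Returns -1, with both output pointers set
 * to NULL, on allocation failure.
 */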
static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
				hpp_field_fn get_field, int nr_members,
				u64 **fields_a, u64 **fields_b)
{
	u64 *fa = calloc(nr_members, sizeof(*fa)),
	    *fb = calloc(nr_members, sizeof(*fb));
	struct hist_entry *pair;

	if (!fa || !fb)
		goto out_free;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);

		fa[evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		struct evsel *evsel = hists_to_evsel(pair->hists);

		fb[evsel__group_idx(evsel)] = get_field(pair);
	}

	*fields_a = fa;
	*fields_b = fb;

	return 0;

out_free:
	free(fa);
	free(fb);
	*fields_a = *fields_b = NULL;
	return -1;
}

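/*
 * Compare two entries of a grouped event by the value of group member
 * 'idx' first, then by the remaining members to keep the order stable.
 * Falls back to the plain field comparison when 'idx' is out of range
 * or the member arrays cannot be allocated.
 */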
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

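/*
 * Default numeric sort: compare the entry's own field, and only when it
 * ties (with event grouping enabled) break the tie with the other group
 * members. A non-zero symbol_conf.group_sort_idx redirects the whole
 * comparison to __hpp__group_sort_idx() instead.
 */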
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

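/*
 * Column width: the widest of the configured (or user-specified)
 * length, the header name, and, for event groups, one slot per member
 * (nr_members * fmt->len).
 */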
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

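/*
 * The macros below stamp out, per column type, a field getter plus the
 * ->color, ->entry and ->sort callbacks wired into perf_hpp__format[].
 * E.g. HPP_PERCENT_FNS(overhead, period) expands to he_get_period(),
 * hpp__color_overhead(), hpp__entry_overhead() and hpp__sort_overhead().
 */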
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

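/* Instantiate the callbacks for every default column. */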
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

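/*
 * Register the default output columns, honoring a user-specified field
 * order and the cumulate, CPU-utilization, nr_samples and total-period
 * display options.
 */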
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}

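/*
 * Drop the accumulated "Children" column again and let the plain
 * overhead column reclaim the "Overhead" name (perf_hpp__init() renamed
 * it to "Self" when cumulation was enabled).
 */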
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

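/*
 * Width of the leading hpp columns only, i.e. everything before the
 * first sort or dynamic entry.
 */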
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

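/*
 * Hierarchy mode keeps one hpp list per output level; duplicate 'fmt'
 * into the node for its level, creating the node on first use. A level
 * stays marked as skipped only if every format on it should be skipped.
 */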
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}