stat-shadow.c

// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include <linux/zalloc.h>

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;

struct saved_value {
	struct rb_node rb_node;
	struct evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu;
	struct runtime_stat *stat;
	struct stats stats;
	u64 metric_total;
	int metric_other;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;

	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. For shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;

		return 1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}

static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}
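
/*
 * Look up the saved_value keyed by (evsel, cpu, type, ctx, st) in the
 * runtime_stat's rblist. With @create, a node with zeroed stats is
 * inserted when no match exists (only the key fields of @dm are set).
 */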
static struct saved_value *saved_value_lookup(struct evsel *evsel,
					      int cpu,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
	};

	rblist = &st->value_list;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}

void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}

void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}

void perf_stat__init_shadow_stats(void)
{
	runtime_stat__init(&rt_stat);
}
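
/*
 * Fold the event's exclude_* attribute bits into a CTX_BIT_* mask so that
 * counts taken with different privilege/host filters land in separate
 * shadow-stat buckets.
 */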
static int evsel_context(struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first_cached(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}

void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}
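
/* Fold one counter reading into the (type, ctx, cpu) shadow bucket. */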
static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int ctx, int cpu, u64 count)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
						   type, ctx, st);
	if (v)
		update_stats(&v->stats, count);
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
				    int cpu, struct runtime_stat *st)
{
	int ctx = evsel_context(counter);
	u64 count_ns = count;
	struct saved_value *v;

	count *= counter->scale;

	if (evsel__is_clock(counter))
		update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
		update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
		update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
		update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
				    ctx, cpu, count);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    ctx, cpu, count);
	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    ctx, cpu, count);
	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, ctx, cpu, count);

	if (counter->collect_stat) {
		v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
		update_stats(&v->stats, count);
		if (counter->metric_leader)
			v->metric_total += count;
	} else if (counter->metric_leader) {
		v = saved_value_lookup(counter->metric_leader,
				       cpu, true, STAT_NONE, 0, st);
		v->metric_total += count;
		v->metric_other++;
	}
}

/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
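
/* Find an event by name anywhere in the evlist that no metric has claimed yet. */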
static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
					   const char *name)
{
	struct evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
			return c2;
	}
	return NULL;
}

/* Mark MetricExpr target events and link events using them to them. */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
	struct evsel *counter, *leader, **metric_events, *oc;
	bool found;
	struct expr_parse_ctx ctx;
	struct hashmap_entry *cur;
	size_t bkt;
	int i;

	expr__ctx_init(&ctx);
	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;

		expr__ctx_clear(&ctx);
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr,
					     counter->name,
					     &ctx, 1) < 0)
				continue;

			metric_events = calloc(sizeof(struct evsel *),
					       hashmap__size(&ctx.ids) + 1);
			if (!metric_events) {
				expr__ctx_clear(&ctx);
				return;
			}
			counter->metric_events = metric_events;
		}

		i = 0;
		hashmap__for_each_entry((&ctx.ids), cur, bkt) {
			const char *metric_name = (const char *)cur->key;

			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name,
							metric_name) &&
					    !oc->collect_stat) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list,
							   metric_name);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed ||
				    strcasecmp(printed, metric_name)) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_name,
						counter->name);
					printed = strdup(metric_name);
				}
				invalid = true;
				continue;
			}
			metric_events[i++] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
	expr__ctx_clear(&ctx);
}
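
/*
 * Read back a shadow stat for (type, ctx, cpu): runtime_stat_avg() returns
 * the mean of the recorded values, runtime_stat_n() the number of samples.
 * Both return 0 when the stat has never been updated.
 */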
static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return v->stats.n;
}
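
/*
 * Colored ratio printers for the classic hard-coded shadow metrics: each one
 * relates the event's average to its base stat (cycles, branches, cache
 * accesses, ...) and colors the percentage via get_ratio_color().
 */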
static void print_stalled_cycles_frontend(struct perf_stat_config *config,
					  int cpu,
					  struct evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(struct perf_stat_config *config,
					 int cpu,
					 struct evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(struct perf_stat_config *config,
				int cpu,
				struct evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
}

static void print_l1_icache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
}

static void print_dtlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
}

static void print_itlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
}

static void print_ll_cache_misses(struct perf_stat_config *config,
				  int cpu,
				  struct evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline each for the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, as possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
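
/*
 * Worked example (illustrative numbers only, not from any particular CPU):
 * with TotalSlots = 1000, SlotsIssued = 600, SlotsRetired = 500,
 * RecoveryBubbles = 50 and FetchBubbles = 200:
 *
 *   BadSpeculation = (600 - 500 + 50) / 1000      = 0.15
 *   Retiring       = 500 / 1000                   = 0.50
 *   FrontendBound  = 200 / 1000                   = 0.20
 *   BackendBound   = 1.0 - 0.15 - 0.50 - 0.20     = 0.15
 */

/* Clamp tiny negative results caused by measurement noise to zero. */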
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}

static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}

static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);

	total_slots = td_total_slots(ctx, cpu, st);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}

static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    ctx, cpu);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}

static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    ctx, cpu);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}

static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double sum = (td_fe_bound(ctx, cpu, st) +
		      td_bad_spec(ctx, cpu, st) +
		      td_retiring(ctx, cpu, st));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}

/*
 * Kernel reports metrics multiplied with slots. To get back
 * the ratios we need to recreate the sum.
 */
static double td_metric_ratio(int ctx, int cpu,
			      enum stat_type type,
			      struct runtime_stat *stat)
{
	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
		runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
		runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
	double d = runtime_stat_avg(stat, type, ctx, cpu);

	if (sum)
		return d / sum;
	return 0;
}

/*
 * ... but only if most of the values are actually available.
 * We allow two missing.
 */
static bool full_td(int ctx, int cpu,
		    struct runtime_stat *stat)
{
	int c = 0;

	if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) > 0)
		c++;
	if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu) > 0)
		c++;
	return c >= 2;
}
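
/*
 * SMI cost: the share of APERF cycles that did not show up as core cycles,
 * i.e. time presumably spent in System Management Mode, plus the raw SMI count.
 */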
static void print_smi_cost(struct perf_stat_config *config,
			   int cpu, struct evsel *evsel,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
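
/*
 * Seed the expression parser context with one id = value pair per metric
 * event (scaled to seconds for duration_time, using the merged total when a
 * metric leader aggregated several events), plus any referenced sub-metrics.
 * Returns the number of events resolved, or a negative errno.
 */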
static int prepare_metric(struct evsel **metric_events,
			  struct metric_ref *metric_refs,
			  struct expr_parse_ctx *pctx,
			  int cpu,
			  struct runtime_stat *st)
{
	double scale;
	char *n, *pn;
	int i, j, ret;

	expr__ctx_init(pctx);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		u64 metric_total = 0;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false,
					       STAT_NONE, 0, st);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;

			if (v->metric_other)
				metric_total = v->metric_total;
		}

		n = strdup(metric_events[i]->name);
		if (!n)
			return -ENOMEM;
		/*
		 * This display code with --no-merge adds [cpu] postfixes.
		 * These are not supported by the parser. Remove everything
		 * after the space.
		 */
		pn = strchr(n, ' ');
		if (pn)
			*pn = 0;

		if (metric_total)
			expr__add_id_val(pctx, n, metric_total);
		else
			expr__add_id_val(pctx, n, avg_stats(stats)*scale);
	}

	for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
		ret = expr__add_ref(pctx, &metric_refs[j]);
		if (ret)
			return ret;
	}

	return i;
}
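
/*
 * Evaluate a metric expression for one counter/CPU via prepare_metric() and
 * print the result, applying the metric unit's scale and appending the
 * runtime parameter to the name when the expression uses "?".
 */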
static void generic_metric(struct perf_stat_config *config,
			   const char *metric_expr,
			   struct evsel **metric_events,
			   struct metric_ref *metric_refs,
			   char *name,
			   const char *metric_name,
			   const char *metric_unit,
			   int runtime,
			   int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct expr_parse_ctx pctx;
	double ratio, scale;
	int i;
	void *ctxp = out->ctx;

	i = prepare_metric(metric_events, metric_refs, &pctx, cpu, st);
	if (i < 0)
		return;

	if (!metric_events[i]) {
		if (expr__parse(&ratio, &pctx, metric_expr, runtime) == 0) {
			char *unit;
			char metric_bf[64];

			if (metric_unit && metric_name) {
				if (perf_pmu__convert_scale(metric_unit,
							    &unit, &scale) >= 0) {
					ratio *= scale;
				}
				if (strstr(metric_expr, "?"))
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s_%d", unit, metric_name, runtime);
				else
					scnprintf(metric_bf, sizeof(metric_bf),
						  "%s %s", unit, metric_name);

				print_metric(config, ctxp, NULL, "%8.1f",
					     metric_bf, ratio);
			} else {
				print_metric(config, ctxp, NULL, "%8.2f",
					     metric_name ?
					     metric_name :
					     out->force_header ? name : "",
					     ratio);
			}
		} else {
			print_metric(config, ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
		}
	} else {
		print_metric(config, ctxp, NULL, NULL,
			     out->force_header ?
			     (metric_name ? metric_name : name) : "", 0);
	}

	expr__ctx_clear(&pctx);
}

double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
{
	struct expr_parse_ctx pctx;
	double ratio = 0.0;

	if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
		goto out;

	if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
		ratio = 0.0;

out:
	expr__ctx_clear(&pctx);
	return ratio;
}
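
/*
 * Top-level shadow printer for one counter: pick the matching hard-coded
 * ratio (IPC, miss rates, transactions, TopDown, ...), then print any metric
 * expressions attached to the event through @metric_events.
 */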
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;

	if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
					 ctx, cpu);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    ctx, cpu));

		if (total && avg) {
			out->new_line(config, ctxp);
			ratio = total / avg;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "stalled cycles per insn",
				     ratio);
		}
	} else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
			print_branch_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
			print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
			print_l1_icache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
			print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
			print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
	} else if (
		evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
		evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
			print_ll_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
	} else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
	} else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "Ghz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total)
			print_metric(config, ctxp, NULL,
				     "%7.2f%%", "transactional cycles",
				     100.0 * (avg / total));
		else
			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
				     100.0 * ((total2-avg) / total));
		else
			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (evsel__is_clock(evsel)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / (ratio * evsel->scale));
		else
			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu, st);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu, st);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu, st);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu, st);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu, st) > 0)
			print_metric(config, ctxp, color, "%8.1f%%", name,
				     be_bound * 100.);
		else
			print_metric(config, ctxp, NULL, NULL, name, 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
		   full_td(ctx, cpu, st)) {
		double retiring = td_metric_ratio(ctx, cpu,
						  STAT_TOPDOWN_RETIRING, st);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
			     retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
		   full_td(ctx, cpu, st)) {
		double fe_bound = td_metric_ratio(ctx, cpu,
						  STAT_TOPDOWN_FE_BOUND, st);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
			     fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
		   full_td(ctx, cpu, st)) {
		double be_bound = td_metric_ratio(ctx, cpu,
						  STAT_TOPDOWN_BE_BOUND, st);

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
			     be_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
		   full_td(ctx, cpu, st)) {
		double bad_spec = td_metric_ratio(ctx, cpu,
						  STAT_TOPDOWN_BAD_SPEC, st);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
			     bad_spec * 100.);
	} else if (evsel->metric_expr) {
		generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
			       evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(config, cpu, evsel, out, st);
	} else {
		num = 0;
	}

	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(config, ctxp);
			generic_metric(config, mexp->metric_expr, mexp->metric_events,
				       mexp->metric_refs, evsel->name, mexp->metric_name,
				       mexp->metric_unit, mexp->runtime, cpu, out, st);
		}
	}

	if (num == 0)
		print_metric(config, ctxp, NULL, NULL, NULL, 0);
}