// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include <linux/zalloc.h>
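
/*
 * Running statistics helpers.  update_stats() implements Welford's online
 * algorithm: it keeps a running mean and M2, the sum of squared distances
 * from the current mean, so the variance can be derived later without
 * storing every sample; min and max are tracked as well.  A minimal usage
 * sketch (init_stats() comes from stat.h):
 *
 *        struct stats s;
 *
 *        init_stats(&s);
 *        update_stats(&s, 100);
 *        update_stats(&s, 110);
 *        printf("%.2f +- %.2f\n", avg_stats(&s), stddev_stats(&s));
 */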
void update_stats(struct stats *stats, u64 val)
{
        double delta;

        stats->n++;
        delta = val - stats->mean;
        stats->mean += delta / stats->n;
        stats->M2 += delta*(val - stats->mean);

        if (val > stats->max)
                stats->max = val;

        if (val < stats->min)
                stats->min = val;
}

double avg_stats(struct stats *stats)
{
        return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
        double variance, variance_mean;

        if (stats->n < 2)
                return 0.0;

        variance = stats->M2 / (stats->n - 1);
        variance_mean = variance / stats->n;

        return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
        double pct = 0.0;

        if (avg)
                pct = 100.0 * stddev/avg;

        return pct;
}

bool __perf_evsel_stat__is(struct evsel *evsel,
                           enum perf_stat_evsel_id id)
{
        struct perf_stat_evsel *ps = evsel->stats;

        return ps->id == id;
}
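
/*
 * Table mapping perf_stat_evsel_id values to the event name strings they
 * stand for.  perf_stat_evsel_id_init() below matches an evsel's name
 * against this table so that derived/shadow metric code (e.g. for the
 * transaction and topdown events) can recognize these counters by id.
 */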
#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
        ID(NONE,                x),
        ID(CYCLES_IN_TX,        cpu/cycles-t/),
        ID(TRANSACTION_START,   cpu/tx-start/),
        ID(ELISION_START,       cpu/el-start/),
        ID(CYCLES_IN_TX_CP,     cpu/cycles-ct/),
        ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
        ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
        ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
        ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
        ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
        ID(TOPDOWN_RETIRING, topdown-retiring),
        ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
        ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
        ID(TOPDOWN_BE_BOUND, topdown-be-bound),
        ID(SMI_NUM, msr/smi/),
        ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
        struct perf_stat_evsel *ps = evsel->stats;
        int i;

        /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

        for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
                if (!strcmp(evsel__name(evsel), id_str[i])) {
                        ps->id = i;
                        break;
                }
        }
}
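
/*
 * Per-evsel stat private data: ps->res_stats[] holds running statistics
 * for the three aggregated values (value, enabled time, running time)
 * across repeated runs.  Resetting re-initializes those stats and
 * re-derives the event id from the evsel name.
 */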
static void evsel__reset_stat_priv(struct evsel *evsel)
{
        int i;
        struct perf_stat_evsel *ps = evsel->stats;

        for (i = 0; i < 3; i++)
                init_stats(&ps->res_stats[i]);

        perf_stat_evsel_id_init(evsel);
}

static int evsel__alloc_stat_priv(struct evsel *evsel)
{
        evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
        if (evsel->stats == NULL)
                return -ENOMEM;
        evsel__reset_stat_priv(evsel);
        return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
        struct perf_stat_evsel *ps = evsel->stats;

        if (ps)
                zfree(&ps->group_data);
        zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
{
        struct perf_counts *counts;

        counts = perf_counts__new(ncpus, nthreads);
        if (counts)
                evsel->prev_raw_counts = counts;

        return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
        perf_counts__delete(evsel->prev_raw_counts);
        evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
        if (evsel->prev_raw_counts)
                perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
        int ncpus = evsel__nr_cpus(evsel);
        int nthreads = perf_thread_map__nr(evsel->core.threads);

        if (evsel__alloc_stat_priv(evsel) < 0 ||
            evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
            (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
                return -ENOMEM;

        return 0;
}

int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel__alloc_stats(evsel, alloc_raw))
                        goto out_free;
        }

        return 0;

out_free:
        perf_evlist__free_stats(evlist);
        return -1;
}

void perf_evlist__free_stats(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                evsel__free_stat_priv(evsel);
                evsel__free_counts(evsel);
                evsel__free_prev_raw_counts(evsel);
        }
}

void perf_evlist__reset_stats(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                evsel__reset_stat_priv(evsel);
                evsel__reset_counts(evsel);
        }
}

void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel)
                evsel__reset_prev_raw_counts(evsel);
}
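
/*
 * Copy the snapshot saved in prev_raw_counts back into counts for every
 * cpu/thread (plus the aggregated value), so the overall summary in
 * interval mode can be computed from the preserved totals.
 */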
static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
{
        int ncpus = evsel__nr_cpus(evsel);
        int nthreads = perf_thread_map__nr(evsel->core.threads);

        for (int thread = 0; thread < nthreads; thread++) {
                for (int cpu = 0; cpu < ncpus; cpu++) {
                        *perf_counts(evsel->counts, cpu, thread) =
                                *perf_counts(evsel->prev_raw_counts, cpu,
                                             thread);
                }
        }

        evsel->counts->aggr = evsel->prev_raw_counts->aggr;
}

void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel)
                perf_evsel__copy_prev_raw_counts(evsel);
}

void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
{
        struct evsel *evsel;

        /*
         * To collect the overall statistics for interval mode,
         * we copy the counts from evsel->prev_raw_counts to
         * evsel->counts. The perf_stat_process_counter creates
         * aggr values from per cpu values, but the per cpu values
         * are 0 for AGGR_GLOBAL. So we use a trick that saves the
         * previous aggr value to the first member of perf_counts,
         * then aggr calculation in process_counter_values can work
         * correctly.
         */
        evlist__for_each_entry(evlist, evsel) {
                *perf_counts(evsel->prev_raw_counts, 0, 0) =
                        evsel->prev_raw_counts->aggr;
        }
}
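
/*
 * Events marked per_pkg (the PMU's per-package events) must be counted
 * only once per package: per_pkg_mask is a bitmap indexed by socket id,
 * and check_per_pkg() sets *skip for every CPU after the first one on
 * that socket that reported a running instance of the event.
 */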
static void zero_per_pkg(struct evsel *counter)
{
        if (counter->per_pkg_mask)
                memset(counter->per_pkg_mask, 0, cpu__max_cpu());
}

static int check_per_pkg(struct evsel *counter,
                         struct perf_counts_values *vals, int cpu, bool *skip)
{
        unsigned long *mask = counter->per_pkg_mask;
        struct perf_cpu_map *cpus = evsel__cpus(counter);
        int s;

        *skip = false;

        if (!counter->per_pkg)
                return 0;

        if (perf_cpu_map__empty(cpus))
                return 0;

        if (!mask) {
                mask = zalloc(cpu__max_cpu());
                if (!mask)
                        return -ENOMEM;

                counter->per_pkg_mask = mask;
        }

        /*
         * we do not consider an event that has not run as a good
         * instance to mark a package as used (skip=1). Otherwise
         * we may run into a situation where the first CPU in a package
         * is not running anything, yet the second is, and this function
         * would mark the package as used after the first CPU and would
         * not read the values from the second CPU.
         */
        if (!(vals->run && vals->ena))
                return 0;

        s = cpu_map__get_socket(cpus, cpu, NULL);
        if (s < 0)
                return -1;

        *skip = test_and_set_bit(s, mask) == 1;
        return 0;
}
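
/*
 * Process one cpu/thread value of a counter: apply the per-package skip
 * logic, then either fold the scaled value into the shadow statistics
 * (per-cpu/thread/core/... aggregation modes) or accumulate it into the
 * global aggregate for AGGR_GLOBAL.
 */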
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
                       int cpu, int thread,
                       struct perf_counts_values *count)
{
        struct perf_counts_values *aggr = &evsel->counts->aggr;
        static struct perf_counts_values zero;
        bool skip = false;

        if (check_per_pkg(evsel, count, cpu, &skip)) {
                pr_err("failed to read per-pkg counter\n");
                return -1;
        }

        if (skip)
                count = &zero;

        switch (config->aggr_mode) {
        case AGGR_THREAD:
        case AGGR_CORE:
        case AGGR_DIE:
        case AGGR_SOCKET:
        case AGGR_NODE:
        case AGGR_NONE:
                if (!evsel->snapshot)
                        evsel__compute_deltas(evsel, cpu, thread, count);
                perf_counts_values__scale(count, config->scale, NULL);
                if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
                        perf_stat__update_shadow_stats(evsel, count->val,
                                                       cpu, &rt_stat);
                }

                if (config->aggr_mode == AGGR_THREAD) {
                        if (config->stats)
                                perf_stat__update_shadow_stats(evsel,
                                        count->val, 0, &config->stats[thread]);
                        else
                                perf_stat__update_shadow_stats(evsel,
                                        count->val, 0, &rt_stat);
                }
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
                aggr->ena += count->ena;
                aggr->run += count->run;
        case AGGR_UNSET:
        default:
                break;
        }

        return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
                                struct evsel *counter)
{
        int nthreads = perf_thread_map__nr(counter->core.threads);
        int ncpus = evsel__nr_cpus(counter);
        int cpu, thread;

        if (counter->core.system_wide)
                nthreads = 1;

        for (thread = 0; thread < nthreads; thread++) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        if (process_counter_values(config, counter, cpu, thread,
                                                   perf_counts(counter->counts, cpu, thread)))
                                return -1;
                }
        }

        return 0;
}
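
/*
 * Post-process all values read for one counter: reset the interval
 * statistics if needed, clear the per-package mask, walk the cpu/thread
 * maps, and for AGGR_GLOBAL compute the deltas, scale the aggregate and
 * feed it into res_stats and the shadow statistics.
 */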
int perf_stat_process_counter(struct perf_stat_config *config,
                              struct evsel *counter)
{
        struct perf_counts_values *aggr = &counter->counts->aggr;
        struct perf_stat_evsel *ps = counter->stats;
        u64 *count = counter->counts->aggr.values;
        int i, ret;

        aggr->val = aggr->ena = aggr->run = 0;

        /*
         * We calculate counter's data every interval,
         * and the display code shows ps->res_stats
         * avg value. We need to zero the stats for
         * interval mode, otherwise overall avg running
         * averages will be shown for each interval.
         */
        if (config->interval || config->summary) {
                for (i = 0; i < 3; i++)
                        init_stats(&ps->res_stats[i]);
        }

        if (counter->per_pkg)
                zero_per_pkg(counter);

        ret = process_counter_maps(config, counter);
        if (ret)
                return ret;

        if (config->aggr_mode != AGGR_GLOBAL)
                return 0;

        if (!counter->snapshot)
                evsel__compute_deltas(counter, -1, -1, aggr);
        perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

        for (i = 0; i < 3; i++)
                update_stats(&ps->res_stats[i], count[i]);

        if (verbose > 0) {
                fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                        evsel__name(counter), count[0], count[1], count[2]);
        }

        /*
         * Save the full runtime - to allow normalization during printout:
         */
        perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

        return 0;
}
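
/*
 * Handle a PERF_RECORD_STAT event (typically produced by 'perf stat
 * record'): resolve the evsel from the event id and store the reported
 * value for the given cpu/thread.
 */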
int perf_event__process_stat_event(struct perf_session *session,
                                   union perf_event *event)
{
        struct perf_counts_values count;
        struct perf_record_stat *st = &event->stat;
        struct evsel *counter;

        count.val = st->val;
        count.ena = st->ena;
        count.run = st->run;

        counter = perf_evlist__id2evsel(session->evlist, st->id);
        if (!counter) {
                pr_err("Failed to resolve counter for stat event.\n");
                return -EINVAL;
        }

        *perf_counts(counter->counts, st->cpu, st->thread) = count;
        counter->supported = true;
        return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
        struct perf_record_stat *st = (struct perf_record_stat *)event;
        size_t ret;

        ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
                      st->id, st->cpu, st->thread);
        ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
                       st->val, st->ena, st->run);

        return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
        struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
        size_t ret;

        ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
                      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

        return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
        struct perf_stat_config sc;
        size_t ret;

        perf_event__read_stat_config(&sc, &event->stat_config);

        ret = fprintf(fp, "\n");
        ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
        ret += fprintf(fp, "... scale %d\n", sc.scale);
        ret += fprintf(fp, "... interval %u\n", sc.interval);

        return ret;
}
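
/*
 * Open one counting event: set up the read format (enabled/running times,
 * plus group reads and IDs for non-trivial groups), honour inherit and
 * user/kernel-only filtering, start group leaders disabled (optionally
 * enabled on exec), and open the event per cpu or per thread depending
 * on the target.
 */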
int create_perf_stat_counter(struct evsel *evsel,
                             struct perf_stat_config *config,
                             struct target *target,
                             int cpu)
{
        struct perf_event_attr *attr = &evsel->core.attr;
        struct evsel *leader = evsel->leader;

        attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                            PERF_FORMAT_TOTAL_TIME_RUNNING;

        /*
         * The event is part of a non-trivial group, let's enable
         * the group read (for leader) and ID retrieval for all
         * members.
         */
        if (leader->core.nr_members > 1)
                attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

        attr->inherit = !config->no_inherit;

        /*
         * Some events get initialized with sample_(period/type) set,
         * like tracepoints. Clear it up for counting.
         */
        attr->sample_period = 0;

        if (config->identifier)
                attr->sample_type = PERF_SAMPLE_IDENTIFIER;

        if (config->all_user) {
                attr->exclude_kernel = 1;
                attr->exclude_user = 0;
        }

        if (config->all_kernel) {
                attr->exclude_kernel = 0;
                attr->exclude_user = 1;
        }

        /*
         * Disabling all counters initially, they will be enabled
         * either manually by us or by kernel via enable_on_exec
         * set later.
         */
        if (evsel__is_group_leader(evsel)) {
                attr->disabled = 1;

                /*
                 * In case of initial_delay we enable tracee
                 * events manually.
                 */
                if (target__none(target) && !config->initial_delay)
                        attr->enable_on_exec = 1;
        }

        if (target__has_cpu(target) && !target__has_per_thread(target))
                return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);

        return evsel__open_per_thread(evsel, evsel->core.threads);
}