// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "evsel_config.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <api/fs/fs.h>
#include <subcmd/parse-options.h>
#include <perf/cpumap.h>
#include "cloexec.h"
#include "util/perf_api_probe.h"
#include "record.h"
#include "../perf-sys.h"
#include "topdown.h"

/*
 * evsel__config_leader_sampling() uses special rules for leader sampling.
 * However, if the leader is an AUX area event, then assume the event to sample
 * is the next event.
 */
static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
{
	struct evsel *leader = evsel->leader;

	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->leader == leader && evsel != evsel->leader)
				return evsel;
		}
	}

	return leader;
}
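
/*
 * Return a mask with one bit set (1 << EVSEL__CONFIG_TERM_*) for each
 * config term type present in the evsel's config_terms list.
 */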
static u64 evsel__config_term_mask(struct evsel *evsel)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	u64 term_types = 0;

	list_for_each_entry(term, config_terms, list) {
		term_types |= 1 << term->type;
	}
	return term_types;
}
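
/*
 * With leader sampling (the group leader has sample_read set), only the
 * designated read sampler keeps its own period/frequency; other group
 * members have sampling and write_backward disabled unless overridden by
 * explicit config terms, and adopt the sampler's and leader's sample_type
 * so their values can be delivered with the leader sample.
 */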
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;
	struct evsel *read_sampler;
	u64 term_types, freq_mask;

	if (!leader->sample_read)
		return;

	read_sampler = evsel__read_sampler(evsel, evlist);

	if (evsel == read_sampler)
		return;

	term_types = evsel__config_term_mask(evsel);
	/*
	 * Disable sampling for all group members except those with explicit
	 * config terms or the leader. In the case of an AUX area event, the 2nd
	 * event in the group is the one that 'leads' the sampling.
	 */
	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
	if ((term_types & freq_mask) == 0) {
		attr->freq = 0;
		attr->sample_freq = 0;
		attr->sample_period = 0;
	}
	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
		attr->write_backward = 0;

	/*
	 * We don't get a sample for slave events, we make them when delivering
	 * the group leader sample. Set the slave event to follow the master
	 * sample_type to ease up reporting.
	 * An AUX area event also has sample_type requirements, so also include
	 * the sample type bits from the leader's sample_type to cover that
	 * case.
	 */
	attr->sample_type = read_sampler->core.attr.sample_type |
			    leader->core.attr.sample_type;
}
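
/*
 * Configure every evsel in the list for recording: apply record_opts and
 * callchain settings, set up leader sampling, and enable sample IDs when
 * needed so that samples can be matched back to their evsel.
 */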
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->core.cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		evsel__config(evsel, opts, callchain);
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	/* Configure leader sampling here now that the sample type is known */
	evlist__for_each_entry(evlist, evsel)
		evsel__config_leader_sampling(evsel, evlist);

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
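
/* Read the current kernel/perf_event_max_sample_rate sysctl value. */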
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
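
/*
 * Reconcile the user-specified frequency or period with the defaults,
 * then clamp the frequency to the kernel's maximum sample rate.
 */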
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}
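
	/* If the kernel's maximum rate cannot be read, skip the clamping below. */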
	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       " Please use -F freq option with a lower value or consider\n"
			       " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   " The kernel will lower it when perf's interrupts take too long.\n"
				   " Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);
			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}
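
/* For now, resolving the frequency/period is the only configuration step. */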
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
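
/*
 * Check whether the event described by @str can be opened: parse it into
 * a temporary evlist and attempt a trial sys_perf_event_open() on a
 * single CPU.
 */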
bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		perf_cpu_map__put(cpus);
	} else {
		cpu = evlist->core.cpus->map[0];
	}
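
	/*
	 * Trial open: if opening for all processes (pid == -1) fails with
	 * EACCES, retry limited to the current process (pid == 0).
	 */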
	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}
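
/*
 * parse-options callback for the sampling frequency: accepts a number,
 * or "max" to use the kernel's current perf_event_max_sample_rate.
 */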
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
	unsigned int freq;
	struct record_opts *opts = opt->value;

	if (!str)
		return -EINVAL;

	if (strcasecmp(str, "max") == 0) {
		if (get_max_rate(&freq)) {
			pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
			return -1;
		}
		pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
	} else {
		freq = atoi(str);
	}

	opts->user_freq = freq;

	return 0;
}