env.c

// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

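/*
 * Add @info_node to the env's rbtree of BPF program info, keyed by
 * program id.  A duplicate id is dropped with a debug message.
 */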
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			goto out;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
out:
	up_write(&env->bpf_progs.lock);
}

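/*
 * Look up the BPF program info node for @prog_id; returns NULL when the
 * id is not in the tree.
 */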
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;
out:
	up_read(&env->bpf_progs.lock);
	return node;
}

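/*
 * Add @btf_node to the env's rbtree of BTF data, keyed by BTF id.
 * Returns false, without inserting, if the id is already present.
 */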
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;
	bool ret = true;

	down_write(&env->bpf_progs.lock);
	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			ret = false;
			goto out;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
out:
	up_write(&env->bpf_progs.lock);
	return ret;
}

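/*
 * Look up the BTF node for @btf_id; returns NULL when the id is not in
 * the tree.
 */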
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;
out:
	up_read(&env->bpf_progs.lock);
	return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}

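/*
 * Release everything owned by @env: the BPF prog info and BTF trees,
 * cgroup data, and all dynamically allocated fields.
 */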
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);
}

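/* Initialize the BPF prog info and BTF trees and the lock protecting them. */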
void perf_env__init(struct perf_env *env)
{
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
}

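/*
 * Keep a private copy of the argv array (the pointers, not the
 * strings), since option parsing reorders the original argv.
 */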
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

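/*
 * Fill env->cpu[] with the core/socket/die id of each present CPU.
 * A no-op if the map has already been read.
 */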
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

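/* Read the local cpuid string and store a copy in env->cpuid. */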
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return -ENOMEM;
	return 0;
}

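/* Cache the local machine architecture, from uname(2), in env->arch. */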
static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

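/* Free the strings owned by a single CPU cache level description. */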
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

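/*
 * Return the normalized architecture name for @env, falling back to
 * the local uname when no env or recorded arch is available.
 */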
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };

		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

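/*
 * Map @cpu to its NUMA node, building the cpu -> node lookup table
 * lazily on first use.  Returns -1 for out-of-range or unmapped CPUs.
 */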
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}