// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
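
/*
 * Build a "dummy" map holding the single entry -1, which perf uses to mean
 * "any CPU". The allocation reserves room for one int in the flexible map[]
 * payload and starts the reference count at 1.
 */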
struct perf_cpu_map *perf_cpu_map__dummy_new(void)
{
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

	if (cpus != NULL) {
		cpus->nr = 1;
		cpus->map[0] = -1;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}
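
/*
 * Reference counting: perf_cpu_map__get() takes an extra reference and
 * perf_cpu_map__put() drops one, freeing the map once the count reaches
 * zero. cpu_map__delete() is only reached via the put path; deleting a map
 * whose refcount is still non-zero triggers the WARN_ONCE below.
 */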
static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(&map->refcnt) != 0,
			  "cpu_map refcnt unbalanced\n");
		free(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	if (map)
		refcount_inc(&map->refcnt);
	return map;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		cpu_map__delete(map);
}
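
/*
 * Default map: one entry per online processor, numbered 0..nr_cpus-1 as
 * reported by sysconf(_SC_NPROCESSORS_ONLN). Used as a fallback when no
 * explicit CPU list is available.
 */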
static struct perf_cpu_map *cpu_map__default_new(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			cpus->map[i] = i;

		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}
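
/*
 * cpu_map__trim_new() turns a scratch array of CPU numbers into a finished
 * map: the entries are copied, sorted with cmp_int() and de-duplicated in
 * place, so the parsers below may hand in unsorted input with repeats. The
 * allocation is sized for nr_cpus entries even if duplicates shrink the
 * final count.
 */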
static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(int);
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
	int i, j;

	if (cpus != NULL) {
		memcpy(cpus->map, tmp_cpus, payload_size);
		qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 || cpus->map[i] != cpus->map[i - 1])
				cpus->map[j++] = cpus->map[i];
		}
		cpus->nr = j;
		assert(j <= nr_cpus);
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}
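
/*
 * Parse a CPU list from a stdio stream in the kernel's cpulist format,
 * e.g. "0-3,7" as found in /sys/devices/system/cpu/online. Ranges such as
 * "2-5" are expanded into individual entries. If nothing could be parsed,
 * the default map of all online CPUs is returned instead.
 */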
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
				  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			while (++prev < cpu)
				tmp_cpus[nr_cpus++] = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(int));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++] = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else
		cpus = cpu_map__default_new();
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}
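
/*
 * Read the set of online CPUs from sysfs. If the "online" file cannot be
 * opened, fall back to the default 0..N-1 map.
 */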
static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (!onlnf)
		return cpu_map__default_new();

	cpus = perf_cpu_map__read(onlnf);
	fclose(onlnf);
	return cpus;
}
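
/*
 * Build a map from a user-supplied CPU list such as "0,2-4". A NULL
 * cpu_list means "all online CPUs"; an empty string yields the dummy map
 * (see the comment in the body about NUMA nodes without CPUs). Duplicate
 * CPUs and malformed input are rejected and NULL is returned.
 */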
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	int *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return cpu_map__read_all_cpu_map();

	/*
	 * must handle the case of empty cpumap to cover
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug)
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i] == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(int));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++] = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0')
		cpus = cpu_map__default_new();
	else
		cpus = perf_cpu_map__dummy_new();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}
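
/*
 * Small accessors: perf_cpu_map__cpu() returns the CPU at a given index
 * (or -1 when out of range), perf_cpu_map__nr() the number of entries
 * (1 for a NULL map, so callers can still iterate once), and
 * perf_cpu_map__empty() whether the map is the dummy "any CPU" map
 * (first entry -1; a NULL map counts as empty). perf_cpu_map__idx() and
 * perf_cpu_map__max() look up the index of a CPU and the highest CPU
 * number, respectively.
 */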
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	if (cpus && idx < cpus->nr)
		return cpus->map[idx];

	return -1;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? cpus->nr : 1;
}

bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
	return map ? map->map[0] == -1 : true;
}

int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
{
	int i;

	for (i = 0; i < cpus->nr; ++i) {
		if (cpus->map[i] == cpu)
			return i;
	}

	return -1;
}

int perf_cpu_map__max(struct perf_cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}

/*
 * Merge two cpumaps
 *
 * orig either gets freed and replaced with a new map, or reused
 * with no reference count change (similar to "realloc").
 * other has its reference count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	int *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (!orig && !other)
		return NULL;
	if (!orig) {
		perf_cpu_map__get(other);
		return other;
	}
	if (!other)
		return orig;
	if (orig->nr == other->nr &&
	    !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
		return orig;

	tmp_len = orig->nr + other->nr;
	tmp_cpus = malloc(tmp_len * sizeof(int));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge algorithm from wikipedia */
	i = j = k = 0;
	while (i < orig->nr && j < other->nr) {
		if (orig->map[i] <= other->map[j]) {
			if (orig->map[i] == other->map[j])
				j++;
			tmp_cpus[k++] = orig->map[i++];
		} else
			tmp_cpus[k++] = other->map[j++];
	}

	while (i < orig->nr)
		tmp_cpus[k++] = orig->map[i++];

	while (j < other->nr)
		tmp_cpus[k++] = other->map[j++];
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}
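
/*
 * Illustrative sketch (not part of this file): a typical caller builds a
 * map, iterates it, and drops the reference when done. This assumes only
 * the public perf_cpu_map API declared in <perf/cpumap.h> and defined above.
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new("0,2-4");
 *	int idx;
 *
 *	if (cpus) {
 *		for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
 *			printf("cpu %d\n", perf_cpu_map__cpu(cpus, idx));
 *		perf_cpu_map__put(cpus);
 *	}
 */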