arch_topology.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <trace/hooks/topology.h>
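
/*
 * Scale-invariant load tracking is possible when either cpufreq reports
 * frequency invariance or the architecture provides per-CPU frequency
 * counters; arch_freq_counters_available() below is a weak default that
 * architectures with such counters are expected to override.
 */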
bool topology_scale_freq_invariant(void)
{
        return cpufreq_supports_freq_invariance() ||
               arch_freq_counters_available(cpu_online_mask);
}

__weak bool arch_freq_counters_available(const struct cpumask *cpus)
{
        return false;
}

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(freq_scale);
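
/*
 * Update the frequency scale factor of @cpus from a cpufreq frequency change:
 *
 *     freq_scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq
 *
 * e.g. a CPU running at 1.5 GHz with a 3.0 GHz maximum gets a scale of 512
 * (illustrative values). The vendor hook may adjust the computed scale before
 * it is written to the per-CPU variable.
 */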
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
                             unsigned long max_freq)
{
        unsigned long scale;
        int i;

        if (WARN_ON_ONCE(!cur_freq || !max_freq))
                return;

        /*
         * If the use of counters for FIE is enabled, just return as we don't
         * want to update the scale factor with information from CPUFREQ.
         * Instead the scale factor will be updated from arch_scale_freq_tick.
         */
        if (arch_freq_counters_available(cpus))
                return;

        scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

        trace_android_vh_arch_set_freq_scale(cpus, cur_freq, max_freq, &scale);

        for_each_cpu(i, cpus)
                per_cpu(freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
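
/*
 * Record the capacity of a single CPU; callers normally pass a value
 * normalized to SCHED_CAPACITY_SCALE (see topology_normalize_cpu_scale()).
 */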
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);
EXPORT_PER_CPU_SYMBOL_GPL(thermal_pressure);
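
/*
 * Publish the current thermal pressure for @cpus. WRITE_ONCE() is used so
 * that readers of the per-CPU value see a consistent update.
 */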
void topology_set_thermal_pressure(const struct cpumask *cpus,
                                   unsigned long th_pressure)
{
        int cpu;

        for_each_cpu(cpu, cpus)
                WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);

static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);
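
/*
 * Expose the read-only cpu_capacity attribute on every possible CPU device,
 * typically visible as /sys/devices/system/cpu/cpuN/cpu_capacity.
 */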
static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;
bool topology_update_done;
EXPORT_SYMBOL_GPL(topology_update_done);

int topology_update_cpu_topology(void)
{
        return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
        update_topology = 1;
        rebuild_sched_domains();
        topology_update_done = true;
        trace_android_vh_update_topology_flags_workfn(NULL);
        pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
        update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
        kfree(raw_capacity);
        raw_capacity = NULL;

        return 0;
}
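
/*
 * Normalize the raw capacities so that the biggest CPU ends up at
 * SCHED_CAPACITY_SCALE (1024):
 *
 *     capacity = (raw_capacity * freq_factor << SCHED_CAPACITY_SHIFT)
 *                / max(raw_capacity * freq_factor)
 *
 * Illustrative example: a big CPU with capacity-dmips-mhz = 1024 and a
 * 2000 MHz freq_factor and a little CPU with 512 at 1000 MHz normalize to
 * 1024 and 256 respectively.
 */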
void topology_normalize_cpu_scale(void)
{
        u64 capacity;
        u64 capacity_scale;
        int cpu;

        if (!raw_capacity)
                return;

        capacity_scale = 1;
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity_scale = max(capacity, capacity_scale);
        }

        pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
                                     capacity_scale);
                topology_set_cpu_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                         cpu, topology_get_cpu_scale(cpu));
        }
}
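
/*
 * Parse the optional capacity-dmips-mhz property of a DT cpu node. Returns
 * true if a raw capacity was recorded for @cpu, false otherwise. A missing
 * property on a system where other CPUs did provide one is treated as a
 * parse failure and all raw capacities are discarded.
 */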
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        struct clk *cpu_clk;
        static bool cap_parsing_failed;
        int ret;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return false;

        ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                cap_parsing_failed = true;
                                return false;
                        }
                }
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
                         cpu_node, raw_capacity[cpu]);

                /*
                 * Update freq_factor for calculating early boot CPU
                 * capacities. For CPUs whose DVFS mechanism is not clk-based
                 * there is no way to get the frequency value at this point,
                 * so assume they all run at the same frequency (by keeping
                 * the initial freq_factor value).
                 */
                cpu_clk = of_clk_get(cpu_node, 0);
                if (!PTR_ERR_OR_ZERO(cpu_clk)) {
                        per_cpu(freq_factor, cpu) =
                                clk_get_rate(cpu_clk) / 1000;
                        clk_put(cpu_clk);
                }
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %pOF raw capacity\n",
                               cpu_node);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                free_raw_capacity();
        }

        return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
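
/*
 * cpufreq policy notifier: as each policy is created, record its maximum
 * frequency in freq_factor for the covered CPUs and tick them off
 * cpus_to_visit. Once every possible CPU has been seen, normalize the
 * capacities, queue the sched_domain rebuild and unregister the notifier.
 */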
static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (!raw_capacity)
                return 0;

        if (val != CPUFREQ_CREATE_POLICY)
                return 0;

        pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                 cpumask_pr_args(policy->related_cpus),
                 cpumask_pr_args(cpus_to_visit));

        cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
                schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);
        }

        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        int ret;

        /*
         * On ACPI-based systems skip registering the cpufreq notifier: we use
         * the default CPU capacity until we have the necessary code to parse
         * the CPU capacity from ACPI.
         */
        if (!acpi_disabled || !raw_capacity)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);

        if (ret)
                free_cpumask_var(cpus_to_visit);

        return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
        free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 *     there is no possible logical CPU in the kernel to match. This happens
 *     when CONFIG_NR_CPUS is configured to be smaller than the number of
 *     CPU nodes in the DT. We just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        cpu = of_cpu_node_to_id(cpu_node);
        if (cpu >= 0)
                topology_parse_cpu_capacity(cpu_node, cpu);
        else
                pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
                        cpu_node, cpumask_pr_args(cpu_possible_mask));

        of_node_put(cpu_node);
        return cpu;
}
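
/*
 * Parse a cpu-map "coreN" node. Threads under the core get their
 * package/core/thread ids filled in; a leaf core (one without thread
 * subnodes) must itself map to a CPU, and a core that has both thread
 * subnodes and its own cpu phandle is rejected.
 */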
static int __init parse_core(struct device_node *core, int package_id,
                             int core_id)
{
        char name[20];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].package_id = package_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else if (cpu != -ENODEV) {
                                pr_err("%pOF: Can't get CPU for thread\n", t);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%pOF: Core has both threads and CPU\n",
                               core);
                        return -EINVAL;
                }

                cpu_topology[cpu].package_id = package_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf && cpu != -ENODEV) {
                pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }

        return 0;
}
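
/*
 * Recursively parse a cpu-map "clusterN" node. Nested clusters are
 * flattened: cores are only accepted inside leaf clusters, and only leaf
 * clusters increment package_id.
 */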
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[20];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int package_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%pOF: cpu-map children should be clusters\n",
                                       c);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, package_id, core_id++);
                        } else {
                                pr_err("%pOF: Non-leaf cluster with core %s\n",
                                       cluster, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%pOF: empty cluster\n", cluster);

        if (leaf)
                package_id++;

        return 0;
}
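
/*
 * Walk the /cpus/cpu-map device tree node and fill in cpu_topology[]. A
 * sketch of the shape this parser expects (illustrative, not taken from a
 * real platform):
 *
 *     cpus {
 *             cpu-map {
 *                     cluster0 {
 *                             core0 { cpu = <&cpu0>; };
 *                             core1 { cpu = <&cpu1>; };
 *                     };
 *                     cluster1 {
 *                             core0 { cpu = <&cpu2>; };
 *                     };
 *             };
 *     };
 *
 * Returns -EINVAL if any possible CPU is left without a package_id after
 * parsing, so the caller can discard partial topologies.
 */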
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        topology_normalize_cpu_scale();

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].package_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
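
/*
 * Return the mask used for the core-group (typically the MC sched_domain)
 * level: the smallest of the NUMA node mask, the package siblings and, if
 * known, the last-level-cache siblings.
 */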
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

        /* Find the smaller of NUMA, core or LLC siblings */
        if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
                /* not numa in package, let's use the package siblings */
                core_mask = &cpu_topology[cpu].core_sibling;
        }
        if (cpu_topology[cpu].llc_id != -1) {
                if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                        core_mask = &cpu_topology[cpu].llc_sibling;
        }

        return core_mask;
}
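
/*
 * Add @cpuid to the sibling masks of every online CPU that shares its LLC,
 * package or core, and vice versa; typically called when a CPU is brought
 * online.
 */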
void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_online_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

static void clear_cpu_topology(int cpu)
{
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        cpumask_clear(&cpu_topo->llc_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);

        cpumask_clear(&cpu_topo->thread_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->package_id = -1;
                cpu_topo->llc_id = -1;

                clear_cpu_topology(cpu);
        }
}
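
/*
 * Remove @cpu from all sibling masks and reset its own masks to just itself,
 * e.g. when the CPU is taken offline.
 */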
void remove_cpu_topology(unsigned int cpu)
{
        int sibling;

        for_each_cpu(sibling, topology_core_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, topology_llc_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

        clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
        return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
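/*
 * Boot-time entry point used on arm64 and RISC-V to populate cpu_topology[]
 * from ACPI or, failing that, from the device tree.
 */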
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (parse_acpi_topology())
                reset_cpu_topology();
        else if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}
#endif