cppc_cpufreq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>
/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14
/*
 * These structs contain information parsed from per CPU
 * ACPI _CPC structures.
 * e.g. for each CPU: the highest and lowest supported performance
 * capabilities, the desired performance level requested, etc.
 */
static struct cppc_cpudata **all_cpu_data;
static bool boost_supported;
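
/*
 * OEM information from the PCCT header, used to detect platforms that need
 * the HiSilicon rate-read workaround applied in cppc_check_hisi_workaround()
 * below.
 */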
struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI ",
		.oem_table_id	= "HIP07 ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI ",
		.oem_table_id	= "HIP08 ",
		.oem_revision	= 0,
	}
};
/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}
/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we can treat
 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
 * and extrapolate the rest.
 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion.
 */
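/*
 * Worked example with made-up capability values (not from any real platform):
 * lowest_perf = 100 at lowest_freq = 500000 kHz and nominal_perf = 300 at
 * nominal_freq = 1500000 kHz.  A request below nominal is scaled by the
 * lowest..nominal slope, e.g. perf = 200:
 *   200 * (1500000 - 500000) / (300 - 100) = 1000000 kHz.
 * A request at or above nominal uses the nominal ratio, e.g. perf = 400:
 *   400 * 1500000 / 300 = 2000000 kHz.
 * Without lowest_freq/nominal_freq, the DMI-derived maximum frequency and
 * highest_perf provide the ratio instead.
 */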
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
					     unsigned int perf)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}
	return (u64)perf * mul / div;
}
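
/*
 * Reverse mapping: frequencies below nominal_freq are scaled by the
 * lowest_perf:lowest_freq ratio, frequencies at or above nominal_freq by the
 * nominal_perf:nominal_freq ratio; highest_perf over the DMI-derived maximum
 * is the fallback when the freq registers are not exposed.
 */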
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
					     unsigned int freq)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];

	desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
	/* Return if it is exactly the same perf */
	if (desired_perf == cpu->perf_ctrls.desired_perf)
		return ret;

	cpu->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu->cpu, ret);

	return ret;
}
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
	int ret;

	cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.lowest_perf, cpu_num, ret);
}
/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards freq transition requests), so ideally we need to use the PCC values
 * as a fallback if we don't have a platform specific transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();
	unsigned int delay_us = 0;

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			delay_us = 10000;
			break;
		default:
			delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
			break;
		}
		break;
	default:
		delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
		break;
	}

	return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#endif
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu;
	unsigned int cpu_num = policy->cpu;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];
	cpu->cpu = cpu_num;
	ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);
	if (ret) {
		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
			 cpu_num, ret);
		return ret;
	}

	/* Convert the lowest and nominal freq from MHz to KHz */
	cpu->perf_caps.lowest_freq *= 1000;
	cpu->perf_caps.nominal_freq *= 1000;

	/*
	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
	 * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);

	/*
	 * Set cpuinfo.min_freq to Lowest to make the full range of performance
	 * available if userspace wants to use any perf between lowest & lowest
	 * nonlinear perf
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
	policy->shared_type = cpu->shared_type;

	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		int i;

		cpumask_copy(policy->cpus, cpu->shared_cpu_map);

		for_each_cpu(i, policy->cpus) {
			if (unlikely(i == policy->cpu))
				continue;

			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
			       sizeof(cpu->perf_caps));
		}
	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
		/* Support only SW_ANY for now. */
		pr_debug("Unsupported CPU co-ord type\n");
		return -EFAULT;
	}

	cpu->cur_policy = policy;

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
	 */
	if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf)
		boost_supported = true;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
					       cpu->perf_caps.highest_perf);
	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.highest_perf, cpu_num, ret);

	return ret;
}
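
/*
 * The feedback counters may be 32 bits wide: when the older stamp fits in
 * 32 bits and the counter appears to have wrapped between the two reads
 * (t1 <= t0), the delta is computed in u32 arithmetic so the unsigned
 * wraparound still yields the correct elapsed count.
 */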
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf, delivered_perf;

	reference_perf = fb_ctrs_t0.reference_perf;

	delta_reference = get_delta(fb_ctrs_t1.reference,
				    fb_ctrs_t0.reference);
	delta_delivered = get_delta(fb_ctrs_t1.delivered,
				    fb_ctrs_t0.delivered);

	/* Check to avoid divide-by-zero */
	if (delta_reference || delta_delivered)
		delivered_perf = (reference_perf * delta_delivered) /
					delta_reference;
	else
		delivered_perf = cpu->perf_ctrls.desired_perf;

	return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}
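
/*
 * The ->get() callback samples the CPPC feedback counters twice, 2 usec
 * apart, and derives the average delivered frequency over that window:
 *   delivered_perf = reference_perf * delta_delivered / delta_reference,
 * which is then converted back to kHz.
 */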
static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cppc_cpudata *cpu = all_cpu_data[cpunum];
	int ret;

	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
	if (ret)
		return ret;

	udelay(2); /* 2usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
	if (ret)
		return ret;

	return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}
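
/*
 * Boost is exposed by widening policy->max up to the highest_perf frequency
 * (or narrowing it back to nominal_perf when disabled) and propagating the
 * new limit through the policy's max-frequency QoS request; the governor then
 * requests frequencies within that limit via the normal ->target() path.
 */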
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
	struct cppc_cpudata *cpudata;
	int ret;

	if (!boost_supported) {
		pr_err("BOOST not supported by CPU or firmware\n");
		return -EINVAL;
	}

	cpudata = all_cpu_data[policy->cpu];
	if (state)
		policy->max = cppc_cpufreq_perf_to_khz(cpudata,
					cpudata->perf_caps.highest_perf);
	else
		policy->max = cppc_cpufreq_perf_to_khz(cpudata,
					cpudata->perf_caps.nominal_perf);
	policy->cpuinfo.max_freq = policy->max;

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.set_boost = cppc_cpufreq_set_boost,
	.name = "cppc_cpufreq",
};
/*
 * The HISI platform does not support the delivered performance counter or the
 * reference performance counter. It can calculate the performance using a
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
{
	struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
	u64 desired_perf;
	int ret;

	ret = cppc_get_desired_perf(cpunum, &desired_perf);
	if (ret < 0)
		return -EIO;

	return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
}
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			/* Overwrite the get() callback */
			cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
			break;
		}
	}

	acpi_put_table(tbl);
}
static int __init cppc_cpufreq_init(void)
{
	int i, ret = 0;
	struct cppc_cpudata *cpu;

	if (acpi_disabled)
		return -ENODEV;

	all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
			       GFP_KERNEL);
	if (!all_cpu_data)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
		if (!all_cpu_data[i])
			goto out;

		cpu = all_cpu_data[i];
		if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
			goto out;
	}

	ret = acpi_get_psd_map(all_cpu_data);
	if (ret) {
		pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
		goto out;
	}

	cppc_check_hisi_workaround();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		goto out;

	return ret;

out:
	for_each_possible_cpu(i) {
		cpu = all_cpu_data[i];
		if (!cpu)
			break;

		free_cpumask_var(cpu->shared_cpu_map);
		kfree(cpu);
	}

	kfree(all_cpu_data);
	return -ENODEV;
}
static void __exit cppc_cpufreq_exit(void)
{
	struct cppc_cpudata *cpu;
	int i;

	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	for_each_possible_cpu(i) {
		cpu = all_cpu_data[i];
		free_cpumask_var(cpu->shared_cpu_map);
		kfree(cpu);
	}

	kfree(all_cpu_data);
}
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);