acpi-cpufreq.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
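
/*
 * Boost ("turbo") control is a per-CPU MSR bit with inverted sense: Intel
 * keeps a "turbo disable" bit in MSR_IA32_MISC_ENABLE, while AMD and Hygon
 * keep a "core performance boost disable" bit in MSR_K7_HWCR. Boost is
 * therefore enabled by *clearing* the respective bit; the helpers below
 * read or toggle that bit on the CPUs of a policy.
 */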
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

static int set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, boost_set_msr_each,
			 (void *)(long)val, 1);
	pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
		 cpumask_pr_args(policy->cpus), val ? "en" : "dis");

	return 0;
}
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);
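
/*
 * The optional "cpb" sysfs attribute below is the legacy AMD Core
 * Performance Boost switch: it is instantiated per policy but toggles
 * boost system-wide. Writes go through set_boost() under CPU hotplug
 * protection; reads report the driver-wide boost_enabled flag.
 */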
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	get_online_cpus();
	set_boost(policy, val);
	put_online_cpus();

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
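
/*
 * The extract_*() helpers translate a raw status value read from the
 * platform (an I/O port value or a PERF_CTL/PERF_STATUS MSR) back into a
 * frequency from the policy's table. Intel encodes the P-state in the low
 * 16 bits of the MSR; AMD and Hygon hardware P-states use only the low 3
 * bits, hence the different masks above.
 */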
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}

	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy __always_unused;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}
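
/*
 * P-state registers are per-CPU, so reads and writes must execute on the
 * CPUs they target. struct drv_cmd bundles the register, the value and the
 * accessor so it can be shipped to other CPUs through the smp_call_*()
 * cross-call interfaces: drv_read() runs on any one CPU of the domain,
 * drv_write() on every CPU in the mask.
 */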
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
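
/*
 * acpi_cpufreq_target() is the regular (non-fast-switch) transition path:
 * it maps the frequency-table index to a P-state, skips the write if the
 * CPU is already there (unless a resume forced a refresh), and writes the
 * control value either to this CPU only (SW_ANY coordination) or to every
 * CPU in the policy. With acpi_pstate_strict set, it additionally polls
 * the status register to confirm the transition took effect.
 */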
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data))
		return -ENODEV;

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				 next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				 next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}
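
/*
 * The fast-switch path is invoked by the schedutil governor directly from
 * scheduler context on the CPU being updated, so it must not sleep or
 * cross-call: it resolves the target index (reusing the cached lookup when
 * possible) and writes the control register locally.
 */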
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}
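
/*
 * With no way to read the current speed over I/O ports, guess it from the
 * TSC-derived cpu_khz: the _PSS states are sorted by descending frequency,
 * and "(2 * cpu_khz) > (freqn + freq)" is simply "cpu_khz lies above the
 * midpoint of two adjacent states", i.e. the loop picks the first state
 * whose frequency cpu_khz is closest to.
 */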
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
static int cpufreq_boost_online(unsigned int cpu)
{
	/*
	 * On the CPU_UP path we simply keep the boost-disable flag
	 * in sync with the current global state.
	 */
	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in
 * hardware or do it in firmware, and won't inform the OS about it. If not
 * detected, this has a side effect of making the CPU run at a different
 * speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/*
	 * Intel Xeon Processor 7100 Series Specification Update
	 * https://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up.
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif
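
/*
 * When CPPC is available, the ratio of the CPU's highest to its nominal
 * performance level (scaled by SCHED_CAPACITY_SHIFT, i.e. multiplied by
 * 1024) is used later to derive cpuinfo.max_freq from the nominal
 * frequency, so that boost frequencies above the _PSS table show up in
 * the policy limits.
 */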
#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	highest_perf = perf_caps.highest_perf;
	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
		return 0;
	}

	if (highest_perf < nominal_perf) {
		pr_debug("CPU%d: nominal performance above highest\n", cpu);
		return 0;
	}

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif
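
/*
 * Per-policy initialization: register the CPU with the ACPI performance
 * library, work out the coordination domain from _PSD (with DMI and AMD
 * hardware-pstate overrides), pick the register access method from _PCT,
 * build the frequency table from _PSS, and apply the CPPC boost ratio to
 * cpuinfo.max_freq where available.
 */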
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *freq_table;
	struct acpi_processor_performance *perf;
	struct acpi_cpufreq_data *data;
	unsigned int cpu = policy->cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && boot_cpu_data.x86 < 0x19 &&
	    !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;

	max_boost_ratio = get_max_boost_ratio(cpu);
	if (max_boost_ratio) {
		unsigned int freq = freq_table[0].frequency;

		/*
		 * Because the loop above sorts the freq_table entries in the
		 * descending order, freq is the maximum frequency in the table.
		 * Assume that it corresponds to the CPPC nominal frequency and
		 * use it to set cpuinfo.max_freq.
		 */
		policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
	} else {
		/*
		 * If the maximum "boost" frequency is unknown, ask the arch
		 * scale-invariance code to use the "nominal" performance for
		 * CPU utilization scaling so as to prevent the schedutil
		 * governor from selecting inadequate CPU frequencies.
		 */
		arch_set_max_freq_ratio(true);
	}

	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			 (i == perf->state ? '*' : ' '), i,
			 (u32) perf->states[i].core_frequency,
			 (u32) perf->states[i].power,
			 (u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
							      policy->cpu);
	unsigned int freq = policy->freq_table[0].frequency;

	if (perf->states[0].core_frequency * 1000 != freq)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.ready		= acpi_cpufreq_cpu_ready,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

static enum cpuhp_state acpi_cpufreq_online;
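
/*
 * Boost support is only wired up when the CPU advertises it (CPB on
 * AMD/Hygon, IDA on Intel). A dynamic hotplug state keeps the per-CPU
 * boost-disable bits consistent as CPUs come and go; registering it also
 * runs the online callback once on every CPU that is already up, forcing
 * all MSRs to the current global boost setting.
 */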
static void __init acpi_cpufreq_boost_init(void)
{
	int ret;

	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	/*
	 * This calls the online callback on all online cpus and forces all
	 * MSRs to the same value.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0) {
		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
		return;
	}
	acpi_cpufreq_online = ret;
}

static void acpi_cpufreq_boost_exit(void)
{
	if (acpi_cpufreq_online > 0)
		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/*
	 * This is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("%s\n", __func__);

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id __maybe_unused acpi_cpufreq_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
	X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id __maybe_unused processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");