tegra194-cpufreq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <asm/smp_plat.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

#define KHZ                     1000
#define REF_CLK_MHZ             408 /* 408 MHz */
#define US_DELAY                500
#define US_DELAY_MIN            2
#define CPUFREQ_TBL_STEP_HZ     (50 * KHZ * KHZ)
#define MAX_CNT                 ~0U

/* cpufreq transition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
enum cluster {
	CLUSTER0,
	CLUSTER1,
	CLUSTER2,
	CLUSTER3,
	MAX_CLUSTERS,
};

struct tegra194_cpufreq_data {
	void __iomem *regs;
	size_t num_clusters;
	struct cpufreq_frequency_table **tables;
};

struct tegra_cpu_ctr {
	u32 cpu;
	u32 delay;
	u32 coreclk_cnt, last_coreclk_cnt;
	u32 refclk_cnt, last_refclk_cnt;
};

struct read_counters_work {
	struct work_struct work;
	struct tegra_cpu_ctr c;
};

static struct workqueue_struct *read_counters_wq;
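
/*
 * Read the cluster id of the calling CPU: affinity level 1 of its MPIDR.
 * Each cluster holds two cores, so cluster N covers logical CPUs 2N and
 * 2N + 1 (see the policy setup in tegra194_cpufreq_init()).
 */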
static void get_cpu_cluster(void *cluster)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	*((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
/*
 * Read per-core Read-only system register NVFREQ_FEEDBACK_EL1.
 * The register provides frequency feedback information to
 * determine the average actual frequency a core has run at over
 * a period of time.
 *	[31:0] PLLP counter: Counts at fixed frequency (408 MHz)
 *	[63:32] Core clock counter: counts on every core clock cycle
 *		where the core is architecturally clocking
 */
static u64 read_freq_feedback(void)
{
	u64 val = 0;

	asm volatile("mrs %0, s3_0_c15_c0_5" : "=r" (val) : );

	return val;
}
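
/*
 * ndiv to kHz mapping used below: freq_khz = ref_clk_hz / 1000 * ndiv /
 * (pdiv * mdiv). With hypothetical divider values ref_clk_hz = 408000000,
 * pdiv = 8, mdiv = 1 and ndiv = 40, this gives 408000 * 40 / 8 =
 * 2040000 kHz (2.04 GHz).
 */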
static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response
				   *nltbl, u16 ndiv)
{
	return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv);
}
static void tegra_read_counters(struct work_struct *work)
{
	struct read_counters_work *read_counters_work;
	struct tegra_cpu_ctr *c;
	u64 val;

	/*
	 * ref_clk_counter(32 bit counter) runs on constant clk,
	 * pll_p(408MHz).
	 * It will take = 2 ^ 32 / 408 MHz to overflow ref clk counter
	 *              = 10526880 usec = 10.527 sec to overflow
	 *
	 * Likewise core_clk_counter(32 bit counter) runs on core clock.
	 * It's synchronized to crab_clk (cpu_crab_clk) which runs at
	 * freq of cluster. Assuming max cluster clock ~2000MHz,
	 * It will take = 2 ^ 32 / 2000 MHz to overflow core clk counter
	 *              = ~2.147 sec to overflow
	 */
	read_counters_work = container_of(work, struct read_counters_work,
					  work);
	c = &read_counters_work->c;

	val = read_freq_feedback();
	c->last_refclk_cnt = lower_32_bits(val);
	c->last_coreclk_cnt = upper_32_bits(val);

	udelay(c->delay);

	val = read_freq_feedback();
	c->refclk_cnt = lower_32_bits(val);
	c->coreclk_cnt = upper_32_bits(val);
}
/*
 * Return instantaneous CPU speed.
 * The instantaneous frequency is reconstructed by taking a sample on
 * every query:
 *	- Read core and ref clock counters;
 *	- Delay for X us;
 *	- Read both cycle counters again;
 *	- Compute the frequency from the counter deltas over the delay
 *	  time, or equivalently over the ref_clk_counter delta;
 *	- Return Kcycles/second, i.e. the frequency in KHz.
 *
 *	delta time period = x sec
 *			  = delta ref_clk_counter / (408 * 10^6) sec
 *	freq in Hz = cycles/sec
 *		   = delta cycles / x sec
 *		   = (delta cycles * 408 * 10^6) / delta ref_clk_counter
 *	in KHz	   = (delta cycles * 408 * 10^3) / delta ref_clk_counter
 *
 * @cpu - logical cpu whose freq is to be updated
 * Returns freq in KHz on success, 0 if cpu is offline
 */
static unsigned int tegra194_get_speed_common(u32 cpu, u32 delay)
{
	struct read_counters_work read_counters_work;
	struct tegra_cpu_ctr c;
	u32 delta_refcnt;
	u32 delta_ccnt;
	u32 rate_mhz;

	/*
	 * udelay() is required to reconstruct cpu frequency over an
	 * observation window. Using workqueue to call udelay() with
	 * interrupts enabled.
	 */
	read_counters_work.c.cpu = cpu;
	read_counters_work.c.delay = delay;
	INIT_WORK_ONSTACK(&read_counters_work.work, tegra_read_counters);
	queue_work_on(cpu, read_counters_wq, &read_counters_work.work);
	flush_work(&read_counters_work.work);
	c = read_counters_work.c;

	if (c.coreclk_cnt < c.last_coreclk_cnt)
		delta_ccnt = c.coreclk_cnt + (MAX_CNT - c.last_coreclk_cnt);
	else
		delta_ccnt = c.coreclk_cnt - c.last_coreclk_cnt;
	if (!delta_ccnt)
		return 0;

	/* ref clock is 32 bits */
	if (c.refclk_cnt < c.last_refclk_cnt)
		delta_refcnt = c.refclk_cnt + (MAX_CNT - c.last_refclk_cnt);
	else
		delta_refcnt = c.refclk_cnt - c.last_refclk_cnt;
	if (!delta_refcnt) {
		pr_debug("cpufreq: %d is idle, delta_refcnt: 0\n", cpu);
		return 0;
	}
	/* widen one operand so the multiply is done in 64 bits, not u32 */
	rate_mhz = ((u64)delta_ccnt * REF_CLK_MHZ) / delta_refcnt;

	return (rate_mhz * KHZ); /* in KHz */
}
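
/*
 * Worked example for the conversion above, with hypothetical counts:
 * over a 500 us window the 408 MHz ref clock advances by about
 * 408 * 500 = 204000 counts; if the core clock advanced by 1000000
 * counts in the same window, the core ran at 1000000 * 408 / 204000
 * = 2000 MHz, returned as 2000000 kHz.
 */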
static unsigned int tegra194_get_speed(u32 cpu)
{
	return tegra194_get_speed_common(cpu, US_DELAY);
}
static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
{
	struct tegra194_cpufreq_data *data = cpufreq_get_driver_data();
	u32 cpu;
	u32 cl;

	smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);

	if (cl >= data->num_clusters)
		return -EINVAL;

	/* boot freq */
	policy->cur = tegra194_get_speed_common(policy->cpu, US_DELAY_MIN);

	/* set same policy for all cpus in a cluster */
	for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++)
		cpumask_set_cpu(cpu, policy->cpus);

	policy->freq_table = data->tables[cl];
	policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY;

	return 0;
}
static void set_cpu_ndiv(void *data)
{
	struct cpufreq_frequency_table *tbl = data;
	u64 ndiv_val = (u64)tbl->driver_data;

	asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val));
}

static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
				       unsigned int index)
{
	struct cpufreq_frequency_table *tbl = policy->freq_table + index;

	/*
	 * Each core writes the requested frequency to its per-core
	 * register; both cores in a cluster then run at the same
	 * frequency, which is the maximum of the two requests.
	 */
	on_each_cpu_mask(policy->cpus, set_cpu_ndiv, tbl, true);

	return 0;
}
static struct cpufreq_driver tegra194_cpufreq_driver = {
	.name = "tegra194",
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
		 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = tegra194_cpufreq_set_target,
	.get = tegra194_get_speed,
	.init = tegra194_cpufreq_init,
	.attr = cpufreq_generic_attr,
};

static void tegra194_cpufreq_free_resources(void)
{
	destroy_workqueue(read_counters_wq);
}
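
/*
 * Build the frequency table for one cluster: ask BPMP for the cluster's
 * ndiv limits via MRQ_CPU_NDIV_LIMITS, then expand [ndiv_min, ndiv_max]
 * into cpufreq table entries, stepping by a multiple of mdiv so the
 * entries line up with the vhint table granularity.
 */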
static struct cpufreq_frequency_table *
init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
		unsigned int cluster_id)
{
	struct cpufreq_frequency_table *freq_table;
	struct mrq_cpu_ndiv_limits_response resp;
	unsigned int num_freqs, ndiv, delta_ndiv;
	struct mrq_cpu_ndiv_limits_request req;
	struct tegra_bpmp_message msg;
	u16 freq_table_step_size;
	int err, index;

	memset(&req, 0, sizeof(req));
	req.cluster_id = cluster_id;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_CPU_NDIV_LIMITS;
	msg.tx.data = &req;
	msg.tx.size = sizeof(req);
	msg.rx.data = &resp;
	msg.rx.size = sizeof(resp);

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)
		return ERR_PTR(err);

	/*
	 * Make sure frequency table step is a multiple of mdiv to match
	 * vhint table granularity.
	 */
	freq_table_step_size = resp.mdiv *
			DIV_ROUND_UP(CPUFREQ_TBL_STEP_HZ, resp.ref_clk_hz);

	dev_dbg(&pdev->dev, "cluster %d: frequency table step size: %d\n",
		cluster_id, freq_table_step_size);

	delta_ndiv = resp.ndiv_max - resp.ndiv_min;

	if (unlikely(delta_ndiv == 0)) {
		num_freqs = 1;
	} else {
		/* We store both ndiv_min and ndiv_max hence the +1 */
		num_freqs = delta_ndiv / freq_table_step_size + 1;
	}

	num_freqs += (delta_ndiv % freq_table_step_size) ? 1 : 0;

	freq_table = devm_kcalloc(&pdev->dev, num_freqs + 1,
				  sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return ERR_PTR(-ENOMEM);

	for (index = 0, ndiv = resp.ndiv_min;
			ndiv < resp.ndiv_max;
			index++, ndiv += freq_table_step_size) {
		freq_table[index].driver_data = ndiv;
		freq_table[index].frequency = map_ndiv_to_freq(&resp, ndiv);
	}

	freq_table[index].driver_data = resp.ndiv_max;
	freq_table[index++].frequency = map_ndiv_to_freq(&resp, resp.ndiv_max);
	freq_table[index].frequency = CPUFREQ_TABLE_END;

	return freq_table;
}
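
/*
 * Example of the table built above, with hypothetical limits
 * ndiv_min = 20, ndiv_max = 75 and freq_table_step_size = 10:
 * entries for ndiv = 20, 30, 40, 50, 60, 70, one final entry for
 * ndiv_max = 75, then the CPUFREQ_TABLE_END sentinel.
 */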
static int tegra194_cpufreq_probe(struct platform_device *pdev)
{
	struct tegra194_cpufreq_data *data;
	struct tegra_bpmp *bpmp;
	int err, i;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num_clusters = MAX_CLUSTERS;
	data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
				    sizeof(*data->tables), GFP_KERNEL);
	if (!data->tables)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);

	bpmp = tegra_bpmp_get(&pdev->dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
	if (!read_counters_wq) {
		dev_err(&pdev->dev, "failed to create workqueue\n");
		err = -EINVAL;
		goto put_bpmp;
	}

	for (i = 0; i < data->num_clusters; i++) {
		data->tables[i] = init_freq_table(pdev, bpmp, i);
		if (IS_ERR(data->tables[i])) {
			err = PTR_ERR(data->tables[i]);
			goto err_free_res;
		}
	}

	tegra194_cpufreq_driver.driver_data = data;

	err = cpufreq_register_driver(&tegra194_cpufreq_driver);
	if (!err)
		goto put_bpmp;

err_free_res:
	tegra194_cpufreq_free_resources();
put_bpmp:
	tegra_bpmp_put(bpmp);
	return err;
}
static int tegra194_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&tegra194_cpufreq_driver);
	tegra194_cpufreq_free_resources();

	return 0;
}

static const struct of_device_id tegra194_cpufreq_of_match[] = {
	{ .compatible = "nvidia,tegra194-ccplex", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match);

static struct platform_driver tegra194_ccplex_driver = {
	.driver = {
		.name = "tegra194-cpufreq",
		.of_match_table = tegra194_cpufreq_of_match,
	},
	.probe = tegra194_cpufreq_probe,
	.remove = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);

MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra194 cpufreq driver");
MODULE_LICENSE("GPL v2");