// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
#include <trace/hooks/cpufreq.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
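
/*
 * Illustrative sketch (not part of this file): a sampling governor can
 * derive one plausible load estimate over an interval from two snapshots
 * of the idle and wall times returned above.  Both values are reported in
 * microseconds, so the busy fraction is the wall delta minus the idle
 * delta:
 *
 *	u64 wall_prev, wall_now, idle_prev, idle_now, load;
 *
 *	idle_prev = get_cpu_idle_time(cpu, &wall_prev, 0);
 *	... sampling interval elapses ...
 *	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
 *
 *	load = 100 * (wall_now - wall_prev - (idle_now - idle_prev)) /
 *	       (wall_now - wall_prev);
 */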

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It does the following:
 * - validates & shows the frequency table passed in
 * - sets the policy's transition latency
 * - fills policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
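
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * platform driver whose CPUs all share one clock would typically call
 * cpufreq_generic_init() from its ->init() callback.  foo_freq_table and
 * the 100 us transition latency are made-up example values:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, 100000);
 *		return 0;
 *	}
 */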

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
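
/*
 * Illustrative sketch (hypothetical driver, not part of this file):
 * drivers that manage the CPU clock through the common clk framework can
 * point their ->get() callback here, provided they set policy->clk in
 * ->init().  The division above converts the clk rate from Hz to kHz:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name = "foo-cpufreq",
 *		.get  = cpufreq_generic_get,
 *		...
 *	};
 */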

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy. Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
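
/*
 * Illustrative sketch (not part of this file): per the kerneldoc above,
 * every successful cpufreq_cpu_get() must be balanced by a
 * cpufreq_cpu_put(), typically bracketing a short read of policy data:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		unsigned int cur_khz = policy->cur;
 *
 *		cpufreq_cpu_put(policy);
 *	}
 */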

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
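
/*
 * Illustrative sketch (not part of this file): cpufreq_cpu_acquire() and
 * cpufreq_cpu_release() bracket updates that need the policy rwsem held
 * for writing, as the kerneldoc above requires:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		... modify the policy under its rwsem ...
 *		cpufreq_cpu_release(policy);
 *	}
 */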

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
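
/*
 * Worked example (illustrative): cpufreq_scale(old, div, mult) computes
 * old * mult / div, so with a reference loops_per_jiffy of 4997120 saved
 * at 1000000 kHz, a POSTCHANGE to 500000 kHz rescales loops_per_jiffy to
 * 4997120 * 500000 / 1000000 = 2498560.
 */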

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the frequency transition applies to.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		cpufreq_times_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
		trace_android_rvh_cpufreq_transition(policy);
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
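
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * driver's ->target_index() callback brackets the actual hardware
 * programming with the begin/end pair so the notifiers above fire around
 * it.  foo_program_frequency() stands in for the platform-specific part:
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = policy->freq_table[index].frequency,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_program_frequency(freqs.new);
 *		cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 *
 *		return ret;
 *	}
 */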

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	unsigned int old_target_freq = target_freq;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_resolve_freq(policy, target_freq, old_target_freq);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		unsigned int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
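
/*
 * Worked example (illustrative): with a frequency table of
 * { 400000, 800000, 1200000 } kHz and policy->min/max of 400000/1200000,
 * a request for 900000 kHz passes the clamp unchanged and is then
 * resolved upwards via CPUFREQ_RELATION_L to 1200000 kHz, the lowest
 * table entry >= the target.
 */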

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
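
/*
 * Worked example (illustrative, assuming the usual LATENCY_MULTIPLIER of
 * 1000): a driver-reported transition_latency of 500000 ns gives
 * latency = 500 us, and 500 * 1000 = 500000 us, which the min() above
 * caps at the 10000 us (10 ms) default transition delay.
 */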

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
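
/*
 * Usage note (illustrative): the global attribute defined above is
 * typically exposed as /sys/devices/system/cpu/cpufreq/boost on drivers
 * that support boost, so from userspace:
 *
 *	echo 1 > /sys/devices/system/cpu/cpufreq/boost    (enable)
 *	cat /sys/devices/system/cpu/cpufreq/boost         (query)
 */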

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}
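
/*
 * Illustrative expansion (not part of this file): the instantiation
 * show_one(scaling_min_freq, min) below expands roughly to:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */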

static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
{
	unsigned int max_freq = policy->cpuinfo.max_freq;

	trace_android_vh_show_max_freq(policy, &max_freq);
	trace_android_rvh_show_max_freq(policy, &max_freq);
	return sprintf(buf, "%u\n", max_freq);
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}
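
/*
 * Illustrative expansion (not part of this file): the instantiation
 * store_one(scaling_max_freq, max) below routes the written value through
 * the freq QoS framework rather than poking policy->max directly; its
 * body boils down to:
 *
 *	ret = freq_qos_update_request(policy->max_freq_req, val);
 */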

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to the cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
				struct device *dev)
{
	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * Affected CPUs must always be the ones that are online; we aren't
	 * managing offline CPUs here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req)
			goto out_destroy_policy;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on uninitialized request in case
		 * of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table known to the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run at that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest entry
	 * of the table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}
	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);
		cpufreq_times_create_policy(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);

	return ret;
}
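/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver that provides the optional ->online()/->offline() pair lets
 * cpufreq_online() take the light-weight recovery path above instead of a
 * full ->init(). All foo_* names below are hypothetical:
 *
 *	static int foo_online(struct cpufreq_policy *policy)
 *	{
 *		// Re-enable hardware access; the frequency table and
 *		// policy->related_cpus set up by ->init() are still valid.
 *		return foo_hw_resume(policy->cpu);
 *	}
 *
 *	static int foo_offline(struct cpufreq_policy *policy)
 *	{
 *		// Light-weight tear-down; keep the policy data around.
 *		return foo_hw_suspend(policy->cpu);
 *	}
 *
 * Note that ->online and ->offline must be provided together, see the
 * validation in cpufreq_register_driver() below.
 */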
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}
/**
 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * The return value will be the same as what is shown in scaling_cur_freq in
 * sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency, as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
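/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the difference between the two getters above, as seen from a hypothetical
 * caller. The values can differ when policy->cur has gone stale behind
 * cpufreq's back:
 *
 *	unsigned int cached = cpufreq_quick_get(cpu);	// last known value
 *	unsigned int actual = cpufreq_get(cpu);		// asks the driver
 *
 *	pr_info("cpu%u: cached %u kHz, actual %u kHz\n", cpu, cached, actual);
 *
 * cpufreq_get() takes policy->rwsem and may therefore sleep, so only
 * cpufreq_quick_get() is suitable for atomic context.
 */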
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
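/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver opts into the generic suspend handling by setting
 * policy->suspend_freq in its ->init() and pointing ->suspend at the helper
 * above. foo_* names are hypothetical:
 *
 *	static int foo_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		// In kHz; applied with CPUFREQ_RELATION_H on suspend.
 *		policy->suspend_freq = 800000;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		...
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */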
/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (e.g. i2c, regulators) they use for
 * changing the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors
 * that are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
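/*
 * Example (editor's illustrative sketch, not part of the original file):
 * registering a transition notifier. The callback runs once before
 * (CPUFREQ_PRECHANGE) and once after (CPUFREQ_POSTCHANGE) each frequency
 * change, with a struct cpufreq_freqs pointer as @data. foo_* names are
 * hypothetical:
 *
 *	static int foo_cpufreq_cb(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u: %u -> %u kHz\n",
 *				 freqs->policy->cpu, freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_cb,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *
 * Note that this fails with -EBUSY while fast frequency switching is in use
 * (see cpufreq_fast_switch_count above).
 */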
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                             GOVERNORS                             *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	unsigned int old_target_freq = target_freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_fast_switch(policy, target_freq, old_target_freq);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);
	cpufreq_times_record_transition(policy, freq);
	trace_android_rvh_cpufreq_transition(policy);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
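/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the contract for a driver's ->fast_switch() implementation invoked above.
 * It must not sleep and should return the frequency actually set, or 0 on
 * error without touching the hardware state. foo_* names are hypothetical:
 *
 *	static unsigned int foo_fast_switch(struct cpufreq_policy *policy,
 *					    unsigned int target_freq)
 *	{
 *		int index = cpufreq_table_find_index_l(policy, target_freq);
 *
 *		if (foo_write_pstate_reg(policy->cpu, index))
 *			return 0;	// error: hardware state preserved
 *
 *		return policy->freq_table[index].frequency;
 *	}
 */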
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	trace_android_vh_cpufreq_target(policy, target_freq, old_target_freq);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same freq is requested again, so that we can save
	 * a few function calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	/* Save the last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);
	ret = __cpufreq_driver_target(policy, target_freq, relation);
	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
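/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a governor asking for a frequency change through the locked wrapper above,
 * e.g. from its deferred work handler (process context, may sleep):
 *
 *	ret = cpufreq_driver_target(policy, new_freq, CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_err("failed to set %u kHz: %d\n", new_freq, ret);
 *
 * CPUFREQ_RELATION_L selects the lowest table frequency at or above the
 * request; CPUFREQ_RELATION_H the highest one at or below it.
 */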
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * The governor might not have been initialized here if an ACPI _PPC
	 * change notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}

int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
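/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal governor as it would be registered through the function above.
 * ->limits() is invoked by the core with policy->rwsem held, so calling
 * __cpufreq_driver_target() directly is safe there. All foo_* names are
 * hypothetical:
 *
 *	static void foo_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		// Clamp to the new limits after a policy update.
 *		__cpufreq_driver_target(policy, policy->max,
 *					CPUFREQ_RELATION_H);
 *	}
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.limits	= foo_gov_limits,
 *	};
 *
 *	static int __init foo_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&foo_governor);
 *	}
 *	module_init(foo_gov_init);
 */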
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *          is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make
	 * sure that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits);

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
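/*
 * Example (editor's illustrative sketch, not part of the original file):
 * hardware-managed platforms whose drivers implement ->update_limits() get
 * notified directly; everyone else falls back to a full policy
 * re-evaluation. A hypothetical handler reacting to a firmware "limits
 * changed" interrupt would defer to process context first, since
 * cpufreq_update_policy() may sleep:
 *
 *	static void foo_limits_work_fn(struct work_struct *work)
 *	{
 *		struct foo_data *data = container_of(work, struct foo_data,
 *						     limits_work);
 *
 *		cpufreq_update_limits(data->cpu);
 *	}
 */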
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	get_online_cpus();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	put_online_cpus();

	return 0;

err_reset_state:
	put_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
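/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver whose frequency table contains CPUFREQ_BOOST_FREQ entries can
 * enable the generic software boost handling from its ->init() path.
 * foo_* names are hypothetical:
 *
 *	static int foo_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		if (foo_has_boost_opps())
 *			return cpufreq_enable_boost_support();
 *		return 0;
 *	}
 *
 * This installs cpufreq_boost_set_sw() as ->set_boost and creates the global
 * "boost" sysfs file (see above).
 */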
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structure, make sure they are available before proceeding further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
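/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the smallest driver shape that passes the validation above: ->init(),
 * ->verify() and exactly one of ->setpolicy() or ->target()/->target_index().
 * foo_* names are hypothetical:
 *
 *	static struct cpufreq_frequency_table foo_table[] = {
 *		{ .frequency = 400000 },
 *		{ .frequency = 800000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int foo_init(struct cpufreq_policy *policy)
 *	{
 *		policy->freq_table = foo_table;
 *		return 0;
 *	}
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		return foo_set_opp(foo_table[index].frequency);
 *	}
 *
 *	static struct cpufreq_driver foo_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_target_index,
 *	};
 *
 *	static int __init foo_register(void)
 *	{
 *		return cpufreq_register_driver(&foo_driver);
 *	}
 *	module_init(foo_register);
 */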
/*
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);