// SPDX-License-Identifier: GPL-2.0-only
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>
#include <asm/idle.h>
#include <asm/svm.h>

#include "cacheinfo.h"
#include "setup.h"

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#endif

static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64

/*
 * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
 * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
 * 2014:
 *
 * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
 * up the kernel code."
 *
 * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
 * code should be removed.
 */

static ssize_t store_smt_snooze_delay(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
		     current->comm, current->pid);
	return count;
}

static ssize_t show_smt_snooze_delay(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
		     current->comm, current->pid);
	return sprintf(buf, "100\n");
}

static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);

static int __init setup_smt_snooze_delay(char *str)
{
	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	pr_warn("smt-snooze-delay command line option has no effect\n");
	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);

#endif /* CONFIG_PPC64 */
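
/*
 * The helpers below generate per-CPU sysfs show/store routines for an SPR:
 * read_NAME()/write_NAME() run on the target CPU via smp_call_function_single(),
 * while show_NAME()/store_NAME() print and parse the value as hex. The PMC
 * variant additionally calls ppc_enable_pmcs() before each write.
 */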
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val);	\
}

#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
#ifdef CONFIG_PPC64

/*
 * This is the system wide DSCR register default value. Any
 * change to this default value through the sysfs interface
 * will update all per cpu DSCR default values across the
 * system stored in their respective PACA structures.
 */
static unsigned long dscr_default;

/**
 * read_dscr() - Fetch the cpu specific DSCR default
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void read_dscr(void *val)
{
	*(unsigned long *)val = get_paca()->dscr_default;
}

/**
 * write_dscr() - Update the cpu specific DSCR default
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
 * for any cpu which is contained in its PACA structure.
 */
static void write_dscr(void *val)
{
	get_paca()->dscr_default = *(unsigned long *)val;
	if (!current->thread.dscr_inherit) {
		current->thread.dscr = *(unsigned long *)val;
		mtspr(SPRN_DSCR, *(unsigned long *)val);
	}
}

SYSFS_SPRSETUP_SHOW_STORE(dscr);
static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);

static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}

/**
 * show_dscr_default() - Fetch the system wide DSCR default
 * @dev: Device structure
 * @attr: Device attribute structure
 * @buf: Interface buffer
 *
 * This function returns the system wide DSCR default value.
 */
static ssize_t show_dscr_default(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lx\n", dscr_default);
}

/**
 * store_dscr_default() - Update the system wide DSCR default
 * @dev: Device structure
 * @attr: Device attribute structure
 * @buf: Interface buffer
 * @count: Size of the update
 *
 * This function updates the system wide DSCR default value.
 */
static ssize_t __used store_dscr_default(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned long val;
	int ret = 0;

	ret = sscanf(buf, "%lx", &val);
	if (ret != 1)
		return -EINVAL;

	dscr_default = val;
	on_each_cpu(write_dscr, &val, 1);

	return count;
}

static DEVICE_ATTR(dscr_default, 0600,
		   show_dscr_default, store_dscr_default);
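
/*
 * Note: dscr_default is created on the cpu subsystem root, so it shows up as
 * /sys/devices/system/cpu/dscr_default, while the per-CPU dscr attribute
 * lives under each /sys/devices/system/cpu/cpuN/ directory. Values are
 * read and written in hex.
 */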
static void sysfs_create_dscr_default(void)
{
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		int cpu;

		dscr_default = spr_default_dscr;
		for_each_possible_cpu(cpu)
			paca_ptrs[cpu]->dscr_default = dscr_default;

		device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
	}
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_BIT		63

static u64 pw20_wt;
static u64 altivec_idle_wt;
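
/*
 * get_idle_ticks_bit() converts a wait time in nanoseconds into timebase
 * ticks and returns ilog2() of that tick count. The store routines below
 * program (MAX_BIT - bit) into the PW20/AltiVec idle count fields of
 * PWRMGTCR0, which selects the timebase bit listed in the TB[] table
 * further down.
 */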
static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	if (ns >= 10000)
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}

static void do_show_pwrmgtcr0(void *val)
{
	u32 *value = val;

	*value = mfspr(SPRN_PWRMGTCR0);
}

static ssize_t show_pw20_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_PW20_WAIT;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_pw20_state(void *val)
{
	u32 *value = val;
	u32 pw20_state;

	pw20_state = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		pw20_state |= PWRMGTCR0_PW20_WAIT;
	else
		pw20_state &= ~PWRMGTCR0_PW20_WAIT;

	mtspr(SPRN_PWRMGTCR0, pw20_state);
}

static ssize_t store_pw20_state(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);

	return count;
}

static ssize_t show_pw20_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
					PWRMGTCR0_PW20_ENT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert TB cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}

static void set_pw20_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 pw20_idle;

	pw20_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic PW20 Core Idle Count */
	/* clear count */
	pw20_idle &= ~PWRMGTCR0_PW20_ENT;

	/* set count */
	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, pw20_idle);
}

static ssize_t store_pw20_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	pw20_wt = value;

	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
				&entry_bit, 1);

	return count;
}

static ssize_t show_altivec_idle(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_AV_IDLE_PD_EN;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}

static void do_store_altivec_idle(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	if (*value)
		altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
	else
		altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 value;
	unsigned int cpu = dev->id;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	if (value > 1)
		return -EINVAL;

	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);

	return count;
}

static ssize_t show_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
					PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert TB cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}

static void set_altivec_idle_wait_entry_bit(void *val)
{
	u32 *value = val;
	u32 altivec_idle;

	altivec_idle = mfspr(SPRN_PWRMGTCR0);

	/* Set Automatic AltiVec Idle Count */
	/* clear count */
	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;

	/* set count */
	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);

	mtspr(SPRN_PWRMGTCR0, altivec_idle);
}

static ssize_t store_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	altivec_idle_wt = value;

	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
				&entry_bit, 1);

	return count;
}
/*
 * Enable/Disable interface:
 * 0, disable. 1, enable.
 */
static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);

/*
 * Set wait time interface (nanoseconds).
 * Example, based on a TB frequency of 41MHz:
 * 1~48(ns): TB[63]
 * 49~97(ns): TB[62]
 * 98~195(ns): TB[61]
 * 196~390(ns): TB[60]
 * 391~780(ns): TB[59]
 * 781~1560(ns): TB[58]
 * ...
 */
static DEVICE_ATTR(pw20_wait_time, 0600,
			show_pw20_wait_time,
			store_pw20_wait_time);
static DEVICE_ATTR(altivec_idle_wait_time, 0600,
			show_altivec_idle_wait_time,
			store_altivec_idle_wait_time);
#endif
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__this_cpu_read(pmcs_enabled))
		return;

	__this_cpu_write(pmcs_enabled, 1);

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
EXPORT_SYMBOL(ppc_enable_pmcs);

/*
 * Let's define all possible registers, we'll only hook up the ones
 * that are implemented on the current processor
 */

#ifdef CONFIG_PMU_SYSFS
#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
#define HAS_PPC_PMC_CLASSIC	1
#define HAS_PPC_PMC_IBM		1
#endif

#ifdef CONFIG_PPC64
#define HAS_PPC_PMC_PA6T	1
#define HAS_PPC_PMC56		1
#endif

#ifdef CONFIG_PPC_BOOK3S_32
#define HAS_PPC_PMC_G4		1
#endif
#endif /* CONFIG_PMU_SYSFS */

#if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
#define HAS_PPC_PA6T
#endif
/*
 * SPRs which are not related to PMU.
 */
#ifdef CONFIG_PPC64
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
SYSFS_SPRSETUP(tscr, SPRN_TSCR);

/*
 * Let's only enable read for phyp resources and
 * enable write when needed with a separate function.
 * Let's be conservative and default to pseries.
 */
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_CLASSIC
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
#endif

#ifdef HAS_PPC_PMC_G4
SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
#endif

#ifdef HAS_PPC_PMC56
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);

SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);

static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
#endif /* HAS_PPC_PMC56 */

#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
#endif

#ifdef HAS_PPC_PA6T
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
SYSFS_SPRSETUP(hid5, SPRN_HID5);
SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
#endif /* HAS_PPC_PA6T */
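
/*
 * The attribute tables below are keyed off cur_cpu_spec->pmc_type;
 * register_cpu_online() picks the matching table and creates one sysfs file
 * per attribute (plus one per implemented PMC for the classic layouts).
 */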
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
#endif /* HAS_PPC_PMC_IBM */

#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
};
#endif /* HAS_PPC_PMC_G4 */

#ifdef HAS_PPC_PMC_CLASSIC
static struct device_attribute classic_pmc_attrs[] = {
	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
#ifdef HAS_PPC_PMC56
	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
#endif

#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
static struct device_attribute pa6t_attrs[] = {
#ifdef HAS_PPC_PMC_PA6T
	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
#endif
#ifdef HAS_PPC_PA6T
	__ATTR(hid0, 0600, show_hid0, store_hid0),
	__ATTR(hid1, 0600, show_hid1, store_hid1),
	__ATTR(hid4, 0600, show_hid4, store_hid4),
	__ATTR(hid5, 0600, show_hid5, store_hid5),
	__ATTR(ima0, 0600, show_ima0, store_ima0),
	__ATTR(ima1, 0600, show_ima1, store_ima1),
	__ATTR(ima2, 0600, show_ima2, store_ima2),
	__ATTR(ima3, 0600, show_ima3, store_ima3),
	__ATTR(ima4, 0600, show_ima4, store_ima4),
	__ATTR(ima5, 0600, show_ima5, store_ima5),
	__ATTR(ima6, 0600, show_ima6, store_ima6),
	__ATTR(ima7, 0600, show_ima7, store_ima7),
	__ATTR(ima8, 0600, show_ima8, store_ima8),
	__ATTR(ima9, 0600, show_ima9, store_ima9),
	__ATTR(imaat, 0600, show_imaat, store_imaat),
	__ATTR(btcr, 0600, show_btcr, store_btcr),
	__ATTR(pccr, 0600, show_pccr, store_pccr),
	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
	__ATTR(der, 0600, show_der, store_der),
	__ATTR(mer, 0600, show_mer, store_mer),
	__ATTR(ber, 0600, show_ber, store_ber),
	__ATTR(ier, 0600, show_ier, store_ier),
	__ATTR(sier, 0600, show_sier, store_sier),
	__ATTR(siar, 0600, show_siar, store_siar),
	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
#endif /* HAS_PPC_PA6T */
};
#endif
#ifdef CONFIG_PPC_SVM
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", is_secure_guest());
}
static DEVICE_ATTR(svm, 0444, show_svm, NULL);

static void create_svm_file(void)
{
	device_create_file(cpu_subsys.dev_root, &dev_attr_svm);
}
#else
static void create_svm_file(void)
{
}
#endif /* CONFIG_PPC_SVM */
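
/*
 * On pseries LPARs, expose the idle PURR/SPURR counts maintained by the
 * idle code as read-only per-CPU attributes; each read is performed on the
 * target CPU via smp_call_function_single().
 */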
#ifdef CONFIG_PPC_PSERIES
static void read_idle_purr(void *val)
{
	u64 *ret = val;

	*ret = read_this_idle_purr();
}

static ssize_t idle_purr_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	u64 val;

	smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);

	return sprintf(buf, "%llx\n", val);
}
static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);

static void create_idle_purr_file(struct device *s)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_idle_purr);
}

static void remove_idle_purr_file(struct device *s)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_idle_purr);
}

static void read_idle_spurr(void *val)
{
	u64 *ret = val;

	*ret = read_this_idle_spurr();
}

static ssize_t idle_spurr_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	u64 val;

	smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);

	return sprintf(buf, "%llx\n", val);
}
static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);

static void create_idle_spurr_file(struct device *s)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_idle_spurr);
}

static void remove_idle_spurr_file(struct device *s)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_idle_spurr);
}

#else /* CONFIG_PPC_PSERIES */
#define create_idle_purr_file(s)
#define remove_idle_purr_file(s)
#define create_idle_spurr_file(s)
#define remove_idle_spurr_file(s)
#endif /* CONFIG_PPC_PSERIES */
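
/*
 * CPU hotplug "online" callback: create the per-CPU sysfs attributes that
 * match the CPU's PMC flavour and feature bits, then register its cache
 * hierarchy with cacheinfo.
 */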
static int register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* For cpus present at boot a reference was already grabbed in register_cpu() */
	if (!s->of_node)
		s->of_node = of_get_cpu_node(cpu, NULL);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
#ifdef CONFIG_PMU_SYSFS
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		device_create_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */

	if (cpu_has_feature(CPU_FTR_PURR)) {
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
		create_idle_purr_file(s);
	}

	if (cpu_has_feature(CPU_FTR_SPURR)) {
		device_create_file(s, &dev_attr_spurr);
		create_idle_spurr_file(s);
	}

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static int unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
#ifdef CONFIG_PMU_SYSFS
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_ARCH_31))
		device_remove_file(s, &dev_attr_mmcr3);
#endif /* CONFIG_PMU_SYSFS */

	if (cpu_has_feature(CPU_FTR_PURR)) {
		device_remove_file(s, &dev_attr_purr);
		remove_idle_purr_file(s);
	}

	if (cpu_has_feature(CPU_FTR_SPURR)) {
		device_remove_file(s, &dev_attr_spurr);
		remove_idle_spurr_file(s);
	}

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
	of_node_put(s->of_node);
	s->of_node = NULL;
	return 0;
}
#else /* !CONFIG_HOTPLUG_CPU */
#define unregister_cpu_online NULL
#endif
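
/*
 * Back the generic "probe"/"release" files under /sys/devices/system/cpu by
 * forwarding to the platform's cpu_probe/cpu_release hooks (provided, for
 * example, by pseries DLPAR code) when they are implemented.
 */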
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	if (ppc_md.cpu_probe)
		return ppc_md.cpu_probe(buf, count);

	return -EINVAL;
}

ssize_t arch_cpu_release(const char *buf, size_t count)
{
	if (ppc_md.cpu_release)
		return ppc_md.cpu_release(buf, count);

	return -EINVAL;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
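
/*
 * cpu_mutex serialises the helpers below, which let other powerpc code add
 * or remove a sysfs attribute (or attribute group) on every possible CPU's
 * device in one call.
 */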
static DEFINE_MUTEX(cpu_mutex);

int cpu_add_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_create_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr);

int cpu_add_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;
	int ret;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		ret = sysfs_create_group(&dev->kobj, attrs);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&cpu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);

void cpu_remove_dev_attr(struct device_attribute *attr)
{
	int cpu;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		device_remove_file(get_cpu_device(cpu), attr);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);

void cpu_remove_dev_attr_group(struct attribute_group *attrs)
{
	int cpu;
	struct device *dev;

	mutex_lock(&cpu_mutex);

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);
		sysfs_remove_group(&dev->kobj, attrs);
	}

	mutex_unlock(&cpu_mutex);
}
EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */

#ifdef CONFIG_NUMA
static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
}

int sysfs_add_device_to_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
			kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);

void sysfs_remove_device_from_node(struct device *dev, int nid)
{
	struct node *node = node_devices[nid];
	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);

#else
static void register_nodes(void)
{
	return;
}

#endif
/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
}
static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
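
/*
 * Late init: register node and CPU devices, hook the online/offline
 * callbacks into the CPU hotplug state machine, and create the system-wide
 * dscr_default and svm attributes where applicable.
 */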
static int __init topology_init(void)
{
	int cpu, r;

	register_nodes();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (smp_ops && smp_ops->cpu_offline_self)
			c->hotpluggable = 1;
#endif

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}
	}
	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
			      register_cpu_online, unregister_cpu_online);
	WARN_ON(r < 0);
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	create_svm_file();

	return 0;
}
subsys_initcall(topology_init);