// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_processor.c - ACPI processor enumeration support
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <acpi/processor.h>

#include <asm/cpu.h>

#include "internal.h"

#define _COMPONENT	ACPI_PROCESSOR_COMPONENT

ACPI_MODULE_NAME("processor");

DEFINE_PER_CPU(struct acpi_processor *, processors);
EXPORT_PER_CPU_SYMBOL(processors);

/* --------------------------------------------------------------------------
                                Errata Handling
   -------------------------------------------------------------------------- */

struct acpi_processor_errata errata __read_mostly;
EXPORT_SYMBOL_GPL(errata);
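
/*
 * Check the PIIX4 ACPI controller revision for the throttling and Type-F DMA
 * errata described in the PIIX4 specification update, and record the affected
 * resources (BM-IDE status port, Type-F DMA state) in the exported 'errata'
 * structure.
 */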
static int acpi_processor_errata_piix4(struct pci_dev *dev)
{
        u8 value1 = 0;
        u8 value2 = 0;

        if (!dev)
                return -EINVAL;

        /*
         * Note that 'dev' references the PIIX4 ACPI Controller.
         */

        switch (dev->revision) {
        case 0:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
                break;
        case 1:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
                break;
        case 2:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
                break;
        case 3:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
                break;
        default:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
                break;
        }

        switch (dev->revision) {
        case 0:         /* PIIX4 A-step */
        case 1:         /* PIIX4 B-step */
                /*
                 * See specification changes #13 ("Manual Throttle Duty Cycle")
                 * and #14 ("Enabling and Disabling Manual Throttle"), plus
                 * erratum #5 ("STPCLK# Deassertion Time") from the January
                 * 2002 PIIX4 specification update.  Applies to only older
                 * PIIX4 models.
                 */
                errata.piix4.throttle = 1;
                fallthrough;

        case 2:         /* PIIX4E */
        case 3:         /* PIIX4M */
                /*
                 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
                 * Livelock") from the January 2002 PIIX4 specification update.
                 * Applies to all PIIX4 models.
                 */

                /*
                 * BM-IDE
                 * ------
                 * Find the PIIX4 IDE Controller and get the Bus Master IDE
                 * Status register address.  We'll use this later to read
                 * each IDE controller's DMA status to make sure we catch all
                 * DMA activity.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        errata.piix4.bmisx = pci_resource_start(dev, 4);
                        pci_dev_put(dev);
                }

                /*
                 * Type-F DMA
                 * ----------
                 * Find the PIIX4 ISA Controller and read the Motherboard
                 * DMA controller's status to see if Type-F (Fast) DMA mode
                 * is enabled (bit 7) on either channel.  Note that we'll
                 * disable C3 support if this is enabled, as some legacy
                 * devices won't operate well if fast DMA is disabled.
                 */
                dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                                     PCI_DEVICE_ID_INTEL_82371AB_0,
                                     PCI_ANY_ID, PCI_ANY_ID, NULL);
                if (dev) {
                        pci_read_config_byte(dev, 0x76, &value1);
                        pci_read_config_byte(dev, 0x77, &value2);
                        if ((value1 & 0x80) || (value2 & 0x80))
                                errata.piix4.fdma = 1;
                        pci_dev_put(dev);
                }

                break;
        }

        if (errata.piix4.bmisx)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus master activity detection (BM-IDE) erratum enabled\n"));
        if (errata.piix4.fdma)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Type-F DMA livelock erratum (C3 disabled)\n"));

        return 0;
}
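
/*
 * Look for the PIIX4 ACPI controller and, if it is present, run the
 * PIIX4-specific errata checks above.
 */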
static int acpi_processor_errata(void)
{
        int result = 0;
        struct pci_dev *dev = NULL;

        /*
         * PIIX4
         */
        dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
                             PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
                             PCI_ANY_ID, NULL);
        if (dev) {
                result = acpi_processor_errata_piix4(dev);
                pci_dev_put(dev);
        }

        return result;
}

/* --------------------------------------------------------------------------
                                Initialization
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_HOTPLUG_CPU
int __weak acpi_map_cpu(acpi_handle handle,
                        phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
        return -ENODEV;
}

int __weak acpi_unmap_cpu(int cpu)
{
        return -ENODEV;
}

int __weak arch_register_cpu(int cpu)
{
        return -ENODEV;
}

void __weak arch_unregister_cpu(int cpu) {}
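
/*
 * Map a hot-added processor object to a logical CPU ID and register the CPU
 * with the architecture code.  Returns -ENODEV if the processor is not
 * physically present or cannot be mapped.
 */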
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
        unsigned long long sta;
        acpi_status status;
        int ret;

        if (invalid_phys_cpuid(pr->phys_id))
                return -ENODEV;

        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;

        cpu_maps_update_begin();
        cpu_hotplug_begin();

        ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
        if (ret)
                goto out;

        ret = arch_register_cpu(pr->id);
        if (ret) {
                acpi_unmap_cpu(pr->id);
                goto out;
        }

        /*
         * CPU got hot-added, but cpu_data is not initialized yet.  Set a flag
         * to delay cpu_idle/throttling initialization and do it when the CPU
         * gets online for the first time.
         */
        pr_info("CPU%d has been hot-added\n", pr->id);
        pr->flags.need_hotplug_init = 1;

out:
        cpu_hotplug_done();
        cpu_maps_update_done();
        return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
        return -ENODEV;
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
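
/*
 * Gather the static configuration of a processor object: its ACPI and
 * physical IDs, the logical CPU number, the P_BLK throttling resources and,
 * if provided, the _SUN slot number.
 */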
static int acpi_processor_get_info(struct acpi_device *device)
{
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        struct acpi_processor *pr = acpi_driver_data(device);
        int device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
        unsigned long long value;

        acpi_processor_errata();

        /*
         * Check to see if we have bus mastering arbitration control.  This
         * is required for proper C3 usage (to maintain cache coherency).
         */
        if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
                pr->flags.bm_control = 1;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Bus mastering arbitration control present\n"));
        } else
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "No bus mastering arbitration control\n"));

        if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
                /* Declared with "Processor" statement; match ProcessorID */
                status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status)) {
                        dev_err(&device->dev,
                                "Failed to evaluate processor object (0x%x)\n",
                                status);
                        return -ENODEV;
                }

                pr->acpi_id = object.processor.proc_id;
        } else {
                /*
                 * Declared with "Device" statement; match _UID.
                 */
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                               NULL, &value);
                if (ACPI_FAILURE(status)) {
                        dev_err(&device->dev,
                                "Failed to evaluate processor _UID (0x%x)\n",
                                status);
                        return -ENODEV;
                }
                device_declaration = 1;
                pr->acpi_id = value;
        }

        if (acpi_duplicate_processor_id(pr->acpi_id)) {
                if (pr->acpi_id == 0xff)
                        dev_info_once(&device->dev,
                                "Entry not well-defined, consider updating BIOS\n");
                else
                        dev_err(&device->dev,
                                "Failed to get unique processor _UID (0x%x)\n",
                                pr->acpi_id);
                return -ENODEV;
        }

        pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
                                       pr->acpi_id);
        if (invalid_phys_cpuid(pr->phys_id))
                acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");

        pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
        if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
                cpu0_initialized = 1;
                /*
                 * Handle UP system running SMP kernel, with no CPU
                 * entry in MADT
                 */
                if (invalid_logical_cpuid(pr->id) && (num_online_cpus() == 1))
                        pr->id = 0;
        }

        /*
         * Extra Processor objects may be enumerated on MP systems with
         * less than the max # of CPUs.  They should be ignored _iff
         * they are physically not present.
         *
         * NOTE: Even if the processor has a cpuid, it may not be present
         * because cpuid <-> apicid mapping is persistent now.
         */
        if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
                int ret = acpi_processor_hotadd_init(pr);

                if (ret)
                        return ret;
        }

        /*
         * On some boxes several processors use the same processor bus id.
         * But they are located in different scope.  For example:
         * \_SB.SCK0.CPU0
         * \_SB.SCK1.CPU0
         * Rename the processor device bus id.  And the new bus id will be
         * generated as the following format:
         * CPU+CPU ID.
         */
        sprintf(acpi_device_bid(device), "CPU%X", pr->id);
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
                          pr->acpi_id));

        if (!object.processor.pblk_address)
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
        else if (object.processor.pblk_length != 6)
                dev_err(&device->dev, "Invalid PBLK length [%d]\n",
                        object.processor.pblk_length);
        else {
                pr->throttling.address = object.processor.pblk_address;
                pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;

                pr->pblk = object.processor.pblk_address;
        }

        /*
         * If ACPI describes a slot number for this CPU, we can use it to
         * ensure we get the right value in the "physical id" field
         * of /proc/cpuinfo
         */
        status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
        if (ACPI_SUCCESS(status))
                arch_fix_phys_package_id(pr->id, value);

        return 0;
}

/*
 * Do not put anything in here which needs the core to be online.
 * For example MSR access or setting up things which check for cpuinfo_x86
 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
 * Such things have to be put in and set up by the processor driver's .probe().
 */
static DEFINE_PER_CPU(void *, processor_device_array);
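
/*
 * Scan handler .attach() callback: allocate a struct acpi_processor for the
 * device, gather its configuration via acpi_processor_get_info(), bind the
 * ACPI device to the corresponding CPU device and trigger processor driver
 * probing.  Returns 1 once a processor driver has been attached.
 */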
static int acpi_processor_add(struct acpi_device *device,
                              const struct acpi_device_id *id)
{
        struct acpi_processor *pr;
        struct device *dev;
        int result = 0;

        pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
        if (!pr)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
                result = -ENOMEM;
                goto err_free_pr;
        }

        pr->handle = device->handle;
        strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
        device->driver_data = pr;

        result = acpi_processor_get_info(device);
        if (result) /* Processor is not physically present or unavailable */
                return 0;

        BUG_ON(pr->id >= nr_cpu_ids);

        /*
         * Buggy BIOS check.
         * ACPI id of processors can be reported wrongly by the BIOS.
         * Don't trust it blindly
         */
        if (per_cpu(processor_device_array, pr->id) != NULL &&
            per_cpu(processor_device_array, pr->id) != device) {
                dev_warn(&device->dev,
                         "BIOS reported wrong ACPI id %d for the processor\n",
                         pr->id);
                /* Give up, but do not abort the namespace scan. */
                goto err;
        }
        /*
         * processor_device_array is not cleared on errors to allow buggy BIOS
         * checks.
         */
        per_cpu(processor_device_array, pr->id) = device;
        per_cpu(processors, pr->id) = pr;

        dev = get_cpu_device(pr->id);
        if (!dev) {
                result = -ENODEV;
                goto err;
        }

        result = acpi_bind_one(dev, device);
        if (result)
                goto err;

        pr->dev = dev;

        /* Trigger the processor driver's .probe() if present. */
        if (device_attach(dev) >= 0)
                return 1;

        dev_err(dev, "Processor driver could not be attached\n");
        acpi_unbind_one(dev);

 err:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        device->driver_data = NULL;
        per_cpu(processors, pr->id) = NULL;
 err_free_pr:
        kfree(pr);
        return result;
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* --------------------------------------------------------------------------
                                    Removal
   -------------------------------------------------------------------------- */
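
/*
 * Scan handler .detach() callback, invoked on CPU hot-removal: unbind the
 * processor driver, drop the per-CPU references and unregister the CPU from
 * the architecture code before freeing the struct acpi_processor.
 */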
static void acpi_processor_remove(struct acpi_device *device)
{
        struct acpi_processor *pr;

        if (!device || !acpi_driver_data(device))
                return;

        pr = acpi_driver_data(device);
        if (pr->id >= nr_cpu_ids)
                goto out;

        /*
         * The only reason why we ever get here is CPU hot-removal.  The CPU is
         * already offline and the ACPI device removal locking prevents it from
         * being put back online at this point.
         *
         * Unbind the driver from the processor device and detach it from the
         * ACPI companion object.
         */
        device_release_driver(pr->dev);
        acpi_unbind_one(pr->dev);

        /* Clean up. */
        per_cpu(processor_device_array, pr->id) = NULL;
        per_cpu(processors, pr->id) = NULL;

        cpu_maps_update_begin();
        cpu_hotplug_begin();

        /* Remove the CPU. */
        arch_unregister_cpu(pr->id);
        acpi_unmap_cpu(pr->id);

        cpu_hotplug_done();
        cpu_maps_update_done();

        try_offline_node(cpu_to_node(pr->id));

 out:
        free_cpumask_var(pr->throttling.shared_cpu_map);
        kfree(pr);
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */

#ifdef CONFIG_X86
static bool acpi_hwp_native_thermal_lvt_set;
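
/*
 * Namespace-walk callback that evaluates _OSC with the UUID below, setting
 * bit 12 of the capabilities buffer to indicate native handling of the HWP
 * thermal LVT, and records whether the platform acknowledged the request.
 * Once an acknowledgement has been seen, further walks terminate early.
 */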
static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
                                                          u32 lvl,
                                                          void *context,
                                                          void **rv)
{
        u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
        u32 capbuf[2];
        struct acpi_osc_context osc_context = {
                .uuid_str = sb_uuid_str,
                .rev = 1,
                .cap.length = 8,
                .cap.pointer = capbuf,
        };

        if (acpi_hwp_native_thermal_lvt_set)
                return AE_CTRL_TERMINATE;

        capbuf[0] = 0x0000;
        capbuf[1] = 0x1000; /* set bit 12 */

        if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
                if (osc_context.ret.pointer && osc_context.ret.length > 1) {
                        u32 *capbuf_ret = osc_context.ret.pointer;

                        if (capbuf_ret[1] & 0x1000) {
                                acpi_handle_info(handle,
                                        "_OSC native thermal LVT Acked\n");
                                acpi_hwp_native_thermal_lvt_set = true;
                        }
                }
                kfree(osc_context.ret.pointer);
        }

        return AE_OK;
}
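
/*
 * On HWP-capable systems, run the _OSC handshake above for every Processor
 * object and every processor device (ACPI_PROCESSOR_DEVICE_HID) in the
 * namespace.
 */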
void __init acpi_early_processor_osc(void)
{
        if (boot_cpu_has(X86_FEATURE_HWP)) {
                acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                                    ACPI_UINT32_MAX,
                                    acpi_hwp_native_thermal_lvt_osc,
                                    NULL, NULL, NULL);
                acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
                                 acpi_hwp_native_thermal_lvt_osc,
                                 NULL, NULL);
        }
}
#endif

/*
 * The following ACPI IDs are known to be suitable for representing as
 * processor devices.
 */
static const struct acpi_device_id processor_device_ids[] = {
        { ACPI_PROCESSOR_OBJECT_HID, },
        { ACPI_PROCESSOR_DEVICE_HID, },
        { }
};

static struct acpi_scan_handler processor_handler = {
        .ids = processor_device_ids,
        .attach = acpi_processor_add,
#ifdef CONFIG_ACPI_HOTPLUG_CPU
        .detach = acpi_processor_remove,
#endif
        .hotplug = {
                .enabled = true,
        },
};

static int acpi_processor_container_attach(struct acpi_device *dev,
                                           const struct acpi_device_id *id)
{
        return 1;
}

static const struct acpi_device_id processor_container_ids[] = {
        { ACPI_PROCESSOR_CONTAINER_HID, },
        { }
};

static struct acpi_scan_handler processor_container_handler = {
        .ids = processor_container_ids,
        .attach = acpi_processor_container_attach,
};

/* The number of the unique processor IDs */
static int nr_unique_ids __initdata;

/* The number of the duplicate processor IDs */
static int nr_duplicate_ids;

/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
        [0 ... NR_CPUS - 1] = -1,
};

/* Used to store the duplicate processor IDs */
static int duplicate_processor_ids[] = {
        [0 ... NR_CPUS - 1] = -1,
};
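
/*
 * Classify a processor ID reported by the firmware: IDs already known to be
 * duplicates are ignored, IDs seen for a second time move from the unique
 * list to the duplicate list, and new IDs are recorded as unique.
 */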
static void __init processor_validated_ids_update(int proc_id)
{
        int i;

        if (nr_unique_ids == NR_CPUS || nr_duplicate_ids == NR_CPUS)
                return;

        /*
         * Firstly, compare the proc_id with duplicate IDs, if the proc_id is
         * already in the IDs, do nothing.
         */
        for (i = 0; i < nr_duplicate_ids; i++) {
                if (duplicate_processor_ids[i] == proc_id)
                        return;
        }

        /*
         * Secondly, compare the proc_id with unique IDs, if the proc_id is in
         * the IDs, put it in the duplicate IDs.
         */
        for (i = 0; i < nr_unique_ids; i++) {
                if (unique_processor_ids[i] == proc_id) {
                        duplicate_processor_ids[nr_duplicate_ids] = proc_id;
                        nr_duplicate_ids++;
                        return;
                }
        }

        /*
         * Lastly, the proc_id is a unique ID, put it in the unique IDs.
         */
        unique_processor_ids[nr_unique_ids] = proc_id;
        nr_unique_ids++;
}
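
/*
 * Namespace-walk callback that extracts the processor ID of a Processor
 * object (its ProcessorID field) or a processor device (its _UID) and feeds
 * it to processor_validated_ids_update().  Malformed objects are logged but
 * do not abort the walk.
 */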
static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
                                                  u32 lvl,
                                                  void *context,
                                                  void **rv)
{
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long uid;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return status;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        goto err;
                uid = object.processor.proc_id;
                break;

        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
                if (ACPI_FAILURE(status))
                        goto err;
                break;
        default:
                goto err;
        }

        processor_validated_ids_update(uid);
        return AE_OK;

err:
        /* Exit on error, but don't abort the namespace walk */
        acpi_handle_info(handle, "Invalid processor object\n");
        return AE_OK;
}
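
/*
 * Walk the whole namespace once at init time to populate the duplicate-ID
 * table consulted by acpi_duplicate_processor_id().
 */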
static void __init acpi_processor_check_duplicates(void)
{
        /* check the correctness for all processors in ACPI namespace */
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            acpi_processor_ids_walk,
                            NULL, NULL, NULL);
        acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
                         NULL, NULL);
}

bool acpi_duplicate_processor_id(int proc_id)
{
        int i;

        /*
         * compare the proc_id with duplicate IDs, if the proc_id is already
         * in the duplicate IDs, return true, otherwise, return false.
         */
        for (i = 0; i < nr_duplicate_ids; i++) {
                if (duplicate_processor_ids[i] == proc_id)
                        return true;
        }
        return false;
}

void __init acpi_processor_init(void)
{
        acpi_processor_check_duplicates();
        acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
        acpi_scan_add_handler(&processor_container_handler);
}
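
/*
 * _CST helpers (CONFIG_ACPI_PROCESSOR_CSTATE): exported routines for claiming
 * _CST control from the platform and for parsing a processor's _CST package
 * into a struct acpi_processor_power.
 */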
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
/**
 * acpi_processor_claim_cst_control - Request _CST control from the platform.
 */
bool acpi_processor_claim_cst_control(void)
{
        static bool cst_control_claimed;
        acpi_status status;

        if (!acpi_gbl_FADT.cst_control || cst_control_claimed)
                return true;

        status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                    acpi_gbl_FADT.cst_control, 8);
        if (ACPI_FAILURE(status)) {
                pr_warn("ACPI: Failed to claim processor _CST control\n");
                return false;
        }

        cst_control_claimed = true;
        return true;
}
EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);

/**
 * acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
 * @handle: ACPI handle of the processor object containing the _CST.
 * @cpu: The numeric ID of the target CPU.
 * @info: Object to write the C-states information into.
 *
 * Extract the C-state information for the given CPU from the output of the
 * _CST control method under the corresponding ACPI processor object (or
 * processor device object) and populate @info with it.
 *
 * If any ACPI_ADR_SPACE_FIXED_HARDWARE C-states are found, invoke
 * acpi_processor_ffh_cstate_probe() to verify them and update the
 * cpu_cstate_entry data for @cpu.
 */
int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
                                struct acpi_processor_power *info)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;
        acpi_status status;
        u64 count;
        int last_index = 0;
        int i, ret = 0;

        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                acpi_handle_debug(handle, "No _CST\n");
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements. */
        if (!cst || cst->type != ACPI_TYPE_PACKAGE || cst->package.count < 2) {
                acpi_handle_warn(handle, "Invalid _CST output\n");
                ret = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate the number of C-states. */
        if (count < 1 || count != cst->package.count - 1) {
                acpi_handle_warn(handle, "Inconsistent _CST data\n");
                ret = -EFAULT;
                goto end;
        }

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                /*
                 * If there is not enough space for all C-states, skip the
                 * excess ones and log a warning.
                 */
                if (last_index >= ACPI_PROCESSOR_MAX_POWER - 1) {
                        acpi_handle_warn(handle,
                                         "No room for more idle states (limit: %d)\n",
                                         ACPI_PROCESSOR_MAX_POWER - 1);
                        break;
                }

                memset(&cx, 0, sizeof(cx));

                element = &cst->package.elements[i];
                if (element->type != ACPI_TYPE_PACKAGE) {
                        acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
                                         i, element->type);
                        continue;
                }

                if (element->package.count != 4) {
                        acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
                                         i, element->package.count);
                        continue;
                }

                obj = &element->package.elements[0];

                if (obj->type != ACPI_TYPE_BUFFER) {
                        acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                obj = &element->package.elements[1];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                cx.type = obj->integer.value;
                /*
                 * There are known cases in which the _CST output does not
                 * contain C1, so if the type of the first state found is not
                 * C1, leave an empty slot for C1 to be filled in later.
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        last_index = 1;

                cx.address = reg->address;
                cx.index = last_index + 1;

                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (!acpi_processor_ffh_cstate_probe(cpu, &cx, reg)) {
                                /*
                                 * In the majority of cases _CST describes C1 as
                                 * a FIXED_HARDWARE C-state, but if the command
                                 * line forbids using MWAIT, use CSTATE_HALT for
                                 * C1 regardless.
                                 */
                                if (cx.type == ACPI_STATE_C1 &&
                                    boot_option_idle_override == IDLE_NOMWAIT) {
                                        cx.entry_method = ACPI_CSTATE_HALT;
                                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                                } else {
                                        cx.entry_method = ACPI_CSTATE_FFH;
                                }
                        } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * In the special case of C1, FIXED_HARDWARE can
                                 * be handled by executing the HLT instruction.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        } else {
                                acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
                                                 i);
                                continue;
                        }
                } else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                        cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
                                 cx.address);
                } else {
                        acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
                                         i, reg->space_id);
                        continue;
                }

                if (cx.type == ACPI_STATE_C1)
                        cx.valid = 1;

                obj = &element->package.elements[2];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                cx.latency = obj->integer.value;

                obj = &element->package.elements[3];
                if (obj->type != ACPI_TYPE_INTEGER) {
                        acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
                                         i, obj->type);
                        continue;
                }

                memcpy(&info->states[++last_index], &cx, sizeof(cx));
        }

        acpi_handle_info(handle, "Found %d idle states\n", last_index);

        info->count = last_index;

end:
        kfree(buffer.pointer);

        return ret;
}
EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */