// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/visorbus.h>

#include "visorbus_private.h"

/* {72120008-4AAB-11DC-8530-444553544200} */
#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
                                   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)

static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;

#define POLLJIFFIES_CONTROLVM_FAST 1
#define POLLJIFFIES_CONTROLVM_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define UNISYS_VISOR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_VISOR_ID_EBX 0x73696e55
#define UNISYS_VISOR_ID_ECX 0x70537379
#define UNISYS_VISOR_ID_EDX 0x34367261
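/*
 * Each ID word above is four ASCII characters stored little-endian:
 * 0x73696e55 is "Unis", 0x70537379 is "ysSp" and 0x34367261 is "ar64".
 */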
/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
 * to slow polling mode. As soon as we get a controlvm message, we switch back
 * to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
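/*
 * struct parser_context - a buffered copy of a controlvm message payload
 * @allocbytes:      total bytes allocated for this context.
 * @param_bytes:     size of the buffered payload, in bytes.
 * @curr:            read cursor into the payload.
 * @bytes_remaining: payload bytes not yet consumed via @curr.
 * @byte_stream:     true once the payload has been copied in from guest
 *                   memory.
 * @data:            the payload itself, starting with its parameters header.
 *
 * (Field descriptions are inferred from the parser_*() helpers in this file.)
 */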
struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        struct visor_controlvm_parameters_header data;
};
/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
#define VMCALL_CONTROLVM_ADDR 0x0501

enum vmcall_result {
        VMCALL_RESULT_SUCCESS = 0,
        VMCALL_RESULT_INVALID_PARAM = 1,
        VMCALL_RESULT_DATA_UNAVAILABLE = 2,
        VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
        VMCALL_RESULT_DEVICE_ERROR = 4,
        VMCALL_RESULT_DEVICE_NOT_READY = 5
};

/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
 *                                          parameters to the
 *                                          VMCALL_CONTROLVM_ADDR interface.
 * @address:       The guest-relative physical address of the ControlVm
 *                 channel. This VMCall fills this in with the appropriate
 *                 address. Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *                 fills this in with the appropriate size. Contents provided
 *                 by this VMCALL (OUT).
 * @unused:        Unused bytes in the 64-bit aligned struct.
 */
struct vmcall_io_controlvm_addr_params {
        u64 address;
        u32 channel_bytes;
        u8 unused[4];
} __packed;
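/*
 * The struct above is exactly 16 bytes (8 + 4 + 4), so the packed layout
 * stays 64-bit aligned for the VMCALL interface.
 */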
struct visorchipset_device {
        struct acpi_device *acpi_device;
        unsigned long poll_jiffies;
        /* when we got our last controlvm message */
        unsigned long most_recent_message_jiffies;
        struct delayed_work periodic_controlvm_work;
        struct visorchannel *controlvm_channel;
        unsigned long controlvm_payload_bytes_buffered;
        /*
         * The following variables are used to handle the scenario where we
         * are unable to offload the payload from a controlvm message due to
         * memory requirements. In this scenario, we simply stash the
         * controlvm message, then attempt to process it again the next time
         * controlvm_periodic_work() runs.
         */
        struct controlvm_message controlvm_pending_msg;
        bool controlvm_pending_msg_valid;
        struct vmcall_io_controlvm_addr_params controlvm_params;
};
static struct visorchipset_device *chipset_dev;
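/*
 * struct parahotplug_request - a stashed DEVICE_CHANGESTATE message awaiting
 *                              a response from the udev script
 * @list:       links the request into parahotplug_request_list.
 * @id:         unique id used to match the request to its /sys response.
 * @expiration: time (in jiffies) after which the request times out.
 * @msg:        the CONTROLVM message to respond to once the script finishes.
 *
 * (Field descriptions are inferred from the parahotplug_*() helpers below.)
 */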
struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};
/* sysfs show/store accessors for fields of the controlvm channel */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action = 0;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         tool_action),
                                &tool_action, sizeof(u8));
        if (err)
                return err;
        return sprintf(buf, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int err;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        err = visorchannel_write(chipset_dev->controlvm_channel,
                                 offsetof(struct visor_controlvm_channel,
                                          tool_action),
                                 &tool_action, sizeof(u8));
        if (err)
                return err;
        return count;
}
static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_visor_indication efi_visor_indication;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         efi_visor_ind),
                                &efi_visor_indication,
                                sizeof(struct efi_visor_indication));
        if (err)
                return err;
        return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, err;
        /* zero-init so no uninitialized stack bytes reach the channel */
        struct efi_visor_indication efi_visor_indication = {};

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_visor_indication.boot_to_tool = val;
        err = visorchannel_write(chipset_dev->controlvm_channel,
                                 offsetof(struct visor_controlvm_channel,
                                          efi_visor_ind),
                                 &efi_visor_indication,
                                 sizeof(struct efi_visor_indication));
        if (err)
                return err;
        return count;
}
static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error = 0;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         installation_error),
                                &error, sizeof(u32));
        if (err)
                return err;
        return sprintf(buf, "%u\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int err;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        err = visorchannel_write(chipset_dev->controlvm_channel,
                                 offsetof(struct visor_controlvm_channel,
                                          installation_error),
                                 &error, sizeof(u32));
        if (err)
                return err;
        return count;
}
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id = 0;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         installation_text_id),
                                &text_id, sizeof(u32));
        if (err)
                return err;
        return sprintf(buf, "%u\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int err;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        err = visorchannel_write(chipset_dev->controlvm_channel,
                                 offsetof(struct visor_controlvm_channel,
                                          installation_text_id),
                                 &text_id, sizeof(u32));
        if (err)
                return err;
        return count;
}
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps = 0;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         installation_remaining_steps),
                                &remaining_steps, sizeof(u16));
        if (err)
                return err;
        return sprintf(buf, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int err;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        err = visorchannel_write(chipset_dev->controlvm_channel,
                                 offsetof(struct visor_controlvm_channel,
                                          installation_remaining_steps),
                                 &remaining_steps, sizeof(u16));
        if (err)
                return err;
        return count;
}
static DEVICE_ATTR_RW(remaining_steps);
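/*
 * The attributes above surface under the "install" group of the visorchipset
 * device, e.g. /sys/devices/platform/visorchipset/install/textid (device path
 * as given in the parahotplug comment further down).
 */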
static void controlvm_init_response(struct controlvm_message *msg,
                                    struct controlvm_message_header *msg_hdr,
                                    int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static int controlvm_respond_chipset_init(
                                struct controlvm_message_header *msg_hdr,
                                int response,
                                enum visor_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        return visorchannel_signalinsert(chipset_dev->controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static int chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum visor_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;
        int res = 0;

        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ALREADY_DONE;
                res = -EIO;
                goto out_respond;
        }
        chipset_inited = 1;
        /*
         * Set features to indicate we support parahotplug (if Command also
         * supports it). Set the "reply" bit so Command knows this is a
         * features-aware driver.
         */
        features = inmsg->cmd.init_chipset.features &
                   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
        features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
        if (inmsg->hdr.flags.response_expected)
                res = controlvm_respond_chipset_init(&inmsg->hdr, rc,
                                                     features);
        return res;
}

static int controlvm_respond(struct controlvm_message_header *msg_hdr,
                             int response, struct visor_segment_state *state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return -EINVAL;

        if (state) {
                outmsg.cmd.device_change_state.state = *state;
                outmsg.cmd.device_change_state.flags.phys_device = 1;
        }
        return visorchannel_signalinsert(chipset_dev->controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};
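/*
 * save_crash_message() - stash a bus- or device-create message in the
 * controlvm channel's saved-crash-message area, so that a kdump kernel can
 * replay it later (see setup_crash_devices_work_queue()).
 */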
static int save_crash_message(struct controlvm_message *msg,
                              enum crash_obj_type cr_type)
{
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;
        int err;

        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         saved_crash_message_count),
                                &local_crash_msg_count, sizeof(u16));
        if (err) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read message count\n");
                return err;
        }
        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "invalid number of messages\n");
                return -EIO;
        }
        err = visorchannel_read(chipset_dev->controlvm_channel,
                                offsetof(struct visor_controlvm_channel,
                                         saved_crash_message_offset),
                                &local_crash_msg_offset, sizeof(u32));
        if (err) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read offset\n");
                return err;
        }
        switch (cr_type) {
        case CRASH_DEV:
                local_crash_msg_offset += sizeof(struct controlvm_message);
                err = visorchannel_write(chipset_dev->controlvm_channel,
                                         local_crash_msg_offset, msg,
                                         sizeof(struct controlvm_message));
                if (err) {
                        dev_err(&chipset_dev->acpi_device->dev,
                                "failed to write dev msg\n");
                        return err;
                }
                break;
        case CRASH_BUS:
                err = visorchannel_write(chipset_dev->controlvm_channel,
                                         local_crash_msg_offset, msg,
                                         sizeof(struct controlvm_message));
                if (err) {
                        dev_err(&chipset_dev->acpi_device->dev,
                                "failed to write bus msg\n");
                        return err;
                }
                break;
        default:
                dev_err(&chipset_dev->acpi_device->dev,
                        "Invalid crash_obj_type\n");
                break;
        }
        return 0;
}
static int controlvm_responder(enum controlvm_id cmd_id,
                               struct controlvm_message_header *pending_msg_hdr,
                               int response)
{
        if (pending_msg_hdr->id != (u32)cmd_id)
                return -EINVAL;

        return controlvm_respond(pending_msg_hdr, response, NULL);
}

static int device_changestate_responder(enum controlvm_id cmd_id,
                                        struct visor_device *p, int response,
                                        struct visor_segment_state state)
{
        struct controlvm_message outmsg;

        if (p->pending_msg_hdr->id != cmd_id)
                return -EINVAL;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
        outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
        outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
        outmsg.cmd.device_change_state.state = state;
        return visorchannel_signalinsert(chipset_dev->controlvm_channel,
                                         CONTROLVM_QUEUE_REQUEST, &outmsg);
}

static int visorbus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr;
        u32 bus_no = cmd->create_bus.bus_no;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && bus_info->state.created == 1) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed %s: already exists\n", __func__);
                err = -EEXIST;
                goto err_respond;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                err = -ENOMEM;
                goto err_respond;
        }
        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
        if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
                err = save_crash_message(inmsg, CRASH_BUS);
                if (err)
                        goto err_free_bus_info;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_free_bus_info;
                }
                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }
        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           GFP_KERNEL,
                                           &cmd->create_bus.bus_data_type_guid,
                                           false);
        if (!visorchannel) {
                err = -ENOMEM;
                goto err_free_pending_msg;
        }
        bus_info->visorchannel = visorchannel;
        /* Response will be handled by visorbus_create_instance on success */
        err = visorbus_create_instance(bus_info);
        if (err)
                goto err_destroy_channel;
        return 0;

err_destroy_channel:
        visorchannel_destroy(visorchannel);

err_free_pending_msg:
        kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
        kfree(bus_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int visorbus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_header *pmsg_hdr;
        u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
        struct visor_device *bus_info;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                err = -ENOENT;
                goto err_respond;
        }
        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EEXIST;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }
                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }
        /* Response will be handled by visorbus_remove_instance */
        visorbus_remove_instance(bus_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
static const guid_t *parser_id_get(struct parser_context *ctx)
{
        return &ctx->data.id;
}

static void *parser_string_get(u8 *pscan, int nscan)
{
        int value_length;
        void *value;

        if (nscan == 0)
                return NULL;

        value_length = strnlen((char *)pscan, nscan);
        value = kzalloc(value_length + 1, GFP_KERNEL);
        if (!value)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        return value;
}
static void *parser_name_get(struct parser_context *ctx)
{
        struct visor_controlvm_parameters_header *phdr;

        phdr = &ctx->data;
        if ((unsigned long)phdr->name_offset +
            (unsigned long)phdr->name_length > ctx->param_bytes)
                return NULL;

        ctx->curr = (u8 *)phdr + phdr->name_offset;
        ctx->bytes_remaining = phdr->name_length;
        return parser_string_get(ctx->curr, phdr->name_length);
}
static int visorbus_configure(struct controlvm_message *inmsg,
                              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int err = 0;

        bus_no = cmd->configure_bus.bus_no;
        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                err = -EINVAL;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                err = -EINVAL;
                goto err_respond;
        }
        if (bus_info->pending_msg_hdr) {
                err = -EIO;
                goto err_respond;
        }
        err = visorchannel_set_clientpartition(bus_info->visorchannel,
                                               cmd->configure_bus.guest_handle);
        if (err)
                goto err_respond;

        if (parser_ctx) {
                const guid_t *partition_guid = parser_id_get(parser_ctx);

                guid_copy(&bus_info->partition_guid, partition_guid);
                bus_info->name = parser_name_get(parser_ctx);
        }
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return 0;

err_respond:
        dev_err(&chipset_dev->acpi_device->dev,
                "%s exited with err: %d\n", __func__, err);
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}

static int visorbus_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int err;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to get bus by id: %d\n", bus_no);
                err = -ENODEV;
                goto err_respond;
        }
        if (bus_info->state.created == 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "bus not created, id: %d\n", bus_no);
                err = -EINVAL;
                goto err_respond;
        }
        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && dev_info->state.created == 1) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "device already exists: %d/%d\n", bus_no, dev_no);
                err = -EEXIST;
                goto err_respond;
        }
        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                err = -ENOMEM;
                goto err_respond;
        }
        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
        dev_info->device.parent = &bus_info->device;
        visorchannel = visorchannel_create(cmd->create_device.channel_addr,
                                           GFP_KERNEL,
                                           &cmd->create_device.data_type_guid,
                                           true);
        if (!visorchannel) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to create visorchannel: %d/%d\n",
                        bus_no, dev_no);
                err = -ENOMEM;
                goto err_free_dev_info;
        }
        dev_info->visorchannel = visorchannel;
        guid_copy(&dev_info->channel_type_guid,
                  &cmd->create_device.data_type_guid);
        if (guid_equal(&cmd->create_device.data_type_guid,
                       &visor_vhba_channel_guid)) {
                err = save_crash_message(inmsg, CRASH_DEV);
                if (err)
                        goto err_destroy_visorchannel;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_destroy_visorchannel;
                }
                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }
        /* create_visor_device will send response */
        err = create_visor_device(dev_info);
        if (err)
                goto err_destroy_visorchannel;
        return 0;

err_destroy_visorchannel:
        visorchannel_destroy(visorchannel);

err_free_dev_info:
        kfree(dev_info);

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct visor_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int err = 0;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (dev_info->state.created == 0) {
                err = -EINVAL;
                goto err_respond;
        }
        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EIO;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }
                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }
        if (state.alive == segment_state_running.alive &&
            state.operating == segment_state_running.operating)
                /* Response will be sent from visorchipset_device_resume */
                err = visorchipset_device_resume(dev_info);
        /* ServerNotReady / ServerLost / SegmentStateStandby */
        else if (state.alive == segment_state_standby.alive &&
                 state.operating == segment_state_standby.operating)
                /*
                 * Technically this is the standby case, where the server is
                 * lost. Response will be sent from
                 * visorchipset_device_pause.
                 */
                err = visorchipset_device_pause(dev_info);
        if (err)
                goto err_respond;
        return 0;

err_respond:
        dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        struct controlvm_message_header *pmsg_hdr;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int err;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                err = -ENODEV;
                goto err_respond;
        }
        if (dev_info->state.created == 0) {
                err = -EINVAL;
                goto err_respond;
        }
        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                err = -EIO;
                goto err_respond;
        }
        if (inmsg->hdr.flags.response_expected == 1) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        err = -ENOMEM;
                        goto err_respond;
                }
                memcpy(pmsg_hdr, &inmsg->hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }
        kfree(dev_info->name);
        remove_visor_device(dev_info);
        return 0;

err_respond:
        if (inmsg->hdr.flags.response_expected == 1)
                controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
        return err;
}
/*
 * The general parahotplug flow works as follows. The visorchipset receives
 * a DEVICE_CHANGESTATE message from Command specifying a physical device
 * to enable or disable. The CONTROLVM message handler calls
 * parahotplug_process_message, which then adds the message to a global list
 * and kicks off a udev event which causes a user-level script to enable or
 * disable the specified device. The udev script then writes to
 * /sys/devices/platform/visorchipset/parahotplug, which causes the
 * parahotplug store functions to get called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000
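/*
 * Illustrative round trip for a disable request (id value hypothetical):
 * parahotplug_request_kickoff() raises a udev event carrying, e.g.,
 * VISOR_PARAHOTPLUG=1 and VISOR_PARAHOTPLUG_ID=42; once the script has
 * disabled the device, it acknowledges with
 *   echo 42 > /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 * which lands in devicedisabled_store() and completes the stashed message
 * via parahotplug_request_complete().
 */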
/*
 * parahotplug_next_id() - generate a unique int to match an outstanding
 *                         CONTROLVM message with a udev script /sys
 *                         response
 *
 * Return: a unique integer value
 */
static int parahotplug_next_id(void)
{
        static atomic_t id = ATOMIC_INIT(0);

        return atomic_inc_return(&id);
}

/*
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long parahotplug_next_expiration(void)
{
        return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}

/*
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *parahotplug_request_create(
                                                struct controlvm_message *msg)
{
        struct parahotplug_request *req;

        req = kmalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        req->id = parahotplug_next_id();
        req->expiration = parahotplug_next_expiration();
        req->msg = *msg;
        return req;
}

/*
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
        kfree(req);
}
static LIST_HEAD(parahotplug_request_list);
/* lock protecting parahotplug_request_list */
static DEFINE_SPINLOCK(parahotplug_request_list_lock);
/*
 * parahotplug_request_complete() - mark request as complete
 * @id:     the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the /sys handler, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int parahotplug_request_complete(int id, u16 active)
{
        struct list_head *pos;
        struct list_head *tmp;
        struct parahotplug_request *req;

        spin_lock(&parahotplug_request_list_lock);
        /* Look for a request matching "id". */
        list_for_each_safe(pos, tmp, &parahotplug_request_list) {
                req = list_entry(pos, struct parahotplug_request, list);
                if (req->id == id) {
                        /*
                         * Found a match. Remove it from the list and
                         * respond.
                         */
                        list_del(pos);
                        spin_unlock(&parahotplug_request_list_lock);
                        req->msg.cmd.device_change_state.state.active = active;
                        if (req->msg.hdr.flags.response_expected)
                                controlvm_respond(
                                    &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
                                    &req->msg.cmd.device_change_state.state);
                        parahotplug_request_destroy(req);
                        return 0;
                }
        }
        spin_unlock(&parahotplug_request_list_lock);
        return -EINVAL;
}

/*
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        unsigned int id;
        int err;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        err = parahotplug_request_complete(id, 0);
        if (err < 0)
                return err;
        return count;
}
static DEVICE_ATTR_WO(devicedisabled);

/*
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        unsigned int id;

        if (kstrtouint(buf, 10, &id))
                return -EINVAL;

        parahotplug_request_complete(id, 1);
        return count;
}
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static const struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_parahotplug_group,
        NULL
};
/*
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user-level script to do the disable/enable
 * specified in the parahotplug_request.
 */
static int parahotplug_request_kickoff(struct parahotplug_request *req)
{
        struct controlvm_message_packet *cmd = &req->msg.cmd;
        char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
             env_func[40];
        char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
                         env_func, NULL
        };

        sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
        sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id);
        sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d",
                cmd->device_change_state.state.active);
        sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d",
                cmd->device_change_state.bus_no);
        sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d",
                cmd->device_change_state.dev_no >> 3);
        sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
                cmd->device_change_state.dev_no & 0x7);
        return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
                                  KOBJ_CHANGE, envp);
}
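/*
 * Note: device_change_state.dev_no, as unpacked above, encodes a PCI
 * device/function pair: the low 3 bits are the function, the remaining bits
 * the device number.
 */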
/*
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static int parahotplug_process_message(struct controlvm_message *inmsg)
{
        struct parahotplug_request *req;
        int err;

        req = parahotplug_request_create(inmsg);
        if (!req)
                return -ENOMEM;

        /*
         * For enable messages, just respond with success right away; we
         * don't need to wait to see if the enable was successful.
         */
        if (inmsg->cmd.device_change_state.state.active) {
                err = parahotplug_request_kickoff(req);
                if (err)
                        goto err_respond;
                controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
                                  &inmsg->cmd.device_change_state.state);
                parahotplug_request_destroy(req);
                return 0;
        }
        /*
         * For disable messages, add the request to the request list before
         * kicking off the udev script. It won't get responded to until the
         * script has indicated it's done.
         */
        spin_lock(&parahotplug_request_list_lock);
        list_add_tail(&req->list, &parahotplug_request_list);
        spin_unlock(&parahotplug_request_list_lock);

        err = parahotplug_request_kickoff(req);
        if (err)
                goto err_respond;
        return 0;

err_respond:
        controlvm_respond(&inmsg->hdr, err,
                          &inmsg->cmd.device_change_state.state);
        return err;
}
/*
 * chipset_ready_uevent() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
        int res;

        res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
                             KOBJ_ONLINE);
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, res, NULL);
        return res;
}

/*
 * chipset_selftest_uevent() - sends chipset_selftest action
 *
 * Send a KOBJ_CHANGE event with SPARSP_SELFTEST=1 for
 * DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
        char env_selftest[20];
        char *envp[] = { env_selftest, NULL };
        int res;

        sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
        res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
                                 KOBJ_CHANGE, envp);
        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, res, NULL);
        return res;
}

/*
 * chipset_notready_uevent() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
        int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
                                 KOBJ_OFFLINE);

        if (msg_hdr->flags.response_expected)
                controlvm_respond(msg_hdr, res, NULL);
        return res;
}
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
        int result = 0;
        unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
        unsigned long reg_ebx;
        unsigned long reg_ecx;

        reg_ebx = param & 0xFFFFFFFF;
        reg_ecx = param >> 32;
        cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
        if (!(cpuid_ecx & 0x80000000))
                return -EPERM;
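
        /*
         * CPUID leaf 1 ECX bit 31 (tested above) is the "hypervisor
         * present" bit; the .byte sequence below (0x0f 0x01 0xc1) is the
         * VMCALL instruction, issued with the tuple in EAX and the
         * parameter split across EBX/ECX.
         */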
        __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
                             "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
        if (result)
                goto error;
        return 0;

/* Need to convert from VMCALL error codes to Linux */
error:
        switch (result) {
        case VMCALL_RESULT_INVALID_PARAM:
                return -EINVAL;
        case VMCALL_RESULT_DATA_UNAVAILABLE:
                return -ENODEV;
        default:
                return -EFAULT;
        }
}
static int controlvm_channel_create(struct visorchipset_device *dev)
{
        struct visorchannel *chan;
        u64 addr;
        int err;

        err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
                            virt_to_phys(&dev->controlvm_params));
        if (err)
                return err;

        addr = dev->controlvm_params.address;
        chan = visorchannel_create(addr, GFP_KERNEL,
                                   &visor_controlvm_channel_guid, true);
        if (!chan)
                return -ENOMEM;

        dev->controlvm_channel = chan;
        return 0;
}
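/*
 * setup_crash_devices_work_queue() - work function used when booting a kdump
 * kernel: sends CONTROLVM_CHIPSET_INIT, then reads the bus- and device-create
 * messages saved by save_crash_message() out of the controlvm channel and
 * replays them so the crash kernel can reach its storage device.
 */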
static void setup_crash_devices_work_queue(struct work_struct *work)
{
        struct controlvm_message local_crash_bus_msg;
        struct controlvm_message local_crash_dev_msg;
        struct controlvm_message msg = {
                .hdr.id = CONTROLVM_CHIPSET_INIT,
                .cmd.init_chipset = {
                        .bus_count = 23,
                        .switch_count = 0,
                },
        };
        u32 local_crash_msg_offset;
        u16 local_crash_msg_count;

        /* send init chipset msg */
        chipset_init(&msg);
        /* get saved message count */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              offsetof(struct visor_controlvm_channel,
                                       saved_crash_message_count),
                              &local_crash_msg_count, sizeof(u16)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }
        if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
                dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
                return;
        }
        /* get saved crash message offset */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              offsetof(struct visor_controlvm_channel,
                                       saved_crash_message_offset),
                              &local_crash_msg_offset, sizeof(u32)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }
        /* read create device message for storage bus offset */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              local_crash_msg_offset,
                              &local_crash_bus_msg,
                              sizeof(struct controlvm_message)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }
        /* read create device message for storage device */
        if (visorchannel_read(chipset_dev->controlvm_channel,
                              local_crash_msg_offset +
                              sizeof(struct controlvm_message),
                              &local_crash_dev_msg,
                              sizeof(struct controlvm_message)) < 0) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "failed to read channel\n");
                return;
        }
        /* reuse IOVM create bus message */
        if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "no valid create_bus message\n");
                return;
        }
        visorbus_create(&local_crash_bus_msg);
        /* reuse create device message for storage device */
        if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
                dev_err(&chipset_dev->acpi_device->dev,
                        "no valid create_device message\n");
                return;
        }
        visorbus_device_create(&local_crash_dev_msg);
}
void visorbus_response(struct visor_device *bus_info, int response,
                       int controlvm_id)
{
        if (!bus_info->pending_msg_hdr)
                return;

        controlvm_responder(controlvm_id, bus_info->pending_msg_hdr,
                            response);
        kfree(bus_info->pending_msg_hdr);
        bus_info->pending_msg_hdr = NULL;
}

void visorbus_device_changestate_response(struct visor_device *dev_info,
                                          int response,
                                          struct visor_segment_state state)
{
        if (!dev_info->pending_msg_hdr)
                return;

        device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
                                     response, state);
        kfree(dev_info->pending_msg_hdr);
        dev_info->pending_msg_hdr = NULL;
}

static void parser_done(struct parser_context *ctx)
{
        chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}
static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
                                                 bool *retry)
{
        unsigned long allocbytes;
        struct parser_context *ctx;
        void *mapping;

        *retry = false;
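
        /*
         * The payload is copied straight into ctx->data, so the allocation
         * is the payload size plus the context overhead that precedes the
         * embedded parameters header, plus one byte (below).
         */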
        /* alloc an extra byte to ensure payload is \0 terminated */
        allocbytes = (unsigned long)bytes + 1 +
                     (sizeof(struct parser_context) -
                      sizeof(struct visor_controlvm_parameters_header));
        if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
             MAX_CONTROLVM_PAYLOAD_BYTES) {
                *retry = true;
                return NULL;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL);
        if (!ctx) {
                *retry = true;
                return NULL;
        }
        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        mapping = memremap(addr, bytes, MEMREMAP_WB);
        if (!mapping)
                goto err_finish_ctx;

        memcpy(&ctx->data, mapping, bytes);
        memunmap(mapping);
        ctx->byte_stream = true;
        chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
        return ctx;

err_finish_ctx:
        kfree(ctx);
        return NULL;
}
/*
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *     0       - Successfully processed the message
 *     -EAGAIN - ControlVM message was not processed and should be retried
 *               before reading the next controlvm message; a scenario where
 *               this can occur is when we need to throttle the allocation of
 *               memory in which to copy out controlvm payload data.
 *     < 0     - error: ControlVM message was processed but an error occurred.
 */
static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
        struct controlvm_message_packet *cmd = &inmsg.cmd;
        u64 parm_addr;
        u32 parm_bytes;
        struct parser_context *parser_ctx = NULL;
        struct controlvm_message ackmsg;
        int err = 0;

        /* create parsing context if necessary */
        parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
        parm_bytes = inmsg.hdr.payload_bytes;
        /*
         * Parameter and channel addresses within test messages actually lie
         * within our OS-controlled memory. We need to know that, because it
         * makes a difference in how we compute the virtual address.
         */
        if (parm_bytes) {
                bool retry;

                parser_ctx = parser_init_stream(parm_addr, parm_bytes,
                                                &retry);
                if (!parser_ctx && retry)
                        return -EAGAIN;
        }
        controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
        err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
                                        CONTROLVM_QUEUE_ACK, &ackmsg);
        if (err)
                return err;

        switch (inmsg.hdr.id) {
        case CONTROLVM_CHIPSET_INIT:
                err = chipset_init(&inmsg);
                break;
        case CONTROLVM_BUS_CREATE:
                err = visorbus_create(&inmsg);
                break;
        case CONTROLVM_BUS_DESTROY:
                err = visorbus_destroy(&inmsg);
                break;
        case CONTROLVM_BUS_CONFIGURE:
                err = visorbus_configure(&inmsg, parser_ctx);
                break;
        case CONTROLVM_DEVICE_CREATE:
                err = visorbus_device_create(&inmsg);
                break;
        case CONTROLVM_DEVICE_CHANGESTATE:
                if (cmd->device_change_state.flags.phys_device) {
                        err = parahotplug_process_message(&inmsg);
                } else {
                        /*
                         * save the hdr and cmd structures for later use when
                         * sending back the response to Command
                         */
                        err = visorbus_device_changestate(&inmsg);
                        break;
                }
                break;
        case CONTROLVM_DEVICE_DESTROY:
                err = visorbus_device_destroy(&inmsg);
                break;
        case CONTROLVM_DEVICE_CONFIGURE:
                /* no op; just send a response saying we passed */
                if (inmsg.hdr.flags.response_expected)
                        controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
                                          NULL);
                break;
        case CONTROLVM_CHIPSET_READY:
                err = chipset_ready_uevent(&inmsg.hdr);
                break;
        case CONTROLVM_CHIPSET_SELFTEST:
                err = chipset_selftest_uevent(&inmsg.hdr);
                break;
        case CONTROLVM_CHIPSET_STOP:
                err = chipset_notready_uevent(&inmsg.hdr);
                break;
        default:
                err = -ENOMSG;
                if (inmsg.hdr.flags.response_expected)
                        controlvm_respond(&inmsg.hdr,
                                          -CONTROLVM_RESP_ID_UNKNOWN, NULL);
                break;
        }
        if (parser_ctx) {
                parser_done(parser_ctx);
                parser_ctx = NULL;
        }
        return err;
}
/*
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: 0 if a valid message was retrieved or -error
 */
static int read_controlvm_event(struct controlvm_message *msg)
{
        int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
                                            CONTROLVM_QUEUE_EVENT, msg);

        if (err)
                return err;

        /* got a message */
        if (msg->hdr.flags.test_message == 1)
                return -EINVAL;
        return 0;
}
/*
 * parahotplug_process_list() - remove any request from the list that's been
 *                              on there too long and respond with an error
 */
static void parahotplug_process_list(void)
{
        struct list_head *pos;
        struct list_head *tmp;

        spin_lock(&parahotplug_request_list_lock);
        list_for_each_safe(pos, tmp, &parahotplug_request_list) {
                struct parahotplug_request *req =
                        list_entry(pos, struct parahotplug_request, list);

                if (!time_after_eq(jiffies, req->expiration))
                        continue;

                list_del(pos);
                if (req->msg.hdr.flags.response_expected)
                        controlvm_respond(
                                &req->msg.hdr,
                                CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
                                &req->msg.cmd.device_change_state.state);
                parahotplug_request_destroy(req);
        }
        spin_unlock(&parahotplug_request_list_lock);
}
static void controlvm_periodic_work(struct work_struct *work)
{
        struct controlvm_message inmsg;
        int count = 0;
        int err;

        /* Drain the RESPONSE queue until it is empty */
        do {
                err = visorchannel_signalremove(
                                chipset_dev->controlvm_channel,
                                CONTROLVM_QUEUE_RESPONSE, &inmsg);
        } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
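
        /*
         * An empty queue is reported as -EAGAIN (inferred from this control
         * flow); any other error means the channel is unusable, so skip
         * message processing for this cycle.
         */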
        if (err != -EAGAIN)
                goto schedule_out;

        if (chipset_dev->controlvm_pending_msg_valid) {
                /*
                 * we throttled processing of a prior msg, so try to process
                 * it again rather than reading a new one
                 */
                inmsg = chipset_dev->controlvm_pending_msg;
                chipset_dev->controlvm_pending_msg_valid = false;
                err = 0;
        } else {
                err = read_controlvm_event(&inmsg);
        }
        while (!err) {
                chipset_dev->most_recent_message_jiffies = jiffies;
                err = handle_command(inmsg,
                                     visorchannel_get_physaddr
                                     (chipset_dev->controlvm_channel));
                if (err == -EAGAIN) {
                        chipset_dev->controlvm_pending_msg = inmsg;
                        chipset_dev->controlvm_pending_msg_valid = true;
                        break;
                }
                err = read_controlvm_event(&inmsg);
        }
        /* parahotplug_worker */
        parahotplug_process_list();

        /*
         * The controlvm messages are sent in bulk. If we start receiving
         * messages, we want the polling to be fast. If we do not receive
         * any message for MIN_IDLE_SECONDS, we can slow down the polling.
         */
schedule_out:
        if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
                                (HZ * MIN_IDLE_SECONDS))) {
                /*
                 * it's been longer than MIN_IDLE_SECONDS since we processed
                 * our last controlvm message; slow down the polling
                 */
                if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
                        chipset_dev->poll_jiffies =
                                                POLLJIFFIES_CONTROLVM_SLOW;
        } else {
                if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
                        chipset_dev->poll_jiffies =
                                                POLLJIFFIES_CONTROLVM_FAST;
        }
        schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
                              chipset_dev->poll_jiffies);
}
static int visorchipset_init(struct acpi_device *acpi_device)
{
        int err = -ENOMEM;
        struct visorchannel *controlvm_channel;

        chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
        if (!chipset_dev)
                goto error;

        err = controlvm_channel_create(chipset_dev);
        if (err)
                goto error_free_chipset_dev;

        acpi_device->driver_data = chipset_dev;
        chipset_dev->acpi_device = acpi_device;
        chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
        err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
                                  visorchipset_dev_groups);
        if (err < 0)
                goto error_destroy_channel;

        controlvm_channel = chipset_dev->controlvm_channel;
        if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
                                 &chipset_dev->acpi_device->dev,
                                 &visor_controlvm_channel_guid,
                                 "controlvm",
                                 sizeof(struct visor_controlvm_channel),
                                 VISOR_CONTROLVM_CHANNEL_VERSIONID,
                                 VISOR_CHANNEL_SIGNATURE)) {
                err = -ENODEV;
                goto error_delete_groups;
        }

        /* if booting in a crash kernel */
        if (is_kdump_kernel())
                INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
                                  setup_crash_devices_work_queue);
        else
                INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
                                  controlvm_periodic_work);

        chipset_dev->most_recent_message_jiffies = jiffies;
        chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
        schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
                              chipset_dev->poll_jiffies);

        err = visorbus_init();
        if (err < 0)
                goto error_cancel_work;
        return 0;

error_cancel_work:
        cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);

error_delete_groups:
        sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
                            visorchipset_dev_groups);

error_destroy_channel:
        visorchannel_destroy(chipset_dev->controlvm_channel);

error_free_chipset_dev:
        kfree(chipset_dev);

error:
        dev_err(&acpi_device->dev, "failed with error %d\n", err);
        return err;
}
static int visorchipset_exit(struct acpi_device *acpi_device)
{
        visorbus_exit();
        cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
        sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
                            visorchipset_dev_groups);
        visorchannel_destroy(chipset_dev->controlvm_channel);
        kfree(chipset_dev);
        return 0;
}

static const struct acpi_device_id unisys_device_ids[] = {
        {"PNP0A07", 0},
        {"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
        .name = "unisys_acpi",
        .class = "unisys_acpi_class",
        .owner = THIS_MODULE,
        .ids = unisys_device_ids,
        .ops = {
                .add = visorchipset_init,
                .remove = visorchipset_exit,
        },
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
static __init int visorutil_spar_detect(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                /* check the ID */
                cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
                return (ebx == UNISYS_VISOR_ID_EBX) &&
                       (ecx == UNISYS_VISOR_ID_ECX) &&
                       (edx == UNISYS_VISOR_ID_EDX);
        }
        return 0;
}
static int __init init_unisys(void)
{
        int result;

        if (!visorutil_spar_detect())
                return -ENODEV;

        result = acpi_bus_register_driver(&unisys_acpi_driver);
        if (result)
                return -ENODEV;

        pr_info("Unisys Visorchipset Driver Loaded.\n");
        return 0;
}
static void __exit exit_unisys(void)
{
        acpi_bus_unregister_driver(&unisys_acpi_driver);
}

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");