vfio_ap_ops.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
 *            Halil Pasic <pasic@linux.ibm.com>
 *            Pierre Morel <pmorel@linux.ibm.com>
 */
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);

static int match_apqn(struct device *dev, const void *data)
{
    struct vfio_ap_queue *q = dev_get_drvdata(dev);

    return (q->apqn == *(int *)(data)) ? 1 : 0;
}

/**
 * vfio_ap_get_queue: Retrieve a queue with a specific APQN from a list
 * @matrix_mdev: the associated mediated matrix
 * @apqn: The queue APQN
 *
 * Retrieve a queue with a specific APQN from the list of the
 * devices of the vfio_ap_drv.
 * Verify that the APID and the APQI are set in the matrix.
 *
 * Returns the pointer to the associated vfio_ap_queue
 */
static struct vfio_ap_queue *vfio_ap_get_queue(
                    struct ap_matrix_mdev *matrix_mdev,
                    int apqn)
{
    struct vfio_ap_queue *q;

    if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
        return NULL;
    if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
        return NULL;

    q = vfio_ap_find_queue(apqn);
    if (q)
        q->matrix_mdev = matrix_mdev;

    return q;
}

/**
 * vfio_ap_wait_for_irqclear
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeds and the IRQ bit is clear.
 * Returns if the ap_tapq function fails with an invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times, waiting 20ms between attempts.
 */
static void vfio_ap_wait_for_irqclear(int apqn)
{
    struct ap_queue_status status;
    int retry = 5;

    do {
        status = ap_tapq(apqn, NULL);
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
            if (!status.irq_enabled)
                return;
            fallthrough;
        case AP_RESPONSE_BUSY:
            msleep(20);
            break;
        case AP_RESPONSE_Q_NOT_AVAIL:
        case AP_RESPONSE_DECONFIGURED:
        case AP_RESPONSE_CHECKSTOPPED:
        default:
            WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
                      status.response_code, apqn);
            return;
        }
    } while (--retry);

    WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
              __func__, status.response_code, apqn);
}

/**
 * vfio_ap_free_aqic_resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC from the GIB when the saved ISC is valid.
 * Unpins the guest's page holding the NIB when one exists.
 * Resets saved_pfn and saved_isc to invalid values.
 */
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
    if (!q)
        return;
    if (q->saved_isc != VFIO_AP_ISC_INVALID &&
        !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
        kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
        q->saved_isc = VFIO_AP_ISC_INVALID;
    }
    if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
        vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
                         &q->saved_pfn, 1);
        q->saved_pfn = 0;
    }
}

/**
 * vfio_ap_irq_disable
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption. In case of success, a reset
 * in progress, or an IRQ disable command that has already proceeded, calls
 * vfio_ap_wait_for_irqclear() to wait for the IRQ bit to clear
 * and calls vfio_ap_free_aqic_resources() to free the resources associated
 * with the AP interrupt handling.
 *
 * In case the AP is busy, or a reset is in progress,
 * retries after 20ms, up to 5 times.
 *
 * Returns if the ap_aqic function fails with an invalid, deconfigured or
 * checkstopped AP.
 */
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
    struct ap_qirq_ctrl aqic_gisa = {};
    struct ap_queue_status status;
    int retries = 5;

    do {
        status = ap_aqic(q->apqn, aqic_gisa, NULL);
        switch (status.response_code) {
        case AP_RESPONSE_OTHERWISE_CHANGED:
        case AP_RESPONSE_NORMAL:
            vfio_ap_wait_for_irqclear(q->apqn);
            goto end_free;
        case AP_RESPONSE_RESET_IN_PROGRESS:
        case AP_RESPONSE_BUSY:
            msleep(20);
            break;
        case AP_RESPONSE_Q_NOT_AVAIL:
        case AP_RESPONSE_DECONFIGURED:
        case AP_RESPONSE_CHECKSTOPPED:
        case AP_RESPONSE_INVALID_ADDRESS:
        default:
            /* All cases in default means AP not operational */
            WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
                      status.response_code);
            goto end_free;
        }
    } while (retries--);

    WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
              status.response_code);
end_free:
    vfio_ap_free_aqic_resources(q);
    q->matrix_mdev = NULL;
    return status;
}

/**
 * vfio_ap_irq_enable: Enable interruption for an APQN
 *
 * @q: the vfio_ap_queue holding AQIC parameters
 * @isc: the guest interruption subclass
 * @nib: the guest address of the notification indicator byte
 *
 * Pin the guest's page holding the NIB (@nib).
 * Register the guest ISC with the GIB interface and retrieve the
 * host ISC to issue the host side PQAP/AQIC.
 *
 * Response.status may be set to AP_RESPONSE_INVALID_ADDRESS in case
 * vfio_pin_pages failed.
 *
 * Otherwise return the ap_queue_status returned by the ap_aqic();
 * all retry handling will be done by the guest.
 */
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
                                                 int isc,
                                                 unsigned long nib)
{
    struct ap_qirq_ctrl aqic_gisa = {};
    struct ap_queue_status status = {};
    struct kvm_s390_gisa *gisa;
    struct kvm *kvm;
    unsigned long h_nib, g_pfn, h_pfn;
    int ret;

    g_pfn = nib >> PAGE_SHIFT;
    ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
                         IOMMU_READ | IOMMU_WRITE, &h_pfn);
    switch (ret) {
    case 1:
        break;
    default:
        status.response_code = AP_RESPONSE_INVALID_ADDRESS;
        return status;
    }

    kvm = q->matrix_mdev->kvm;
    gisa = kvm->arch.gisa_int.origin;

    h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK);
    aqic_gisa.gisc = isc;
    aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
    aqic_gisa.ir = 1;
    aqic_gisa.gisa = (uint64_t)gisa >> 4;

    status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib);
    switch (status.response_code) {
    case AP_RESPONSE_NORMAL:
        /* See if we did clear older IRQ configuration */
        vfio_ap_free_aqic_resources(q);
        q->saved_pfn = g_pfn;
        q->saved_isc = isc;
        break;
    case AP_RESPONSE_OTHERWISE_CHANGED:
        /* We could not modify IRQ settings: clear new configuration */
        vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
        kvm_s390_gisc_unregister(kvm, isc);
        break;
    default:
        pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
                status.response_code);
        vfio_ap_irq_disable(q);
        break;
    }

    return status;
}

/**
 * handle_pqap: PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to one of the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
 * - AP_RESPONSE_NORMAL (0): in case of success
 * Check vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RCs.
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Returns 0 if we could handle the request inside KVM;
 * otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
    uint64_t status;
    uint16_t apqn;
    struct vfio_ap_queue *q;
    struct ap_queue_status qstatus = {
        .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
    struct ap_matrix_mdev *matrix_mdev;

    /* If we do not use the AIV facility just go to userland */
    if (!(vcpu->arch.sie_block->eca & ECA_AIV))
        return -EOPNOTSUPP;

    apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
    mutex_lock(&matrix_dev->lock);

    if (!vcpu->kvm->arch.crypto.pqap_hook)
        goto out_unlock;
    matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
                               struct ap_matrix_mdev, pqap_hook);

    q = vfio_ap_get_queue(matrix_mdev, apqn);
    if (!q)
        goto out_unlock;

    status = vcpu->run->s.regs.gprs[1];

    /* If IR bit(16) is set we enable the interrupt */
    if ((status >> (63 - 16)) & 0x01)
        qstatus = vfio_ap_irq_enable(q, status & 0x07,
                                     vcpu->run->s.regs.gprs[2]);
    else
        qstatus = vfio_ap_irq_disable(q);

out_unlock:
    memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
    vcpu->run->s.regs.gprs[1] >>= 32;
    mutex_unlock(&matrix_dev->lock);
    return 0;
}
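
/*
 * Illustrative example (not part of the driver): a guest enabling
 * interrupts for APQN 0x0001 with guest ISC 3 would issue PQAP(AQIC)
 * with, per the register layout handle_pqap() decodes above:
 *   GPR0 = 0x0000000000000001  (APQN in bits 48-63)
 *   GPR1 = bit 16 (IR) set, bits 61-63 = 3 (ISC)
 *   GPR2 = guest NIB address
 * which handle_pqap() dispatches as vfio_ap_irq_enable(q, 3, GPR2).
 */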
static void vfio_ap_matrix_init(struct ap_config_info *info,
                                struct ap_matrix *matrix)
{
    matrix->apm_max = info->apxa ? info->Na : 63;
    matrix->aqm_max = info->apxa ? info->Nd : 15;
    matrix->adm_max = info->apxa ? info->Nd : 15;
}
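
/*
 * Note added for exposition (not in the original source): with the AP
 * Extended Addressing (APXA) facility installed, the maximum adapter and
 * domain numbers come from the QCI info block (Na/Nd, up to 255); without
 * APXA the architecture limits are 64 adapters (0-63) and 16 domains
 * (0-15), hence the fallback values above.
 */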
static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
    struct ap_matrix_mdev *matrix_mdev;

    if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
        return -EPERM;

    matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
    if (!matrix_mdev) {
        atomic_inc(&matrix_dev->available_instances);
        return -ENOMEM;
    }

    matrix_mdev->mdev = mdev;
    vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
    mdev_set_drvdata(mdev, matrix_mdev);
    matrix_mdev->pqap_hook.hook = handle_pqap;
    matrix_mdev->pqap_hook.owner = THIS_MODULE;
    mutex_lock(&matrix_dev->lock);
    list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
    mutex_unlock(&matrix_dev->lock);

    return 0;
}
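
/*
 * Example lifecycle (assuming the sysfs interface described in
 * Documentation/s390/vfio-ap.rst):
 *   uuid=$(uuidgen)
 *   echo $uuid > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
 * invokes vfio_ap_mdev_create() above; writing 1 to the mdev's "remove"
 * attribute invokes vfio_ap_mdev_remove() below.
 */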
static int vfio_ap_mdev_remove(struct mdev_device *mdev)
{
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    if (matrix_mdev->kvm)
        return -EBUSY;

    mutex_lock(&matrix_dev->lock);
    vfio_ap_mdev_reset_queues(mdev);
    list_del(&matrix_mdev->node);
    mutex_unlock(&matrix_dev->lock);

    kfree(matrix_mdev);
    mdev_set_drvdata(mdev, NULL);
    atomic_inc(&matrix_dev->available_instances);

    return 0;
}

static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
    return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
}

static MDEV_TYPE_ATTR_RO(name);

static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
    return sprintf(buf, "%d\n",
                   atomic_read(&matrix_dev->available_instances));
}

static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
    return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
}

static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *vfio_ap_mdev_type_attrs[] = {
    &mdev_type_attr_name.attr,
    &mdev_type_attr_device_api.attr,
    &mdev_type_attr_available_instances.attr,
    NULL,
};

static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
    .name = VFIO_AP_MDEV_TYPE_HWVIRT,
    .attrs = vfio_ap_mdev_type_attrs,
};

static struct attribute_group *vfio_ap_mdev_type_groups[] = {
    &vfio_ap_mdev_hwvirt_type_group,
    NULL,
};

struct vfio_ap_queue_reserved {
    unsigned long *apid;
    unsigned long *apqi;
    bool reserved;
};

/**
 * vfio_ap_has_queue
 *
 * @dev: an AP queue device
 * @data: a struct vfio_ap_queue_reserved reference
 *
 * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
 * apid or apqi specified in @data:
 *
 * - If @data contains both an apid and apqi value, then @data will be flagged
 *   as reserved if the APID and APQI fields for the AP queue device match
 *
 * - If @data contains only an apid value, @data will be flagged as
 *   reserved if the APID field in the AP queue device matches
 *
 * - If @data contains only an apqi value, @data will be flagged as
 *   reserved if the APQI field in the AP queue device matches
 *
 * Returns 0 to indicate the input to the function succeeded. Returns -EINVAL
 * if @data does not contain either an apid or apqi.
 */
static int vfio_ap_has_queue(struct device *dev, void *data)
{
    struct vfio_ap_queue_reserved *qres = data;
    struct ap_queue *ap_queue = to_ap_queue(dev);
    ap_qid_t qid;
    unsigned long id;

    if (qres->apid && qres->apqi) {
        qid = AP_MKQID(*qres->apid, *qres->apqi);
        if (qid == ap_queue->qid)
            qres->reserved = true;
    } else if (qres->apid && !qres->apqi) {
        id = AP_QID_CARD(ap_queue->qid);
        if (id == *qres->apid)
            qres->reserved = true;
    } else if (!qres->apid && qres->apqi) {
        id = AP_QID_QUEUE(ap_queue->qid);
        if (id == *qres->apqi)
            qres->reserved = true;
    } else {
        return -EINVAL;
    }

    return 0;
}

/**
 * vfio_ap_verify_queue_reserved
 *
 * @apid: an AP adapter ID
 * @apqi: an AP queue index
 *
 * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
 * driver according to the following rules:
 *
 * - If both @apid and @apqi are not NULL, then there must be an AP queue
 *   device bound to the vfio_ap driver with the APQN identified by @apid and
 *   @apqi
 *
 * - If only @apid is not NULL, then there must be an AP queue device bound
 *   to the vfio_ap driver with an APQN containing @apid
 *
 * - If only @apqi is not NULL, then there must be an AP queue device bound
 *   to the vfio_ap driver with an APQN containing @apqi
 *
 * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
 */
static int vfio_ap_verify_queue_reserved(unsigned long *apid,
                                         unsigned long *apqi)
{
    int ret;
    struct vfio_ap_queue_reserved qres;

    qres.apid = apid;
    qres.apqi = apqi;
    qres.reserved = false;

    ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
                                 &qres, vfio_ap_has_queue);
    if (ret)
        return ret;

    if (qres.reserved)
        return 0;

    return -EADDRNOTAVAIL;
}

static int
vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
                                             unsigned long apid)
{
    int ret;
    unsigned long apqi;
    unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;

    if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
        return vfio_ap_verify_queue_reserved(&apid, NULL);

    for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
        ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
        if (ret)
            return ret;
    }

    return 0;
}

/**
 * vfio_ap_mdev_verify_no_sharing
 *
 * Verifies that the APQNs derived from the cross product of the AP adapter IDs
 * and AP queue indexes comprising the AP matrix are not configured for another
 * mediated device. AP queue sharing is not allowed.
 *
 * @matrix_mdev: the mediated matrix device
 *
 * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
{
    struct ap_matrix_mdev *lstdev;
    DECLARE_BITMAP(apm, AP_DEVICES);
    DECLARE_BITMAP(aqm, AP_DOMAINS);

    list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
        if (matrix_mdev == lstdev)
            continue;

        memset(apm, 0, sizeof(apm));
        memset(aqm, 0, sizeof(aqm));

        /*
         * We work on full longs, as we can only exclude the leftover
         * bits in non-inverse order. The leftover is all zeros.
         */
        if (!bitmap_and(apm, matrix_mdev->matrix.apm,
                        lstdev->matrix.apm, AP_DEVICES))
            continue;

        if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
                        lstdev->matrix.aqm, AP_DOMAINS))
            continue;

        return -EADDRINUSE;
    }

    return 0;
}

/**
 * assign_adapter_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_adapter attribute
 * @buf: a buffer containing the AP adapter number (APID) to
 *       be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the APID from @buf and sets the corresponding bit in the mediated
 * matrix device's APM.
 *
 * Returns the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APID is not a valid number
 *
 *	2. -ENODEV
 *	   The APID exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APQIs have yet been assigned, the APID is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APID being assigned
 *	   and the APQIs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_adapter_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
    int ret;
    unsigned long apid;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    /* If the guest is running, disallow assignment of adapter */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &apid);
    if (ret)
        return ret;

    if (apid > matrix_mdev->matrix.apm_max)
        return -ENODEV;

    /*
     * Set the bit in the AP mask (APM) corresponding to the AP adapter
     * number (APID). The bits in the mask, from most significant to least
     * significant bit, correspond to APIDs 0-255.
     */
    mutex_lock(&matrix_dev->lock);

    ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
    if (ret)
        goto done;

    set_bit_inv(apid, matrix_mdev->matrix.apm);

    ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
    if (ret)
        goto share_err;

    ret = count;
    goto done;

share_err:
    clear_bit_inv(apid, matrix_mdev->matrix.apm);
done:
    mutex_unlock(&matrix_dev->lock);

    return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
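
/*
 * Example usage (sysfs paths as documented in
 * Documentation/s390/vfio-ap.rst):
 *   echo 5 > /sys/devices/vfio_ap/matrix/<uuid>/assign_adapter
 * sets bit 5 (in inverse bit order) of this mediated device's APM,
 * provided the corresponding queues are bound to vfio_ap and not
 * already assigned to another mediated device.
 */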
/**
 * unassign_adapter_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_adapter attribute
 * @buf: a buffer containing the adapter number (APID) to be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the APID from @buf and clears the corresponding bit in the mediated
 * matrix device's APM.
 *
 * Returns the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APID is not a number
 *	-ENODEV if the APID exceeds the maximum value configured for the
 *		system
 */
static ssize_t unassign_adapter_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
    int ret;
    unsigned long apid;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    /* If the guest is running, disallow un-assignment of adapter */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &apid);
    if (ret)
        return ret;

    if (apid > matrix_mdev->matrix.apm_max)
        return -ENODEV;

    mutex_lock(&matrix_dev->lock);
    clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
    mutex_unlock(&matrix_dev->lock);

    return count;
}
static DEVICE_ATTR_WO(unassign_adapter);

static int
vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
                                             unsigned long apqi)
{
    int ret;
    unsigned long apid;
    unsigned long nbits = matrix_mdev->matrix.apm_max + 1;

    if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
        return vfio_ap_verify_queue_reserved(NULL, &apqi);

    for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
        ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
        if (ret)
            return ret;
    }

    return 0;
}

/**
 * assign_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *       be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the APQI from @buf and sets the corresponding bit in the mediated
 * matrix device's AQM.
 *
 * Returns the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *
 *	1. -EINVAL
 *	   The APQI is not a valid number
 *
 *	2. -ENODEV
 *	   The APQI exceeds the maximum value configured for the system
 *
 *	3. -EADDRNOTAVAIL
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is not bound to the vfio_ap device
 *	   driver; or, if no APIDs have yet been assigned, the APQI is not
 *	   contained in an APQN bound to the vfio_ap device driver.
 *
 *	4. -EADDRINUSE
 *	   An APQN derived from the cross product of the APQI being assigned
 *	   and the APIDs previously assigned is being used by another mediated
 *	   matrix device
 */
static ssize_t assign_domain_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
    int ret;
    unsigned long apqi;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
    unsigned long max_apqi = matrix_mdev->matrix.aqm_max;

    /* If the guest is running, disallow assignment of domain */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &apqi);
    if (ret)
        return ret;
    if (apqi > max_apqi)
        return -ENODEV;

    mutex_lock(&matrix_dev->lock);

    ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
    if (ret)
        goto done;

    set_bit_inv(apqi, matrix_mdev->matrix.aqm);

    ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
    if (ret)
        goto share_err;

    ret = count;
    goto done;

share_err:
    clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
done:
    mutex_unlock(&matrix_dev->lock);

    return ret;
}
static DEVICE_ATTR_WO(assign_domain);

/**
 * unassign_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_domain attribute
 * @buf: a buffer containing the AP queue index (APQI) of the domain to
 *       be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the APQI from @buf and clears the corresponding bit in the
 * mediated matrix device's AQM.
 *
 * Returns the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the APQI is not a number
 *	-ENODEV if the APQI exceeds the maximum value configured for the system
 */
static ssize_t unassign_domain_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
    int ret;
    unsigned long apqi;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    /* If the guest is running, disallow un-assignment of domain */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &apqi);
    if (ret)
        return ret;

    if (apqi > matrix_mdev->matrix.aqm_max)
        return -ENODEV;

    mutex_lock(&matrix_dev->lock);
    clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
    mutex_unlock(&matrix_dev->lock);

    return count;
}
static DEVICE_ATTR_WO(unassign_domain);

/**
 * assign_control_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's assign_control_domain attribute
 * @buf: a buffer containing the domain ID to be assigned
 * @count: the number of bytes in @buf
 *
 * Parses the domain ID from @buf and sets the corresponding bit in the mediated
 * matrix device's ADM.
 *
 * Returns the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t assign_control_domain_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
    int ret;
    unsigned long id;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    /* If the guest is running, disallow assignment of control domain */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &id);
    if (ret)
        return ret;

    if (id > matrix_mdev->matrix.adm_max)
        return -ENODEV;

    /* Set the bit in the ADM (bitmask) corresponding to the AP control
     * domain number (id). The bits in the mask, from most significant to
     * least significant, correspond to IDs 0 up to one less than the
     * number of control domains that can be assigned.
     */
    mutex_lock(&matrix_dev->lock);
    set_bit_inv(id, matrix_mdev->matrix.adm);
    mutex_unlock(&matrix_dev->lock);

    return count;
}
static DEVICE_ATTR_WO(assign_control_domain);

/**
 * unassign_control_domain_store
 *
 * @dev: the matrix device
 * @attr: the mediated matrix device's unassign_control_domain attribute
 * @buf: a buffer containing the domain ID to be unassigned
 * @count: the number of bytes in @buf
 *
 * Parses the domain ID from @buf and clears the corresponding bit in the
 * mediated matrix device's ADM.
 *
 * Returns the number of bytes processed if the domain ID is valid; otherwise,
 * returns one of the following errors:
 *	-EINVAL if the ID is not a number
 *	-ENODEV if the ID exceeds the maximum value configured for the system
 */
static ssize_t unassign_control_domain_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count)
{
    int ret;
    unsigned long domid;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
    unsigned long max_domid = matrix_mdev->matrix.adm_max;

    /* If the guest is running, disallow un-assignment of control domain */
    if (matrix_mdev->kvm)
        return -EBUSY;

    ret = kstrtoul(buf, 0, &domid);
    if (ret)
        return ret;
    if (domid > max_domid)
        return -ENODEV;

    mutex_lock(&matrix_dev->lock);
    clear_bit_inv(domid, matrix_mdev->matrix.adm);
    mutex_unlock(&matrix_dev->lock);

    return count;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
    unsigned long id;
    int nchars = 0;
    int n;
    char *bufpos = buf;
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
    unsigned long max_domid = matrix_mdev->matrix.adm_max;

    mutex_lock(&matrix_dev->lock);
    for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
        n = sprintf(bufpos, "%04lx\n", id);
        bufpos += n;
        nchars += n;
    }
    mutex_unlock(&matrix_dev->lock);

    return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
    struct mdev_device *mdev = mdev_from_dev(dev);
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
    char *bufpos = buf;
    unsigned long apid;
    unsigned long apqi;
    unsigned long apid1;
    unsigned long apqi1;
    unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
    unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
    int nchars = 0;
    int n;

    apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
    apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);

    mutex_lock(&matrix_dev->lock);

    if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
            for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
                                 naqm_bits) {
                n = sprintf(bufpos, "%02lx.%04lx\n", apid,
                            apqi);
                bufpos += n;
                nchars += n;
            }
        }
    } else if (apid1 < napm_bits) {
        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
            n = sprintf(bufpos, "%02lx.\n", apid);
            bufpos += n;
            nchars += n;
        }
    } else if (apqi1 < naqm_bits) {
        for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
            n = sprintf(bufpos, ".%04lx\n", apqi);
            bufpos += n;
            nchars += n;
        }
    }

    mutex_unlock(&matrix_dev->lock);

    return nchars;
}
static DEVICE_ATTR_RO(matrix);
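
/*
 * For illustration: with adapters 5 and 6 and domains 4 and 0x47
 * assigned, reading the matrix attribute produces one APQN per line in
 * the "%02lx.%04lx" format used above:
 *   05.0004
 *   05.0047
 *   06.0004
 *   06.0047
 */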
static struct attribute *vfio_ap_mdev_attrs[] = {
    &dev_attr_assign_adapter.attr,
    &dev_attr_unassign_adapter.attr,
    &dev_attr_assign_domain.attr,
    &dev_attr_unassign_domain.attr,
    &dev_attr_assign_control_domain.attr,
    &dev_attr_unassign_control_domain.attr,
    &dev_attr_control_domains.attr,
    &dev_attr_matrix.attr,
    NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
    .attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
    &vfio_ap_mdev_attr_group,
    NULL
};

/**
 * vfio_ap_mdev_set_kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Verifies no other mediated matrix device has @kvm and sets a reference to
 * it in @matrix_mdev->kvm.
 *
 * Returns 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
                                struct kvm *kvm)
{
    struct ap_matrix_mdev *m;

    mutex_lock(&matrix_dev->lock);

    list_for_each_entry(m, &matrix_dev->mdev_list, node) {
        if ((m != matrix_mdev) && (m->kvm == kvm)) {
            mutex_unlock(&matrix_dev->lock);
            return -EPERM;
        }
    }

    matrix_mdev->kvm = kvm;
    kvm_get_kvm(kvm);
    kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
    mutex_unlock(&matrix_dev->lock);

    return 0;
}

/*
 * vfio_ap_mdev_iommu_notifier: IOMMU notifier callback
 *
 * @nb: The notifier block
 * @action: Action to be taken
 * @data: data associated with the request
 *
 * For an UNMAP request, unpin the guest IOVA (the NIB guest address we
 * pinned before). Other requests are ignored.
 */
static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
{
    struct ap_matrix_mdev *matrix_mdev;

    matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier);

    if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
        struct vfio_iommu_type1_dma_unmap *unmap = data;
        unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;

        vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
        return NOTIFY_OK;
    }

    return NOTIFY_DONE;
}

static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
{
    int ret;
    struct ap_matrix_mdev *matrix_mdev;

    if (action != VFIO_GROUP_NOTIFY_SET_KVM)
        return NOTIFY_OK;

    matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);

    if (!data) {
        matrix_mdev->kvm = NULL;
        return NOTIFY_OK;
    }

    ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
    if (ret)
        return NOTIFY_DONE;

    /* If there is no CRYCB pointer, then we can't copy the masks */
    if (!matrix_mdev->kvm->arch.crypto.crycbd)
        return NOTIFY_DONE;

    kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
                              matrix_mdev->matrix.aqm,
                              matrix_mdev->matrix.adm);

    return NOTIFY_OK;
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
    struct device *dev;
    struct vfio_ap_queue *q = NULL;

    dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
                             &apqn, match_apqn);
    if (dev) {
        q = dev_get_drvdata(dev);
        put_device(dev);
    }

    return q;
}

int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
                             unsigned int retry)
{
    struct ap_queue_status status;
    int ret;
    int retry2 = 2;

    if (!q)
        return 0;

retry_zapq:
    status = ap_zapq(q->apqn);
    switch (status.response_code) {
    case AP_RESPONSE_NORMAL:
        ret = 0;
        break;
    case AP_RESPONSE_RESET_IN_PROGRESS:
        if (retry--) {
            msleep(20);
            goto retry_zapq;
        }
        ret = -EBUSY;
        break;
    case AP_RESPONSE_Q_NOT_AVAIL:
    case AP_RESPONSE_DECONFIGURED:
    case AP_RESPONSE_CHECKSTOPPED:
        WARN_ON_ONCE(status.irq_enabled);
        ret = -EBUSY;
        goto free_resources;
    default:
        /* things are really broken, give up */
        WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
             status.response_code);
        return -EIO;
    }

    /* wait for the reset to take effect */
    while (retry2--) {
        if (status.queue_empty && !status.irq_enabled)
            break;
        msleep(20);
        status = ap_tapq(q->apqn, NULL);
    }
    WARN_ON_ONCE(retry2 <= 0);

free_resources:
    vfio_ap_free_aqic_resources(q);

    return ret;
}

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
{
    int ret;
    int rc = 0;
    unsigned long apid, apqi;
    struct vfio_ap_queue *q;
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
                         matrix_mdev->matrix.apm_max + 1) {
        for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
                             matrix_mdev->matrix.aqm_max + 1) {
            q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
            ret = vfio_ap_mdev_reset_queue(q, 1);
            /*
             * Regardless of whether a queue turns out to be
             * busy, or is not operational, we need to continue
             * resetting the remaining queues.
             */
            if (ret)
                rc = ret;
        }
    }

    return rc;
}

static int vfio_ap_mdev_open(struct mdev_device *mdev)
{
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
    unsigned long events;
    int ret;

    if (!try_module_get(THIS_MODULE))
        return -ENODEV;

    matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
    events = VFIO_GROUP_NOTIFY_SET_KVM;

    ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                                 &events, &matrix_mdev->group_notifier);
    if (ret) {
        module_put(THIS_MODULE);
        return ret;
    }

    matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
    events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
    ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                 &events, &matrix_mdev->iommu_notifier);
    if (!ret)
        return ret;

    vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                             &matrix_mdev->group_notifier);
    module_put(THIS_MODULE);
    return ret;
}

static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
    struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

    mutex_lock(&matrix_dev->lock);
    if (matrix_mdev->kvm) {
        kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
        matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
        vfio_ap_mdev_reset_queues(mdev);
        kvm_put_kvm(matrix_mdev->kvm);
        matrix_mdev->kvm = NULL;
    }
    mutex_unlock(&matrix_dev->lock);

    vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                             &matrix_mdev->iommu_notifier);
    vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
                             &matrix_mdev->group_notifier);
    module_put(THIS_MODULE);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
    unsigned long minsz;
    struct vfio_device_info info;

    minsz = offsetofend(struct vfio_device_info, num_irqs);

    if (copy_from_user(&info, (void __user *)arg, minsz))
        return -EFAULT;

    if (info.argsz < minsz)
        return -EINVAL;

    info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
    info.num_regions = 0;
    info.num_irqs = 0;

    return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
                                  unsigned int cmd, unsigned long arg)
{
    int ret;

    mutex_lock(&matrix_dev->lock);
    switch (cmd) {
    case VFIO_DEVICE_GET_INFO:
        ret = vfio_ap_mdev_get_device_info(arg);
        break;
    case VFIO_DEVICE_RESET:
        ret = vfio_ap_mdev_reset_queues(mdev);
        break;
    default:
        ret = -EOPNOTSUPP;
        break;
    }
    mutex_unlock(&matrix_dev->lock);

    return ret;
}

static const struct mdev_parent_ops vfio_ap_matrix_ops = {
    .owner                 = THIS_MODULE,
    .supported_type_groups = vfio_ap_mdev_type_groups,
    .mdev_attr_groups      = vfio_ap_mdev_attr_groups,
    .create                = vfio_ap_mdev_create,
    .remove                = vfio_ap_mdev_remove,
    .open                  = vfio_ap_mdev_open,
    .release               = vfio_ap_mdev_release,
    .ioctl                 = vfio_ap_mdev_ioctl,
};

int vfio_ap_mdev_register(void)
{
    atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

    return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
}

void vfio_ap_mdev_unregister(void)
{
    mdev_unregister_device(&matrix_dev->device);
}