virtio_pci_common.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}

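/*
 * A minimal sketch of the driver-side path that lands here: vp_notify() is
 * installed as the virtqueue's notify hook at creation time, so a driver's
 * virtqueue_kick() reaches it via virtqueue_notify(). Assuming buffers were
 * already queued:
 *
 *      virtqueue_add_outbuf(vq, sg, 1, data, GFP_ATOMIC);
 *      virtqueue_kick(vq);
 */
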
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change?  Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}

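/*
 * For reference (per the virtio PCI spec): the ISR status byte is
 * read-to-clear; bit 0 signals a virtqueue interrupt and bit 1
 * (VIRTIO_PCI_ISR_CONFIG) signals a configuration change. With shared INTx
 * the handler above must therefore dispatch both cases from a single read.
 */
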
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors, struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned flags = PCI_IRQ_MSIX;
        unsigned i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_names = kmalloc_array(nvectors,
                                           sizeof(*vp_dev->msix_names),
                                           GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                       GFP_KERNEL))
                        goto error;

        if (desc) {
                flags |= PCI_IRQ_AFFINITY;
                desc->pre_vectors++; /* virtio config vector */
        }

        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
                                             nvectors, flags, desc);
        if (err < 0)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        v = vp_dev->config_vector(vp_dev, v);
        /* Verify we had enough resources to assign the vector */
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        return err;
}

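/*
 * The allocation above yields this MSI-X layout: vector 0 handles config
 * changes ("<dev>-config"); with per_vq_vectors == false, vector 1 is shared
 * by all virtqueues ("<dev>-virtqueues"); with per_vq_vectors == true, each
 * callback-bearing virtqueue gets its own vector, requested one at a time in
 * vp_find_vqs_msix() below.
 */
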
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     bool ctx,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
                              msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        vp_dev->del_vq(info);
        kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        int i;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                if (vp_dev->per_vq_vectors) {
                        int v = vp_dev->vqs[vq->index]->msix_vector;

                        if (v != VIRTIO_MSI_NO_VECTOR) {
                                int irq = pci_irq_vector(vp_dev->pci_dev, v);

                                irq_set_affinity_hint(irq, NULL);
                                free_irq(irq, vq);
                        }
                }
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

        if (vp_dev->msix_affinity_masks) {
                for (i = 0; i < vp_dev->msix_vectors; i++)
                        if (vp_dev->msix_affinity_masks[i])
                                free_cpumask_var(vp_dev->msix_affinity_masks[i]);
        }

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

                pci_free_irq_vectors(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}

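/*
 * Note the teardown order above: each per-VQ IRQ is freed before its queue
 * is deleted, then the INTx/shared/config vectors are released, and only
 * then is the MSI-X block disabled — setup in reverse, so no handler can
 * fire against a queue that has already vanished.
 */
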
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                            struct virtqueue *vqs[], vq_callback_t *callbacks[],
                            const char * const names[], bool per_vq_vectors,
                            const bool *ctx,
                            struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (per_vq_vectors) {
                /* Best option: one for change interrupt, one per vq. */
                nvectors = 1;
                for (i = 0; i < nvqs; ++i)
                        if (names[i] && callbacks[i])
                                ++nvectors;
        } else {
                /* Second best: one for change, shared for all vqs. */
                nvectors = 2;
        }

        err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
                                      per_vq_vectors ? desc : NULL);
        if (err)
                goto error_find;

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
                        goto error_find;
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
                            struct virtqueue *vqs[], vq_callback_t *callbacks[],
                            const char * const names[], const bool *ctx)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err, queue_idx = 0;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                          dev_name(&vdev->dev), vp_dev);
        if (err)
                goto out_del_vqs;

        vp_dev->intx_enabled = 1;
        vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
                vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
                                     ctx ? ctx[i] : false,
                                     VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto out_del_vqs;
                }
        }

        return 0;
out_del_vqs:
        vp_del_vqs(vdev);
        return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], const bool *ctx,
                struct irq_affinity *desc)
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}

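/*
 * A minimal sketch of how a virtio driver reaches this function; the queue
 * names and callbacks below are hypothetical, not part of this file:
 *
 *      struct virtqueue *vqs[2];
 *      vq_callback_t *cbs[] = { my_rx_done, my_tx_done };
 *      static const char * const vq_names[] = { "rx", "tx" };
 *      int err;
 *
 *      err = virtio_find_vqs(vdev, 2, vqs, cbs, vq_names, NULL);
 *
 * virtio_find_vqs() dispatches through vdev->config->find_vqs, which the PCI
 * transport points at vp_find_vqs() above.
 */
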
const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (!cpu_mask)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_copy(mask, cpu_mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}

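/*
 * Drivers typically reach this through the virtqueue_set_affinity() wrapper;
 * a minimal sketch, pinning a queue's vector to one CPU:
 *
 *      virtqueue_set_affinity(vq, cpumask_of(cpu));
 */
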
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        if (!vp_dev->per_vq_vectors ||
            vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;

        return pci_irq_get_affinity(vp_dev->pci_dev,
                                    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);
        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
        struct virtio_device *vdev = dev_to_virtio(_d);
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* As struct device is a kobject, it's not safe to
         * free the memory (including the reference counter itself)
         * until its release callback. */
        kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev, *reg_dev = NULL;
        int rc;

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        pci_set_drvdata(pci_dev, vp_dev);
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* enable the device */
        rc = pci_enable_device(pci_dev);
        if (rc)
                goto err_enable_device;

        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
                if (rc == -ENODEV || rc == -ENOMEM)
                        rc = virtio_pci_modern_probe(vp_dev);
                if (rc)
                        goto err_probe;
        } else {
                rc = virtio_pci_modern_probe(vp_dev);
                if (rc == -ENODEV)
                        rc = virtio_pci_legacy_probe(vp_dev);
                if (rc)
                        goto err_probe;
        }

        pci_set_master(pci_dev);

        rc = register_virtio_device(&vp_dev->vdev);
        reg_dev = vp_dev;
        if (rc)
                goto err_register;

        return 0;

err_register:
        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);
err_probe:
        pci_disable_device(pci_dev);
err_enable_device:
        if (reg_dev)
                put_device(&vp_dev->vdev.dev);
        else
                kfree(vp_dev);
        return rc;
}

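/*
 * Note on the fallback order above: by default the modern (virtio 1.x)
 * transport is tried first and the legacy transport is used only when the
 * device lacks modern capabilities (-ENODEV); force_legacy inverts this for
 * transitional devices, as described in the module parameter at the top.
 */
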
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);

        /*
         * Device is marked broken on surprise removal so that virtio upper
         * layers can abort any ongoing operation.
         */
        if (!pci_device_is_present(pci_dev))
                virtio_break_device(&vp_dev->vdev);

        pci_disable_sriov(pci_dev);

        unregister_virtio_device(&vp_dev->vdev);

        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);

        pci_disable_device(pci_dev);
        put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct virtio_device *vdev = &vp_dev->vdev;
        int ret;

        if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
                return -EBUSY;

        if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
                return -EINVAL;

        if (pci_vfs_assigned(pci_dev))
                return -EPERM;

        if (num_vfs == 0) {
                pci_disable_sriov(pci_dev);
                return 0;
        }

        ret = pci_enable_sriov(pci_dev, num_vfs);
        if (ret < 0)
                return ret;

        return num_vfs;
}

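/*
 * This callback is driven from sysfs; a sketch of enabling four VFs from
 * userspace (the PCI address below is a placeholder):
 *
 *      echo 4 > /sys/bus/pci/devices/0000:00:04.0/sriov_numvfs
 */
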
static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
        .sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");