virtio_pci_legacy.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - legacy device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"
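
/*
 * For orientation: the legacy ("virtio 0.9") register layout in BAR0,
 * as defined in <uapi/linux/virtio_pci.h>. Offsets are in bytes; the
 * device-specific config space follows at 20, or at 24 when MSI-X is
 * enabled (VIRTIO_PCI_CONFIG_OFF() picks the right one):
 *
 *	VIRTIO_PCI_HOST_FEATURES	 0	32-bit, read-only
 *	VIRTIO_PCI_GUEST_FEATURES	 4	32-bit, read-write
 *	VIRTIO_PCI_QUEUE_PFN		 8	32-bit, read-write
 *	VIRTIO_PCI_QUEUE_NUM		12	16-bit, read-only
 *	VIRTIO_PCI_QUEUE_SEL		14	16-bit, read-write
 *	VIRTIO_PCI_QUEUE_NOTIFY		16	16-bit, write
 *	VIRTIO_PCI_STATUS		18	 8-bit, read-write
 *	VIRTIO_PCI_ISR			19	 8-bit, read clears
 *	VIRTIO_MSI_CONFIG_VECTOR	20	16-bit, MSI-X only
 *	VIRTIO_MSI_QUEUE_VECTOR		22	16-bit, MSI-X only
 */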

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* When someone needs more than 32 feature bits, we'll need to
	 * steal a bit to indicate that the rest are somewhere else. */
	return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/* We only support 32 feature bits. */
	iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

	return 0;
}
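
/*
 * Illustrative sketch, not part of this file: once features are
 * finalized, a driver tests individual bits with virtio_has_feature()
 * from <linux/virtio_config.h>. VIRTIO_NET_F_MAC serves purely as an
 * example bit, and mac_addr as a caller-provided buffer:
 *
 *	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
 *		virtio_cread_bytes(vdev,
 *				   offsetof(struct virtio_net_config, mac),
 *				   mac_addr, ETH_ALEN);
 */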

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
			VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
			offset;
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	void __iomem *ioaddr = vp_dev->ioaddr +
			VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
			offset;
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		iowrite8(ptr[i], ioaddr + i);
}
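
/*
 * Illustrative sketch, not part of this file: drivers reach vp_get()
 * and vp_set() only through helpers such as virtio_cread8() and
 * virtio_cwrite8() in <linux/virtio_config.h>, which dispatch through
 * vdev->config->get/set. For example, reading the first MAC byte of a
 * virtio-net device:
 *
 *	u8 b = virtio_cread8(vdev, offsetof(struct virtio_net_config, mac));
 *
 * The byte-at-a-time loops above keep accesses correct for any offset
 * and length, since config fields need not be naturally aligned.
 */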

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}
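
/*
 * For reference, the status bits a driver writes during bring-up,
 * from <uapi/linux/virtio_config.h>:
 *
 *	VIRTIO_CONFIG_S_ACKNOWLEDGE	0x01	guest saw the device
 *	VIRTIO_CONFIG_S_DRIVER		0x02	guest knows how to drive it
 *	VIRTIO_CONFIG_S_DRIVER_OK	0x04	driver is ready
 *	VIRTIO_CONFIG_S_FEATURES_OK	0x08	virtio 1.0+ only
 *	VIRTIO_CONFIG_S_FAILED		0x80	something went wrong
 */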

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
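
/*
 * Illustrative usage, not part of this file: the common MSI-X setup
 * code treats a readback of VIRTIO_MSI_NO_VECTOR (0xffff) as "the
 * device could not bind that vector" and bails out, roughly:
 *
 *	if (vp_dev->config_vector(vp_dev, v) == VIRTIO_MSI_NO_VECTOR) {
 *		err = -EBUSY;
 *		goto error;
 *	}
 */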

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtqueue *vq;
	u16 num;
	int err;
	u64 q_pfn;

	/* Select the queue we're interested in */
	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	/* Check if queue is either not available or already active. */
	num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
				    true, false, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
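
	/*
	 * Worked example for the check below: the legacy PFN register is
	 * 32 bits wide and counts units of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT
	 * (4 KiB), so the ring must sit below 1ULL << (32 + 12) bytes,
	 * i.e. 16 TiB. With 4 KiB pages, the dev_err() format prints that
	 * limit in GB: 0x1ULL << (32 + 12 - 30) == 0x4000 GB.
	 */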
	if (q_pfn >> 32) {
		dev_err(&vp_dev->pci_dev->dev,
			"platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
			0x1ULL << (32 + PAGE_SHIFT - 30));
		err = -E2BIG;
		goto out_del_vq;
	}

	/* activate the queue */
	iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
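
	/*
	 * For context: vq->priv stashes the notify register address so
	 * that vp_notify() (in virtio_pci_common.c) can kick the device
	 * without reselecting the queue; it boils down to:
	 *
	 *	iowrite16(vq->index, (void __iomem *)vq->priv);
	 */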

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto out_deactivate;
		}
	}

	return vq;

out_deactivate:
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
out_del_vq:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vring_del_virtqueue(vq);
}

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};
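
/*
 * Illustrative sketch, not part of this file: drivers and the virtio
 * core reach these ops only through struct virtio_device. For
 * instance, virtio_device_ready() in <linux/virtio_config.h> lands in
 * vp_get_status()/vp_set_status() above, roughly:
 *
 *	unsigned status = dev->config->get_status(dev);
 *
 *	BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
 *	dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 */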

/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int rc;

	/* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
		return -ENODEV;

	if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
		printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
		       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
		return -ENODEV;
	}

	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
	} else {
		/*
		 * The virtio ring base address is expressed as a 32-bit PFN,
		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
		 */
		dma_set_coherent_mask(&pci_dev->dev,
				      DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	}
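
	/*
	 * Worked example of the masks above: streaming DMA (the buffers
	 * the rings point at) may use all 64 bits, but coherent memory
	 * (the rings themselves) is capped at DMA_BIT_MASK(32 + 12), i.e.
	 * 44 bits, to match the 32-bit PFN register and its 4 KiB units.
	 */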
	if (rc)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
	if (rc)
		return rc;

	rc = -ENOMEM;
	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!vp_dev->ioaddr)
		goto err_iomap;

	vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

	/* we use the subsystem vendor/device id as the virtio vendor/device
	 * id.  this allows us to use the same PCI vendor/device id for all
	 * virtio devices and to identify the particular virtio driver by
	 * the subsystem ids */
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
	vp_dev->vdev.id.device = pci_dev->subsystem_device;
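
	/*
	 * Illustrative sketch, not part of this file: the IDs set above
	 * are what a virtio driver's id_table matches against, e.g. for
	 * virtio-net (VIRTIO_ID_NET == 1):
	 *
	 *	static const struct virtio_device_id id_table[] = {
	 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	 *		{ 0 },
	 *	};
	 */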

	vp_dev->vdev.config = &virtio_pci_config_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_iomap:
	pci_release_region(pci_dev, 0);
	return rc;
}
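
/*
 * Rough sketch of the caller for context, not part of this file: in
 * virtio_pci_common.c, virtio_pci_probe() tries the modern (virtio
 * 1.0) transport first and falls back to this legacy one (exact error
 * handling and the force_legacy module parameter elided):
 *
 *	rc = virtio_pci_modern_probe(vp_dev);
 *	if (rc == -ENODEV)
 *		rc = virtio_pci_legacy_probe(vp_dev);
 */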

void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_region(pci_dev, 0);
}