ifcvf_main.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define VERSION_STRING "0.1"
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

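/* MSI-X handler for the device config-change interrupt: forward to the
 * vDPA config callback, if one has been registered.
 */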
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

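/* Per-virtqueue MSI-X handler: forward to the vring's callback, if set. */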
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

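/*
 * Free the IRQs of the first @queues virtqueues plus the config IRQ,
 * then release the MSI-X vectors. @queues lets the request-path error
 * handling free only the vq IRQs that were actually set up.
 */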
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                vf->vring[i].irq = -EINVAL;
        }

        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        ifcvf_free_irq_vectors(pdev);
}

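/*
 * Allocate one MSI-X vector per interrupt source: vector 0 carries the
 * config-change interrupt, and virtqueue i uses vector
 * i + IFCVF_MSI_QUEUE_OFF.
 */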
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;

        ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
                                    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        /* No trailing '\n' in irq names: they show up in /proc/interrupts. */
        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vector = 0;
        vf->config_irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                return ret;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        ifcvf_free_irq(adapter, i);
                        return ret;
                }

                vf->vring[i].irq = irq;
        }

        return 0;
}

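/*
 * Start the hardware datapath; on failure, latch VIRTIO_CONFIG_S_FAILED
 * into the status register so the driver side can observe the error.
 */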
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

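/* Detach the vq callbacks, then stop the hardware datapath. */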
static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

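/* Clear all cached per-vring software state, then reset the device. */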
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

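/*
 * The vdpa_device is embedded in struct ifcvf_adapter, so the adapter
 * (and from it the ifcvf_hw) can be recovered with container_of().
 */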
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u64 features;

        features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

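/*
 * Act on virtio status transitions: stop the datapath and free the IRQs
 * when DRIVER_OK is cleared, reset the vrings when the status is zeroed,
 * and request IRQs plus start the datapath when DRIVER_OK is first set.
 */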
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = dev_get_drvdata(vdpa_dev->dev.parent);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
        }

        if (status == 0) {
                ifcvf_reset_vring(adapter);
                return;
        }

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

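/*
 * Per-virtqueue vDPA ops. Apart from the max-queue-size query, these mostly
 * cache ring parameters in the vring_info array; the cached values are
 * programmed into the VF when the datapath is started.
 */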
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->avail_index = ifcvf_get_vq_state(vf, qid);

        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

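/*
 * The config generation counter is read directly from the device's common
 * config space; device/vendor IDs and the ring alignment are constants.
 */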
static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        return VIRTIO_ID_NET;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        return IFCVF_SUBSYS_VENDOR_ID;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].irq;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features = ifcvf_vdpa_get_features,
        .set_features = ifcvf_vdpa_set_features,
        .get_status = ifcvf_vdpa_get_status,
        .set_status = ifcvf_vdpa_set_status,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state = ifcvf_vdpa_get_vq_state,
        .set_vq_state = ifcvf_vdpa_set_vq_state,
        .set_vq_cb = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready = ifcvf_vdpa_get_vq_ready,
        .set_vq_num = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq = ifcvf_vdpa_get_vq_irq,
        .kick_vq = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id = ifcvf_vdpa_get_device_id,
        .get_vendor_id = ifcvf_vdpa_get_vendor_id,
        .get_vq_align = ifcvf_vdpa_get_vq_align,
        .get_config = ifcvf_vdpa_get_config,
        .set_config = ifcvf_vdpa_set_config,
        .set_config_cb = ifcvf_vdpa_set_config_cb,
};

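/*
 * Probe: enable the PCI device, map BARs 0, 2 and 4, set 64-bit streaming
 * and coherent DMA masks, allocate the vDPA device, initialize the VF
 * hardware, and register on the vdpa bus. Managed (pcim/devm) APIs keep
 * the error paths short.
 */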
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        int ret, i;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev,
                          "No usable coherent DMA configuration\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                return ret;
        }

        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops,
                                    IFCVF_MAX_QUEUE_PAIRS * 2);
        if (adapter == NULL) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return -ENOMEM;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, adapter);

        vf = &adapter->vf;
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].irq = -EINVAL;

        ret = vdpa_register_device(&adapter->vdpa);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

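/*
 * Unregistering the vDPA device drops its reference; the managed PCI
 * mappings and IRQ vectors are released by devres on driver detach.
 */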
static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

        vdpa_unregister_device(&adapter->vdpa);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
                         IFCVF_DEVICE_ID,
                         IFCVF_SUBSYS_VENDOR_ID,
                         IFCVF_SUBSYS_DEVICE_ID) },
        { 0 },
};

MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe = ifcvf_probe,
        .remove = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION(VERSION_STRING);