ifcvf_base.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"
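
/*
 * Thin MMIO accessor wrappers for the little-endian (__le16/__le32)
 * annotated fields of the virtio-pci config structures.
 */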
static inline u8 ifc_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}

static inline u16 ifc_ioread16(__le16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 ifc_ioread32(__le32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}
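
/*
 * The common config exposes 64-bit queue addresses as lo/hi 32-bit halves,
 * so a 64-bit value is written as two 32-bit MMIO stores.
 */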
static void ifc_iowrite64_twopart(u64 val,
				  __le32 __iomem *lo, __le32 __iomem *hi)
{
	ifc_iowrite32((u32)val, lo);
	ifc_iowrite32(val >> 32, hi);
}

struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
	return container_of(hw, struct ifcvf_adapter, vf);
}
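
/*
 * Translate a vendor capability (bar, offset, length) into a kernel
 * virtual address inside the already mapped BARs, rejecting out-of-range
 * bar numbers and offsets.
 */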
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	struct ifcvf_adapter *ifcvf;
	struct pci_dev *pdev;
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	ifcvf = vf_to_adapter(hw);
	pdev = ifcvf->pdev;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(pdev, bar)) {
		IFCVF_DBG(pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}

static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}
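
/*
 * Walk the PCI capability list, map the virtio-pci common, notify, ISR and
 * device (net) config regions, pre-compute each queue's notify address and
 * record the live-migration BAR.
 */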
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->net_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->net_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		ifc_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->net_cfg, hw->notify_off_multiplier);

	return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return ifc_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	ifc_iowrite8(status, &hw->common_cfg->device_status);
}

void ifcvf_reset(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;

	ifcvf_set_status(hw, 0);
	/* flush set_status, make sure VF is stopped, reset */
	ifcvf_get_status(hw);
}

static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	ifcvf_get_status(hw);
}
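
/*
 * The device feature bits are 64 bits wide but are read through a 32-bit
 * window: select word 0 and read the low half, then select word 1 and
 * read the high half.
 */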
u64 ifcvf_get_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;

	ifc_iowrite32(0, &cfg->device_feature_select);
	features_lo = ifc_ioread32(&cfg->device_feature);

	ifc_iowrite32(1, &cfg->device_feature_select);
	features_hi = ifc_ioread32(&cfg->device_feature);

	return ((u64)features_hi << 32) | features_lo;
}
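
/*
 * Read the device (net) config space byte by byte, retrying until the
 * config generation counter is stable so the copy is a consistent
 * snapshot even if the device updates the config concurrently.
 */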
void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > sizeof(struct virtio_net_config));

	do {
		old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = ifc_ioread8(hw->net_cfg + offset + i);

		new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > sizeof(struct virtio_net_config));

	for (i = 0; i < length; i++)
		ifc_iowrite8(*p++, hw->net_cfg + offset + i);
}

static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	ifc_iowrite32(0, &cfg->guest_feature_select);
	ifc_iowrite32((u32)features, &cfg->guest_feature);

	ifc_iowrite32(1, &cfg->guest_feature_select);
	ifc_iowrite32(features >> 32, &cfg->guest_feature);
}

static int ifcvf_config_features(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *ifcvf;

	ifcvf = vf_to_adapter(hw);
	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);

	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
		return -EIO;
	}

	return 0;
}
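
/*
 * The last_avail_idx of each vring lives in the live-migration (LM) BAR:
 * one idx_addr pair per queue pair, indexed by qid % 2 within the pair.
 */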
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u16 last_avail_idx;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	/* each queue pair holds two vrings, so the pair id is qid / 2 */
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	last_avail_idx = ifc_ioread16(avail_idx_addr);

	return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *ifcvf_lm;
	void __iomem *avail_idx_addr;
	u32 q_pair_id;

	ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
	q_pair_id = qid / 2;
	avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
	hw->vring[qid].last_avail_idx = num;
	ifc_iowrite16(num, avail_idx_addr);

	return 0;
}
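
/*
 * Program each ready vring into the common config: MSI-X vectors, the
 * descriptor/avail/used ring addresses, the queue size and the saved
 * last_avail_idx, then enable the queue.
 */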
static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	struct ifcvf_adapter *ifcvf;
	u32 i;

	ifcvf = vf_to_adapter(hw);
	cfg = hw->common_cfg;
	ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);

	if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
		IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
		return -EINVAL;
	}

	for (i = 0; i < hw->nr_vring; i++) {
		if (!hw->vring[i].ready)
			break;

		ifc_iowrite16(i, &cfg->queue_select);
		ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				      &cfg->queue_desc_hi);
		ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				      &cfg->queue_avail_hi);
		ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				      &cfg->queue_used_hi);
		ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);

		if (ifc_ioread16(&cfg->queue_msix_vector) ==
		    VIRTIO_MSI_NO_VECTOR) {
			IFCVF_ERR(ifcvf->pdev,
				  "No msix vector for queue %u\n", i);
			return -EINVAL;
		}

		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
		ifc_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}

static void ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg;
	u32 i;

	cfg = hw->common_cfg;
	ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);

	for (i = 0; i < hw->nr_vring; i++) {
		ifc_iowrite16(i, &cfg->queue_select);
		ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
	}

	ifc_ioread16(&cfg->queue_msix_vector);
}
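
/*
 * Bring the VF up following the virtio device initialization sequence:
 * reset, ACKNOWLEDGE, DRIVER, negotiate features (FEATURES_OK), program
 * the vrings, then set DRIVER_OK. The caller is expected to have set
 * hw->req_features and the per-vring addresses beforehand.
 */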
int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;

	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}

void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}

void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	ifc_iowrite16(qid, hw->vring[qid].notify_addr);
}