vphb.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

static int cxl_pci_probe_mode(struct pci_bus *bus)
{
        return PCI_PROBE_NORMAL;
}

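/*
 * MSI interrupts are not supported on the vPHB, so refuse any attempt to
 * set them up.
 */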
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
        /*
         * MSI should never be set, but we still need to provide this
         * callback.
         */
}

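/*
 * Called when a device on the vPHB is enabled: check that the adapter link
 * is up, set the DMA offset and allocate a default cxl context for the
 * device before enabling the AFU.
 */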
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
        struct pci_controller *phb;
        struct cxl_afu *afu;
        struct cxl_context *ctx;

        phb = pci_bus_to_host(dev->bus);
        afu = (struct cxl_afu *)phb->private_data;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
                return false;
        }

        dev->dev.archdata.dma_offset = PAGE_OFFSET;

        /*
         * Allocate a context to do cxl things too. If we eventually do real
         * DMA ops, we'll need a default context to attach them to
         */
        ctx = cxl_dev_context_init(dev);
        if (IS_ERR(ctx))
                return false;
        dev->dev.archdata.cxl_ctx = ctx;

        return (cxl_ops->afu_check_and_enable(afu) == 0);
}

static void cxl_pci_disable_device(struct pci_dev *dev)
{
        struct cxl_context *ctx = cxl_get_context(dev);

        if (ctx) {
                if (ctx->status == STARTED) {
                        dev_err(&dev->dev, "Default context started\n");
                        return;
                }
                dev->dev.archdata.cxl_ctx = NULL;
                cxl_release_context(ctx);
        }
}

static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
                                                unsigned long type)
{
        return 1;
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
        /* Should we do an AFU reset here? */
}

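/*
 * Config space addressing on the vPHB: the (bus, devfn) pair selects the
 * AFU configuration record that backs the "function" being accessed.
 */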
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
        return (bus << 8) + devfn;
}

static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
        struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

        return phb ? phb->private_data : NULL;
}

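/*
 * These implement the "reader lock" taken by the config space accessors
 * below: a get succeeds only while the AFU is configured (configured_state
 * is non-negative), and a put drops the reference again.
 */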
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
        atomic_dec_if_positive(&afu->configured_state);
}

static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
        return atomic_inc_unless_negative(&afu->configured_state);
}

static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
                                       struct cxl_afu *afu, int *_record)
{
        int record;

        record = cxl_pcie_cfg_record(bus->number, devfn);
        if (record > afu->crs_num)
                return PCIBIOS_DEVICE_NOT_FOUND;
        *_record = record;
        return 0;
}

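/*
 * Config space reads and writes on the vPHB are serviced by the addressed
 * function's AFU configuration record, via the backend afu_cr_read and
 * afu_cr_write ops.
 */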
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
                                int offset, int len, u32 *val)
{
        int rc, record;
        struct cxl_afu *afu;
        u8 val8;
        u16 val16;
        u32 val32;

        afu = pci_bus_to_afu(bus);
        /* Grab a reader lock on afu. */
        if (afu == NULL || !cxl_afu_configured_get(afu))
                return PCIBIOS_DEVICE_NOT_FOUND;

        rc = cxl_pcie_config_info(bus, devfn, afu, &record);
        if (rc)
                goto out;

        switch (len) {
        case 1:
                rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
                *val = val8;
                break;
        case 2:
                rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
                *val = val16;
                break;
        case 4:
                rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
                *val = val32;
                break;
        default:
                WARN_ON(1);
        }

out:
        cxl_afu_configured_put(afu);
        return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}

static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
                                 int offset, int len, u32 val)
{
        int rc, record;
        struct cxl_afu *afu;

        afu = pci_bus_to_afu(bus);
        /* Grab a reader lock on afu. */
        if (afu == NULL || !cxl_afu_configured_get(afu))
                return PCIBIOS_DEVICE_NOT_FOUND;

        rc = cxl_pcie_config_info(bus, devfn, afu, &record);
        if (rc)
                goto out;

        switch (len) {
        case 1:
                rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
                break;
        case 2:
                rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
                break;
        case 4:
                rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
                break;
        default:
                WARN_ON(1);
        }

out:
        cxl_afu_configured_put(afu);
        return rc ? PCIBIOS_SET_FAILED : 0;
}

static struct pci_ops cxl_pcie_pci_ops =
{
        .read = cxl_pcie_read_config,
        .write = cxl_pcie_write_config,
};

static struct pci_controller_ops cxl_pci_controller_ops =
{
        .probe_mode = cxl_pci_probe_mode,
        .enable_device_hook = cxl_pci_enable_device_hook,
        .disable_device = cxl_pci_disable_device,
        .release_device = cxl_pci_disable_device,
        .window_alignment = cxl_pci_window_alignment,
        .reset_secondary_bus = cxl_pci_reset_secondary_bus,
        .setup_msi_irqs = cxl_setup_msi_irqs,
        .teardown_msi_irqs = cxl_teardown_msi_irqs,
};

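/*
 * Create a virtual PHB for the AFU and scan it, so that each AFU
 * configuration record shows up as a PCI function that ordinary PCI drivers
 * can bind to.
 */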
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
        struct pci_controller *phb;
        struct device_node *vphb_dn;
        struct device *parent;

        /*
         * If there are no AFU configuration records we won't have anything to
         * expose under the vPHB, so skip creating one, returning success since
         * this is still a valid case. This will also opt us out of EEH
         * handling since we won't have anything special to do if there are no
         * kernel drivers attached to the vPHB, and EEH handling is not yet
         * supported in the peer model.
         */
        if (!afu->crs_num)
                return 0;

        /* The parent device is the adapter. Reuse the device node of
         * the adapter.
         * We don't seem to care what device node is used for the vPHB,
         * but tools such as lsvpd walk up the device parents looking
         * for a valid location code, so we might as well show devices
         * attached to the adapter as being located on that adapter.
         */
        parent = afu->adapter->dev.parent;
        vphb_dn = parent->of_node;

        /* Alloc and setup PHB data structure */
        phb = pcibios_alloc_controller(vphb_dn);
        if (!phb)
                return -ENODEV;

        /* Setup parent in sysfs */
        phb->parent = parent;

        /* Setup the PHB using arch provided callback */
        phb->ops = &cxl_pcie_pci_ops;
        phb->cfg_addr = NULL;
        phb->cfg_data = NULL;
        phb->private_data = afu;
        phb->controller_ops = cxl_pci_controller_ops;

        /* Scan the bus */
        pcibios_scan_phb(phb);
        if (phb->bus == NULL)
                return -ENXIO;

        /* Set release hook on root bus */
        pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
                                    pcibios_free_controller_deferred,
                                    (void *) phb);

        /* Claim resources. This might need some rework as well depending on
         * whether we are doing probe-only or not, like assigning unassigned
         * resources etc...
         */
        pcibios_claim_one_bus(phb->bus);

        /* Add probed PCI devices to the device model */
        pci_bus_add_devices(phb->bus);

        afu->phb = phb;

        return 0;
}

void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
        struct pci_controller *phb;

        /* If there is no configuration record we won't have one of these */
        if (!afu || !afu->phb)
                return;

        phb = afu->phb;
        afu->phb = NULL;

        pci_remove_root_bus(phb->bus);
        /*
         * We don't free phb here - that's handled by
         * pcibios_free_controller_deferred()
         */
}

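/* Return true if the device sits on a cxl vPHB rather than a real PHB. */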
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
        struct pci_controller *phb;

        phb = pci_bus_to_host(dev->bus);
        return (phb->ops == &cxl_pcie_pci_ops);
}

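/*
 * Exported helpers for drivers bound to vPHB devices: map a PCI device back
 * to its cxl_afu and to the AFU configuration record it was enumerated from.
 */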
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
        struct pci_controller *phb;

        phb = pci_bus_to_host(dev->bus);
        return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
        return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);