vha_plat_apollo.c

/*!
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
#include <linux/dma-mapping.h>
#else
#include <linux/dma-map-ops.h>
#endif
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mod_devicetable.h>

#include "uapi/version.h"
#include "vha_common.h"
#include "vha_plat.h"

#define DEVICE_NAME "vha"

#define IS_APOLLO_DEVICE(devid) ((devid) == PCI_APOLLO_DEVICE_ID)

/*
 * from TCF Support FPGA.Technical Reference
 * Manual.1.0.92.Internal Atlas GEN.External.doc:
 */
/* Atlas - System control register bar */
#define PCI_ATLAS_SYS_CTRL_REGS_BAR (0)
/* Atlas - System control register offset */
#define PCI_ATLAS_SYS_CTRL_REGS_OFFSET (0x0000)
/* Atlas - Offset of INTERRUPT_STATUS */
/*#define PCI_ATLAS_INTERRUPT_STATUS (0x00E0)*/
/* Atlas - Offset of INTERRUPT_ENABLE */
/*#define PCI_ATLAS_INTERRUPT_ENABLE (0x00F0)*/
/* Atlas - Offset of INTERRUPT_CLEAR */
/*#define PCI_ATLAS_INTERRUPT_CLEAR (0x00F8)*/
/* Atlas - Master interrupt enable */
#define PCI_ATLAS_MASTER_ENABLE (1<<31)
/* Atlas - Device interrupt */
#define PCI_ATLAS_DEVICE_INT (1<<13)
/* Atlas - SCB Logic soft reset */
#define PCI_ATLAS_SCB_RESET (1<<4)
/* Atlas - PDP2 soft reset */
#define PCI_ATLAS_PDP2_RESET (1<<3)
/* Atlas - PDP1 soft reset */
#define PCI_ATLAS_PDP1_RESET (1<<2)
/* Atlas - soft reset the DDR logic */
#define PCI_ATLAS_DDR_RESET (1<<1)
/* Atlas - soft reset the device under test */
#define PCI_ATLAS_DUT_RESET (1<<0)
#define PCI_ATLAS_RESET_REG_OFFSET (0x0080)
#define PCI_ATLAS_RESET_BITS (PCI_ATLAS_DDR_RESET | PCI_ATLAS_DUT_RESET \
		| PCI_ATLAS_PDP1_RESET | PCI_ATLAS_PDP2_RESET | \
		PCI_ATLAS_SCB_RESET)

/* Apollo - Offset of INTERRUPT_STATUS */
#define PCI_APOLLO_INTERRUPT_STATUS (0x00C8)
/* Apollo - Offset of INTERRUPT_ENABLE */
#define PCI_APOLLO_INTERRUPT_ENABLE (0x00D8)
/* Apollo - Offset of INTERRUPT_CLEAR */
#define PCI_APOLLO_INTERRUPT_CLEAR (0x00E0)
/* Apollo - DCM Logic soft reset */
#define PCI_APOLLO_DCM_RESET (1<<10)
#define PCI_APOLLO_RESET_BITS (PCI_ATLAS_RESET_BITS | PCI_APOLLO_DCM_RESET)

#define PCI_ATLAS_TEST_CTRL (0xb0)
#define PCI_APOLLO_TEST_CTRL (0x98)

#define PCI_ATLAS_VENDOR_ID (0x1010)
#define PCI_ATLAS_DEVICE_ID (0x1CF1)
#define PCI_APOLLO_DEVICE_ID (0x1CF2)

/* Number of core cycles used to measure the core clock frequency */
#define FREQ_MEASURE_CYCLES 0x7fffff

/*#define FPGA_IMAGE_REV_OFFSET (0x604)
#define FPGA_IMAGE_REV_MASK (0xFFFF)*/

/* Parameters applicable when using bus master mode */
static unsigned long contig_phys_start;
module_param(contig_phys_start, ulong, 0444);
MODULE_PARM_DESC(contig_phys_start,
		"Physical address of start of contiguous region");

static uint32_t contig_size;
module_param(contig_size, uint, 0444);
MODULE_PARM_DESC(contig_size,
		"Size of contiguous region: takes precedence over any PCI based memory");

static uint32_t fpga_heap_type = IMG_MEM_HEAP_TYPE_UNIFIED;
module_param(fpga_heap_type, uint, 0444);
MODULE_PARM_DESC(fpga_heap_type, "FPGA primary heap type");

static unsigned long pci_size;
module_param(pci_size, ulong, 0444);
MODULE_PARM_DESC(pci_size,
		"Physical size in bytes; when 0 (default), use all memory in the PCI bar");

static unsigned long pci_offset;
module_param(pci_offset, ulong, 0444);
MODULE_PARM_DESC(pci_offset, "Offset from PCI bar start (default: 0)");

static bool mem_static_kptr = true;
module_param(mem_static_kptr, bool, 0444);
MODULE_PARM_DESC(mem_static_kptr,
		"Creates static kernel mapping for fpga memory");
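/*
 * Illustrative usage only (the module file name and addresses below are
 * assumptions, not taken from this file): a contiguous carveout can be
 * taken from the main DDR by reserving it on the kernel command line and
 * pointing the parameters above at the reservation, e.g.
 *
 *	kernel command line:  memmap=512M$0x40000000
 *	module load:          insmod vha.ko contig_phys_start=0x40000000 \
 *	                             contig_size=0x20000000
 *
 * When contig_size is left at 0 (the default), the memory behind the PCI
 * bar is used instead (see vha_plat_probe() below).
 */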
/*
 * Special handling (not implemented) is required for the VHA device
 * to be able to access both carveout buffers (internal memory) and
 * dmabuf buffers (system memory). The latter have to go through
 * the system bus to be accessed, whereas the former do not.
 */
static struct heap_config vha_plat_fpga_heap_configs[] = {
	/* Primary heap used for internal allocations */
#ifdef FPGA_BUS_MASTERING
	{
		.type = -1, /* selected with fpga_heap_type */
		.options = {
			.unified.gfp_type = GFP_DMA32 | __GFP_ZERO,
			.unified.max_order = 4,
		},
		.to_dev_addr = NULL,
		.to_host_addr = NULL,
	},
#elif CONFIG_GENERIC_ALLOCATOR
	{
		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
		/* .options.carveout to be filled at run time */
		/* .to_dev_addr to be filled at run time */
		/* .to_host_addr to be filled at run time */
	},
#else
#error Neither FPGA_BUS_MASTERING nor CONFIG_GENERIC_ALLOCATOR was defined
#endif

	/* Secondary heap used for importing an external memory */
#ifdef FPGA_BUS_MASTERING
	{
		.type = IMG_MEM_HEAP_TYPE_ANONYMOUS,
	},
#endif
#if CONFIG_DMA_SHARED_BUFFER
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
#ifndef FPGA_BUS_MASTERING
		.options.dmabuf = {
			.use_sg_dma = true,
		},
#endif
	},
#endif
};

static const int vha_plat_fpga_heaps =
	sizeof(vha_plat_fpga_heap_configs)/sizeof(*vha_plat_fpga_heap_configs);

static const struct pci_device_id pci_pci_ids[] = {
	{ PCI_DEVICE(PCI_ATLAS_VENDOR_ID, PCI_ATLAS_DEVICE_ID), },
	{ PCI_DEVICE(PCI_ATLAS_VENDOR_ID, PCI_APOLLO_DEVICE_ID), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pci_pci_ids);

struct imgpci_prvdata {
	int irq;
	struct {
		unsigned long addr;
		unsigned long size;
		void __iomem *km_addr;
	} memmap[3];
	struct pci_dev *pci_dev;
};

struct img_pci_driver {
	struct pci_dev *pci_dev;
	struct pci_driver pci_driver;
};

static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id);
static void vha_plat_remove(struct pci_dev *dev);

static int vha_plat_suspend(struct device *dev);
static int vha_plat_resume(struct device *dev);
static int vha_plat_runtime_idle(struct device *dev);
static int vha_plat_runtime_suspend(struct device *dev);
static int vha_plat_runtime_resume(struct device *dev);

static struct dev_pm_ops vha_pm_plat_ops = {
#ifdef FPGA_BUS_MASTERING
	/* Runtime pm will not work with fpga internal memory
	 * because pci bus driver suspend is also called,
	 * which disables core/mem clocks */
	SET_RUNTIME_PM_OPS(vha_plat_runtime_suspend,
			vha_plat_runtime_resume, vha_plat_runtime_idle)
#endif
	SET_SYSTEM_SLEEP_PM_OPS(vha_plat_suspend, vha_plat_resume)
};

static ssize_t info_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "VHA FPGA driver version : " VERSION_STRING "\n");
}

static DRIVER_ATTR_RO(info);

static struct attribute *drv_attrs[] = {
	&driver_attr_info.attr,
	NULL
};
ATTRIBUTE_GROUPS(drv);

static struct img_pci_driver vha_pci_drv = {
	.pci_driver = {
		.name = "vha_pci",
		.id_table = pci_pci_ids,
		.probe = vha_plat_probe,
		.remove = vha_plat_remove,
		.driver = {
			.pm = &vha_pm_plat_ops,
			.groups = drv_groups,
		}
	},
};

static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;

static int interrupt_status_reg = -1;
static int interrupt_clear_reg = -1;
static int interrupt_enable_reg = -1;
static int test_ctrl_reg = -1;

static unsigned int fpga_readreg32(struct imgpci_prvdata *data,
		int bar, unsigned long offset)
{
	void __iomem *reg =
		(void __iomem *)(data->memmap[bar].km_addr + offset);

	return ioread32(reg);
}

static void fpga_writereg32(struct imgpci_prvdata *data,
		int bar, unsigned long offset, int val)
{
	void __iomem *reg =
		(void __iomem *)(data->memmap[bar].km_addr + offset);

	iowrite32(val, reg);
}

static void reset_fpga(struct pci_dev *dev,
		struct imgpci_prvdata *data, unsigned int mask)
{
	uint32_t bits = 0;

	if (!dev)
		return;

	bits = PCI_APOLLO_RESET_BITS;

	dev_dbg(&dev->dev, "reset fpga!\n");
	bits &= mask;
	if (bits) {
		uint32_t val = fpga_readreg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET);

		val &= ~bits;
		fpga_writereg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET, val);
		udelay(100); /* arbitrary delays, just in case! */
		val |= bits;
		fpga_writereg32(data, 0, PCI_ATLAS_RESET_REG_OFFSET, val);
		/* If not only the DUT is reset, add a longer delay */
		if (mask != PCI_ATLAS_DUT_RESET)
			msleep(100);
		else
			udelay(100); /* arbitrary delays, just in case! */
	}
	dev_dbg(&dev->dev, "reset fpga done!\n");
}
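/*
 * For reference, the two reset masks used with reset_fpga() in this file:
 * probe and deinit issue a full reset with mask ~0, while suspend/resume
 * and the post-probe workaround only pulse the DUT reset line:
 *
 *	reset_fpga(pci_dev, data, ~0);
 *	reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
 */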
static void fpga_clear_irq(struct imgpci_prvdata *data, unsigned int intstatus)
{
	unsigned int max_retries = 1000;

	while (fpga_readreg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
				interrupt_status_reg) && max_retries--)
		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
				interrupt_clear_reg,
				(PCI_ATLAS_MASTER_ENABLE | intstatus));
}

static irqreturn_t pci_thread_irq(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;

	return vha_handle_thread_irq(&dev->dev);
}

static irqreturn_t pci_isrcb(int irq, void *dev_id)
{
	unsigned int intstatus;
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);
	irqreturn_t ret = IRQ_NONE;

	if (data == NULL || dev_id == NULL) {
		/* spurious interrupt: not yet initialised. */
		goto exit;
	}

	intstatus = fpga_readreg32(data,
			PCI_ATLAS_SYS_CTRL_REGS_BAR,
			interrupt_status_reg);

	if (intstatus) {
		ret = vha_handle_irq(&dev->dev);
		/*
		 * We need to clear interrupts for the embedded device
		 * via the fpga interrupt controller...
		 */
		fpga_clear_irq(data, intstatus);
	} else {
		/* either a spurious interrupt, or, more likely,
		 * a shared interrupt line, which will be handled by another driver
		 */
		goto exit;
	}

exit:
	return ret;
}

/*
 * IO hooks: the address bus for hw registers is 32bit!
 */
uint64_t vha_plat_read64(void *addr)
{
	return (uint64_t)readl((const volatile void __iomem *)addr) |
		((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
}

void vha_plat_write64(void *addr, uint64_t val)
{
	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
	writel((uint32_t)(val >> 32), (volatile void __iomem *)addr + 4);
}

int vha_plat_deinit(void)
{
	struct pci_dev *dev = vha_pci_drv.pci_dev;
	int ret;

	if (dev) {
		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

		if (data) {
			/* reset the hardware */
			reset_fpga(data->pci_dev, data, ~0);
		} else {
			dev_dbg(&dev->dev,
				"%s: prv data not found, HW reset omitted\n",
				__func__);
		}
	} else {
		pr_debug("%s: dev missing, HW reset omitted\n", __func__);
	}

	/* Unregister the driver from the OS */
	pci_unregister_driver(&(vha_pci_drv.pci_driver));

	ret = vha_deinit();
	if (ret)
		pr_err("VHA driver deinit failed\n");

	return ret;
}

#ifdef CONFIG_GENERIC_ALLOCATOR
static phys_addr_t carveout_to_dev_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr - offset >= base && addr < base + size - offset)
		return addr - base;

	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
			__func__, base, size, offset, addr);
	WARN_ON(1);

	return addr;
}

static phys_addr_t carveout_to_host_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr < size - offset)
		return base + addr;

	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
			__func__, base, size, offset, addr);
	WARN_ON(1);

	return addr;
}
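/*
 * Worked example (illustrative values only): with carveout.phys = 0x80000000,
 * size = 256MB and offs = 0, carveout_to_dev_addr() maps host physical
 * address 0x80001000 to device address 0x1000, and carveout_to_host_addr()
 * maps device address 0x1000 back to 0x80001000. A non-zero offs (the
 * pci_offset module parameter) shifts the window that both conversions
 * accept.
 */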
static void *carveout_get_kptr(phys_addr_t addr,
		size_t size, enum img_mem_attr mattr)
{
	/*
	 * Device memory is I/O memory and, as a rule, it cannot
	 * be dereferenced safely without memory barriers; that
	 * is why it is guarded by __iomem (return of ioremap)
	 * and checked by sparse. It is accessed only through
	 * ioread32(), iowrite32(), etc.
	 *
	 * On x86 this memory can be dereferenced and safely
	 * accessed, i.e. an __iomem pointer can be cast to
	 * a regular void * pointer. We cast it here, assuming
	 * the FPGA host is x86, and add __force to silence the
	 * sparse warning.
	 *
	 * Note: a system memory carveout can be used with caching turned on.
	 */
	void *kptr = NULL;

	if (mattr & IMG_MEM_ATTR_UNCACHED)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
		kptr = (void * __force *)ioremap_nocache(addr, size);
#else
		kptr = (void * __force *)ioremap(addr, size);
#endif
	else if (mattr & IMG_MEM_ATTR_CACHED)
		kptr = (void * __force *)ioremap_cache(addr, size);
	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
		kptr = (void * __force *)ioremap_wc(addr, size);

	return kptr;
}

static int carveout_put_kptr(void *addr)
{
	iounmap((volatile void __iomem *)addr);
	return 0;
}
#endif
static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id)
{
	int bar, ret = 0;
	struct imgpci_prvdata *data;
	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
	struct device *dev = &pci_dev->dev;
	int heap;

	dev_dbg(dev, "probing device, pci_dev\n");

	/* Enable the device */
	ret = pci_enable_device(pci_dev);
	if (ret)
		goto out_free;

	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %#llx\n",
				__func__, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}
	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));
	ret = dma_set_mask(dev, dma_get_mask(dev));
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		goto out_disable;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pci_dev, "imgpci");
	if (ret)
		goto out_disable;

	/* Create a kernel space mapping for each of the bars */
	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out_release;
	}
	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);

	for (bar = 0; bar < 3; bar++) {
		data->memmap[bar].size = pci_resource_len(pci_dev, bar);
		data->memmap[bar].addr = pci_resource_start(pci_dev, bar);
		if (bar == 2) {
			if (pci_size)
				data->memmap[bar].size = pci_size;
			/* ioremap fpga memory only when static mode is used */
			if (!mem_static_kptr)
				continue;
		}
		if (data->memmap[bar].size > maxmapsize) {
			/*
			 * We avoid mapping too big regions: we do not need
			 * such a big amount of memory and some times we do
			 * not have enough contiguous 'vmallocable' memory.
			 */
			dev_warn(dev, "not mapping all mem for bar %u\n", bar);
			data->memmap[bar].size = maxmapsize;
		}
		data->memmap[bar].km_addr = devm_ioremap(dev,
				pci_resource_start(pci_dev, bar),
				data->memmap[bar].size);

		dev_dbg(dev, "[bar %u] addr: 0x%lx size: 0x%lx km: 0x%p\n",
				bar, data->memmap[bar].addr,
				data->memmap[bar].size,
				data->memmap[bar].km_addr);
	}

	/* Get the IRQ...*/
	data->irq = pci_dev->irq;
	data->pci_dev = pci_dev;
	vha_pci_drv.pci_dev = pci_dev;

	reset_fpga(pci_dev, data, ~0);

	interrupt_status_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
		PCI_APOLLO_INTERRUPT_STATUS;
	interrupt_clear_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
		PCI_APOLLO_INTERRUPT_CLEAR;
	interrupt_enable_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
		PCI_APOLLO_INTERRUPT_ENABLE;
	test_ctrl_reg = PCI_ATLAS_SYS_CTRL_REGS_OFFSET +
		PCI_APOLLO_TEST_CTRL;

	/*
	 * We need to enable interrupts for the embedded device
	 * via the fpga interrupt controller...
	 */
	{
		unsigned int ena;

		ena = fpga_readreg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
				interrupt_enable_reg);
		ena |= PCI_ATLAS_MASTER_ENABLE | PCI_ATLAS_DEVICE_INT;

		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
				interrupt_enable_reg, ena);

		fpga_clear_irq(data, ena);
	}

#ifdef FPGA_BUS_MASTERING
	dev_dbg(dev, "enabling FPGA bus mastering\n");
	fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR, test_ctrl_reg, 0x0);
#else
	/* Route to internal RAM - this is reset value */
	dev_dbg(dev, "disabling FPGA bus mastering\n");
	fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR, test_ctrl_reg, 0x1);
#endif

	/* patch heap config with PCI memory addresses */
	for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
		struct heap_config *cfg = &vha_plat_fpga_heap_configs[heap];

#ifdef CONFIG_GENERIC_ALLOCATOR
		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
			if (contig_size && contig_phys_start) {
				/*
				 * Two types of carveout memory are supported:
				 * 1. memory carved out of the main DDR
				 *    memory region,
				 *    eg: linux boot option memmap=512M$0x5CAFFFFF
				 *    This is configured using the module
				 *    parameters contig_phys_start and contig_size.
				 * 2. DDR populated in the actual PCI card,
				 *    in BAR 4.
				 * The module parameters take precedence
				 * over PCI memory.
				 */
				cfg->options.carveout.phys = contig_phys_start;
				cfg->options.carveout.size = contig_size;
				cfg->to_dev_addr = NULL;
				cfg->to_host_addr = NULL;
				dev_info(dev, "using %uMB CARVEOUT at 0x%lx\n",
						contig_size/1024/1024,
						contig_phys_start);
			} else {
				cfg->options.carveout.phys =
					data->memmap[2].addr;
				if (mem_static_kptr)
					cfg->options.carveout.kptr =
						data->memmap[2].km_addr;
				cfg->options.carveout.size =
					data->memmap[2].size;
				cfg->options.carveout.offs = pci_offset;
				cfg->to_dev_addr = carveout_to_dev_addr;
				cfg->to_host_addr = carveout_to_host_addr;
				dev_info(dev, "using %zuMB CARVEOUT from PCI at 0x%llx\n",
						cfg->options.carveout.size/1024/1024,
						cfg->options.carveout.phys);
			}
			/* IO memory access callbacks */
			if (!mem_static_kptr) {
				/* Dynamic kernel memory mapping */
				cfg->options.carveout.get_kptr = carveout_get_kptr;
				cfg->options.carveout.put_kptr = carveout_put_kptr;
			}
			break;
		}
#endif

		if (cfg->type == IMG_MEM_HEAP_TYPE_COHERENT) {
			ret = dma_declare_coherent_memory(dev,
					contig_phys_start, contig_phys_start,
					contig_size
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
					, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE
#else
					, DMA_MEMORY_EXCLUSIVE
#endif
#endif
					);
			if (ret == 0) {
				dev_err(dev, "failed to initialize coherent memory!\n");
				/*
				 * We will fall back to the default pool anyway:
				 * goto out_release;
				 */
			}
			break;
		}
	}

#ifdef FPGA_BUS_MASTERING
	/* Allow the core driver to control pm_runtime */
	pm_runtime_allow(dev);
#endif

	ret = vha_add_dev(dev, vha_plat_fpga_heap_configs,
			vha_plat_fpga_heaps, data,
			data->memmap[1].km_addr, data->memmap[1].size);
	if (ret) {
		dev_err(dev, "failed to initialize driver core!\n");
		goto out_heap_deinit;
	}

	/*
	 * Reset the FPGA DUT only after disabling clocks in
	 * vha_add_dev() -> get properties.
	 * This workaround is required to ensure that
	 * clocks (on the daughter board) are enabled for test slave scripts to
	 * read the FPGA build version register.
	 * NOTE: Asserting other bits, like the DDR reset bit, causes problems
	 * with the bus mastering feature, and thus results in memory failures.
	 */
	reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);

	{
		/*uint32_t fpga_rev = fpga_readreg32(data, 1,
			FPGA_IMAGE_REV_OFFSET) & FPGA_IMAGE_REV_MASK;
		dev_dbg(dev, "fpga image revision: 0x%x\n", fpga_rev);
		if (!fpga_rev || fpga_rev == 0xdead1) {
			dev_err(dev, "fpga revision incorrect (0x%x)!\n",
					fpga_rev);
			goto out_rm_dev;
		}*/
	}

	/* Install the ISR callback...*/
	ret = devm_request_threaded_irq(dev, data->irq, &pci_isrcb,
			&pci_thread_irq, IRQF_SHARED, DEVICE_NAME,
			(void *)pci_dev);
	if (ret) {
		dev_err(dev, "failed to request irq!\n");
		goto out_rm_dev;
	}
	dev_dbg(dev, "registered irq %d\n", data->irq);

	/* Try to calibrate the core if needed */
	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
	if (ret) {
		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
		goto out_rm_dev;
	}
	return ret;

out_rm_dev:
	vha_rm_dev(dev);

out_heap_deinit:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
	/* Release any declared mem regions */
	dma_release_declared_memory(dev);
#endif

out_release:
	pci_release_regions(pci_dev);

out_disable:
	pci_disable_device(pci_dev);

out_free:
	return ret;
}
static void vha_plat_remove(struct pci_dev *dev)
{
	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

	dev_dbg(&dev->dev, "removing device\n");

	if (data == NULL) {
		dev_err(&dev->dev, "PCI priv data missing!\n");
	} else {
		/*
		 * We need to disable interrupts for the
		 * embedded device via the fpga interrupt controller...
		 */
		fpga_writereg32(data, PCI_ATLAS_SYS_CTRL_REGS_BAR,
				interrupt_enable_reg, 0x00000000);

#ifdef FPGA_BUS_MASTERING
		/* Route to internal RAM - this is reset value */
		dev_dbg(&dev->dev, "disabling FPGA bus mastering\n");
		fpga_writereg32(data,
				PCI_ATLAS_SYS_CTRL_REGS_BAR,
				test_ctrl_reg, 0x1);
#endif
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
	/* Release any declared mem regions */
	dma_release_declared_memory(&dev->dev);
#endif
	pci_release_regions(dev);
	pci_disable_device(dev);

	vha_rm_dev(&dev->dev);

#ifdef FPGA_BUS_MASTERING
	pm_runtime_forbid(&dev->dev);
#endif
}
#ifdef CONFIG_PM
static int vha_plat_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = vha_pci_drv.pci_dev;
	struct imgpci_prvdata *data = vha_get_plat_data(dev);
	int ret;

	dev_dbg(dev, "suspend device\n");

	ret = vha_suspend_dev(dev);
	if (!ret)
		reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
	else
		dev_err(dev, "failed to suspend!\n");

	return ret;
}

static int vha_plat_resume(struct device *dev)
{
	struct pci_dev *pci_dev = vha_pci_drv.pci_dev;
	struct imgpci_prvdata *data = vha_get_plat_data(dev);
	int ret;

	dev_dbg(dev, "resume device\n");

	reset_fpga(pci_dev, data, PCI_ATLAS_DUT_RESET);
	ret = vha_resume_dev(dev);
	if (ret)
		dev_err(dev, "failed to resume!\n");

	return ret;
}

static int __maybe_unused vha_plat_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}

static int __maybe_unused vha_plat_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}

static int __maybe_unused vha_plat_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s\n", __func__);
	return 0;
}
#endif
int vha_plat_init(void)
{
	int ret;

#ifdef FPGA_BUS_MASTERING
	vha_plat_fpga_heap_configs[0].type = fpga_heap_type;
#endif

	ret = pci_register_driver(&vha_pci_drv.pci_driver);
	if (ret) {
		pr_err("failed to register PCI driver!\n");
		return ret;
	}

	/* pci_dev should be set in probe */
	if (!vha_pci_drv.pci_dev) {
		pr_err("failed to find VHA PCI dev!\n");
		pci_unregister_driver(&vha_pci_drv.pci_driver);
		return -ENODEV;
	}

	return 0;
}