vha_plat_orion.c

/*!
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
/*
 * Things left to be done at a later point as of 28/02/2019:
 *
 * - Maybe add code to set the DUT clock
 * FIXME: Find a way to get DUT register size from .def files
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)
#include <linux/dma-mapping.h>
#else
#include <linux/dma-map-ops.h>
#endif
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/workqueue.h>

#include "uapi/version.h"
#include "vha_common.h"
#include "vha_plat.h"

#if defined(CFG_SYS_AURA)
#include <hwdefs/aura_system.h>
#elif defined(CFG_SYS_MIRAGE)
#include <hwdefs/mirage_system.h>
#else
#error System configuration not supported!
#endif

#define DEVICE_NAME "vha"

#define IS_SIRIUS_DEVICE(devid) ((devid) == PCI_SIRIUS_DEVICE_ID)

/*
 * from Sirius TRM rev 1.0.3
 */
#define PCI_SIRIUS_VENDOR_ID (0x1AEE)
#define PCI_SIRIUS_DEVICE_ID (0x1020)

/* Sirius - System control register bar */
#define PCI_SIRIUS_SYS_CTRL_REGS_BAR (0)
#define PCI_SIRIUS_SYS_CTRL_BASE_OFFSET (0x0000)

/* srs_core */
#define PCI_SIRIUS_SRS_CORE_ID (0x0000)
#define PCI_SIRIUS_SRS_CORE_REVISION (0x0004)
#define PCI_SIRIUS_SRS_CORE_CHANGE_SET (0x0008)
#define PCI_SIRIUS_SRS_CORE_USER_ID (0x000C)
#define PCI_SIRIUS_SRS_CORE_USER_BUILD (0x0010)
#define PCI_SIRIUS_SRS_CORE_SOFT_RESETN (0x0080)
#define PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN (0x0084)
#define PCI_SIRIUS_SRS_CORE_SOFT_AUTO_RESETN (0x0088)
#define PCI_SIRIUS_SRS_CORE_CLK_GEN_RESET (0x0090)
#define PCI_SIRIUS_SRS_CORE_NUM_GPIO (0x0180)
#define PCI_SIRIUS_SRS_CORE_GPIO_EN (0x0184)
#define PCI_SIRIUS_SRS_CORE_GPIO (0x0188)
#define PCI_SIRIUS_SRS_CORE_SPI_MASTER_IFACE (0x018C)
#define PCI_SIRIUS_SRS_CORE_SYS_IP_STATUS (0x0200)
#define PCI_SIRIUS_SRS_CORE_CORE_CONTROL (0x0204)
#define PCI_SIRIUS_SRS_CORE_REG_BANK_STATUS (0x0208)
#define PCI_SIRIUS_SRS_CORE_MMCM_LOCK_STATUS (0x020C)
#define PCI_SIRIUS_SRS_CORE_GIST_STATUS (0x0210)
#define PCI_SIRIUS_SRS_CORE_SENSOR_BOARD (0x0214)

/* srs_core bits definitions */
#define DUT_SOFT_RESETN_DUT_SOFT_RESETN_EXTERNAL (1 << 0)

/* srs_clk_blk */
#define PCI_SIRIUS_CLOCK_CTRL_BASE_OFFSET (0x2000)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV1 (0x0020)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV2 (0x0024)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_OUT_DIV3 (0x001C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_REG_CLK_OUT_DIV1 (0x0028)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_REG_CLK_OUT_DIV2 (0x002C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT1 (0x0050)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT2 (0x0054)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_MULT3 (0x004C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_CORE_CLK_IN_DIV (0x0058)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV1 (0x0220)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV2 (0x0224)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_OUT_DIV3 (0x021C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_MEM_CLK_OUT_DIV1 (0x0228)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_MEM_CLK_OUT_DIV2 (0x022C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT1 (0x0250)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT2 (0x0254)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_MULT3 (0x024C)
#define PCI_SIRIUS_SRS_CLK_BLK_DUT_SYS_CLK_IN_DIV (0x0258)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV1 (0x0620)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV2 (0x0624)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_OUT_DIV3 (0x061C)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_MEM_CLK_OUT_DIV1 (0x0628)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_MEM_CLK_OUT_DIV2 (0x062C)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT1 (0x0650)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT2 (0x0654)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_MULT3 (0x064C)
#define PCI_SIRIUS_SRS_CLK_BLK_PDP_PIXEL_CLK_IN_DIV (0x0658)

#define PCI_SIRIUS_SRS_REG_SIZE (0x1000)

/* Interrupts are part of CORE */
#define PCI_SIRIUS_CORE_INTERRUPT_STATUS (0x0218)
#define PCI_SIRIUS_CORE_INTERRUPT_ENABLE (0x021C)
#define PCI_SIRIUS_CORE_INTERRUPT_CLR (0x0220)
#define PCI_SIRIUS_CORE_INTERRUPT_TEST (0x0224)
#define PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR (0x0228)
#define PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR_CLR (1 << 1)

/* interrupt bits definitions */
#define SIRIUS_INTERRUPT_MASTER_ENABLE (1 << 31)
#define SIRIUS_INTERRUPT_DUT0 (1 << 0)
#define SIRIUS_INTERRUPT_DUT1 (1 << 1)
#define SIRIUS_INTERRUPT_I2C (1 << 2)
#define SIRIUS_INTERRUPT_SPI (1 << 3)
#define SIRIUS_INTERRUPT_PDP (1 << 1)
#define SIRIUS_INTERRUPT_APM (1 << 4)
#define SIRIUS_INTERRUPT_ALL (SIRIUS_INTERRUPT_DUT0 | SIRIUS_INTERRUPT_DUT1 | SIRIUS_INTERRUPT_I2C | \
		SIRIUS_INTERRUPT_SPI | SIRIUS_INTERRUPT_PDP | SIRIUS_INTERRUPT_APM)

/* Sirius - Device Under Test (DUT) register bar */
#define PCI_SIRIUS_DUT_REGS_BAR (2)
#define PCI_SIRIUS_DUT_MEM_BAR (4)

/* Number of core cycles used to measure the core clock frequency */
#define FREQ_MEASURE_CYCLES 0x7fffff

/* Parameters applicable when using bus master mode */
static unsigned long contig_phys_start;
module_param(contig_phys_start, ulong, 0444);
MODULE_PARM_DESC(contig_phys_start, "Physical address of start of contiguous region");

static uint32_t contig_size;
module_param(contig_size, uint, 0444);
MODULE_PARM_DESC(contig_size, "Size of contiguous region: takes precedence over any PCI based memory");

static uint32_t fpga_heap_type = IMG_MEM_HEAP_TYPE_UNIFIED;
module_param(fpga_heap_type, uint, 0444);
MODULE_PARM_DESC(fpga_heap_type, "FPGA primary heap type");

static unsigned long pci_size;
module_param(pci_size, ulong, 0444);
MODULE_PARM_DESC(pci_size, "Physical size in bytes. When 0 (the default), use all memory in the PCI bar");

static unsigned long pci_offset;
module_param(pci_offset, ulong, 0444);
MODULE_PARM_DESC(pci_offset, "Offset from the PCI bar start (default: 0)");

enum pci_irq_type {
	IRQ_TYPE_AUTO = 0,
	IRQ_TYPE_INTA = 1,
	IRQ_TYPE_MSI = 2,
};

static unsigned long pci_irq_type = IRQ_TYPE_AUTO;
module_param(pci_irq_type, ulong, 0444);
MODULE_PARM_DESC(pci_irq_type, "Type of IRQ: 0: Auto, 1: INTA, 2: MSI");

/* Some Orion DUT images include two DUTs, so we need to allow selecting which one to use at load time */
static unsigned long dut_id = 0;
module_param(dut_id, ulong, 0444);
MODULE_PARM_DESC(dut_id, "DUT the driver tries to address. Valid: {0, 1} (default: 0)");

/* Maximum DUT_ID allowed */
#define MAX_DUT_ID (1)

static uint32_t sirius_dut_register_offset[] = {
	0x00000000, /* DUT 0 */
	0x20000000, /* DUT 1 */
};

static uint32_t sirius_dut_interrupt_bit[] = {
	SIRIUS_INTERRUPT_DUT0, /* DUT 0 */
	SIRIUS_INTERRUPT_DUT1, /* DUT 1 */
};
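
/*
 * Usage sketch (illustrative only; the module name and the addresses below
 * are assumptions, not taken from this file):
 *   insmod vha.ko dut_id=1 pci_irq_type=2 \
 *          contig_phys_start=0x5D000000 contig_size=0x20000000
 * selects DUT 1, forces MSI interrupts and points the carveout heap at a
 * region reserved from Linux (e.g. with a memmap= boot option) instead of
 * the PCI BAR memory.
 */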

/*
 * Special handling (not implemented) is required for the VHA device
 * to be able to access both carveout buffers (internal memory) and
 * dmabuf buffers (system memory). The latter have to go through
 * the system bus to be accessed whereas the former do not.
 */
static struct heap_config vha_plat_fpga_heap_configs[] = {
	/* Primary heap used for internal allocations */
#ifdef FPGA_BUS_MASTERING
#error Bus mastering not supported
	{
		.type = -1, /* selected with fpga_heap_type */
		.options = {
			.unified.gfp_type = GFP_DMA32 | __GFP_ZERO,
			.coherent.gfp_flags = GFP_DMA32 | __GFP_ZERO,
		},
		.to_dev_addr = NULL,
		.to_host_addr = NULL,
	},
#elif CONFIG_GENERIC_ALLOCATOR
	{
		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
		/* .options.carveout to be filled at run time */
		/* .to_dev_addr to be filled at run time */
		/* .to_host_addr to be filled at run time */
		.cache_attr = IMG_MEM_ATTR_WRITECOMBINE,
	},
#else
#error Neither FPGA_BUS_MASTERING nor CONFIG_GENERIC_ALLOCATOR was defined
#endif
	/* Secondary heap used for importing an external memory */
#ifdef CONFIG_DMA_SHARED_BUFFER
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
		.to_host_addr = NULL,
	},
#else
#warning "Memory importing not supported!"
#endif
};

static const int vha_plat_fpga_heaps = sizeof(vha_plat_fpga_heap_configs)/
	sizeof(*vha_plat_fpga_heap_configs);

static const struct pci_device_id pci_pci_ids[] = {
	{ PCI_DEVICE(PCI_SIRIUS_VENDOR_ID, PCI_SIRIUS_DEVICE_ID), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pci_pci_ids);

enum { SRS_REG_BANK, INTC_REG_BANK, DUT_REG_BANK, DUT_MEM_BANK };

struct imgpci_prvdata {
	int irq;
	struct {
		int bar;
		unsigned long addr;
		unsigned long size;
		void __iomem *km_addr;
	} reg_bank[4];
	struct pci_dev *pci_dev;
};

struct img_pci_driver {
	struct pci_dev *pci_dev;
	struct pci_driver pci_driver;
	struct delayed_work irq_work;
};

static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id);
static void vha_plat_remove(struct pci_dev *dev);

static int vha_plat_suspend(struct device *dev);
static int vha_plat_resume(struct device *dev);

static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
		vha_plat_suspend, vha_plat_resume);

static ssize_t info_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "VHA Orion driver version : " VERSION_STRING "\n");
}

static DRIVER_ATTR_RO(info);

static struct attribute *drv_attrs[] = {
	&driver_attr_info.attr,
	NULL
};
ATTRIBUTE_GROUPS(drv);

static struct img_pci_driver vha_pci_drv = {
	.pci_driver = {
		.name = "vha_orion",
		.id_table = pci_pci_ids,
		.probe = vha_plat_probe,
		.remove = vha_plat_remove,
		.driver = {
			.groups = drv_groups,
			.pm = &vha_pm_plat_ops,
		}
	},
};

static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 1024;

/*
 * __readreg32 - Generic PCI bar read function
 */
static inline unsigned int __readreg32(struct imgpci_prvdata *data,
		int bank, unsigned long offset)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
			offset);
	return ioread32(reg);
}

/*
 * __writereg32 - Generic PCI bar write function
 */
static inline void __writereg32(struct imgpci_prvdata *data,
		int bank, unsigned long offset, int val)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
			offset);
	/*pr_err(">>> Writing to bank %d, offset 0x%04X value 0x%08X\n",
	 * bank, offset, val);*/
	iowrite32(val, reg);
}

/*
 * sirius_core_writereg32 - Write to Sirius control registers
 */
static inline void sirius_core_writereg32(struct imgpci_prvdata *data,
		unsigned long offset, int val)
{
	__writereg32(data, SRS_REG_BANK, offset, val);
}

/*
 * sirius_core_readreg32 - Read Sirius control registers
 */
static inline unsigned int sirius_core_readreg32(struct imgpci_prvdata *data,
		unsigned long offset)
{
	return __readreg32(data, SRS_REG_BANK, offset);
}

/*
 * sirius_intc_writereg32 - Write to Sirius interrupt controller registers
 */
static inline void sirius_intc_writereg32(struct imgpci_prvdata *data,
		unsigned long offset, int val)
{
	__writereg32(data, INTC_REG_BANK, offset, val);
}

/*
 * sirius_intc_readreg32 - Read Sirius interrupt controller registers
 */
static inline unsigned int sirius_intc_readreg32(struct imgpci_prvdata *data,
		unsigned long offset)
{
	return __readreg32(data, INTC_REG_BANK, offset);
}

/*
 * reset_dut - Reset the Device Under Test
 */
static void reset_dut(struct imgpci_prvdata *data)
{
	dev_dbg(&data->pci_dev->dev, "going to reset DUT fpga!\n");
	sirius_core_writereg32(data, PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN, 0);
	udelay(100); /* arbitrary delays, just in case! */
	sirius_core_writereg32(data,
			PCI_SIRIUS_SRS_CORE_DUT_SOFT_RESETN,
			DUT_SOFT_RESETN_DUT_SOFT_RESETN_EXTERNAL);
	msleep(500);
	dev_dbg(&data->pci_dev->dev, "DUT fpga reset done!\n");
}

/*
 * sirius_enable_int - Enable an interrupt
 */
static inline void sirius_enable_int(struct imgpci_prvdata *data, uint32_t intmask)
{
	uint32_t irq_enabled = sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE);
	/* Only allow enabling the DUT interrupt */
	intmask &= sirius_dut_interrupt_bit[dut_id];
	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE,
			irq_enabled | intmask | SIRIUS_INTERRUPT_MASTER_ENABLE);
}

/*
 * sirius_disable_int - Disable an interrupt
 */
static inline void sirius_disable_int(struct imgpci_prvdata *data, uint32_t intmask)
{
	uint32_t irq_enabled = sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE);
	/* Only allow disabling the DUT interrupt */
	intmask &= sirius_dut_interrupt_bit[dut_id];
	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_ENABLE,
			irq_enabled & ~intmask);
}

/*
 * sirius_read_int_status - Read interrupt status
 */
static inline uint32_t sirius_read_int_status(struct imgpci_prvdata *data)
{
	return sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_STATUS);
}

/*
 * sirius_ack_int - Ack interrupts
 */
static inline void sirius_ack_int(struct imgpci_prvdata *data, uint32_t intstatus)
{
	unsigned int max_retries = 1000;

	while ((sirius_core_readreg32(data, PCI_SIRIUS_CORE_INTERRUPT_STATUS) & intstatus) && max_retries--)
		sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_CLR,
				(SIRIUS_INTERRUPT_MASTER_ENABLE | intstatus));

	/*
	 * Temporary until the FPGA is updated:
	 * clear the "timeout" regardless of its status to work around bugs there.
	 */
	sirius_core_writereg32(data, PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR, PCI_SIRIUS_CORE_INTERRUPT_TIMEOUT_CLR_CLR);
}

/*
 * pci_thread_irq - High latency interrupt handler
 */
static irqreturn_t pci_thread_irq(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;

	return vha_handle_thread_irq(&dev->dev);
}

/*
 * pci_isr_cb - Low latency interrupt handler
 */
static irqreturn_t pci_isr_cb(int irq, void *dev_id)
{
	uint32_t intstatus;
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct imgpci_prvdata *data;
	irqreturn_t ret = IRQ_NONE;

	if (dev_id == NULL) {
		/* Spurious interrupt: not yet initialised. */
		pr_warn("Spurious interrupt data/dev_id not initialised!\n");
		goto exit;
	}
	data = vha_get_plat_data(&dev->dev);
	if (data == NULL) {
		/* Spurious interrupt: not yet initialised. */
		pr_warn("Invalid driver private data!\n");
		goto exit;
	}

	/* Read interrupt status register */
	intstatus = sirius_read_int_status(data);
	/* Now handle the ints */
	if (intstatus & sirius_dut_interrupt_bit[dut_id]) {
		/* call real irq handler */
		ret = vha_handle_irq(&dev->dev);
	} else {
		/* This is intentional: on this target the interrupt line cannot
		 * be shared because we are using MSI, so any interrupt that is
		 * not from the DUT is clearly spurious and unwanted, and means
		 * that one device on Sirius is not properly configured.
		 */
		dev_warn(&dev->dev,
			"%s: unexpected or spurious interrupt [%x]!\n",
			__func__, intstatus);
		WARN_ON(1);
	}

	/* Ack the ints */
	sirius_ack_int(data, intstatus);
exit:
	return ret;
}

/**
 * sirius_allocate_registers - Allocate memory for a register (or memory) bank
 * @pci_dev: the pci device
 * @data: pointer to the data
 * @bank: bank to set
 * @bar: BAR where the registers are
 * @base: base address in the BAR
 * @size: size of the register set
 */
static inline int sirius_allocate_registers(struct pci_dev *pci_dev,
		struct imgpci_prvdata *data, int bank,
		int bar, unsigned long base, unsigned long size)
{
	unsigned long bar_size = pci_resource_len(pci_dev, bar);
	unsigned long bar_addr = pci_resource_start(pci_dev, bar);
	unsigned long bar_max_size = bar_size - base;

	BUG_ON((base > bar_size) || ((base+size) > bar_size));

	data->reg_bank[bank].bar = bar;
	data->reg_bank[bank].addr = bar_addr + base;
	data->reg_bank[bank].size = min(size, bar_max_size);

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
	data->reg_bank[bank].km_addr = devm_ioremap_nocache(
			&pci_dev->dev, data->reg_bank[bank].addr,
			data->reg_bank[bank].size);
#else
	data->reg_bank[bank].km_addr = devm_ioremap(
			&pci_dev->dev, data->reg_bank[bank].addr,
			data->reg_bank[bank].size);
#endif
  479. pr_debug("[bank %u] bar:%d addr:%pa size:0x%lx km:0x%p\n",
  480. bank, bar, &data->reg_bank[bank].addr,
  481. data->reg_bank[bank].size,
  482. &data->reg_bank[bank].km_addr);

	return data->reg_bank[bank].km_addr == NULL;
}

int vha_plat_deinit(void)
{
	struct pci_dev *dev = vha_pci_drv.pci_dev;
	int ret;

	if (dev) {
		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

		if (data) {
			/* reset the hardware */
			reset_dut(data);
		} else {
			dev_dbg(&dev->dev,
				"%s: prv data not found, HW reset omitted\n",
				__func__);
		}
	} else {
		pr_debug("%s: dev missing, HW reset omitted\n",
			__func__);
	}

	/* Unregister the driver from the OS */
	pci_unregister_driver(&(vha_pci_drv.pci_driver));

	ret = vha_deinit();
	if (ret)
		pr_err("VHA driver deinit failed\n");

	return ret;
}

#define VHA_REGISTERS_START (_REG_START)
#define VHA_REGISTERS_END (_REG_SIZE)

#ifdef CONFIG_GENERIC_ALLOCATOR
static phys_addr_t carveout_to_dev_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr - offset >= base && addr < base + size - offset)
		return addr - base;

	pr_err("%s: unexpected addr! base 0x%llx size %zu offs %zu addr 0x%llx\n",
		__func__, base, size, offset, addr);
	WARN_ON(1);

	return addr;
}

static phys_addr_t carveout_to_host_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr < size - offset)
		return base + addr;

	pr_err("%s: unexpected addr! base %llx size %zu offs %zu addr %#llx\n",
		__func__, base, size, offset, addr);
	WARN_ON(1);

	return addr;
}
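
/*
 * Worked example (illustrative numbers, not from the TRM): with
 * carveout.phys = 0x80000000, carveout.size = 0x10000000 and
 * carveout.offs = 0, carveout_to_dev_addr() maps host physical address
 * 0x80001000 to device address 0x00001000 (addr - base), and
 * carveout_to_host_addr() maps device address 0x00001000 back to
 * 0x80001000 (base + addr). A non-zero offs only narrows the accepted
 * window (host addresses must lie in [base + offs, base + size - offs));
 * the returned value is still computed relative to base.
 */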

static void *carveout_get_kptr(phys_addr_t addr,
		size_t size, enum img_mem_attr mattr)
{
	/*
	 * Device memory is I/O memory and, as a rule, it cannot
	 * be dereferenced safely without memory barriers; that
	 * is why it is guarded by __iomem (returned by ioremap)
	 * and checked by sparse. It is accessed only through
	 * ioread32(), iowrite32(), etc.
	 *
	 * On x86 this memory can be dereferenced and safely
	 * accessed, i.e. an __iomem pointer can be cast to a
	 * regular void * pointer. We cast it here, assuming the
	 * FPGA host is x86, and add __force to silence the
	 * sparse warning.
	 *
	 * Note: a system memory carveout can be used with caching turned on.
	 */
	void *kptr = NULL;

	if (mattr & IMG_MEM_ATTR_UNCACHED)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
		kptr = (void * __force *)ioremap_nocache(addr, size);
#else
		kptr = (void * __force *)ioremap(addr, size);
#endif
	else if (mattr & IMG_MEM_ATTR_CACHED)
		kptr = (void * __force *)ioremap_cache(addr, size);
	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
		kptr = (void * __force *)ioremap_wc(addr, size);
  568. pr_debug("Mapping %zu bytes into kernel memory (Phys:%pa, Kptr:%p)\n", size, &addr, &kptr);
  569. pr_debug("[%c%c%c]\n",
  570. (mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
  571. (mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
  572. (mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');
  573. return kptr;
  574. }
  575. static int carveout_put_kptr(void *addr)
  576. {
  577. pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);
	iounmap(addr);
	return 0;
}
#endif

/*
 * IO hooks: we are on a 32-bit system, so only 32-bit accesses are available;
 * 64-bit registers are accessed as two 32-bit halves.
 * NOTE: a customer may want to use a spinlock to avoid
 * problems with multi-threaded IO access.
 */
uint64_t vha_plat_read64(void *addr)
{
	return (uint64_t)readl(addr) | ((uint64_t)readl(addr + 4) << 32);
}

void vha_plat_write64(void *addr, uint64_t val)
{
	writel(val & 0xffffffff, addr);
	writel(((uint64_t)val >> 32), addr + 4);
}
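
#if 0
/*
 * Illustrative sketch only (not part of the original driver): the note above
 * suggests a spinlock for multi-threaded IO access. One possible shape, with
 * the lock name and the "_locked" helpers being assumptions, serialises the
 * two 32-bit halves of each 64-bit access:
 */
static DEFINE_SPINLOCK(vha_plat_io_lock);

static uint64_t vha_plat_read64_locked(void *addr)
{
	unsigned long flags;
	uint64_t val;

	spin_lock_irqsave(&vha_plat_io_lock, flags);
	val = (uint64_t)readl(addr) | ((uint64_t)readl(addr + 4) << 32);
	spin_unlock_irqrestore(&vha_plat_io_lock, flags);
	return val;
}

static void vha_plat_write64_locked(void *addr, uint64_t val)
{
	unsigned long flags;

	spin_lock_irqsave(&vha_plat_io_lock, flags);
	writel(val & 0xffffffff, addr);
	writel(((uint64_t)val >> 32), addr + 4);
	spin_unlock_irqrestore(&vha_plat_io_lock, flags);
}
#endif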

static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id)
{
	int ret = 0;
	unsigned int int_type;
	struct imgpci_prvdata *data;
	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
	unsigned long vha_base_mem, vha_mem_size;
	struct device *dev = &pci_dev->dev;
	int heap;

	dev_dbg(dev, "probing device, pci_dev: %p\n", dev);

	/* Enable the device */
	if (pci_enable_device(pci_dev))
		goto out_free;

	dev_info(dev, "%s dma_get_mask : %#llx\n",
		__func__, dma_get_mask(dev));
	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
			__func__, dev->dma_mask, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n",
			__func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}
	dev_info(dev, "%s dma_set_mask %#llx\n",
		__func__, dma_get_mask(dev));
	ret = dma_set_mask(dev, dma_get_mask(dev));
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		goto out_disable;
	}

	/* Reserve PCI I/O and memory resources */
	if (pci_request_regions(pci_dev, "imgpci"))
		goto out_disable;

	/* Create a kernel space mapping for each of the bars */
	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		pr_err("Memory allocation error, aborting.\n");
		ret = -ENOMEM;
		goto out_release;
	}
	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);
	memset(data, 0, sizeof(*data));

	/* Allocate sirius base registers */
	ret = sirius_allocate_registers(pci_dev, data,
			SRS_REG_BANK, PCI_SIRIUS_SYS_CTRL_REGS_BAR,
			PCI_SIRIUS_SYS_CTRL_BASE_OFFSET,
			PCI_SIRIUS_SRS_REG_SIZE);
	if (ret) {
		dev_err(dev, "Can't allocate memory for sirius regs!");
		ret = -ENOMEM;
		goto out_release;
	}

	/* FIXME: Check if there is any way to know how many DUTs are on the system */
	if (dut_id > MAX_DUT_ID) {
		dev_err(dev, "Invalid DUT number (%ld), setting it to 0\n", dut_id);
		dut_id = 0;
	}

	/* Allocate DUT register space */
	ret = sirius_allocate_registers(pci_dev, data,
			DUT_REG_BANK, PCI_SIRIUS_DUT_REGS_BAR,
			VHA_REGISTERS_START + sirius_dut_register_offset[dut_id],
			VHA_REGISTERS_END);
	if (ret) {
		dev_err(dev, "Can't allocate memory for vha regs!");
		ret = -ENOMEM;
		goto out_release;
	}

	/* Allocate DUT memory space */
	vha_mem_size = pci_resource_len(pci_dev, PCI_SIRIUS_DUT_MEM_BAR);
	if (vha_mem_size > maxmapsize)
		vha_mem_size = maxmapsize;
	vha_base_mem = pci_resource_start(pci_dev, PCI_SIRIUS_DUT_MEM_BAR);

	/* change alloc size according to module parameter */
	if (pci_size)
		vha_mem_size = pci_size;

	/* We are not really allocating memory for that reg bank,
	 * so set the values by hand here: */
	data->reg_bank[DUT_MEM_BANK].bar = PCI_SIRIUS_DUT_MEM_BAR;
	data->reg_bank[DUT_MEM_BANK].addr = vha_base_mem;
	data->reg_bank[DUT_MEM_BANK].size = vha_mem_size;
	pr_debug("[bank %u] bar: %d addr: %pa size: 0x%lx\n",
			DUT_MEM_BANK, PCI_SIRIUS_DUT_MEM_BAR,
			&data->reg_bank[DUT_MEM_BANK].addr,
			data->reg_bank[DUT_MEM_BANK].size);

	/* Allocate MSI IRQ if any */
	switch (pci_irq_type) {
	default:
		int_type = PCI_IRQ_ALL_TYPES;
		break;
	case IRQ_TYPE_INTA:
		int_type = PCI_IRQ_LEGACY;
		break;
	case IRQ_TYPE_MSI:
		int_type = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		break;
	}
	ret = pci_alloc_irq_vectors(pci_dev, 1, 1, int_type);
	if (ret < 0) {
		dev_err(dev, "Can't reserve requested interrupt!");
		goto out_release;
	}

	/* Get the proper IRQ */
	data->irq = pci_irq_vector(pci_dev, 0);
	data->pci_dev = pci_dev;
	vha_pci_drv.pci_dev = pci_dev;

	reset_dut(data);

	/*
	 * We need to enable interrupts for the embedded device
	 * via the fpga interrupt controller...
	 */
	sirius_enable_int(data, sirius_dut_interrupt_bit[dut_id]);

#if 0
	/* Sirius does not seem to be able to do bus mastering;
	 * at least there is no configuration for it */
#ifdef FPGA_BUS_MASTERING
	dev_dbg(dev, "enabling FPGA bus mastering\n");
	sirius_core_writereg32(data, test_ctrl_reg, 0x0);
#else
	/* Route to internal RAM - this is reset value */
	dev_dbg(dev, "disabling FPGA bus mastering\n");
	sirius_core_writereg32(data, test_ctrl_reg, 0x1);
#endif
#endif

	/* patch heap config with PCI memory addresses */
	for (heap = 0; heap < vha_plat_fpga_heaps; heap++) {
		struct heap_config *cfg = &vha_plat_fpga_heap_configs[heap];

#ifdef CONFIG_GENERIC_ALLOCATOR
		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
			if (contig_size && contig_phys_start) {
				/*
				 * Two types of carveout memory are supported:
				 * - memory carved out of the main DDR
				 *   memory region,
				 *   e.g. Linux boot option memmap=512M$0x5CAFFFFF;
				 *   this is configured using the module parameters
				 *   contig_phys_start and contig_size;
				 * - DDR populated on the actual PCI card,
				 *   in BAR 4.
				 * The module parameters take precedence
				 * over PCI memory.
				 */
				cfg->options.carveout.phys = contig_phys_start;
				cfg->options.carveout.size = contig_size;
				cfg->to_dev_addr = NULL;
				cfg->to_host_addr = NULL;
				dev_info(dev, "using %dMB CARVEOUT at %pa\n",
					contig_size/1024/1024,
					&contig_phys_start);
			} else {
				cfg->options.carveout.phys =
					data->reg_bank[DUT_MEM_BANK].addr;
				cfg->options.carveout.size =
					data->reg_bank[DUT_MEM_BANK].size;
				cfg->options.carveout.offs = pci_offset;
				cfg->to_dev_addr = carveout_to_dev_addr;
				cfg->to_host_addr = carveout_to_host_addr;
				dev_info(dev, "using %zuMB CARVEOUT from PCI at %pa\n",
					cfg->options.carveout.size/1024/1024,
					&cfg->options.carveout.phys);
			}
			/* IO memory access callbacks */
			cfg->options.carveout.get_kptr = carveout_get_kptr;
			cfg->options.carveout.put_kptr = carveout_put_kptr;
			break;
		}
#endif
		if (cfg->type == IMG_MEM_HEAP_TYPE_COHERENT) {
			ret = dma_declare_coherent_memory(dev,
					contig_phys_start,
					contig_phys_start,
					contig_size
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
					, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE
#else
					, DMA_MEMORY_EXCLUSIVE
#endif
#endif
					);
			if (ret == 0) {
				dev_err(dev, "failed to initialize coherent memory!\n");
				/* We will fall back to the default pool anyway
				goto out_release; */
			}
			break;
		}
	}

	ret = vha_add_dev(dev, vha_plat_fpga_heap_configs,
			vha_plat_fpga_heaps, data,
			data->reg_bank[DUT_REG_BANK].km_addr,
			data->reg_bank[DUT_REG_BANK].size);
	if (ret) {
		dev_err(dev, "failed to initialize driver core!\n");
		goto out_heap_deinit;
	}

	/*
	 * Reset the FPGA DUT only after disabling clocks in
	 * vha_add_dev() -> get properties.
	 * This workaround is required to ensure that
	 * clocks (on the daughter board) are enabled for test slave scripts to
	 * read the FPGA build version register.
	 * NOTE: Asserting other bits, like the DDR reset bit, causes problems
	 * with the bus mastering feature and thus results in memory failures.
	 */
	reset_dut(data);

	/* Install the ISR callback...*/
	ret = devm_request_threaded_irq(dev,
			data->irq, &pci_isr_cb,
			&pci_thread_irq, IRQF_SHARED,
			DEVICE_NAME, (void *)pci_dev);
	if (ret) {
		dev_err(dev, "failed to request irq!\n");
		goto out_rm_dev;
	}
	dev_dbg(dev, "registered irq %d\n", data->irq);

	/* Try to calibrate the core if needed */
	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
	if (ret) {
		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
		goto out_rm_dev;
	}
	return ret;

out_rm_dev:
	vha_rm_dev(dev);

out_heap_deinit:
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
	/* Release any declared mem regions */
	dma_release_declared_memory(dev);
#endif
	/* Make sure interrupts are no longer enabled */
	sirius_disable_int(data, sirius_dut_interrupt_bit[dut_id]);

out_release:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	return ret;
}

static void vha_plat_remove(struct pci_dev *dev)
{
	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

	dev_dbg(&dev->dev, "removing device\n");

	if (data == NULL) {
		dev_err(&dev->dev, "PCI priv data missing!\n");
	} else {
		/*
		 * We need to disable interrupts for the
		 * embedded device via the fpga interrupt controller...
		 */
		sirius_disable_int(data, sirius_dut_interrupt_bit[dut_id]);

		/* Unregister int */
		devm_free_irq(&dev->dev, data->irq, dev);
		pci_free_irq_vectors(dev);

#if 0
#ifdef FPGA_BUS_MASTERING
		/* Route to internal RAM - this is reset value */
		dev_dbg(&dev->dev, "disabling FPGA bus mastering\n");
		sirius_core_writereg32(data, PCI_SIRIUS_SYS_CTRL_REGS_BAR,
				test_ctrl_reg, 0x1);
#endif
#endif
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
	/* Release any declared mem regions */
	dma_release_declared_memory(&dev->dev);
#endif

	pci_release_regions(dev);
	pci_disable_device(dev);

	vha_rm_dev(&dev->dev);
}

#ifdef CONFIG_PM
static int vha_plat_suspend(struct device *dev)
{
	return vha_suspend_dev(dev);
}

static int vha_plat_resume(struct device *dev)
{
	return vha_resume_dev(dev);
}
#endif

int vha_plat_init(void)
{
	int ret;

#if 0
#ifdef FPGA_BUS_MASTERING
	vha_plat_fpga_heap_configs[0].type = fpga_heap_type;
#endif
#endif

	ret = pci_register_driver(&vha_pci_drv.pci_driver);
	if (ret) {
		pr_err("failed to register PCI driver!\n");
		return ret;
	}

	/* pci_dev should be set in probe */
	if (!vha_pci_drv.pci_dev) {
		pr_err("failed to find VHA PCI dev!\n");
		pci_unregister_driver(&vha_pci_drv.pci_driver);
		return -ENODEV;
	}

	return 0;
}