/*!
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL"), in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replacing them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/workqueue.h>
#include <linux/version.h>

#include "uapi/version.h"
#include "vha_common.h"
#include "vha_plat.h"

#if defined(CFG_SYS_VAGUS)
#include <hwdefs/nn_sys_cr_vagus.h>
#endif

#if defined(CFG_SYS_VAGUS)
#include <hwdefs/vagus_system.h>
#elif defined(CFG_SYS_AURA)
#include <hwdefs/aura_system.h>
#elif defined(CFG_SYS_MIRAGE)
#include <hwdefs/mirage_system.h>
#elif defined(CFG_SYS_MAGNA)
#include <hwdefs/magna_system.h>
#endif

#define DEVICE_NAME "vha"

#define IS_FROST_DEVICE(devid) ((devid) == PCI_FROST_DEVICE_ID)

/*
 * From the ICE2 card document: Frost.Technical Reference Manual.docx
 */
#define PCI_FROST_VENDOR_ID (0x1AEE)
#define PCI_FROST_DEVICE_ID (0x1030)

/* Frost - system control register bar */
#define PCI_FROST_SYS_CTRL_REGS_BAR (0)
#define PCI_FROST_SYS_CTRL_BASE_OFFSET (0x0000)

/* Properties */
#define PCI_FROST_CORE_ID (0x0000)
#define PCI_FROST_CORE_REVISION (0x0004)
#define PCI_FROST_CORE_CHANGE_SET (0x0008)
#define PCI_FROST_CORE_USER_ID (0x000C)
#define PCI_FROST_CORE_USER_BUILD (0x0010)
#define PCI_FROST_CORE_SW_IF_VERSION (0x0014)
#define PCI_FROST_CORE_UC_IF_VERSION (0x0018)
/* Interrupt mode */
#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL (0x0048)
/* Resets */
#define PCI_FROST_CORE_INTERNAL_RESETN (0x0080)
#define PCI_FROST_CORE_EXTERNAL_RESETN (0x0084)
#define PCI_FROST_CORE_INTERNAL_AUTO_RESETN (0x008C)
/* Interrupts */
#define PCI_FROST_CORE_INTERRUPT_STATUS (0x0100)
#define PCI_FROST_CORE_INTERRUPT_ENABLE (0x0104)
#define PCI_FROST_CORE_INTERRUPT_CLR (0x010C)
#define PCI_FROST_CORE_INTERRUPT_TEST (0x0110)
#define PCI_FROST_CORE_INTERRUPT_TIMEOUT_CLR (0x0114)
#define PCI_FROST_CORE_INTERRUPT_TIMEOUT (0x0118)
/* Misc */
#define PCI_FROST_CORE_SYSTEM_ID (0x0120)
/* LEDs! */
#define PCI_FROST_CORE_DASH_LEDS (0x01A8)
/* Core stuff */
#define PCI_FROST_CORE_PCIE_TO_EMU_ADDR_OFFSET (0x0204)
#define PCI_FROST_CORE_EMU_TO_PCIE_ADDR_OFFSET (0x0208)
#define PCI_FROST_CORE_CORE_CONTROL (0x0210)
#define PCI_FROST_CORE_EMU_CLK_CNT (0x0214)

/* Interrupt bits */
#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL_ENABLE (1 << 0)
#define PCI_FROST_CORE_EMU_INTERRUPT_CTRL_SENSE (1 << 1)

/* Core bit definitions */
#define INTERNAL_RESET_INTERNAL_RESETN_CMDA (1 << 0)
#define INTERNAL_RESET_INTERNAL_RESETN_GIST (1 << 1)
#define EXTERNAL_RESET_EXTERNAL_RESETN_EMU (1 << 0)
#define INTERNAL_AUTO_RESETN_AUX (1 << 0)

/* Interrupt bit definitions */
#define INT_INTERRUPT_MASTER_ENABLE (0) /* (1 << 31) - disabled */
#define INT_INTERRUPT_IRQ_TEST (1 << 30)
#define INT_INTERRUPT_CDMA (1 << 1)
#define INT_INTERRUPT_EMU (1 << 0)
#define INT_TEST_INTERRUPT_TEST (1 << 0)
#define INTERRUPT_MST_TIMEOUT_CLR (1 << 1)
#define INTERRUPT_MST_TIMEOUT (1 << 0)

#define PCI_FROST_CORE_REG_SIZE (0x1000)

/* Frost - Device Under Test (DUT) register bar */
#define PCI_FROST_DUT_REG_BAR (2)
#define PCI_FROST_DUT_MEM_BAR (4)

/* Number of core cycles used to measure the core clock frequency */
#define FREQ_MEASURE_CYCLES 0x7fffff

static unsigned long pci_size;
module_param(pci_size, ulong, 0444);
MODULE_PARM_DESC(pci_size,
	"Physical size in bytes. When 0 (the default), use all memory in the PCI bar");

static unsigned long pci_offset;
module_param(pci_offset, ulong, 0444);
MODULE_PARM_DESC(pci_offset, "Offset from PCI bar start (default: 0)");

static unsigned short pool_alloc_order;
module_param(pool_alloc_order, ushort, 0444);
MODULE_PARM_DESC(pool_alloc_order,
	"Carveout pool allocation order, depends on PAGE_SIZE; "
	"for CPU PAGE_SIZE=4kB: 0-4kB, 1-8kB, 2-16kB, 3-32kB, 4-64kB");

static unsigned long poll_interrupts = 1;
module_param(poll_interrupts, ulong, 0444);
MODULE_PARM_DESC(poll_interrupts, "Poll for interrupts? 0: No, 1: Yes");

static unsigned long irq_poll_delay_us = 10000; /* 10 ms */
module_param(irq_poll_delay_us, ulong, 0444);
MODULE_PARM_DESC(irq_poll_delay_us, "Delay in us between each interrupt poll");

static bool irq_self_test;
module_param(irq_self_test, bool, 0444);
MODULE_PARM_DESC(irq_self_test, "Enable the board's IRQ self-test feature");
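
/*
 * Usage sketch (the module file name "vha.ko" is an assumption; the
 * actual binary name depends on the build system):
 *
 *   insmod vha.ko poll_interrupts=1 irq_poll_delay_us=5000
 *
 * This loads the driver in polled-interrupt mode, sampling the device
 * status every 5 ms instead of relying on the PCI interrupt line.
 */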
static struct heap_config vha_dev_frost_heap_configs[] = {
	/* Primary heap used for internal allocations */
#ifdef CONFIG_GENERIC_ALLOCATOR
	{
		.type = IMG_MEM_HEAP_TYPE_CARVEOUT,
		/* .options.carveout to be filled at run time */
		/* .to_dev_addr to be filled at run time */
	},
#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
	{
		.type = IMG_MEM_HEAP_TYPE_DMABUF,
		.to_dev_addr = NULL,
		.options.dmabuf = {
			.use_sg_dma = true,
		},
	},
#else
#warning "Memory importing not supported!"
#endif
};

static const int vha_dev_frost_heaps = ARRAY_SIZE(vha_dev_frost_heap_configs);

static const struct pci_device_id pci_pci_ids[] = {
	{ PCI_DEVICE(PCI_FROST_VENDOR_ID, PCI_FROST_DEVICE_ID), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, pci_pci_ids);
enum {
	CORE_REG_BANK = 0,
	NNA_REG_BANK,
	MEM_REG_BANK,
	REG_BANK_COUNT /* Must be the last */
};

struct imgpci_prvdata {
	int irq;
	struct {
		int bar;
		unsigned long addr;
		unsigned long size;
		void __iomem *km_addr;
	} reg_bank[REG_BANK_COUNT];
	struct pci_dev *pci_dev;
	int irq_poll;
	struct delayed_work irq_work;
};

struct img_pci_driver {
	struct pci_dev *pci_dev;
	struct pci_driver pci_driver;
	struct delayed_work irq_work;
};

static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id);
static void vha_plat_remove(struct pci_dev *dev);

static int vha_plat_suspend(struct device *dev);
static int vha_plat_resume(struct device *dev);

static SIMPLE_DEV_PM_OPS(vha_pm_plat_ops,
		vha_plat_suspend, vha_plat_resume);

static ssize_t info_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "VHA Frost driver version : " VERSION_STRING "\n");
}

static inline uint64_t __readreg64(struct imgpci_prvdata *data,
		int bank, unsigned long offset) __maybe_unused;
static inline void __writereg64(struct imgpci_prvdata *data,
		int bank, unsigned long offset, uint64_t val) __maybe_unused;

static DRIVER_ATTR_RO(info);

static struct attribute *drv_attrs[] = {
	&driver_attr_info.attr,
	NULL
};
ATTRIBUTE_GROUPS(drv);

static struct img_pci_driver vha_pci_drv = {
	.pci_driver = {
		.name = "vha_pci",
		.id_table = pci_pci_ids,
		.probe = vha_plat_probe,
		.remove = vha_plat_remove,
		.driver = {
			.groups = drv_groups,
			.pm = &vha_pm_plat_ops,
		}
	},
};
static ulong maxmapsizeMB = (sizeof(void *) == 4) ? 400 : 2048;

/**
 * __readreg32 - Generic PCI bar read function
 * @data: pointer to the data
 * @bank: register bank
 * @offset: offset within bank
 */
static inline unsigned int __readreg32(struct imgpci_prvdata *data,
		int bank, unsigned long offset)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
			offset);
	return ioread32(reg);
}

/**
 * __writereg32 - Generic PCI bar write function
 * @data: pointer to the data
 * @bank: register bank
 * @offset: offset within bank
 * @val: value to be written
 */
static inline void __writereg32(struct imgpci_prvdata *data,
		int bank, unsigned long offset, int val)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr +
			offset);
	iowrite32(val, reg);
}

/*
 * __readreg64 - Generic PCI bar read function
 * @data: pointer to the data
 * @bank: register bank
 * @offset: offset within bank
 */
static inline uint64_t __readreg64(struct imgpci_prvdata *data,
		int bank, unsigned long offset)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr + offset);
	return (uint64_t)ioread32(reg) | ((uint64_t)ioread32(reg + 4) << 32);
}

/*
 * __writereg64 - Generic PCI bar write function
 * @data: pointer to the data
 * @bank: register bank
 * @offset: offset within bank
 * @val: value to be written
 */
static inline void __writereg64(struct imgpci_prvdata *data,
		int bank, unsigned long offset, uint64_t val)
{
	void __iomem *reg = (void __iomem *)(data->reg_bank[bank].km_addr + offset);
	iowrite32(val & 0xFFFFFFFF, reg);
	iowrite32(val >> 32, reg + 4);
}
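
/*
 * Note: the 64-bit accessors above are implemented as two 32-bit
 * accesses (low word first), so they are not atomic with respect to
 * the device or to concurrent callers; callers must serialise access
 * themselves where that matters.
 */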
/**
 * frost_core_writereg32 - Write to Frost control registers
 * @data: pointer to the data
 * @offset: offset within bank
 * @val: value to be written
 */
static inline void frost_core_writereg32(struct imgpci_prvdata *data,
		unsigned long offset, int val)
{
	__writereg32(data, CORE_REG_BANK, offset, val);
}

/**
 * frost_core_readreg32 - Read Frost control registers
 * @data: pointer to the data
 * @offset: offset within bank
 */
static inline unsigned int frost_core_readreg32(struct imgpci_prvdata *data,
		unsigned long offset)
{
	return __readreg32(data, CORE_REG_BANK, offset);
}

static inline void frost_reset_int(struct imgpci_prvdata *data)
{
	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE, 0);
	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_CLR, 0xFFFFFFFF);
	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TIMEOUT, 0xFFFFFFFF);
	/* SENSE shall be low, because polarity is reversed */
	frost_core_writereg32(data, PCI_FROST_CORE_EMU_INTERRUPT_CTRL,
			PCI_FROST_CORE_EMU_INTERRUPT_CTRL_ENABLE);
}

/**
 * frost_enable_int - Enable an interrupt
 * @data: pointer to the data
 * @intmask: interrupt mask
 */
static inline void frost_enable_int(struct imgpci_prvdata *data,
		uint32_t intmask)
{
	uint32_t irq_enabled = frost_core_readreg32(data,
			PCI_FROST_CORE_INTERRUPT_ENABLE);

	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE,
			irq_enabled | intmask | INT_INTERRUPT_MASTER_ENABLE);
}

/**
 * frost_disable_int - Disable an interrupt
 * @data: pointer to the data
 * @intmask: interrupt mask
 */
static inline void frost_disable_int(struct imgpci_prvdata *data,
		uint32_t intmask)
{
	uint32_t irq_enabled = frost_core_readreg32(data,
			PCI_FROST_CORE_INTERRUPT_ENABLE);

	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_ENABLE,
			irq_enabled & ~intmask);
}

/**
 * frost_test_int - Trigger the test interrupt
 * @data: pointer to the data
 */
static inline void frost_test_int(struct imgpci_prvdata *data)
{
	frost_enable_int(data, INT_INTERRUPT_IRQ_TEST);
	pr_warn("%s: trigger interrupt!\n", __func__);
	/* SENSE shall be high */
	frost_core_writereg32(data, PCI_FROST_CORE_EMU_INTERRUPT_CTRL,
			PCI_FROST_CORE_EMU_INTERRUPT_CTRL_SENSE);
	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TEST,
			INT_TEST_INTERRUPT_TEST);
}
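
/*
 * Self-test flow: frost_test_int() raises the test interrupt;
 * frost_isr_cb() then observes INT_INTERRUPT_IRQ_TEST in the status
 * register, clears the test bit and calls frost_reset_int() to return
 * the interrupt controller to its normal (reversed-SENSE) setup.
 */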
/**
 * reset_dut - Reset the Device Under Test
 * @data: pointer to the data
 */
static void reset_dut(struct imgpci_prvdata *data)
{
	uint32_t internal_rst = frost_core_readreg32(data,
			PCI_FROST_CORE_INTERNAL_RESETN);
	uint32_t external_rst = frost_core_readreg32(data,
			PCI_FROST_CORE_EXTERNAL_RESETN);

	dev_dbg(&data->pci_dev->dev, "going to reset DUT frost!\n");

	frost_core_writereg32(data, PCI_FROST_CORE_INTERNAL_RESETN,
			internal_rst & ~(INTERNAL_RESET_INTERNAL_RESETN_GIST |
			INTERNAL_RESET_INTERNAL_RESETN_CMDA));
	frost_core_writereg32(data, PCI_FROST_CORE_EXTERNAL_RESETN,
			external_rst & ~(EXTERNAL_RESET_EXTERNAL_RESETN_EMU));

	udelay(100); /* arbitrary delays, just in case! */

	frost_core_writereg32(data, PCI_FROST_CORE_INTERNAL_RESETN, internal_rst);
	frost_core_writereg32(data, PCI_FROST_CORE_EXTERNAL_RESETN, external_rst);

	msleep(100);

	dev_dbg(&data->pci_dev->dev, "DUT frost reset done!\n");
}

/**
 * frost_thread_irq - High latency interrupt handler
 * @irq: irq number
 * @dev_id: pointer to private data
 */
static irqreturn_t frost_thread_irq(int irq, void *dev_id)
{
	struct pci_dev *dev = (struct pci_dev *)dev_id;

	return vha_handle_thread_irq(&dev->dev);
}

/**
 * frost_isr_clear - Clear an interrupt
 * @data: pointer to the data
 * @intstatus: interrupt status
 *
 * Note: the purpose of this retry loop is unclear; it is carried over
 * from the Apollo/Atlas code, which uses the same interrupt handler as
 * Frost. It may be working around a hardware quirk.
 */
static void frost_isr_clear(struct imgpci_prvdata *data, unsigned int intstatus)
{
	unsigned int max_retries = 1000;

	while (frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_STATUS) &
			intstatus) {
		if (max_retries-- == 0) {
			pr_warn("Can't clear irq! Disabling interrupts!\n");
			frost_reset_int(data);
			break;
		}
		frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_CLR,
				(INT_INTERRUPT_MASTER_ENABLE | intstatus));
	}
}
/**
 * frost_isr_cb - Low latency interrupt handler
 * @irq: irq number
 * @dev_id: pointer to private data
 */
static irqreturn_t frost_isr_cb(int irq, void *dev_id)
{
	uint32_t intstatus;
	struct pci_dev *dev = (struct pci_dev *)dev_id;
	struct imgpci_prvdata *data;
	irqreturn_t ret = IRQ_NONE;

	if (dev_id == NULL) {
		/* Spurious interrupt: not yet initialised. */
		pr_warn("Spurious interrupt data/dev_id not initialised!\n");
		goto exit;
	}

	data = vha_get_plat_data(&dev->dev);
	if (data == NULL) {
		/* Spurious interrupt: not yet initialised. */
		pr_warn("Invalid driver private data!\n");
		goto exit;
	}

	/* Read interrupt status register */
	intstatus = frost_core_readreg32(data, PCI_FROST_CORE_INTERRUPT_STATUS);

	/* Clear timeout bit just for sanity */
	frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TIMEOUT_CLR,
			INTERRUPT_MST_TIMEOUT_CLR);

	if (intstatus & INT_INTERRUPT_IRQ_TEST) {
		/* Handle test int */
		pr_warn("Test interrupt OK! Switch back to normal mode!\n");
		frost_core_writereg32(data, PCI_FROST_CORE_INTERRUPT_TEST, 0);
		/* Disable irqs */
		frost_reset_int(data);
		ret = IRQ_HANDLED;
	}

	if (intstatus & INT_INTERRUPT_EMU) {
		/* Call the real irq handler */
		ret = vha_handle_irq(&dev->dev);
	}

	if (unlikely(intstatus == 0)) {
		/* Most likely this is a shared interrupt line */
		dev_dbg(&dev->dev,
			"%s: unexpected or spurious interrupt [%x] (shared IRQ?)!\n",
			__func__, intstatus);
		goto exit;
	}

	/* Ack the ints */
	frost_isr_clear(data, intstatus);
exit:
	return ret;
}
/* Interrupt polling function */
static void frost_poll_interrupt(struct work_struct *work)
{
	struct imgpci_prvdata *data = container_of(work,
			struct imgpci_prvdata, irq_work.work);
	struct pci_dev *dev = data->pci_dev;
	int ret;

	if (!data->irq_poll)
		return;

	preempt_disable();
	ret = vha_handle_irq(&dev->dev);
	preempt_enable();

	if (ret == IRQ_WAKE_THREAD)
		vha_handle_thread_irq(&dev->dev);

#if 0
	{
		uint32_t clk_cnt = frost_core_readreg32(data,
				PCI_FROST_CORE_EMU_CLK_CNT);
		pr_debug("%s: EMU clk_cnt: %u\n", __func__, clk_cnt);
	}
#endif

	/* Retrigger */
	schedule_delayed_work(&data->irq_work,
			usecs_to_jiffies(irq_poll_delay_us));
}
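
/*
 * Design note: in polled mode this delayed work item re-queues itself
 * every irq_poll_delay_us microseconds; vha_plat_deinit() stops the
 * cycle by clearing data->irq_poll before cancelling the work.
 */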
/**
 * frost_allocate_registers - Allocate memory for a register (or memory) bank
 * @pci_dev: pointer to pci device
 * @data: pointer to the data
 * @bank: bank to set
 * @bar: BAR where the registers are
 * @base: base address in the BAR
 * @size: size of the register set
 */
static inline int frost_allocate_registers(struct pci_dev *pci_dev,
		struct imgpci_prvdata *data, int bank,
		int bar, unsigned long base, unsigned long size)
{
	unsigned long bar_size = pci_resource_len(pci_dev, bar);
	unsigned long bar_addr = pci_resource_start(pci_dev, bar);
	unsigned long bar_max_size = bar_size - base;

	BUG_ON((base > bar_size) || ((base + size) > bar_size));

	data->reg_bank[bank].bar = bar;
	data->reg_bank[bank].addr = bar_addr + base;
	data->reg_bank[bank].size = min(size, bar_max_size);

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
	data->reg_bank[bank].km_addr = devm_ioremap_nocache(
			&pci_dev->dev, data->reg_bank[bank].addr,
			data->reg_bank[bank].size);
#else
	data->reg_bank[bank].km_addr = devm_ioremap(
			&pci_dev->dev, data->reg_bank[bank].addr,
			data->reg_bank[bank].size);
#endif

	pr_debug("[bank %d] bar:%d addr:0x%lx size:0x%lx km:0x%px\n",
			bank, bar, data->reg_bank[bank].addr,
			data->reg_bank[bank].size,
			data->reg_bank[bank].km_addr);

	return data->reg_bank[bank].km_addr == NULL;
}
int vha_plat_deinit(void)
{
	struct pci_dev *dev = vha_pci_drv.pci_dev;
	int ret;

	if (dev) {
		struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

		if (data) {
			if (poll_interrupts) {
				data->irq_poll = 0;
				cancel_delayed_work_sync(&data->irq_work);
			}
			/* Reset the hardware */
			reset_dut(data);
		} else {
			dev_dbg(&dev->dev,
				"%s: prv data not found, HW reset omitted\n",
				__func__);
		}
	} else {
		/*pr_debug("%s: dev missing, HW reset omitted\n", __func__);*/
	}

	/* Unregister the driver from the OS */
	pci_unregister_driver(&(vha_pci_drv.pci_driver));

	ret = vha_deinit();
	if (ret)
		pr_err("VHA driver deinit failed\n");

	return ret;
}

#define NNA_REG_BAR (PCI_FROST_DUT_REG_BAR)
#ifdef CFG_SYS_VAGUS
#define NNA_REG_SIZE (_REG_SIZE + _REG_NNSYS_SIZE)
#else
#define NNA_REG_SIZE (_REG_SIZE)
#endif
#define NNA_REG_OFFSET (_REG_START)
#ifdef CONFIG_GENERIC_ALLOCATOR
static phys_addr_t carveout_to_dev_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr - offset >= base && addr < base + size - offset)
		return addr - base;

	pr_err("%s: unexpected addr! base %#llx size %zu offs %lu addr %#llx\n",
			__func__, base, size, offset, addr);
	WARN_ON(1);
	return addr;
}

static phys_addr_t carveout_to_host_addr(union heap_options *options,
		phys_addr_t addr)
{
	phys_addr_t base = options->carveout.phys;
	size_t size = options->carveout.size;
	unsigned long offset = options->carveout.offs;

	if (addr < size - offset)
		return base + addr;

	pr_err("%s: unexpected addr! base %#llx size %zu offs %lu addr %#llx\n",
			__func__, base, size, offset, addr);
	WARN_ON(1);
	return addr;
}
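
/*
 * Worked example (illustrative numbers only): with carveout phys base
 * 0x80000000, size 0x10000000 and offs 0, host physical address
 * 0x80004000 maps to device address 0x4000 via carveout_to_dev_addr(),
 * and carveout_to_host_addr(0x4000) maps back to 0x80004000.
 */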
static void *carveout_get_kptr(phys_addr_t addr,
		size_t size, enum img_mem_attr mattr)
{
	/*
	 * Device memory is I/O memory and, as a rule, it cannot
	 * be dereferenced safely without memory barriers; that
	 * is why it is guarded by __iomem (returned by ioremap)
	 * and checked by sparse. It is accessed only through
	 * ioread32(), iowrite32(), etc.
	 *
	 * On x86 this memory can be dereferenced and safely
	 * accessed, i.e. an __iomem pointer can be cast to
	 * a regular void * pointer. We cast it here, assuming
	 * the FPGA host is x86, and add __force to silence the
	 * sparse warning.
	 *
	 * Note: the system memory carveout can be used with caching turned on.
	 */
	void *kptr = NULL;

	if (mattr & IMG_MEM_ATTR_UNCACHED)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
		kptr = (void __force *)ioremap_nocache(addr, size);
#else
		kptr = (void __force *)ioremap(addr, size);
#endif
	else if (mattr & IMG_MEM_ATTR_CACHED)
		kptr = (void __force *)ioremap_cache(addr, size);
	else if (mattr & IMG_MEM_ATTR_WRITECOMBINE)
		kptr = (void __force *)ioremap_wc(addr, size);

	/*pr_debug(
		"Mapping %zu bytes into kernel memory (Phys:%08llX, Kptr:%p)\n",
		size, addr, kptr);
	pr_debug("[%c%c%c]\n",
		(mattr & IMG_MEM_ATTR_UNCACHED) ? 'U' : '.',
		(mattr & IMG_MEM_ATTR_CACHED) ? 'C' : '.',
		(mattr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.');*/
	return kptr;
}

static int carveout_put_kptr(void *addr)
{
	/* pr_debug("Unmapping kernel memory (Phys: %p)\n", addr);*/
	iounmap(addr);
	return 0;
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
/*
 * IO hooks.
 * NOTE: a spinlock is used to avoid
 * problems with multi-threaded IO access.
 */
static DEFINE_SPINLOCK(io_irq_lock);

uint64_t vha_plat_read64(void *addr)
{
	u64 val;
	unsigned long flags;

	spin_lock_irqsave(&io_irq_lock, flags);
	val = (uint64_t)readl((const volatile void __iomem *)addr) |
		((uint64_t)readl((const volatile void __iomem *)addr + 4) << 32);
	spin_unlock_irqrestore(&io_irq_lock, flags);
	return val;
}

void vha_plat_write64(void *addr, uint64_t val)
{
	unsigned long flags;

	spin_lock_irqsave(&io_irq_lock, flags);
	writel((uint32_t)(val & 0xffffffff), (volatile void __iomem *)addr);
	writel((uint32_t)(val >> 32), (volatile void __iomem *)addr + 4);
	spin_unlock_irqrestore(&io_irq_lock, flags);
}
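
/*
 * The lock above serialises the two 32-bit halves of each 64-bit
 * access, so accesses from different threads cannot interleave. It
 * does not make the access atomic from the device's point of view:
 * the device still sees two separate 32-bit transactions.
 */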
static int vha_plat_probe(struct pci_dev *pci_dev,
		const struct pci_device_id *id)
{
	int ret = 0;
	struct imgpci_prvdata *data;
	size_t maxmapsize = maxmapsizeMB * 1024 * 1024;
	unsigned long vha_base_mem, vha_mem_size;
	struct device *dev = &pci_dev->dev;
	int heap;

	dev_dbg(dev, "probing device, pci_dev: %p\n", pci_dev);

	/* Enable the device */
	ret = pci_enable_device(pci_dev);
	if (ret)
		goto out_free;

	dev_info(dev, "%s dma_get_mask : %#llx\n", __func__, dma_get_mask(dev));

	if (dev->dma_mask) {
		dev_info(dev, "%s dev->dma_mask : %p : %#llx\n",
				__func__, dev->dma_mask, *dev->dma_mask);
	} else {
		dev_info(dev, "%s mask unset, setting coherent\n", __func__);
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(36));
	if (ret) {
		dev_err(dev, "%s failed to set dma mask\n", __func__);
		goto out_disable;
	}
	dev_info(dev, "%s dma_set_mask %#llx\n", __func__, dma_get_mask(dev));

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pci_dev, "imgpci");
	if (ret)
		goto out_disable;

	/* Create a kernel space mapping for each of the bars */
	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		pr_err("Memory allocation error, aborting.\n");
		ret = -ENOMEM;
		goto out_release;
	}
	dev_dbg(dev, "allocated imgpci_prvdata @ %p\n", data);

	/* Allocate frost core registers */
	ret = frost_allocate_registers(pci_dev, data,
			CORE_REG_BANK, PCI_FROST_SYS_CTRL_REGS_BAR,
			PCI_FROST_SYS_CTRL_BASE_OFFSET,
			PCI_FROST_CORE_REG_SIZE);
	if (ret) {
		dev_err(dev, "Can't allocate memory for frost regs!");
		ret = -ENOMEM;
		goto out_release;
	}

	/* Display some info */
	{
		uint32_t frost_id = frost_core_readreg32(data, PCI_FROST_CORE_ID);
		uint32_t frost_rev = frost_core_readreg32(data,
				PCI_FROST_CORE_REVISION);
		uint32_t frost_cs = frost_core_readreg32(data,
				PCI_FROST_CORE_CHANGE_SET);
		uint32_t frost_ui = frost_core_readreg32(data,
				PCI_FROST_CORE_USER_ID);
		uint32_t frost_ub = frost_core_readreg32(data,
				PCI_FROST_CORE_USER_BUILD);
		uint32_t frost_swif = frost_core_readreg32(data,
				PCI_FROST_CORE_SW_IF_VERSION);
		uint32_t frost_ucif = frost_core_readreg32(data,
				PCI_FROST_CORE_UC_IF_VERSION);

		pr_info("Found Frost board v%d.%d (ID:%X CS:%X UI:%X UB:%X SWIF:%X UCIF:%X)\n",
				(frost_rev >> 16) & 0xFFFF, frost_rev & 0xFFFF,
				frost_id, frost_cs, frost_ui, frost_ub,
				frost_swif, frost_ucif);
	}

	/* Allocate NNA register space */
	ret = frost_allocate_registers(pci_dev, data,
			NNA_REG_BANK, NNA_REG_BAR,
			NNA_REG_OFFSET,
			NNA_REG_SIZE);
	if (ret) {
		dev_err(dev, "Can't allocate memory for vha regs!");
		ret = -ENOMEM;
		goto out_release;
	}

	/* Allocate DUT memory space */
	vha_mem_size = pci_resource_len(pci_dev, PCI_FROST_DUT_MEM_BAR);
	if (vha_mem_size > maxmapsize)
		vha_mem_size = maxmapsize;

	vha_base_mem = pci_resource_start(pci_dev, PCI_FROST_DUT_MEM_BAR);

	/* Change the alloc size according to the module parameter */
	if (pci_size)
		vha_mem_size = pci_size;

	/* We are not really allocating memory for that reg bank,
	 * so hand-set the values here: */
	data->reg_bank[MEM_REG_BANK].bar = PCI_FROST_DUT_MEM_BAR;
	data->reg_bank[MEM_REG_BANK].addr = vha_base_mem;
	data->reg_bank[MEM_REG_BANK].size = vha_mem_size;
	pr_debug("[bank %u] bar:%d addr: 0x%lx size: 0x%lx\n",
			MEM_REG_BANK, PCI_FROST_DUT_MEM_BAR,
			data->reg_bank[MEM_REG_BANK].addr,
			data->reg_bank[MEM_REG_BANK].size);

	/* Get the IRQ...*/
	data->irq = pci_dev->irq;
	data->pci_dev = pci_dev;
	vha_pci_drv.pci_dev = pci_dev;

	reset_dut(data);

	for (heap = 0; heap < vha_dev_frost_heaps; heap++) {
		struct heap_config *cfg = &vha_dev_frost_heap_configs[heap];

#ifdef CONFIG_GENERIC_ALLOCATOR
		if (cfg->type == IMG_MEM_HEAP_TYPE_CARVEOUT) {
			cfg->options.carveout.phys =
				data->reg_bank[MEM_REG_BANK].addr;
			cfg->options.carveout.size =
				data->reg_bank[MEM_REG_BANK].size;
			cfg->options.carveout.offs = pci_offset;
			cfg->to_dev_addr = carveout_to_dev_addr;
			cfg->to_host_addr = carveout_to_host_addr;
			/* IO memory access callbacks */
			cfg->options.carveout.get_kptr = carveout_get_kptr;
			cfg->options.carveout.put_kptr = carveout_put_kptr;
			/* Allocation order */
			cfg->options.carveout.pool_order = pool_alloc_order;
			break;
		}
#endif
	}

	ret = vha_add_dev(dev,
			vha_dev_frost_heap_configs,
			vha_dev_frost_heaps,
			data,
			data->reg_bank[NNA_REG_BANK].km_addr,
			data->reg_bank[NNA_REG_BANK].size);
	if (ret) {
		dev_err(dev, "failed to initialize driver core!\n");
		goto out_deinit;
	}

	if (!poll_interrupts) {
		/* Reset irqs at first */
		frost_reset_int(data);

		/* Install the ISR callback...*/
		ret = devm_request_threaded_irq(dev, data->irq, &frost_isr_cb,
				&frost_thread_irq, IRQF_SHARED, DEVICE_NAME,
				(void *)pci_dev);
		if (ret) {
			dev_err(dev, "failed to request irq!\n");
			goto out_rm_dev;
		}
		dev_dbg(dev, "registered irq %d\n", data->irq);

		if (irq_self_test) {
			/* Trigger the test interrupt */
			frost_test_int(data);
			/* Give it some time to trigger the test IRQ */
			msleep(10);
		} else {
			frost_enable_int(data, INT_INTERRUPT_EMU);
		}
	} else {
		INIT_DELAYED_WORK(&data->irq_work, frost_poll_interrupt);
		data->irq_poll = 1;
		/* Start the interrupt poll */
		schedule_delayed_work(&data->irq_work,
				usecs_to_jiffies(irq_poll_delay_us));
	}

	/* Try to calibrate the core if needed */
	ret = vha_dev_calibrate(dev, FREQ_MEASURE_CYCLES);
	if (ret) {
		dev_err(dev, "%s: Failed to start clock calibration!\n", __func__);
		goto out_rm_dev;
	}
	return ret;

out_rm_dev:
	vha_rm_dev(dev);
out_deinit:
	/* Make sure ints are no longer enabled */
	frost_disable_int(data, INT_INTERRUPT_EMU);
out_release:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	return ret;
}
static void vha_plat_remove(struct pci_dev *dev)
{
	struct imgpci_prvdata *data = vha_get_plat_data(&dev->dev);

	dev_dbg(&dev->dev, "removing device\n");

	if (data == NULL) {
		dev_err(&dev->dev, "PCI priv data missing!\n");
	} else if (!poll_interrupts) {
		/*
		 * We need to disable interrupts for the
		 * embedded device via the frost interrupt controller...
		 */
		frost_disable_int(data, INT_INTERRUPT_EMU);

		/* Unregister the int */
		devm_free_irq(&dev->dev, data->irq, dev);
	}

	pci_release_regions(dev);
	pci_disable_device(dev);

	vha_rm_dev(&dev->dev);
}
#ifdef CONFIG_PM
static int vha_plat_suspend(struct device *dev)
{
	return vha_suspend_dev(dev);
}

static int vha_plat_resume(struct device *dev)
{
	return vha_resume_dev(dev);
}
#endif

int vha_plat_init(void)
{
	int ret;

	ret = pci_register_driver(&vha_pci_drv.pci_driver);
	if (ret) {
		pr_err("failed to register PCI driver!\n");
		return ret;
	}

	/* pci_dev should be set in probe */
	if (!vha_pci_drv.pci_dev) {
		pr_err("failed to find VHA PCI dev!\n");
		pci_unregister_driver(&vha_pci_drv.pci_driver);
		return -ENODEV;
	}

	return 0;
}