zynq-fpga.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2015 Xilinx Inc.
 * Copyright (c) 2015, National Instruments Corp.
 *
 * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
 * in their vendor tree.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Offsets into SLCR regmap */

/* FPGA Software Reset Control */
#define SLCR_FPGA_RST_CTRL_OFFSET 0x240
/* Level Shifters Enable */
#define SLCR_LVL_SHFTR_EN_OFFSET 0x900

/* Constant Definitions */

/* Control Register */
#define CTRL_OFFSET 0x00
/* Lock Register */
#define LOCK_OFFSET 0x04
/* Interrupt Status Register */
#define INT_STS_OFFSET 0x0c
/* Interrupt Mask Register */
#define INT_MASK_OFFSET 0x10
/* Status Register */
#define STATUS_OFFSET 0x14
/* DMA Source Address Register */
#define DMA_SRC_ADDR_OFFSET 0x18
/* DMA Destination Address Reg */
#define DMA_DST_ADDR_OFFSET 0x1c
/* DMA Source Transfer Length */
#define DMA_SRC_LEN_OFFSET 0x20
/* DMA Destination Transfer */
#define DMA_DEST_LEN_OFFSET 0x24
/* Unlock Register */
#define UNLOCK_OFFSET 0x34
/* Misc. Control Register */
#define MCTRL_OFFSET 0x80

/* Control Register Bit definitions */

/* Signal to reset FPGA */
#define CTRL_PCFG_PROG_B_MASK BIT(30)
/* Enable PCAP for PR */
#define CTRL_PCAP_PR_MASK BIT(27)
/* Enable PCAP */
#define CTRL_PCAP_MODE_MASK BIT(26)
/* Lower rate to allow decrypt on the fly */
#define CTRL_PCAP_RATE_EN_MASK BIT(25)
/* System booted in secure mode */
#define CTRL_SEC_EN_MASK BIT(7)

/* Miscellaneous Control Register bit definitions */
/* Internal PCAP loopback */
#define MCTRL_PCAP_LPBK_MASK BIT(4)

/* Status register bit definitions */

/* FPGA init status */
#define STATUS_DMA_Q_F BIT(31)
#define STATUS_DMA_Q_E BIT(30)
#define STATUS_PCFG_INIT_MASK BIT(4)

/* Interrupt Status/Mask Register Bit definitions */
/* DMA command done */
#define IXR_DMA_DONE_MASK BIT(13)
/* DMA and PCAP cmd done */
#define IXR_D_P_DONE_MASK BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK BIT(2)
#define IXR_ERROR_FLAGS_MASK 0x00F0C860
#define IXR_ALL_MASK 0xF8F7F87F

/* Miscellaneous constant values */

/* Invalid DMA addr */
#define DMA_INVALID_ADDRESS GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK 0x757bdf0d
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT 2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY 20
/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
 * interrupting
 */
#define DMA_SRC_LAST_TRANSFER 1
/* Timeout for DMA completion */
#define DMA_TIMEOUT_MS 5000

/* Masks for controlling stuff in SLCR */
/* Disable all Level shifters */
#define LVL_SHFTR_DISABLE_ALL_MASK 0x0
/* Enable Level shifters from PS to PL */
#define LVL_SHFTR_ENABLE_PS_TO_PL 0xa
/* Enable Level shifters from PL to PS */
#define LVL_SHFTR_ENABLE_PL_TO_PS 0xf
/* Enable global resets */
#define FPGA_RST_ALL_MASK 0xf
/* Disable global resets */
#define FPGA_RST_NONE_MASK 0x0

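/* Per-device state; dma_lock protects the DMA queue bookkeeping (cur_sg,
 * dma_elm, dma_nelms) that is shared between the writer and the ISR.
 */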
struct zynq_fpga_priv {
        int irq;
        struct clk *clk;

        void __iomem *io_base;
        struct regmap *slcr;

        spinlock_t dma_lock;
        unsigned int dma_elm;
        unsigned int dma_nelms;
        struct scatterlist *cur_sg;

        struct completion dma_done;
};

static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
                                   u32 val)
{
        writel(val, priv->io_base + offset);
}

static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
                                 u32 offset)
{
        return readl(priv->io_base + offset);
}

#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
        readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
                           timeout_us)

/* Cause the specified irq mask bits to generate IRQs */
static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
        zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}

/* Must be called with dma_lock held */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
        u32 addr;
        u32 len;
        bool first;

        first = priv->dma_elm == 0;
        while (priv->cur_sg) {
                /* Feed the DMA queue until it is full. */
                if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
                        break;

                addr = sg_dma_address(priv->cur_sg);
                len = sg_dma_len(priv->cur_sg);
                if (priv->dma_elm + 1 == priv->dma_nelms) {
                        /* The last transfer waits for the PCAP to finish too,
                         * notice this also changes the irq_mask to ignore
                         * IXR_DMA_DONE_MASK which ensures we do not trigger
                         * the completion too early.
                         */
                        addr |= DMA_SRC_LAST_TRANSFER;
                        priv->cur_sg = NULL;
                } else {
                        priv->cur_sg = sg_next(priv->cur_sg);
                        priv->dma_elm++;
                }

                zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
                zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
                zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
                zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
        }

        /* Once the first transfer is queued we can turn on the ISR, future
         * calls to zynq_step_dma will happen from the ISR context. The
         * dma_lock spinlock guarantees this handover is done coherently, the
         * ISR enable is put at the end to avoid another CPU spinning in the
         * ISR on this lock.
         */
        if (first && priv->cur_sg) {
                zynq_fpga_set_irq(priv,
                                  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
        } else if (!priv->cur_sg) {
                /* The last transfer changes to DMA & PCAP mode since we do
                 * not want to continue until everything has been flushed into
                 * the PCAP.
                 */
                zynq_fpga_set_irq(priv,
                                  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
        }
}

static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
        struct zynq_fpga_priv *priv = data;
        u32 intr_status;

        /* If anything other than DMA completion is reported stop and hand
         * control back to zynq_fpga_ops_write, something went wrong,
         * otherwise progress the DMA.
         */
        spin_lock(&priv->dma_lock);
        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
            (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
                zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
                zynq_step_dma(priv);
                spin_unlock(&priv->dma_lock);
                return IRQ_HANDLED;
        }
        spin_unlock(&priv->dma_lock);

        zynq_fpga_set_irq(priv, 0);
        complete(&priv->dma_done);

        return IRQ_HANDLED;
}

/* Sanity check the proposed bitstream. It must start with the sync word in
 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
 * file with every 32 bit quantity swapped.
 */
static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
{
        for (; count >= 4; buf += 4, count -= 4)
                if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
                    buf[3] == 0xaa)
                        return true;
        return false;
}

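/* Prepare the PL for configuration: for a full reconfiguration, verify the
 * sync word, hold the PL in reset via the SLCR and toggle PCFG_PROG_B; in
 * all cases enable the PCAP interface before the DMA transfer begins.
 */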
static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
                                    struct fpga_image_info *info,
                                    const char *buf, size_t count)
{
        struct zynq_fpga_priv *priv;
        u32 ctrl, status;
        int err;

        priv = mgr->priv;

        err = clk_enable(priv->clk);
        if (err)
                return err;

        /* check if bitstream is encrypted and the system is still secure */
        if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                if (!(ctrl & CTRL_SEC_EN_MASK)) {
                        dev_err(&mgr->dev,
                                "System not secure, can't use crypted bitstreams\n");
                        err = -EINVAL;
                        goto out_err;
                }
        }

        /* don't globally reset PL if we're doing partial reconfig */
        if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
                if (!zynq_fpga_has_sync(buf, count)) {
                        dev_err(&mgr->dev,
                                "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
                        err = -EINVAL;
                        goto out_err;
                }

                /* assert AXI interface resets */
                regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
                             FPGA_RST_ALL_MASK);

                /* disable all level shifters */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_DISABLE_ALL_MASK);
                /* enable level shifters from PS to PL */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_ENABLE_PS_TO_PL);

                /* create a rising edge on PCFG_INIT. PCFG_INIT follows
                 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
                 * to make sure the rising edge actually happens.
                 * Note: PCFG_PROG_B is low active, sequence as described in
                 * UG585 v1.10 page 211
                 */
                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl |= CTRL_PCFG_PROG_B_MASK;

                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             status & STATUS_PCFG_INIT_MASK,
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
                        goto out_err;
                }

                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl &= ~CTRL_PCFG_PROG_B_MASK;

                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             !(status & STATUS_PCFG_INIT_MASK),
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
                        goto out_err;
                }

                ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
                ctrl |= CTRL_PCFG_PROG_B_MASK;

                zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

                err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
                                             status & STATUS_PCFG_INIT_MASK,
                                             INIT_POLL_DELAY,
                                             INIT_POLL_TIMEOUT);
                if (err) {
                        dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
                        goto out_err;
                }
        }

        /* set configuration register with following options:
         * - enable PCAP interface
         * - set throughput for maximum speed (if bitstream not encrypted)
         * - set CPU in user mode
         */
        ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
        if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
                zynq_fpga_write(priv, CTRL_OFFSET,
                                (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
                                 | CTRL_PCAP_RATE_EN_MASK | ctrl));
        else
                zynq_fpga_write(priv, CTRL_OFFSET,
                                (CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
                                 | ctrl));

        /* We expect that the command queue is empty right now. */
        status = zynq_fpga_read(priv, STATUS_OFFSET);
        if ((status & STATUS_DMA_Q_F) ||
            (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
                dev_err(&mgr->dev, "DMA command queue not right\n");
                err = -EBUSY;
                goto out_err;
        }

        /* ensure internal PCAP loopback is disabled */
        ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
        zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));

        clk_disable(priv->clk);

        return 0;

out_err:
        clk_disable(priv->clk);

        return err;
}

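/* Stream the bitstream into the PCAP by queueing the scatterlist entries on
 * the devcfg DMA engine and waiting for the ISR to signal completion.
 */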
static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
        struct zynq_fpga_priv *priv;
        const char *why;
        int err;
        u32 intr_status;
        unsigned long timeout;
        unsigned long flags;
        struct scatterlist *sg;
        int i;

        priv = mgr->priv;

        /* The hardware can only DMA multiples of 4 bytes, and it requires the
         * starting addresses to be aligned to 64 bits (UG585 pg 212).
         */
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                if ((sg->offset % 8) || (sg->length % 4)) {
                        dev_err(&mgr->dev,
                                "Invalid bitstream, chunks must be aligned\n");
                        return -EINVAL;
                }
        }

        priv->dma_nelms =
            dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        if (priv->dma_nelms == 0) {
                dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
                return -ENOMEM;
        }

        /* enable clock */
        err = clk_enable(priv->clk);
        if (err)
                goto out_free;

        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
        reinit_completion(&priv->dma_done);

        /* zynq_step_dma will turn on interrupts */
        spin_lock_irqsave(&priv->dma_lock, flags);
        priv->dma_elm = 0;
        priv->cur_sg = sgt->sgl;
        zynq_step_dma(priv);
        spin_unlock_irqrestore(&priv->dma_lock, flags);

        timeout = wait_for_completion_timeout(&priv->dma_done,
                                              msecs_to_jiffies(DMA_TIMEOUT_MS));

        spin_lock_irqsave(&priv->dma_lock, flags);
        zynq_fpga_set_irq(priv, 0);
        priv->cur_sg = NULL;
        spin_unlock_irqrestore(&priv->dma_lock, flags);

        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

        /* There doesn't seem to be a way to force cancel any DMA, so if
         * something went wrong we are relying on the hardware to have halted
         * the DMA before we get here, if there was we could use
         * wait_for_completion_interruptible too.
         */

        if (intr_status & IXR_ERROR_FLAGS_MASK) {
                why = "DMA reported error";
                err = -EIO;
                goto out_report;
        }

        if (priv->cur_sg ||
            !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
                if (timeout == 0)
                        why = "DMA timed out";
                else
                        why = "DMA did not complete";
                err = -EIO;
                goto out_report;
        }

        err = 0;
        goto out_clk;

out_report:
        dev_err(&mgr->dev,
                "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
                why,
                intr_status,
                zynq_fpga_read(priv, CTRL_OFFSET),
                zynq_fpga_read(priv, LOCK_OFFSET),
                zynq_fpga_read(priv, INT_MASK_OFFSET),
                zynq_fpga_read(priv, STATUS_OFFSET),
                zynq_fpga_read(priv, MCTRL_OFFSET));

out_clk:
        clk_disable(priv->clk);

out_free:
        dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
        return err;
}

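/* Finish configuration: hand 'PR' control back to the ICAP, wait for
 * PCFG_DONE, and for a full reconfiguration re-enable the PL-to-PS level
 * shifters and release the AXI resets.
 */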
static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
                                        struct fpga_image_info *info)
{
        struct zynq_fpga_priv *priv = mgr->priv;
        int err;
        u32 intr_status;

        err = clk_enable(priv->clk);
        if (err)
                return err;

        /* Release 'PR' control back to the ICAP */
        zynq_fpga_write(priv, CTRL_OFFSET,
                        zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);

        err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
                                     intr_status & IXR_PCFG_DONE_MASK,
                                     INIT_POLL_DELAY,
                                     INIT_POLL_TIMEOUT);

        clk_disable(priv->clk);

        if (err)
                return err;

        /* for the partial reconfig case we didn't touch the level shifters */
        if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
                /* enable level shifters from PL to PS */
                regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
                             LVL_SHFTR_ENABLE_PL_TO_PS);

                /* deassert AXI interface resets */
                regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
                             FPGA_RST_NONE_MASK);
        }

        return 0;
}

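/* Report FPGA_MGR_STATE_OPERATING if the PL has already been programmed
 * (PCFG_DONE set), otherwise FPGA_MGR_STATE_UNKNOWN.
 */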
static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
{
        int err;
        u32 intr_status;
        struct zynq_fpga_priv *priv;

        priv = mgr->priv;

        err = clk_enable(priv->clk);
        if (err)
                return FPGA_MGR_STATE_UNKNOWN;

        intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
        clk_disable(priv->clk);

        if (intr_status & IXR_PCFG_DONE_MASK)
                return FPGA_MGR_STATE_OPERATING;

        return FPGA_MGR_STATE_UNKNOWN;
}

static const struct fpga_manager_ops zynq_fpga_ops = {
        .initial_header_size = 128,
        .state = zynq_fpga_ops_state,
        .write_init = zynq_fpga_ops_write_init,
        .write_sg = zynq_fpga_ops_write,
        .write_complete = zynq_fpga_ops_write_complete,
};

static int zynq_fpga_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct zynq_fpga_priv *priv;
        struct fpga_manager *mgr;
        struct resource *res;
        int err;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        spin_lock_init(&priv->dma_lock);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->io_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->io_base))
                return PTR_ERR(priv->io_base);

        priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
                "syscon");
        if (IS_ERR(priv->slcr)) {
                dev_err(dev, "unable to get zynq-slcr regmap\n");
                return PTR_ERR(priv->slcr);
        }

        init_completion(&priv->dma_done);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        priv->clk = devm_clk_get(dev, "ref_clk");
        if (IS_ERR(priv->clk)) {
                if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
                        dev_err(dev, "input clock not found\n");
                return PTR_ERR(priv->clk);
        }

        err = clk_prepare_enable(priv->clk);
        if (err) {
                dev_err(dev, "unable to enable clock\n");
                return err;
        }

        /* unlock the device */
        zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);

        zynq_fpga_set_irq(priv, 0);
        zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
        err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
                               priv);
        if (err) {
                dev_err(dev, "unable to request IRQ\n");
                clk_disable_unprepare(priv->clk);
                return err;
        }

        clk_disable(priv->clk);

        mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
                                   &zynq_fpga_ops, priv);
        if (!mgr)
                return -ENOMEM;

        platform_set_drvdata(pdev, mgr);

        err = fpga_mgr_register(mgr);
        if (err) {
                dev_err(dev, "unable to register FPGA manager\n");
                clk_unprepare(priv->clk);
                return err;
        }

        return 0;
}

static int zynq_fpga_remove(struct platform_device *pdev)
{
        struct zynq_fpga_priv *priv;
        struct fpga_manager *mgr;

        mgr = platform_get_drvdata(pdev);
        priv = mgr->priv;

        fpga_mgr_unregister(mgr);

        clk_unprepare(priv->clk);

        return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id zynq_fpga_of_match[] = {
        { .compatible = "xlnx,zynq-devcfg-1.0", },
        {},
};
MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
#endif

static struct platform_driver zynq_fpga_driver = {
        .probe = zynq_fpga_probe,
        .remove = zynq_fpga_remove,
        .driver = {
                .name = "zynq_fpga_manager",
                .of_match_table = of_match_ptr(zynq_fpga_of_match),
        },
};

module_platform_driver(zynq_fpga_driver);

MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
MODULE_LICENSE("GPL v2");