dfl-afu-main.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable Port by clearing the port soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable() should only be used after __afu_port_disable().
 *
 * The caller needs to hold the lock for protection.
 */
void __afu_port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}

#define RST_POLL_INVL		10	/* us */
#define RST_POLL_TIMEOUT	1000	/* us */

/**
 * __afu_port_disable - disable a port by holding reset
 * @pdev: port platform device.
 *
 * Disable Port by setting the port soft reset bit; this puts the port into
 * reset.
 *
 * The caller needs to hold the lock for protection.
 */
int __afu_port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_soft_reset_ack to determine if the
	 * reset is done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}
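
/*
 * Note: the soft reset is refcounted. __afu_port_disable() only asserts
 * reset on the 0 -> 1 transition of pdata->disable_count, and
 * __afu_port_enable() only deasserts it when the count drops back to 0, so
 * nested disable/enable pairs are safe. An illustrative sketch of a
 * hypothetical nested caller (pdata->lock held throughout):
 *
 *	ret = __afu_port_disable(pdev);		// count 0 -> 1, reset asserted
 *	if (!ret) {
 *		ret = __afu_port_disable(pdev);	// count 1 -> 2, no HW access
 *		if (!ret)
 *			__afu_port_enable(pdev);	// count 2 -> 1, still in reset
 *		__afu_port_enable(pdev);	// count 1 -> 0, reset cleared
 *	}
 */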

/*
 * This function resets the FPGA Port and its accelerator (AFU) via
 * __afu_port_disable() and __afu_port_enable() (set the port soft reset bit
 * and then clear it). Userspace can do a Port reset at any time, e.g. during
 * DMA or Partial Reconfiguration. It should never cause any system level
 * issue, only functional failure (e.g. DMA or PR operation failure), which
 * is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = __afu_port_disable(pdev);
	if (!ret)
		__afu_port_enable(pdev);

	return ret;
}

static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	umode_t mode = attr->mode;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * The userclk sysfs interfaces are only visible when the
		 * port revision is 0; hardware with revision >0 does not
		 * support them.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs      = port_hdr_attrs,
	.is_visible = port_hdr_attrs_visible,
};
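
/*
 * Illustrative userspace read of one of the attributes above; the sysfs
 * path (instance "dfl-port.0") is an assumption that depends on
 * enumeration order:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	char buf[16];
 *	int fd = open("/sys/bus/platform/devices/dfl-port.0/id", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);	// e.g. "0\n" from id_show()
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("port id: %s", buf);
 *	}
 *	close(fd);
 */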

static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}
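
/*
 * Illustrative userspace use of the reset ioctl handled above; the device
 * node name is an assumption (it depends on enumeration order):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int fd = open("/dev/dfl-port.0", O_RDWR);
 *	if (fd >= 0)
 *		ioctl(fd, DFL_FPGA_PORT_RESET, 0);	// arg must be 0
 */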

static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs      = port_afu_attrs,
	.is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static long
port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_PORT_UINT_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

static const struct dfl_feature_id port_uint_id_table[] = {
	{.id = PORT_FEATURE_ID_UINT,},
	{0,}
};

static const struct dfl_feature_ops port_uint_ops = {
	.ioctl = port_uint_ioctl,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	},
	{
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	},
	{
		.id_table = port_uint_id_table,
		.ops = &port_uint_ops,
	},
	{
		.ops = NULL,
	}
};

static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata)) {
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
		__port_reset(pdev);
		afu_dma_region_destroy(pdata);
	}
	mutex_unlock(&pdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
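
/*
 * The argsz/minsz handling above follows the extensible-ioctl convention:
 * userspace sets .argsz to the struct size it was compiled against, and the
 * kernel copies in only the fields it knows about (up to num_umsgs) before
 * copying the full struct back out. A minimal userspace sketch (assumes an
 * already-opened port fd as in the afu_open() path):
 *
 *	struct dfl_fpga_port_info info = { .argsz = sizeof(info) };
 *
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info))
 *		printf("regions=%u umsgs=%u\n",
 *		       info.num_regions, info.num_umsgs);
 */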

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}
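
/*
 * Illustrative userspace round trip through the two DMA ioctls above; the
 * buffer sizing (one page) is an example choice, and fd is assumed to be
 * an open port device:
 *
 *	struct dfl_fpga_port_dma_map map = { .argsz = sizeof(map) };
 *	struct dfl_fpga_port_dma_unmap unmap = { .argsz = sizeof(unmap) };
 *	long pg = sysconf(_SC_PAGESIZE);
 *	void *buf = NULL;
 *
 *	if (posix_memalign(&buf, pg, pg) == 0) {
 *		map.user_addr = (__u64)(uintptr_t)buf;
 *		map.length = pg;
 *		if (!ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map)) {
 *			// hand map.iova to the AFU for DMA, then tear down
 *			unmap.iova = map.iova;
 *			ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *		}
 *	}
 */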

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd. A
		 * sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub-feature, and returns 0 or another
		 * error code when it is.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}
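
/*
 * Illustrative userspace mapping of the AFU MMIO region, using the region
 * offset/size reported by DFL_FPGA_PORT_GET_REGION_INFO as the mmap()
 * offset checked above (fd is an assumed open port device):
 *
 *	struct dfl_fpga_port_region_info rinfo = {
 *		.argsz = sizeof(rinfo),
 *		.index = DFL_PORT_REGION_INDEX_AFU,
 *	};
 *
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo)) {
 *		void *mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, rinfo.offset);
 *		// mmio now points at the AFU registers (mapped uncached)
 *	}
 */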

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	mutex_lock(&pdata->lock);
	if (enable)
		__afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver = {
		.name = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe = afu_probe,
	.remove = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);
	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");