// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx SDFEC
 *
 * Copyright (C) 2019 Xilinx, Inc.
 *
 * Description:
 * This driver is developed for the SDFEC16 (Soft Decision FEC, 16nm)
 * IP. It exposes a char device which supports file operations
 * like open(), close() and ioctl().
 */
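
/*
 * Example user-space use (a minimal sketch, not part of the driver):
 * the device node name comes from the miscdevice registration below
 * ("/dev/xsdfec0" for the first instance) and the ioctls are those
 * defined in <uapi/misc/xilinx_sdfec.h>.
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_status status;
 *
 *	if (fd >= 0 && !ioctl(fd, XSDFEC_GET_STATUS, &status))
 *		printf("state %d activity %d\n", status.state,
 *		       status.activity);
 */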

#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>

#include <uapi/misc/xilinx_sdfec.h>

#define DEV_NAME_LEN 12

static DEFINE_IDA(dev_nrs);

/* Xilinx SDFEC Register Map */
/* CODE_WRI_PROTECT Register */
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)

/* ACTIVE Register */
#define XSDFEC_ACTIVE_ADDR (0x8)
#define XSDFEC_IS_ACTIVITY_SET (0x1)

/* AXIS_WIDTH Register */
#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)

/* AXIS_ENABLE Register */
#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
#define XSDFEC_AXIS_ENABLE_MASK \
	(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)

/* FEC_CODE Register */
#define XSDFEC_FEC_CODE_ADDR (0x14)

/* ORDER Register Map */
#define XSDFEC_ORDER_ADDR (0x18)

/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x1C)
/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x3F)

/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x20)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x24)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x28)

/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x2C)
/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
/* PL Initialize Single Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
/* PL Initialize Multi Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
/* Multi Bit Error to Event Shift */
#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
/* PL Initialize Multi Bit Error to Event Shift */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status PL Initialize Bit Mask */
#define XSDFEC_PL_INIT_ECC_ISR_MASK \
	(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status All Bit Mask */
#define XSDFEC_ALL_ECC_ISR_MASK \
	(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
/* ECC Interrupt Status Single Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
	(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
/* ECC Interrupt Status Multi Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
	(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)

/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x30)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x34)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x38)

/* BYPASS Register */
#define XSDFEC_BYPASS_ADDR (0x3C)

/* Turbo Code Register */
#define XSDFEC_TURBO_ADDR (0x100)
#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
#define XSDFEC_TURBO_SCALE_MAX (15)
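
/*
 * LDPC code parameter registers: each code ID owns one REG0..REG3 slot,
 * consecutive slots sit XSDFEC_LDPC_REG_JUMP (0x10) bytes apart, and the
 * BASE..HIGH ranges below span 128 such slots.
 */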
/* REG0 Register */
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
#define XSDFEC_REG0_N_MIN (4)
#define XSDFEC_REG0_N_MAX (32768)
#define XSDFEC_REG0_N_MUL_P (256)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MIN (2)
#define XSDFEC_REG0_K_MAX (32766)
#define XSDFEC_REG0_K_MUL_P (256)
#define XSDFEC_REG0_K_LSB (16)

/* REG1 Register */
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27F4)
#define XSDFEC_REG1_PSIZE_MIN (2)
#define XSDFEC_REG1_PSIZE_MAX (512)
#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0xFF800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x100000)

/* REG2 Register */
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27F8)
#define XSDFEC_REG2_NLAYERS_MIN (1)
#define XSDFEC_REG2_NLAYERS_MAX (256)
#define XSDFEC_REG2_NMQC_MASK (0xFFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
#define XSDFEC_REG2_SPECIAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)

/* REG3 Register */
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)

#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG_WIDTH_JUMP (4)

/* The maximum number of pinned pages */
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)

/**
 * struct xsdfec_clks - For managing SD-FEC clocks
 * @core_clk: Main processing clock for core
 * @axi_clk: AXI4-Lite memory-mapped clock
 * @din_words_clk: DIN Words AXI4-Stream Slave clock
 * @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
 * @ctrl_clk: Control AXI4-Stream Slave clock
 * @status_clk: Status AXI4-Stream Slave clock
 */
struct xsdfec_clks {
	struct clk *core_clk;
	struct clk *axi_clk;
	struct clk *din_words_clk;
	struct clk *din_clk;
	struct clk *dout_clk;
	struct clk *dout_words_clk;
	struct clk *ctrl_clk;
	struct clk *status_clk;
};

/**
 * struct xsdfec_dev - Driver data for SDFEC
 * @miscdev: Misc device handle
 * @clks: Clocks managed by the SDFEC driver
 * @waitq: Driver wait queue
 * @config: Configuration of the SDFEC device
 * @dev_name: Device name
 * @flags: spinlock flags
 * @regs: device physical base address
 * @dev: pointer to device struct
 * @state: State of the SDFEC device
 * @error_data_lock: Error counter and states spinlock
 * @dev_id: Device ID
 * @isr_err_count: Count of ISR errors
 * @cecc_count: Count of Correctable ECC errors (SBE)
 * @uecc_count: Count of Uncorrectable ECC errors (MBE)
 * @irq: IRQ number
 * @state_updated: indicates the state was updated by the interrupt handler
 * @stats_updated: indicates the stats were updated by the interrupt handler
 * @intr_enabled: indicates IRQs are enabled
 *
 * This structure contains necessary state for SDFEC driver to operate
 */
struct xsdfec_dev {
	struct miscdevice miscdev;
	struct xsdfec_clks clks;
	wait_queue_head_t waitq;
	struct xsdfec_config config;
	char dev_name[DEV_NAME_LEN];
	unsigned long flags;
	void __iomem *regs;
	struct device *dev;
	enum xsdfec_state state;
	/* Spinlock to protect state_updated and stats_updated */
	spinlock_t error_data_lock;
	int dev_id;
	u32 isr_err_count;
	u32 cecc_count;
	u32 uecc_count;
	int irq;
	bool state_updated;
	bool stats_updated;
	bool intr_enabled;
};

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
				   u32 value)
{
	dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
	iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
	u32 rval;

	rval = ioread32(xsdfec->regs + addr);
	dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
	return rval;
}

static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
					u32 reg_offset, u32 bit_num,
					char *config_value)
{
	u32 reg_val;
	u32 bit_mask = 1 << bit_num;

	reg_val = xsdfec_regread(xsdfec, reg_offset);
	*config_value = (reg_val & bit_mask) > 0;
}

static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	bool sdfec_started;

	/* Update the Order */
	reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
	xsdfec->config.order = reg_value;

	update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
				    0, /* Bit Number, maybe change to mask */
				    &xsdfec->config.bypass);

	update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
				    0, /* Bit Number */
				    &xsdfec->config.code_wr_protect);

	reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
	xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
	xsdfec->config.irq.enable_ecc_isr =
		(reg_value & XSDFEC_ECC_ISR_MASK) > 0;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
	if (sdfec_started)
		xsdfec->state = XSDFEC_STARTED;
	else
		xsdfec->state = XSDFEC_STOPPED;
}

static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_status status;
	int err;

	memset(&status, 0, sizeof(status));
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	status.state = xsdfec->state;
	xsdfec->state_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
			   XSDFEC_IS_ACTIVITY_SET);

	err = copy_to_user(arg, &status, sizeof(status));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;

	err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if (mask_read & XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling irq with IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
		if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC disabling irq with IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
	u32 mask_read;

	if (enable) {
		/* Enable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
			dev_dbg(xsdfec->dev,
				"SDFEC enabling ECC irq with ECC IER failed");
			return -EIO;
		}
	} else {
		/* Disable */
		xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
				XSDFEC_ALL_ECC_ISR_MASK);
		mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
		if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_ECC_ISR_MASK) ||
		      ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
		       XSDFEC_PL_INIT_ECC_ISR_MASK))) {
			dev_dbg(xsdfec->dev,
				"SDFEC disable ECC irq with ECC IDR failed");
			return -EIO;
		}
	}
	return 0;
}

static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_irq irq;
	int err;
	int isr_err;
	int ecc_err;

	err = copy_from_user(&irq, arg, sizeof(irq));
	if (err)
		return -EFAULT;

	/* Setup tlast related IRQ */
	isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
	if (!isr_err)
		xsdfec->config.irq.enable_isr = irq.enable_isr;

	/* Setup ECC related IRQ */
	ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
	if (!ecc_err)
		xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

	if (isr_err < 0 || ecc_err < 0)
		err = -EIO;

	return err;
}

static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_turbo turbo;
	int err;
	u32 turbo_write;

	err = copy_from_user(&turbo, arg, sizeof(turbo));
	if (err)
		return -EFAULT;

	if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
		return -EINVAL;

	if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
		return -EINVAL;

	/* Check to see what device tree says about the FEC codes */
	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
		       << XSDFEC_TURBO_SCALE_BIT_POS) |
		      turbo.alg;
	xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
	return err;
}

static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
	u32 reg_value;
	struct xsdfec_turbo turbo_params;
	int err;

	if (xsdfec->config.code == XSDFEC_LDPC_CODE)
		return -EIO;

	memset(&turbo_params, 0, sizeof(turbo_params));
	reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

	turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
			     XSDFEC_TURBO_SCALE_BIT_POS;
	turbo_params.alg = reg_value & 0x1;

	err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
	if (err)
		err = -EFAULT;

	return err;
}
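
/*
 * The xsdfec_regN_write() helpers below each validate and pack one group
 * of LDPC code parameters; @offset is the code ID that selects which
 * REG0..REG3 slot is written.
 */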
static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
			     u32 offset)
{
	u32 wdata;

	if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
	    (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
		dev_dbg(xsdfec->dev, "N value is not in range");
		return -EINVAL;
	}
	n <<= XSDFEC_REG0_N_LSB;

	if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
	    (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
		dev_dbg(xsdfec->dev, "K value is not in range");
		return -EINVAL;
	}
	k = k << XSDFEC_REG0_K_LSB;
	wdata = k | n;

	if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
			     u32 no_packing, u32 nm, u32 offset)
{
	u32 wdata;

	if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
		dev_dbg(xsdfec->dev, "Psize is not in range");
		return -EINVAL;
	}

	if (no_packing != 0 && no_packing != 1)
		dev_dbg(xsdfec->dev, "No-packing bit register invalid");
	no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
		      XSDFEC_REG1_NO_PACKING_MASK);

	if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
		dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
	nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;

	wdata = nm | no_packing | psize;
	if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
			     u32 norm_type, u32 special_qc, u32 no_final_parity,
			     u32 max_schedule, u32 offset)
{
	u32 wdata;

	if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
	    nlayers > XSDFEC_REG2_NLAYERS_MAX) {
		dev_dbg(xsdfec->dev, "Nlayers is not in range");
		return -EINVAL;
	}
	if (nmqc & ~(XSDFEC_REG2_NMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
		dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
	nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NMQC_MASK;

	if (norm_type > 1)
		dev_dbg(xsdfec->dev, "Norm type is invalid");
	norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
		     XSDFEC_REG2_NORM_TYPE_MASK);

	if (special_qc > 1)
		dev_dbg(xsdfec->dev, "Special QC is invalid");
	special_qc = ((special_qc << XSDFEC_REG2_SPECIAL_QC_LSB) &
		      XSDFEC_REG2_SPECIAL_QC_MASK);

	if (no_final_parity > 1)
		dev_dbg(xsdfec->dev, "No final parity check invalid");
	no_final_parity =
		((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
		 XSDFEC_REG2_NO_FINAL_PARITY_MASK);

	if (max_schedule &
	    ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
		dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
	max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
			XSDFEC_REG2_MAX_SCHEDULE_MASK);

	wdata = (max_schedule | no_final_parity | special_qc | norm_type |
		 nmqc | nlayers);

	if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}

static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
			     u16 qc_off, u32 offset)
{
	u32 wdata;

	wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
		 (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
	if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
	    XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
		dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP));
		return -EINVAL;
	}
	xsdfec_regwrite(xsdfec,
			XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
				(offset * XSDFEC_LDPC_REG_JUMP),
			wdata);
	return 0;
}
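
/*
 * Copy an LDPC table from user space into the core: the user buffer is
 * pinned with pin_user_pages_fast(), each page is kmap()ed in turn and
 * written out word by word until either @len words are done or a page
 * boundary is crossed. Note that indexing addr[] with the global word
 * index appears to assume @src_ptr is page-aligned.
 */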
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
			      u32 *src_ptr, u32 len, const u32 base_addr,
			      const u32 depth)
{
	u32 reg = 0;
	int res, i, nr_pages;
	u32 n;
	u32 *addr = NULL;
	struct page *pages[MAX_NUM_PAGES];

	/*
	 * Writes that go beyond the length of
	 * Shared Scale(SC) table should fail
	 */
	if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
	    len > depth / XSDFEC_REG_WIDTH_JUMP ||
	    offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
		dev_dbg(xsdfec->dev, "Write exceeds SC table length");
		return -EINVAL;
	}

	n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
	if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
		n += 1;

	if (WARN_ON_ONCE(n > INT_MAX))
		return -EINVAL;

	nr_pages = n;

	res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
	if (res < nr_pages) {
		if (res > 0)
			unpin_user_pages(pages, res);
		return -EINVAL;
	}

	for (i = 0; i < nr_pages; i++) {
		addr = kmap(pages[i]);
		do {
			xsdfec_regwrite(xsdfec,
					base_addr + ((offset + reg) *
						     XSDFEC_REG_WIDTH_JUMP),
					addr[reg]);
			reg++;
		} while ((reg < len) &&
			 ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
		kunmap(pages[i]);
		unpin_user_page(pages[i]);
	}
	return 0;
}

static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
	struct xsdfec_ldpc_params *ldpc;
	int ret, n;

	ldpc = memdup_user(arg, sizeof(*ldpc));
	if (IS_ERR(ldpc))
		return PTR_ERR(ldpc);

	if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
		ret = -EIO;
		goto err_out;
	}

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED) {
		ret = -EIO;
		goto err_out;
	}

	if (xsdfec->config.code_wr_protect) {
		ret = -EIO;
		goto err_out;
	}

	/* Write Reg 0 */
	ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 1 */
	ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 2 */
	ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
				ldpc->norm_type, ldpc->special_qc,
				ldpc->no_final_parity, ldpc->max_schedule,
				ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Reg 3 */
	ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
				ldpc->qc_off, ldpc->code_id);
	if (ret)
		goto err_out;

	/* Write Shared Codes */
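	/*
	 * Each 32-bit SC table word appears to hold the scale factors for
	 * four layers, hence the word count of nlayers / 4, rounded up.
	 */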
	n = ldpc->nlayers / 4;
	if (ldpc->nlayers % 4)
		n++;

	ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
				 XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
				 XSDFEC_SC_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
				 ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
				 XSDFEC_LA_TABLE_DEPTH);
	if (ret < 0)
		goto err_out;

	ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
				 ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
				 XSDFEC_QC_TABLE_DEPTH);
err_out:
	kfree(ldpc);
	return ret;
}

static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
	bool order_invalid;
	enum xsdfec_order order;
	int err;

	err = get_user(order, (enum xsdfec_order __user *)arg);
	if (err)
		return -EFAULT;

	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
			(order != XSDFEC_OUT_OF_ORDER);
	if (order_invalid)
		return -EINVAL;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);

	xsdfec->config.order = order;

	return 0;
}

static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	bool bypass;
	int err;

	err = get_user(bypass, arg);
	if (err)
		return -EFAULT;

	/* Verify Device has not started */
	if (xsdfec->state == XSDFEC_STARTED)
		return -EIO;

	if (bypass)
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
	else
		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);

	xsdfec->config.bypass = bypass;

	return 0;
}

static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
	u32 reg_value;
	bool is_active;
	int err;

	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
	/* using a double ! operator instead of casting */
	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
	err = put_user(is_active, arg);
	if (err)
		return -EFAULT;

	return err;
}

static u32
xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
{
	u32 axis_width_field = 0;

	switch (axis_width_cfg) {
	case XSDFEC_1x128b:
		axis_width_field = 0;
		break;
	case XSDFEC_2x128b:
		axis_width_field = 1;
		break;
	case XSDFEC_4x128b:
		axis_width_field = 2;
		break;
	}

	return axis_width_field;
}

static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
					       axis_word_inc_cfg)
{
	u32 axis_words_field = 0;

	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
		axis_words_field = 0;
	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
		axis_words_field = 1;

	return axis_words_field;
}

static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
	u32 reg_value;
	u32 dout_words_field;
	u32 dout_width_field;
	u32 din_words_field;
	u32 din_width_field;
	struct xsdfec_config *config = &xsdfec->config;

	/* translate config info to register values */
	dout_words_field =
		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
	dout_width_field =
		xsdfec_translate_axis_width_cfg_val(config->dout_width);
	din_words_field =
		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
	din_width_field =
		xsdfec_translate_axis_width_cfg_val(config->din_width);

	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;

	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);

	return 0;
}

static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
{
	return 0;
}

static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
	regread &= 0x1;
	if (regread != xsdfec->config.code) {
		dev_dbg(xsdfec->dev,
			"%s SDFEC HW code does not match driver code, reg %d, code %d",
			__func__, regread, xsdfec->config.code);
		return -EINVAL;
	}

	/* Set AXIS enable */
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
			XSDFEC_AXIS_ENABLE_MASK);
	/* Done */
	xsdfec->state = XSDFEC_STARTED;
	return 0;
}

static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
	u32 regread;

	if (xsdfec->state != XSDFEC_STARTED)
		dev_dbg(xsdfec->dev, "Device not started correctly");
	/* Disable AXIS_ENABLE Input interfaces only */
	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
	/* Stop */
	xsdfec->state = XSDFEC_STOPPED;
	return 0;
}

static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	xsdfec->isr_err_count = 0;
	xsdfec->uecc_count = 0;
	xsdfec->cecc_count = 0;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return 0;
}

static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
	int err;
	struct xsdfec_stats user_stats;

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	user_stats.isr_err_count = xsdfec->isr_err_count;
	user_stats.cecc_count = xsdfec->cecc_count;
	user_stats.uecc_count = xsdfec->uecc_count;
	xsdfec->stats_updated = false;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
	if (err)
		err = -EFAULT;

	return err;
}

static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
	/* Ensure registers are aligned with core configuration */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
	xsdfec_cfg_axi_streams(xsdfec);
	update_config_from_hw(xsdfec);

	return 0;
}

static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
			     unsigned long data)
{
	struct xsdfec_dev *xsdfec;
	void __user *arg = NULL;
	int rval = -EINVAL;

	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

	/* In failed state allow only reset and get status IOCTLs */
	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
		return -EPERM;
	}

	if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
		return -ENOTTY;

	/* check if ioctl argument is present and valid */
	if (_IOC_DIR(cmd) != _IOC_NONE) {
		arg = (void __user *)data;
		if (!arg)
			return rval;
	}

	switch (cmd) {
	case XSDFEC_START_DEV:
		rval = xsdfec_start(xsdfec);
		break;
	case XSDFEC_STOP_DEV:
		rval = xsdfec_stop(xsdfec);
		break;
	case XSDFEC_CLEAR_STATS:
		rval = xsdfec_clear_stats(xsdfec);
		break;
	case XSDFEC_GET_STATS:
		rval = xsdfec_get_stats(xsdfec, arg);
		break;
	case XSDFEC_GET_STATUS:
		rval = xsdfec_get_status(xsdfec, arg);
		break;
	case XSDFEC_GET_CONFIG:
		rval = xsdfec_get_config(xsdfec, arg);
		break;
	case XSDFEC_SET_DEFAULT_CONFIG:
		rval = xsdfec_set_default_config(xsdfec);
		break;
	case XSDFEC_SET_IRQ:
		rval = xsdfec_set_irq(xsdfec, arg);
		break;
	case XSDFEC_SET_TURBO:
		rval = xsdfec_set_turbo(xsdfec, arg);
		break;
	case XSDFEC_GET_TURBO:
		rval = xsdfec_get_turbo(xsdfec, arg);
		break;
	case XSDFEC_ADD_LDPC_CODE_PARAMS:
		rval = xsdfec_add_ldpc(xsdfec, arg);
		break;
	case XSDFEC_SET_ORDER:
		rval = xsdfec_set_order(xsdfec, arg);
		break;
	case XSDFEC_SET_BYPASS:
		rval = xsdfec_set_bypass(xsdfec, arg);
		break;
	case XSDFEC_IS_ACTIVE:
		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
		break;
	default:
		/* Should not get here */
		break;
	}
	return rval;
}

#ifdef CONFIG_COMPAT
static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
				    unsigned long data)
{
	return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
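
/*
 * poll() support: the IRQ thread wakes @waitq after latching a change;
 * EPOLLPRI reports a device state change and EPOLLRDNORM fresh error
 * statistics (both also set EPOLLIN).
 */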
static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct xsdfec_dev *xsdfec;

	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

	if (!xsdfec)
		return EPOLLNVAL | EPOLLHUP;

	poll_wait(file, &xsdfec->waitq, wait);

	/* XSDFEC ISR detected an error */
	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	if (xsdfec->state_updated)
		mask |= EPOLLIN | EPOLLPRI;

	if (xsdfec->stats_updated)
		mask |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

	return mask;
}

static const struct file_operations xsdfec_fops = {
	.owner = THIS_MODULE,
	.open = xsdfec_dev_open,
	.release = xsdfec_dev_release,
	.unlocked_ioctl = xsdfec_dev_ioctl,
	.poll = xsdfec_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = xsdfec_dev_compat_ioctl,
#endif
};

static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
	struct device *dev = xsdfec->dev;
	struct device_node *node = dev->of_node;
	int rval;
	const char *fec_code;
	u32 din_width;
	u32 din_word_include;
	u32 dout_width;
	u32 dout_word_include;

	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
	if (rval < 0)
		return rval;

	if (!strcasecmp(fec_code, "ldpc"))
		xsdfec->config.code = XSDFEC_LDPC_CODE;
	else if (!strcasecmp(fec_code, "turbo"))
		xsdfec->config.code = XSDFEC_TURBO_CODE;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
				    &din_word_include);
	if (rval < 0)
		return rval;

	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.din_word_include = din_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
	if (rval < 0)
		return rval;

	switch (din_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.din_width = din_width;
		break;
	default:
		return -EINVAL;
	}

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
				    &dout_word_include);
	if (rval < 0)
		return rval;

	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
		xsdfec->config.dout_word_include = dout_word_include;
	else
		return -EINVAL;

	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
	if (rval < 0)
		return rval;

	switch (dout_width) {
	/* Fall through and set for valid values */
	case XSDFEC_1x128b:
	case XSDFEC_2x128b:
	case XSDFEC_4x128b:
		xsdfec->config.dout_width = dout_width;
		break;
	default:
		return -EINVAL;
	}

	/* Write LDPC to CODE Register */
	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);

	xsdfec_cfg_axi_streams(xsdfec);

	return 0;
}
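
/*
 * Threaded IRQ handler: mask both interrupt sources, read and clear the
 * latched status bits, fold them into the error counters and device
 * state, then wake any poll()ers before unmasking again.
 */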
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
	struct xsdfec_dev *xsdfec = dev_id;
	irqreturn_t ret = IRQ_HANDLED;
	u32 ecc_err;
	u32 isr_err;
	u32 uecc_count;
	u32 cecc_count;
	u32 isr_err_count;
	u32 aecc_count;
	u32 tmp;

	WARN_ON(xsdfec->irq != irq);

	/* Mask Interrupts */
	xsdfec_isr_enable(xsdfec, false);
	xsdfec_ecc_isr_enable(xsdfec, false);

	/* Read ISR */
	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
	/* Clear the interrupts */
	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
	/* Count uncorrectable 2-bit errors */
	uecc_count = hweight32(tmp);
	/* Count all ECC errors */
	aecc_count = hweight32(ecc_err);
	/* Number of correctable 1-bit ECC error */
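	/*
	 * The subtraction below assumes every multi-bit error also raises
	 * its corresponding single-bit flag, so each uncorrectable error
	 * contributes two set bits to aecc_count.
	 */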
	cecc_count = aecc_count - 2 * uecc_count;
	/* Count ISR errors */
	isr_err_count = hweight32(isr_err);
	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
		uecc_count, aecc_count, cecc_count, isr_err_count);
	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
		xsdfec->cecc_count, xsdfec->isr_err_count);

	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Add new errors to a 2-bits counter */
	if (uecc_count)
		xsdfec->uecc_count += uecc_count;
	/* Add new errors to a 1-bits counter */
	if (cecc_count)
		xsdfec->cecc_count += cecc_count;
	/* Add new errors to a ISR counter */
	if (isr_err_count)
		xsdfec->isr_err_count += isr_err_count;

	/* Update state/stats flag */
	if (uecc_count) {
		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_NEEDS_RESET;
		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
			xsdfec->state = XSDFEC_PL_RECONFIGURE;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	if (cecc_count)
		xsdfec->stats_updated = true;

	if (isr_err_count) {
		xsdfec->state = XSDFEC_NEEDS_RESET;
		xsdfec->stats_updated = true;
		xsdfec->state_updated = true;
	}

	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
		xsdfec->stats_updated);

	/* Enable another polling */
	if (xsdfec->state_updated || xsdfec->stats_updated)
		wake_up_interruptible(&xsdfec->waitq);
	else
		ret = IRQ_NONE;

	/* Unmask Interrupts */
	xsdfec_isr_enable(xsdfec, true);
	xsdfec_ecc_isr_enable(xsdfec, true);

	return ret;
}
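
/*
 * Only core_clk and s_axi_aclk are mandatory; the stream-interface clocks
 * are optional, so -ENOENT from devm_clk_get() leaves the handle NULL,
 * which the clk API treats as a no-op when enabling.
 */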
static int xsdfec_clk_init(struct platform_device *pdev,
			   struct xsdfec_clks *clks)
{
	int err;

	clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(clks->core_clk)) {
		dev_err(&pdev->dev, "failed to get core_clk");
		return PTR_ERR(clks->core_clk);
	}

	clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clks->axi_clk)) {
		dev_err(&pdev->dev, "failed to get axi_clk");
		return PTR_ERR(clks->axi_clk);
	}

	clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
	if (IS_ERR(clks->din_words_clk)) {
		if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_words_clk);
			return err;
		}
		clks->din_words_clk = NULL;
	}

	clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
	if (IS_ERR(clks->din_clk)) {
		if (PTR_ERR(clks->din_clk) != -ENOENT) {
			err = PTR_ERR(clks->din_clk);
			return err;
		}
		clks->din_clk = NULL;
	}

	clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
	if (IS_ERR(clks->dout_clk)) {
		if (PTR_ERR(clks->dout_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_clk);
			return err;
		}
		clks->dout_clk = NULL;
	}

	clks->dout_words_clk =
		devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
	if (IS_ERR(clks->dout_words_clk)) {
		if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
			err = PTR_ERR(clks->dout_words_clk);
			return err;
		}
		clks->dout_words_clk = NULL;
	}

	clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
	if (IS_ERR(clks->ctrl_clk)) {
		if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
			err = PTR_ERR(clks->ctrl_clk);
			return err;
		}
		clks->ctrl_clk = NULL;
	}

	clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
	if (IS_ERR(clks->status_clk)) {
		if (PTR_ERR(clks->status_clk) != -ENOENT) {
			err = PTR_ERR(clks->status_clk);
			return err;
		}
		clks->status_clk = NULL;
	}

	err = clk_prepare_enable(clks->core_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
		return err;
	}

	err = clk_prepare_enable(clks->axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
		goto err_disable_core_clk;
	}

	err = clk_prepare_enable(clks->din_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
		goto err_disable_axi_clk;
	}

	err = clk_prepare_enable(clks->din_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
		goto err_disable_din_clk;
	}

	err = clk_prepare_enable(clks->dout_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
		goto err_disable_din_words_clk;
	}

	err = clk_prepare_enable(clks->dout_words_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
			err);
		goto err_disable_dout_clk;
	}

	err = clk_prepare_enable(clks->ctrl_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
		goto err_disable_dout_words_clk;
	}

	err = clk_prepare_enable(clks->status_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
		goto err_disable_ctrl_clk;
	}

	return err;

err_disable_ctrl_clk:
	clk_disable_unprepare(clks->ctrl_clk);
err_disable_dout_words_clk:
	clk_disable_unprepare(clks->dout_words_clk);
err_disable_dout_clk:
	clk_disable_unprepare(clks->dout_clk);
err_disable_din_words_clk:
	clk_disable_unprepare(clks->din_words_clk);
err_disable_din_clk:
	clk_disable_unprepare(clks->din_clk);
err_disable_axi_clk:
	clk_disable_unprepare(clks->axi_clk);
err_disable_core_clk:
	clk_disable_unprepare(clks->core_clk);

	return err;
}

static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
	clk_disable_unprepare(clks->status_clk);
	clk_disable_unprepare(clks->ctrl_clk);
	clk_disable_unprepare(clks->dout_words_clk);
	clk_disable_unprepare(clks->dout_clk);
	clk_disable_unprepare(clks->din_words_clk);
	clk_disable_unprepare(clks->din_clk);
	clk_disable_unprepare(clks->core_clk);
	clk_disable_unprepare(clks->axi_clk);
}

static int xsdfec_probe(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;
	struct device *dev;
	struct resource *res;
	int err;
	bool irq_enabled = true;

	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
	if (!xsdfec)
		return -ENOMEM;

	xsdfec->dev = &pdev->dev;
	spin_lock_init(&xsdfec->error_data_lock);

	err = xsdfec_clk_init(pdev, &xsdfec->clks);
	if (err)
		return err;

	dev = xsdfec->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xsdfec->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(xsdfec->regs)) {
		err = PTR_ERR(xsdfec->regs);
		goto err_xsdfec_dev;
	}

	xsdfec->irq = platform_get_irq(pdev, 0);
	if (xsdfec->irq < 0) {
		dev_dbg(dev, "platform_get_irq failed");
		irq_enabled = false;
	}

	err = xsdfec_parse_of(xsdfec);
	if (err < 0)
		goto err_xsdfec_dev;

	update_config_from_hw(xsdfec);

	/* Save driver private data */
	platform_set_drvdata(pdev, xsdfec);

	if (irq_enabled) {
		init_waitqueue_head(&xsdfec->waitq);
		/* Register IRQ thread */
		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
						xsdfec_irq_thread, IRQF_ONESHOT,
						"xilinx-sdfec16", xsdfec);
		if (err < 0) {
			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
			goto err_xsdfec_dev;
		}
	}

	err = ida_alloc(&dev_nrs, GFP_KERNEL);
	if (err < 0)
		goto err_xsdfec_dev;
	xsdfec->dev_id = err;

	snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
	xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
	xsdfec->miscdev.name = xsdfec->dev_name;
	xsdfec->miscdev.fops = &xsdfec_fops;
	xsdfec->miscdev.parent = dev;
	err = misc_register(&xsdfec->miscdev);
	if (err) {
		dev_err(dev, "error:%d. Unable to register device", err);
		goto err_xsdfec_ida;
	}
	return 0;

err_xsdfec_ida:
	ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
	xsdfec_disable_all_clks(&xsdfec->clks);
	return err;
}

static int xsdfec_remove(struct platform_device *pdev)
{
	struct xsdfec_dev *xsdfec;

	xsdfec = platform_get_drvdata(pdev);
	misc_deregister(&xsdfec->miscdev);
	ida_free(&dev_nrs, xsdfec->dev_id);
	xsdfec_disable_all_clks(&xsdfec->clks);
	return 0;
}

static const struct of_device_id xsdfec_of_match[] = {
	{
		.compatible = "xlnx,sd-fec-1.1",
	},
	{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);

static struct platform_driver xsdfec_driver = {
	.driver = {
		.name = "xilinx-sdfec",
		.of_match_table = xsdfec_of_match,
	},
	.probe = xsdfec_probe,
	.remove = xsdfec_remove,
};

module_platform_driver(xsdfec_driver);

MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");