/*
 * xrp_hw_simple: Simple xtensa/arm low-level XRP driver
 *
 * Copyright (c) 2017 Cadence Design Systems, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Alternatively you can use and distribute this file under the terms of
 * the GNU General Public License version 2 or later.
 */
#include <linux/delay.h>
// #include <linux/dma-noncoherent.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/cacheflush.h>
#include "xrp_kernel_defs.h"
#include "xrp_hw.h"
#include "xrp_hw_simple_dsp_interface.h"
#define DRIVER_NAME		"xrp-hw-simple"

#define XRP_REG_RESET		(0x28)
#define RESET_BIT_MASK		(0x1 << 8)

#define CDNS_DSP_RRG_OFFSET	(0x4000)
#define XRP_REG_RUNSTALL	(CDNS_DSP_RRG_OFFSET + 0x20)
#define START_VECTOR_SEL	(CDNS_DSP_RRG_OFFSET + 0x1C)
#define ALT_RESET_VEC		(CDNS_DSP_RRG_OFFSET + 0x18)

// #define DSP_INT_MASK		(0x1 << 1)
// #define INT_DSP_MASK		(0x1 << 1)

#define VI_SYS_OFFSET_MASK	(0x00000FFF)

#ifdef WITH_VISYS_KO
extern int k_bm_visys_write_reg(uint32_t offset, uint32_t value);
extern int k_bm_visys_read_reg(uint32_t offset, uint32_t *value);
#endif
enum xrp_irq_mode {
	XRP_IRQ_NONE,
	XRP_IRQ_LEVEL,
	XRP_IRQ_EDGE,
	XRP_IRQ_EDGE_SW,
	XRP_IRQ_MAX,
};
struct xrp_hw_simple {
	struct xvp *xrp;
	phys_addr_t dev_regs_phys;
	void __iomem *dev_regs;
	/* IRQ register base physical address on the device side */
	phys_addr_t irq_regs_dev_phys;
	/* Device IRQ register physical base address on the host side */
	phys_addr_t device_irq_regs_phys;
	/* Device IRQ register virtual base address on the host side */
	void __iomem *device_irq_regs;
	/* Host IRQ register physical base address on the host side */
	phys_addr_t host_irq_regs_phys;
	/* Host IRQ register virtual base address on the host side */
	void __iomem *host_irq_regs;
	/* how IRQ is used to notify the device of incoming data */
	enum xrp_irq_mode device_irq_mode;
	/*
	 * offset of device IRQ register in MMIO region (device side)
	 * bit number
	 * device IRQ#
	 */
	u32 device_irq[3];
	/* offset of device IRQ register in MMIO region (host side) */
	u32 device_irq_host_offset;
	/* how IRQ is used to notify the host of incoming data */
	enum xrp_irq_mode host_irq_mode;
	/*
	 * offset of IRQ register (host side)
	 * bit number
	 */
	u32 host_irq[2];
	/* offset of IRQ register used by the device side to trigger the host */
	u32 host_irq_offset;
	u32 device_id;
	struct xrp_hw_panic __iomem *panic;
	phys_addr_t panic_phys;
	u32 last_read;
	struct proc_dir_entry *log_proc_file;
	struct clk *cclk;
	// struct clk *aclk;
	struct clk *pclk;
};
// static inline void irq_reg_write32(struct xrp_hw_simple *hw, unsigned addr, u32 v)
// {
// 	if (hw->irq_regs)
// 		// pr_debug("%s,irq Addr %llx\n", __func__, (unsigned long long)(hw->regs + addr));
// 		__raw_writel(v, hw->irq_regs + addr);
// }
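
/*
 * IRQ register accessors.
 *
 * When the IRQ register region could not be ioremapped because it was
 * already claimed (devm_ioremap_resource() returned IOMEM_ERR_PTR(-EBUSY),
 * e.g. when the registers live inside the VI-SYS block owned by another
 * module), the accessors below fall back to the external
 * k_bm_visys_read_reg()/k_bm_visys_write_reg() helpers, addressing the
 * register by its offset within the VI-SYS block (VI_SYS_OFFSET_MASK of
 * the physical base plus the register offset).
 */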
static inline void host_irq_reg_write32(struct xrp_hw_simple *hw, unsigned int addr, u32 v)
{
	if (IOMEM_ERR_PTR(-EBUSY) == hw->host_irq_regs) {
#ifdef WITH_VISYS_KO
		uint32_t offset = (uint32_t)(hw->host_irq_regs_phys & VI_SYS_OFFSET_MASK) + addr;

		// pr_debug("%s,vi sys write (%x,%d)\n", __func__, offset, v);
		k_bm_visys_write_reg(offset, v);
#else
		pr_debug("%s,vi sys error, need to enable VISYS KO\n", __func__);
#endif
		return;
	}
	if (hw->host_irq_regs) {
		pr_debug("%s,irq Addr %llx\n", __func__,
			 (unsigned long long)(hw->host_irq_regs + addr));
		__raw_writel(v, hw->host_irq_regs + addr);
	}
}
static inline void device_irq_reg_write32(struct xrp_hw_simple *hw, unsigned int addr, u32 v)
{
	if (IOMEM_ERR_PTR(-EBUSY) == hw->device_irq_regs) {
#ifdef WITH_VISYS_KO
		uint32_t offset = (uint32_t)(hw->device_irq_regs_phys & VI_SYS_OFFSET_MASK) + addr;

		pr_debug("%s,vi sys write (%pap,%x,0x%x,%d)\n",
			 __func__, &hw->device_irq_regs_phys, addr, offset, v);
		k_bm_visys_write_reg(offset, v);
#else
		pr_debug("%s,vi sys error, need to enable VISYS KO\n", __func__);
#endif
		return;
	}
	if (hw->device_irq_regs) {
		pr_debug("%s,irq Addr %llx\n", __func__,
			 (unsigned long long)(hw->device_irq_regs + addr));
		__raw_writel(v, hw->device_irq_regs + addr);
	}
}
// static inline u32 irq_reg_read32(struct xrp_hw_simple *hw, unsigned addr)
// {
// 	if (hw->irq_regs)
// 		return __raw_readl(hw->irq_regs + addr);
// 	else
// 		return 0;
// }
static inline u32 host_irq_reg_read32(struct xrp_hw_simple *hw, unsigned addr)
{
	if (IOMEM_ERR_PTR(-EBUSY) == hw->host_irq_regs) {
		uint32_t offset = (uint32_t)(hw->host_irq_regs_phys & VI_SYS_OFFSET_MASK) + (uint32_t)addr;
		uint32_t v = 0;
#ifdef WITH_VISYS_KO
		k_bm_visys_read_reg(offset, &v);
		// pr_debug("%s,vi sys read (%x,%d)\n", __func__, offset, v);
#else
		pr_err("%s,vi sys error, need to enable VISYS KO\n", __func__);
#endif
		return v;
	}
	if (hw->host_irq_regs)
		return __raw_readl(hw->host_irq_regs + addr);
	else
		return 0;
}
static inline u32 device_irq_reg_read32(struct xrp_hw_simple *hw, unsigned addr)
{
	if (IOMEM_ERR_PTR(-EBUSY) == hw->device_irq_regs) {
		uint32_t offset = (uint32_t)(hw->device_irq_regs_phys & VI_SYS_OFFSET_MASK) + (uint32_t)addr;
		uint32_t v = 0;
#ifdef WITH_VISYS_KO
		k_bm_visys_read_reg(offset, &v);
		// pr_debug("%s,vi sys read (%x,%d)\n", __func__, offset, v);
#else
		pr_err("%s,vi sys error, need to enable VISYS KO\n", __func__);
#endif
		return v;
	}
	if (hw->device_irq_regs)
		return __raw_readl(hw->device_irq_regs + addr);
	else
		return 0;
}
static inline void dev_reg_write32(struct xrp_hw_simple *hw, unsigned addr, u32 v)
{
	if (hw->dev_regs)
		// pr_debug("%s,write to dev Addr %p,value:%x\n", __func__, (hw->dev_regs + addr), v);
		__raw_writel(v, hw->dev_regs + addr);
}

static inline u32 dev_reg_read32(struct xrp_hw_simple *hw, unsigned addr)
{
	if (hw->dev_regs)
		return __raw_readl(hw->dev_regs + addr);
	else
		return 0;
}
static void dump_regs(const char *fn, void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;

	if (!hw->panic)
		return;

	pr_debug("%s: panic = 0x%08x, ccount = 0x%08x\n",
		 fn,
		 __raw_readl(&hw->panic->panic),
		 __raw_readl(&hw->panic->ccount));
	pr_debug("%s: read = 0x%08x, write = 0x%08x, size = 0x%08x\n",
		 fn,
		 __raw_readl(&hw->panic->rb.read),
		 __raw_readl(&hw->panic->rb.write),
		 __raw_readl(&hw->panic->rb.size));
}
static void dump_log_page(struct xrp_hw_simple *hw)
{
	char *buf;
	char *p;
	size_t i;

	if (!hw->panic)
		return;

	dump_regs(__func__, hw);
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		memcpy_fromio(buf, hw->panic, hw->panic->rb.size);
		/* the log text follows the xrp_hw_panic header */
		p = buf + sizeof(struct xrp_hw_panic);
		for (i = 0; i < hw->panic->rb.size; i += 64)
			pr_debug(" %*pEhp\n", 64, p + i);
		kfree(buf);
	} else {
		pr_debug("(couldn't allocate copy buffer)\n");
	}
}
static int log_proc_show(struct seq_file *file, void *v)
{
	struct xrp_hw_simple *hw = file->private;
	uint32_t write;
	char *buf;
	size_t i;

	if (!hw->panic)
		return 0;

	dump_regs(__func__, hw);
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		pr_debug("Failed to allocate buf\n");
		return -ENOMEM;
	}

	memcpy_fromio(buf, hw->panic->rb.data, hw->panic->rb.size);
	seq_printf(file, "****************** device log >>>>>>>>>>>>>>>>>\n");
	for (i = 0; i < hw->panic->rb.size; i += 64)
		seq_printf(file, " %*pEp", 64, buf + i);
		// pr_debug(" %*pEhp\n", 64, buf + i);
	// seq_printf(file, " %*pEp\n", buf);
	kfree(buf);

	/* mark everything written so far as consumed */
	write = __raw_readl(&hw->panic->rb.write);
	__raw_writel(write, &hw->panic->rb.read);
	return 0;
}
static int log_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, log_proc_show, NULL);
}

static const struct file_operations log_proc_fops = {
	.open = log_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
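
/*
 * Expose the device log through procfs: a "dspN_proc" directory holding a
 * single "dsp_log" entry whose show handler is log_proc_show() above.
 * log_proc_open()/log_proc_fops are kept only as an alternative,
 * currently unused registration path.
 */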
int xrp_hw_create_log_proc(struct xrp_hw_simple *hw)
{
	struct proc_dir_entry *entry;
	char file_name[32];

	snprintf(file_name, sizeof(file_name), "dsp%d_proc", hw->device_id);
	// hw->log_proc_file = create_proc_entry(file_name, 0644, NULL);
	hw->log_proc_file = proc_mkdir(file_name, NULL);
	if (hw->log_proc_file == NULL) {
		pr_debug("Error: Could not create dir %s\n", file_name);
		return -ENODEV;
	}
	// entry = proc_create_data(file_name, 0644, S_IFREG | S_IRUGO, &log_proc_fops, hw);
	entry = proc_create_single_data("dsp_log", 0644, hw->log_proc_file,
					log_proc_show, hw);
	if (entry == NULL) {
		pr_debug("Error: Could not initialize %s\n", "dsp_log");
		return -ENOMEM;
	}
	pr_debug("%s created successfully!\n", "dsp_log");
	return 0;
}
void xrp_hw_remove_log_proc(void *hw_arg)
{
	char file_name[32];
	struct xrp_hw_simple *hw = hw_arg;

	snprintf(file_name, sizeof(file_name), "dsp%d_proc", hw->device_id);
	/* removes the directory together with its "dsp_log" entry */
	remove_proc_subtree(file_name, NULL);
	// proc_remove(hw->log_proc_file);
	pr_debug("%s,proc removed\n", file_name);
}
// int xrp_hw_log_read(char *buffer, char **buffer_location, off_t offset,
// 		    int buffer_length, int *eof, void *data)
// {
// 	int len = 0;
// 	struct xrp_hw_simple *hw = data;
// 	if (offset > 0) {
// 		printk(KERN_INFO "offset %d: /proc/test1: profile_read,\
// 		       wrote %d Bytes\n", (int)(offset), len);
// 		*eof = 1;
// 		return len;
// 	}
// 	// fill the buffer and get its length
// 	len = sprintf(buffer,
// 		      "For the %d %s time,go away!\n", count,
// 		      (count % 100 > 10 && count % 100 < 14) ? "th" :
// 		      (count % 10 == 1) ? "st" :
// 		      (count % 10 == 2) ? "nd" :
// 		      (count % 10 == 3) ? "rd" : "th");
// 	count++;
// 	printk(KERN_INFO "leaving /proc/test1: profile_read, wrote %d Bytes\n", len);
// 	return len;
// }
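
/*
 * Build the synchronization block handed to the DSP firmware during the XRP
 * sync handshake.  Both IRQ register offsets are rebased to the lower of the
 * two MMIO blocks (see get_irq_base_mimo()), so the device can reach the
 * host and device IRQ registers from a single base address.
 */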
static void *get_hw_sync_data(void *hw_arg, size_t *sz)
{
	static const u32 irq_mode[] = {
		[XRP_IRQ_NONE] = XRP_DSP_SYNC_IRQ_MODE_NONE,
		[XRP_IRQ_LEVEL] = XRP_DSP_SYNC_IRQ_MODE_LEVEL,
		[XRP_IRQ_EDGE] = XRP_DSP_SYNC_IRQ_MODE_EDGE,
		[XRP_IRQ_EDGE_SW] = XRP_DSP_SYNC_IRQ_MODE_EDGE,
	};
	struct xrp_hw_simple *hw = hw_arg;
	struct xrp_hw_simple_sync_data *hw_sync_data =
		kmalloc(sizeof(*hw_sync_data), GFP_KERNEL);
	u32 device_host_offset = 0;
	u32 host_device_offset = 0;

	if (!hw_sync_data)
		return NULL;

	if (hw->device_irq_regs_phys > hw->host_irq_regs_phys)
		device_host_offset = hw->device_irq_regs_phys - hw->host_irq_regs_phys;
	else
		host_device_offset = hw->host_irq_regs_phys - hw->device_irq_regs_phys;

	*hw_sync_data = (struct xrp_hw_simple_sync_data){
		.device_mmio_base = hw->irq_regs_dev_phys,
		.host_irq_mode = hw->host_irq_mode,
		.host_irq_offset = hw->host_irq_offset + host_device_offset,
		.host_irq_bit = hw->host_irq[1],
		.device_irq_mode = irq_mode[hw->device_irq_mode],
		.device_irq_offset = hw->device_irq[0] + device_host_offset,
		.device_irq_bit = hw->device_irq[1],
		.device_irq = hw->device_irq[2],
		// .panic_base = hw->panic_phys,
	};
	*sz = sizeof(*hw_sync_data);
	return hw_sync_data;
}
static void reset(void *hw_arg)
{
	// dev_reg_write32(hw_arg, XRP_REG_RESET, dev_reg_read32(hw_arg, XRP_REG_RESET) ^ RESET_BIT_MASK);
	// udelay(10000);
	// dev_reg_write32(hw_arg, XRP_REG_RESET, dev_reg_read32(hw_arg, XRP_REG_RESET) ^ RESET_BIT_MASK);
	struct xrp_hw_simple *hw = hw_arg;

	xrp_set_reset_reg(hw->device_id);
}

static void halt(void *hw_arg)
{
	dev_reg_write32(hw_arg, XRP_REG_RUNSTALL, 1);
	pr_debug("%s: halt value:%x\n", __func__,
		 dev_reg_read32(hw_arg, XRP_REG_RUNSTALL));
	// dump_log_page(hw_arg);
}

static void set_reset_vector(void *hw_arg, u32 addr)
{
	struct xrp_hw_simple *hw = hw_arg;
	// if (hw->device_id == 0) {
	// 	addr = 0x80000000;
	// } else {
	// 	addr = 0x70000000;
	// }

	addr = addr & 0xffffff00;
	dev_reg_write32(hw_arg, ALT_RESET_VEC, addr);
	pr_debug("%s: reset_vector:%x\n", __func__,
		 dev_reg_read32(hw_arg, ALT_RESET_VEC));
}

static void release(void *hw_arg)
{
	dev_reg_write32(hw_arg, XRP_REG_RUNSTALL, 0);
}
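
/*
 * Ring the device-side doorbell after new data has been placed in the shared
 * communication area.  For XRP_IRQ_EDGE the IRQ bit is cleared first and then
 * set; for XRP_IRQ_EDGE_SW the bit is set and then polled until it reads back
 * as zero; for XRP_IRQ_LEVEL it is simply asserted.
 */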
static void send_irq(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;

	pr_debug("%s: Enter\n", __func__);
	switch (hw->device_irq_mode) {
	case XRP_IRQ_EDGE_SW:
		device_irq_reg_write32(hw, hw->device_irq_host_offset,
				       BIT(hw->device_irq[1]));
		while ((device_irq_reg_read32(hw, hw->device_irq_host_offset) &
			BIT(hw->device_irq[1])))
			mb();
		break;
	case XRP_IRQ_EDGE:
		device_irq_reg_write32(hw, hw->device_irq_host_offset, 0);
		/* fallthrough */
	case XRP_IRQ_LEVEL:
		wmb();
		device_irq_reg_write32(hw, hw->device_irq_host_offset,
				       BIT(hw->device_irq[1]));
		break;
	default:
		break;
	}
}
static int enable(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;
	int ret;

	ret = clk_prepare_enable(hw->cclk);
	if (ret < 0) {
		pr_err("could not prepare or enable core clock\n");
		return ret;
	}
	// ret = clk_prepare_enable(hw->aclk);
	// if (ret < 0) {
	// 	pr_err("could not prepare or enable axi clock\n");
	// 	clk_disable_unprepare(hw->cclk);
	// 	return ret;
	// }
	ret = clk_prepare_enable(hw->pclk);
	if (ret < 0) {
		pr_err("could not prepare or enable apb clock\n");
		clk_disable_unprepare(hw->cclk);
		// clk_disable_unprepare(hw->aclk);
		return ret;
	}
	pr_debug("%s: enable dsp\n", __func__);
	return ret;
}

static void disable(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;

	clk_disable_unprepare(hw->pclk);
	// clk_disable_unprepare(hw->aclk);
	clk_disable_unprepare(hw->cclk);
	pr_debug("%s: disable dsp\n", __func__);
}
static inline void ack_irq(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;

	if (hw->host_irq_mode == XRP_IRQ_LEVEL)
		host_irq_reg_write32(hw, hw->host_irq[0], BIT(hw->host_irq[1]));
	// __raw_writel(DSP_INT_MASK, 0xFFE4040190); // DSP0
}

static inline bool is_expect_irq(struct xrp_hw_simple *hw)
{
	return host_irq_reg_read32(hw, hw->host_irq_offset) & BIT(hw->host_irq[1]);
}

static irqreturn_t irq_handler(int irq, void *dev_id)
{
	irqreturn_t ret = IRQ_NONE;
	struct xrp_hw_simple *hw = dev_id;

	if (is_expect_irq(hw)) {
		ret = xrp_irq_handler(irq, hw->xrp);
		if (ret == IRQ_HANDLED)
			ack_irq(hw);
	} else {
		pr_err("%s: unexpected irq,%x\n", __func__,
		       host_irq_reg_read32(hw, hw->host_irq_offset));
	}
	return ret;
}
phys_addr_t get_irq_base_mimo(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;

	pr_debug("%s: dev_regs\n", __func__);
	return hw->device_irq_regs_phys < hw->host_irq_regs_phys ?
		hw->device_irq_regs_phys : hw->host_irq_regs_phys;
}

void update_device_base(void *hw_arg, phys_addr_t addr)
{
	struct xrp_hw_simple *hw = hw_arg;

	hw->irq_regs_dev_phys = addr;
	pr_debug("%s:dev_regs,%pap\n", __func__, &hw->irq_regs_dev_phys);
}
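
/*
 * Helpers for copying to and filling device memory through the __iomem
 * mapping.  memcpy_toio_local()/memset_hw_local() use byte accesses until
 * the destination is 8-byte aligned and 64-bit accesses for the bulk;
 * memcpy_tohw()/memset_hw() use plain 32-bit word accesses.
 */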
void memcpy_tohw(volatile void __iomem *dst, const void *src, size_t sz)
{
	int i;
	const u32 *s_ptr = src;
	volatile u32 __iomem *d_ptr = dst;

	pr_debug("%s,dst:%p,src:%p,size:%zu\n", __func__, dst, src, sz);
	udelay(10000);
	for (i = 0; i < sz / 4; i++)
		__raw_writel(s_ptr[i], d_ptr++);
}
void memcpy_toio_local(volatile void __iomem *to, const void *from, size_t count)
{
	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}

	while (count >= 8) {
		__raw_writeq(*(u64 *)from, to);
		from += 8;
		to += 8;
		count -= 8;
	}

	while (count) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}
}
void memset_hw_local(volatile void __iomem *dst, int c, size_t count)
{
	u64 qc = (u8)c;

	qc |= qc << 8;
	qc |= qc << 16;
	qc |= qc << 32;

	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}

	while (count >= 8) {
		__raw_writeq(qc, dst);
		dst += 8;
		count -= 8;
	}

	while (count) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}
}

/* fills device memory with the 32-bit word 'c' (only used with c == 0 here) */
void memset_hw(void __iomem *dst, int c, size_t sz)
{
	int i;
	volatile u32 __iomem *d_ptr = dst;

	for (i = 0; i < sz / 4; i++)
		__raw_writel(c, d_ptr++);
}
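
/*
 * Cache maintenance around buffers shared with the DSP.  Before the device
 * reads a buffer (XRP_FLAG_READ) the CPU caches are cleaned/flushed so the
 * data is visible in memory; before the CPU reads data the device wrote
 * (XRP_FLAG_WRITE) the caches are invalidated.  Read-write buffers get both
 * operations.
 */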
#if defined(__XTENSA__)
static bool cacheable(void *hw_arg, unsigned long pfn, unsigned long n_pages)
{
	return true;
}

static void dma_sync_for_device(void *hw_arg,
				void *vaddr, phys_addr_t paddr,
				unsigned long sz, unsigned flags)
{
	switch (flags) {
	case XRP_FLAG_READ:
		__flush_dcache_range((unsigned long)vaddr, sz);
		break;
	case XRP_FLAG_READ_WRITE:
		__flush_dcache_range((unsigned long)vaddr, sz);
		__invalidate_dcache_range((unsigned long)vaddr, sz);
		break;
	case XRP_FLAG_WRITE:
		__invalidate_dcache_range((unsigned long)vaddr, sz);
		break;
	}
}

static void dma_sync_for_cpu(void *hw_arg,
			     void *vaddr, phys_addr_t paddr,
			     unsigned long sz, unsigned flags)
{
	switch (flags) {
	case XRP_FLAG_READ_WRITE:
	case XRP_FLAG_WRITE:
		__invalidate_dcache_range((unsigned long)vaddr, sz);
		break;
	}
}
#elif defined(__arm__)
static bool cacheable(void *hw_arg, unsigned long pfn, unsigned long n_pages)
{
	return true;
}

static void dma_sync_for_device(void *hw_arg,
				void *vaddr, phys_addr_t paddr,
				unsigned long sz, unsigned flags)
{
	switch (flags) {
	case XRP_FLAG_READ:
		__cpuc_flush_dcache_area(vaddr, sz);
		outer_clean_range(paddr, paddr + sz);
		break;
	case XRP_FLAG_WRITE:
		__cpuc_flush_dcache_area(vaddr, sz);
		outer_inv_range(paddr, paddr + sz);
		break;
	case XRP_FLAG_READ_WRITE:
		__cpuc_flush_dcache_area(vaddr, sz);
		outer_flush_range(paddr, paddr + sz);
		break;
	}
}

static void dma_sync_for_cpu(void *hw_arg,
			     void *vaddr, phys_addr_t paddr,
			     unsigned long sz, unsigned flags)
{
	switch (flags) {
	case XRP_FLAG_WRITE:
	case XRP_FLAG_READ_WRITE:
		__cpuc_flush_dcache_area(vaddr, sz);
		outer_inv_range(paddr, paddr + sz);
		break;
	}
}
#else
static bool cacheable(void *hw_arg, unsigned long pfn, unsigned long n_pages)
{
	return true;
}

// static void dma_sync_for_device(void *hw_arg,
// 				void *vaddr, phys_addr_t paddr,
// 				unsigned long sz, unsigned flags)
// {
// 	struct xrp_hw_simple *hw = hw_arg;
// 	switch (flags) {
// 	case XRP_FLAG_READ:
// 	case XRP_FLAG_WRITE:
// 	case XRP_FLAG_READ_WRITE:
// 		arch_sync_dma_for_cpu(hw->xrp->dev, paddr, sz, DMA_TO_DEVICE);
// 		break;
// 	}
// }

// static void dma_sync_for_cpu(void *hw_arg,
// 			     void *vaddr, phys_addr_t paddr,
// 			     unsigned long sz, unsigned flags)
// {
// 	struct xrp_hw_simple *hw = hw_arg;
// 	switch (flags) {
// 	case XRP_FLAG_WRITE:
// 	case XRP_FLAG_READ_WRITE:
// 		arch_sync_dma_for_cpu(hw->xrp->dev, paddr, sz, DMA_FROM_DEVICE);
// 		break;
// 	}
// }
#endif
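
/*
 * Poll the shared panic/log page.  The DSP firmware keeps a ring buffer
 * (rb.read/rb.write/rb.size) of log text there and writes the magic value
 * 0xdeadbabe into the panic word when it crashes.  panic_check() drains any
 * new log text, detects device restarts (read index jumping back to zero)
 * and returns true when the panic magic is present.
 */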
static bool panic_check(void *hw_arg)
{
	struct xrp_hw_simple *hw = hw_arg;
	uint32_t panic;
	uint32_t ccount;
	uint32_t read;
	uint32_t write;
	uint32_t size;

	if (!hw->panic)
		return false;

	panic = __raw_readl(&hw->panic->panic);
	ccount = __raw_readl(&hw->panic->ccount);
	read = __raw_readl(&hw->panic->rb.read);
	write = __raw_readl(&hw->panic->rb.write);
	size = __raw_readl(&hw->panic->rb.size);

	if (read == 0 && read != hw->last_read) {
		pr_debug("****************** device restarted >>>>>>>>>>>>>>>>>\n");
		dump_log_page(hw);
		pr_debug("<<<<<<<<<<<<<<<<<< device restarted *****************\n");
	}
	if (write < size && read < size && size < PAGE_SIZE) {
		uint32_t tail;
		uint32_t total;
		char *buf = NULL;

		hw->last_read = read;
		if (read < write) {
			tail = write - read;
			total = tail;
		} else if (read == write) {
			tail = 0;
			total = 0;
		} else {
			tail = size - read;
			total = write + tail;
		}
		if (total)
			buf = kmalloc(total, GFP_KERNEL);
		if (buf) {
			uint32_t off = 0;

			pr_debug("panic = 0x%08x, ccount = 0x%08x read = %d, write = %d, size = %d, total = %d",
				 panic, ccount, read, write, size, total);
			while (off != total) {
				memcpy_fromio(buf + off,
					      hw->panic->rb.data + read,
					      tail);
				read = 0;
				off += tail;
				tail = total - tail;
			}
			__raw_writel(write, &hw->panic->rb.read);
			hw->last_read = write;
			pr_debug("<<<\n%.*s\n>>>\n",
				 total, buf);
			kfree(buf);
		} else if (total) {
			pr_debug("%s: couldn't allocate memory (%d) to read the dump\n",
				 __func__, total);
		}
	} else {
		if (read != hw->last_read) {
			pr_debug("nonsense in the log buffer: read = %d, write = %d, size = %d\n",
				 read, write, size);
			hw->last_read = read;
		}
	}
	if (panic == 0xdeadbabe) {
		pr_debug("%s: panic detected, log dump:\n", __func__);
		dump_log_page(hw);
	}

	return panic == 0xdeadbabe;
}
static bool xrp_panic_init(struct xrp_hw_panic *panic, size_t size)
{
	if (size < sizeof(struct xrp_hw_panic))
		return false;

	memset_hw(panic, 0x0, size);
	panic->panic = 0;
	panic->ccount = 0;
	panic->rb.read = 0;
	panic->rb.write = 0;
	panic->rb.size = size - sizeof(struct xrp_hw_panic);
	sprintf(panic->rb.data, "Init dsp log\n");
	return true;
}
static const struct xrp_hw_ops hw_ops = {
	.halt = halt,
	.release = release,
	.reset = reset,
	.enable = enable,
	.disable = disable,
	.get_hw_sync_data = get_hw_sync_data,
	.send_irq = send_irq,
	.get_base_mimo = get_irq_base_mimo,
	.update_device_base = update_device_base,
	.set_reset_vector = set_reset_vector,
	.memcpy_tohw = memcpy_toio_local,
	.memset_hw = memset_hw_local,
	.clear_hw = xrp_hw_remove_log_proc,
#if defined(__XTENSA__) || defined(__arm__)
	.cacheable = cacheable,
	.dma_sync_for_device = dma_sync_for_device,
	.dma_sync_for_cpu = dma_sync_for_cpu,
#endif
};
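
/*
 * Parse the IRQ-related platform resources and device-tree properties:
 * MEM resource mem_idx is the host IRQ register block and mem_idx + 1 the
 * device IRQ register block; "host-irq" = <reg-offset bit>,
 * "host-irq-mode", "host-irq-offset", "device-irq" = <reg-offset bit irq#>,
 * "device-irq-host-offset" and "device-irq-mode" select how the two sides
 * signal each other.  When any of this is missing, the corresponding side
 * falls back to polling.
 */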
static long init_hw_irq(struct platform_device *pdev, struct xrp_hw_simple *hw,
			int mem_idx, enum xrp_init_flags *init_flags)
{
	struct resource *mem;
	int irq;
	long ret;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx++);
	if (!mem) {
		ret = -ENODEV;
		goto err;
	}
	hw->host_irq_regs_phys = mem->start;
	// hw->irq_regs_dev_phys = hw->irq_regs_phys;
	hw->host_irq_regs = devm_ioremap_resource(&pdev->dev, mem);
	pr_debug("%s:host irq regs = %pap/%p\n",
		 __func__, &mem->start, hw->host_irq_regs);

	ret = of_property_read_u32_array(pdev->dev.of_node, "host-irq",
					 hw->host_irq,
					 ARRAY_SIZE(hw->host_irq));
	if (ret == 0) {
		u32 host_irq_mode;
		u32 host_irq_offset;

		ret = of_property_read_u32(pdev->dev.of_node,
					   "host-irq-mode",
					   &host_irq_mode);
		if (ret == 0 && host_irq_mode < XRP_IRQ_MAX)
			hw->host_irq_mode = host_irq_mode;
		else
			ret = -ENOENT;

		ret = of_property_read_u32(pdev->dev.of_node,
					   "host-irq-offset",
					   &host_irq_offset);
		if (ret == 0)
			hw->host_irq_offset = host_irq_offset;
		dev_dbg(&pdev->dev,
			"%s: Host IRQ MMIO: device offset = 0x%08x, host offset = 0x%08x, bit = %d, IRQ mode = %d",
			__func__, hw->host_irq_offset,
			hw->host_irq[0], hw->host_irq[1],
			hw->host_irq_mode);
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx);
	if (!mem) {
		ret = -ENODEV;
		goto err;
	}
	hw->device_irq_regs_phys = mem->start;
	// hw->irq_regs_dev_phys = hw->irq_regs_phys;
	hw->device_irq_regs = devm_ioremap_resource(&pdev->dev, mem);
	pr_debug("%s:Device irq regs = %pap/%p\n",
		 __func__, &mem->start, hw->device_irq_regs);

	ret = of_property_read_u32_array(pdev->dev.of_node,
					 "device-irq",
					 hw->device_irq,
					 ARRAY_SIZE(hw->device_irq));
	if (ret == 0) {
		u32 device_irq_host_offset;

		ret = of_property_read_u32(pdev->dev.of_node,
					   "device-irq-host-offset",
					   &device_irq_host_offset);
		if (ret == 0) {
			hw->device_irq_host_offset = device_irq_host_offset;
		} else {
			hw->device_irq_host_offset = hw->device_irq[0];
			ret = 0;
		}
	}
	if (ret == 0) {
		u32 device_irq_mode;

		ret = of_property_read_u32(pdev->dev.of_node,
					   "device-irq-mode",
					   &device_irq_mode);
		if (ret == 0 && device_irq_mode < XRP_IRQ_MAX)
			hw->device_irq_mode = device_irq_mode;
		else
			ret = -ENOENT;
	}
	if (ret == 0) {
		dev_dbg(&pdev->dev,
			"%s: device IRQ MMIO host offset = 0x%08x, offset = 0x%08x, bit = %d, device IRQ = %d, IRQ mode = %d",
			__func__, hw->device_irq_host_offset,
			hw->device_irq[0], hw->device_irq[1],
			hw->device_irq[2], hw->device_irq_mode);
	} else {
		dev_info(&pdev->dev,
			 "using polling mode on the device side\n");
	}

	if (ret == 0 && hw->host_irq_mode != XRP_IRQ_NONE)
		irq = platform_get_irq(pdev, 0);
	else
		irq = -1;

	if (irq >= 0) {
		dev_dbg(&pdev->dev, "%s: host IRQ = %d, ",
			__func__, irq);
		ret = devm_request_irq(&pdev->dev, irq, irq_handler,
				       IRQF_SHARED, pdev->name, hw);
		if (ret < 0) {
			dev_err(&pdev->dev, "request_irq %d failed\n", irq);
			goto err;
		}
		*init_flags |= XRP_INIT_USE_HOST_IRQ;
	} else {
		dev_info(&pdev->dev, "using polling mode on the host side\n");
	}
	ret = 0;
err:
	return ret;
}
static long init_hw_device(struct platform_device *pdev, struct xrp_hw_simple *hw, int mem_idx)
{
	struct resource *mem;
	u32 device_id;
	long ret;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx++);
	if (!mem) {
		ret = -ENODEV;
		goto err;
	}
	hw->dev_regs_phys = mem->start;
	hw->dev_regs = devm_ioremap_resource(&pdev->dev, mem);
	pr_debug("%s: regs = %pap/%p\n",
		 __func__, &mem->start, hw->dev_regs);

	hw->cclk = devm_clk_get(&pdev->dev, "cclk");
	if (IS_ERR(hw->cclk)) {
		dev_err(&pdev->dev, "failed to get core clock\n");
		ret = -ENOENT;
		goto err;
	}
	dev_dbg(&pdev->dev, "got core clock\n");
	// hw->aclk = devm_clk_get(&pdev->dev, "aclk");
	// if (IS_ERR(hw->aclk)) {
	// 	dev_err(&pdev->dev, "failed to get axi clock\n");
	// 	ret = -ENOENT;
	// 	goto err;
	// }
	hw->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(hw->pclk)) {
		dev_err(&pdev->dev, "failed to get apb clock\n");
		ret = -ENOENT;
		goto err;
	}
	dev_dbg(&pdev->dev, "got apb clock\n");

	ret = of_property_read_u32(pdev->dev.of_node, "dsp", &device_id);
	if (ret == 0) {
		hw->device_id = device_id;
		pr_debug("%s: device_id = %d\n",
			 __func__, hw->device_id);
	} else {
		pr_debug("%s: no device_id\n", __func__);
		ret = -ENODEV;
	}
	// xrp_hw_create_log_proc(hw);
err:
	return ret;
}
static long init(struct platform_device *pdev, struct xrp_hw_simple *hw)
{
	long ret;
	enum xrp_init_flags init_flags = 0;

	ret = init_hw_irq(pdev, hw, 0, &init_flags);
	if (ret < 0)
		return ret;
	ret = init_hw_device(pdev, hw, 2);
	if (ret < 0)
		return ret;
	return xrp_init(pdev, init_flags, &hw_ops, hw, 4);
}

static long init_v1(struct platform_device *pdev, struct xrp_hw_simple *hw)
{
	long ret;
	enum xrp_init_flags init_flags = 0;

	ret = init_hw_irq(pdev, hw, 0, &init_flags);
	if (ret < 0)
		return ret;
	ret = init_hw_device(pdev, hw, 1);
	if (ret < 0)
		return ret;
	return xrp_init_v1(pdev, init_flags, &hw_ops, hw, 2);
}

static long init_cma(struct platform_device *pdev, struct xrp_hw_simple *hw)
{
	long ret;
	enum xrp_init_flags init_flags = 0;

	ret = init_hw_irq(pdev, hw, 0, &init_flags);
	if (ret < 0)
		return ret;
	ret = init_hw_device(pdev, hw, 1);
	if (ret < 0)
		return ret;
	return xrp_init_cma(pdev, init_flags, &hw_ops, hw, 2);
}
#ifdef CONFIG_OF
static const struct of_device_id xrp_hw_simple_match[] = {
	{
		.compatible = "cdns,xrp-hw-simple",
		.data = init,
	}, {
		.compatible = "cdns,xrp-hw-simple,v1",
		.data = init_v1,
	}, {
		.compatible = "cdns,xrp-hw-simple,cma",
		.data = init_cma,
	}, {},
};
// MODULE_DEVICE_TABLE(of, xrp_hw_simple_match);
#endif
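
/*
 * For reference, a hypothetical device-tree fragment showing the properties
 * this file parses (values are illustrative only; the number and layout of
 * "reg" entries depends on the selected init variant and on the XRP core
 * driver, so they are omitted here):
 *
 *	dsp0: xrp-hw@0 {
 *		compatible = "cdns,xrp-hw-simple";
 *		clock-names = "cclk", "pclk";
 *		dsp = <0>;			// device_id
 *		host-irq = <0x0 1>;		// register offset, bit
 *		host-irq-mode = <1>;		// XRP_IRQ_LEVEL
 *		host-irq-offset = <0x10>;	// offset the device uses to trigger it
 *		device-irq = <0x8 0 5>;		// register offset, bit, device IRQ#
 *		device-irq-mode = <1>;
 *		device-irq-host-offset = <0x8>;
 *	};
 */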
static int xrp_hw_simple_probe(struct platform_device *pdev)
{
	struct xrp_hw_simple *hw =
		devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
	const struct of_device_id *match;
	long (*init)(struct platform_device *pdev, struct xrp_hw_simple *hw);
	long ret;

	if (!hw)
		return -ENOMEM;

	match = of_match_device(of_match_ptr(xrp_hw_simple_match),
				&pdev->dev);
	if (!match)
		return -ENODEV;

	init = match->data;
	ret = init(pdev, hw);
	if (IS_ERR_VALUE(ret)) {
		// xrp_deinit(pdev);
		pr_debug("init failed\n");
		return ret;
	} else {
		/* on success ret carries the xvp handle returned by init() */
		hw->xrp = ERR_PTR(ret);
		return 0;
	}
}
static int xrp_hw_simple_remove(struct platform_device *pdev)
{
	// xrp_hw_remove_log_proc();
	return xrp_deinit(pdev);
}

static const struct dev_pm_ops xrp_hw_simple_pm_ops = {
	SET_RUNTIME_PM_OPS(xrp_runtime_suspend,
			   xrp_runtime_resume, NULL)
};

static struct platform_driver xrp_hw_simple_driver = {
	.probe = xrp_hw_simple_probe,
	.remove = xrp_hw_simple_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(xrp_hw_simple_match),
		.pm = &xrp_hw_simple_pm_ops,
	},
};

module_platform_driver(xrp_hw_simple_driver);

MODULE_AUTHOR("Max Filippov");
MODULE_DESCRIPTION("XRP: low level device driver for Xtensa Remote Processing");
MODULE_LICENSE("Dual MIT/GPL");