jpu.c
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
//--=========================================================================--
// This file is linux device driver for JPU.
//-----------------------------------------------------------------------------
//
// This confidential and proprietary software may be used only
// as authorized by a licensing agreement from Chips&Media Inc.
// In the event of publication, the following notice is applicable:
//
// (C) COPYRIGHT 2006 - 2016 CHIPS&MEDIA INC.
// ALL RIGHTS RESERVED
//
// The entire notice above must be reproduced on all authorized
// copies.
// Copyright (C) 2022 StarFive Technology Co., Ltd.
//--=========================================================================-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/reset.h>
#include <linux/version.h>
#include <soc/sifive/sifive_l2_cache.h>
#include "../../../jpuapi/jpuconfig.h"
#include "jpu.h"
//#define ENABLE_DEBUG_MSG
#ifdef ENABLE_DEBUG_MSG
#define DPRINTK(args...) printk(KERN_INFO args);
#else
#define DPRINTK(args...)
#endif
/* definitions to be changed as customer configuration */
/* if you want to have a clock gating scheme frame by frame */
//#define JPU_SUPPORT_CLOCK_CONTROL
#define JPU_SUPPORT_ISR
//#define JPU_IRQ_CONTROL
/* if the clk tree works, try this... */
#define STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
/* if the platform driver knows the name of this driver */
/* JPU_PLATFORM_DEVICE_NAME */
#define JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
/* if this driver knows the dedicated video memory address */
//#define JPU_SUPPORT_RESERVED_VIDEO_MEMORY
static void starfive_flush_dcache(unsigned long start, unsigned long len)
{
	sifive_l2_flush64_range(start, len);
}
#define JPU_PLATFORM_DEVICE_NAME "cnm_jpu"
#define JPU_CLK_NAME "jpege"
#define JPU_DEV_NAME "jpu"
#define JPU_REG_BASE_ADDR 0x11900000
#define JPU_REG_SIZE 0x300
#ifdef JPU_SUPPORT_ISR
#define JPU_IRQ_NUM 24
/* if the driver wants to disable and re-enable the IRQ whenever an interrupt is asserted. */
/*#define JPU_IRQ_CONTROL*/
#endif
#ifndef VM_RESERVED /* VM_RESERVED was removed in kernel 3.7 */
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
struct device *jpu_dev;
typedef struct jpu_drv_context_t {
	struct fasync_struct *async_queue;
	u32 open_count; /*!<< device reference count. Not instance count */
	u32 interrupt_reason[MAX_NUM_INSTANCE];
} jpu_drv_context_t;
/* To track the allocated memory buffer */
typedef struct jpudrv_buffer_pool_t {
	struct list_head list;
	struct jpudrv_buffer_t jb;
	struct file *filp;
} jpudrv_buffer_pool_t;
/* To track the instance index and buffer in instance pool */
typedef struct jpudrv_instance_list_t {
	struct list_head list;
	unsigned long inst_idx;
	struct file *filp;
} jpudrv_instance_list_t;
typedef struct jpudrv_instance_pool_t {
	unsigned char codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
} jpudrv_instance_pool_t;
#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
typedef struct jpu_clkgen_t {
	void __iomem *en_ctrl;
	uint32_t rst_mask;
} jpu_clkgen_t;
#endif
struct clk_bulk_data jpu_clks[] = {
	{ .id = "axi_clk" },
	{ .id = "core_clk" },
	{ .id = "apb_clk" },
	{ .id = "noc_bus" },
};
typedef struct jpu_clk_t {
#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
	void __iomem *clkgen;
	void __iomem *rst_ctrl;
	void __iomem *rst_status;
	uint32_t en_shift;
	uint32_t en_mask;
	jpu_clkgen_t apb_clk;
	jpu_clkgen_t axi_clk;
	jpu_clkgen_t core_clk;
#else
	struct clk_bulk_data *clks;
	struct reset_control *resets;
	int nr_clks;
#endif
	struct device *dev;
} jpu_clk_t;
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
#include "jmm.h"
static jpu_mm_t s_jmem;
static jpudrv_buffer_t s_video_memory = {0};
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
static int jpu_hw_reset(void);
static void jpu_clk_disable(jpu_clk_t *clk);
static int jpu_clk_enable(jpu_clk_t *clk);
static jpu_clk_t *jpu_clk_get(struct platform_device *pdev);
static void jpu_clk_put(jpu_clk_t *clk);
static int jpu_pmu_enable(struct device *dev);
static void jpu_pmu_disable(struct device *dev);
// end customer definition
static jpudrv_buffer_t s_instance_pool = {0};
static jpu_drv_context_t s_jpu_drv_context;
static dev_t s_jpu_devt;
static int s_jpu_major;
static struct cdev s_jpu_cdev;
static struct class *s_jpu_class;
static jpu_clk_t *s_jpu_clk;
static int s_jpu_open_ref_count;
#ifdef JPU_SUPPORT_ISR
static int s_jpu_irq = JPU_IRQ_NUM;
#endif
static jpudrv_buffer_t s_jpu_register = {0};
static int s_interrupt_flag[MAX_NUM_INSTANCE];
static wait_queue_head_t s_interrupt_wait_q[MAX_NUM_INSTANCE];
static spinlock_t s_jpu_lock = __SPIN_LOCK_UNLOCKED(s_jpu_lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
static DECLARE_MUTEX(s_jpu_sem);
#else
static DEFINE_SEMAPHORE(s_jpu_sem);
#endif
static struct list_head s_jbp_head = LIST_HEAD_INIT(s_jbp_head);
static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
#ifdef CONFIG_PM
/* implement to power management functions */
#endif
#define NPT_BASE 0x0000
#define NPT_REG_SIZE 0x300
#define MJPEG_PIC_STATUS_REG(_inst_no) (NPT_BASE + (_inst_no*NPT_REG_SIZE) + 0x004)
#define ReadJpuRegister(addr) *(volatile unsigned int *)(s_jpu_register.virt_addr + addr)
#define WriteJpuRegister(addr, val) *(volatile unsigned int *)(s_jpu_register.virt_addr + addr) = (unsigned int)val
#define WriteJpu(addr, val) *(volatile unsigned int *)(addr) = (unsigned int)val;
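/*
 * Allocate a DMA-able buffer for the JPU. With JPU_SUPPORT_RESERVED_VIDEO_MEMORY
 * the buffer is carved out of the reserved video memory pool via jmem_alloc();
 * otherwise it comes from dma_alloc_coherent() and the D-cache range is flushed
 * so the hardware sees consistent data. Returns 0 on success, -1 on failure.
 */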
static int jpu_alloc_dma_buffer(jpudrv_buffer_t *jb)
{
	if (!jb)
		return -1;
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
	jb->phys_addr = (unsigned long long)jmem_alloc(&s_jmem, jb->size, 0);
	if ((unsigned long)jb->phys_addr == (unsigned long)-1) {
		printk(KERN_ERR "[JPUDRV] Physical memory allocation error size=%d\n", jb->size);
		return -1;
	}
	jb->base = (unsigned long)(s_video_memory.base + (jb->phys_addr - s_video_memory.phys_addr));
#else
	jb->base = (unsigned long)dma_alloc_coherent(jpu_dev, PAGE_ALIGN(jb->size), (dma_addr_t *)(&jb->phys_addr), GFP_DMA | GFP_KERNEL);
	if ((void *)(jb->base) == NULL) {
		printk(KERN_ERR "[JPUDRV] Physical memory allocation error size=%d\n", jb->size);
		return -1;
	}
	starfive_flush_dcache(jb->phys_addr, PAGE_ALIGN(jb->size));
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
	return 0;
}
static void jpu_free_dma_buffer(jpudrv_buffer_t *jb)
{
	if (!jb) {
		return;
	}
	if (jb->base)
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
		jmem_free(&s_jmem, jb->phys_addr, 0);
#else
		dma_free_coherent(jpu_dev, PAGE_ALIGN(jb->size), (void *)jb->base, jb->phys_addr);
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
}
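/*
 * Release every codec instance that was opened through the given file handle.
 * Called on release() to clean up after an application that exited without
 * closing its instances: the inUse flag in the shared instance pool is cleared
 * and the per-instance JDI mutexes are marked destroyed.
 */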
static int jpu_free_instances(struct file *filp)
{
	jpudrv_instance_list_t *vil, *n;
	jpudrv_instance_pool_t *vip;
	void *vip_base;
	int instance_pool_size_per_core;
#if !defined(PTHREAD_MUTEX_ROBUST_NP)
	void *jdi_mutexes_base;
	const int PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc;
#endif
	DPRINTK("[JPUDRV] jpu_free_instances\n");
	instance_pool_size_per_core = (s_instance_pool.size/MAX_NUM_JPU_CORE); /* s_instance_pool.size is set to the size for all cores when the user calls JDI_IOCTL_GET_INSTANCE_POOL. */
	list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
	{
		if (vil->filp == filp) {
			vip_base = (void *)(s_instance_pool.base + instance_pool_size_per_core);
			DPRINTK("[JPUDRV] jpu_free_instances detect instance crash instIdx=%d, vip_base=%p, instance_pool_size_per_core=%d\n", (int)vil->inst_idx, vip_base, (int)instance_pool_size_per_core);
			vip = (jpudrv_instance_pool_t *)vip_base;
			if (vip) {
				memset(&vip->codecInstPool[vil->inst_idx], 0x00, 4); /* only the first 4 bytes (the inUse flag of CodecInst in jpuapi) need to be cleared to free the corresponding instance. */
#if !defined(PTHREAD_MUTEX_ROBUST_NP)
#define PTHREAD_MUTEX_T_HANDLE_SIZE 4
				jdi_mutexes_base = (vip_base + (instance_pool_size_per_core - PTHREAD_MUTEX_T_HANDLE_SIZE*4));
				DPRINTK("[JPUDRV] jpu_free_instances : force to destroy jdi_mutexes_base=%p in userspace\n", jdi_mutexes_base);
				if (jdi_mutexes_base) {
					int i;
					for (i = 0; i < 4; i++) {
						memcpy(jdi_mutexes_base, &PTHREAD_MUTEX_T_DESTROY_VALUE, PTHREAD_MUTEX_T_HANDLE_SIZE);
						jdi_mutexes_base += PTHREAD_MUTEX_T_HANDLE_SIZE;
					}
				}
#endif
			}
			s_jpu_open_ref_count--;
			list_del(&vil->list);
			kfree(vil);
		}
	}
	return 1;
}
static int jpu_free_buffers(struct file *filp)
{
	jpudrv_buffer_pool_t *pool, *n;
	jpudrv_buffer_t jb;
	DPRINTK("[JPUDRV] jpu_free_buffers\n");
	list_for_each_entry_safe(pool, n, &s_jbp_head, list)
	{
		if (pool->filp == filp) {
			jb = pool->jb;
			if (jb.base) {
				jpu_free_dma_buffer(&jb);
				list_del(&pool->list);
				kfree(pool);
			}
		}
	}
	return 0;
}
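/*
 * Interrupt handler: scan the per-instance MJPEG_PIC_STATUS registers to find
 * the instance that raised the interrupt, latch the reason for userspace,
 * optionally mask the IRQ (JPU_IRQ_CONTROL) or clear the status bit here, and
 * wake any waiter blocked in JDI_IOCTL_WAIT_INTERRUPT.
 */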
static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
{
	jpu_drv_context_t *dev = (jpu_drv_context_t *)dev_id;
	int i;
	u32 flag;
	DPRINTK("[JPUDRV][+]%s\n", __func__);
#ifdef JPU_IRQ_CONTROL
	disable_irq_nosync(s_jpu_irq);
#endif
	for (i = 0; i < MAX_NUM_INSTANCE; i++) {
		flag = ReadJpuRegister(MJPEG_PIC_STATUS_REG(i));
		if (flag != 0) {
			break;
		}
	}
	if (i == MAX_NUM_INSTANCE) /* no instance flagged; avoid indexing past the arrays */
		return IRQ_HANDLED;
	dev->interrupt_reason[i] = flag;
	s_interrupt_flag[i] = 1;
	DPRINTK("[JPUDRV][%d] INTERRUPT FLAG: %08x, %08x\n", i, dev->interrupt_reason[i], MJPEG_PIC_STATUS_REG(i));
	if (dev->async_queue)
		kill_fasync(&dev->async_queue, SIGIO, POLL_IN); // notify the interrupt to userspace
#ifndef JPU_IRQ_CONTROL
	WriteJpuRegister(MJPEG_PIC_STATUS_REG(i), flag); // clear interrupt
#endif
	wake_up_interruptible(&s_interrupt_wait_q[i]);
	DPRINTK("[JPUDRV][-]%s\n", __func__);
	return IRQ_HANDLED;
}
static int jpu_open(struct inode *inode, struct file *filp)
{
	DPRINTK("[JPUDRV][+] %s\n", __func__);
	pm_runtime_get_sync(s_jpu_clk->dev);
	spin_lock(&s_jpu_lock);
	s_jpu_drv_context.open_count++;
	filp->private_data = (void *)(&s_jpu_drv_context);
	spin_unlock(&s_jpu_lock);
	DPRINTK("[JPUDRV][-] %s\n", __func__);
	return 0;
}
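/*
 * ioctl dispatcher. All JDI_IOCTL_* commands from the userspace jpuapi/jdi
 * layer land here: DMA buffer allocation/free, instance pool management,
 * open/close instance bookkeeping, interrupt waiting, clock gating, hardware
 * reset, register info and D-cache flushing. The s_jpu_sem semaphore
 * serializes the commands that touch shared driver state.
 */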
static long jpu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	int ret = 0;
	switch (cmd)
	{
	case JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
	{
		jpudrv_buffer_pool_t *jbp;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
		if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
			jbp = kzalloc(sizeof(jpudrv_buffer_pool_t), GFP_KERNEL);
			if (!jbp) {
				up(&s_jpu_sem);
				return -ENOMEM;
			}
			ret = copy_from_user(&(jbp->jb), (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
			if (ret)
			{
				kfree(jbp);
				up(&s_jpu_sem);
				return -EFAULT;
			}
			ret = jpu_alloc_dma_buffer(&(jbp->jb));
			if (ret == -1)
			{
				ret = -ENOMEM;
				kfree(jbp);
				up(&s_jpu_sem);
				break;
			}
			ret = copy_to_user((void __user *)arg, &(jbp->jb), sizeof(jpudrv_buffer_t));
			if (ret)
			{
				kfree(jbp);
				ret = -EFAULT;
				up(&s_jpu_sem);
				break;
			}
			jbp->filp = filp;
			spin_lock(&s_jpu_lock);
			list_add(&jbp->list, &s_jbp_head);
			spin_unlock(&s_jpu_lock);
			up(&s_jpu_sem);
		}
		DPRINTK("[JPUDRV][-]JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
	}
	break;
	case JDI_IOCTL_GET_PHYSICAL_MEMORY:
	{
		jpudrv_buffer_pool_t *jbp = NULL;
		void *user_address = NULL;
		struct task_struct *my_struct = NULL;
		struct mm_struct *mm = NULL;
		unsigned long address = 0;
		pgd_t *pgd = NULL;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_PHYSICAL_MEMORY\n");
		if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
			jbp = kzalloc(sizeof(jpudrv_buffer_pool_t), GFP_KERNEL);
			if (!jbp) {
				up(&s_jpu_sem);
				return -ENOMEM;
			}
			ret = copy_from_user(&(jbp->jb), (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
			if (ret)
			{
				kfree(jbp);
				up(&s_jpu_sem);
				return -EFAULT;
			}
			user_address = (void *)jbp->jb.virt_addr;
			my_struct = get_current();
			mm = my_struct->mm;
			address = (unsigned long)user_address;
			pgd = pgd_offset(mm, address);
			if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
				p4d_t *p4d = p4d_offset(pgd, address);
				pud_t *pud = pud_offset(p4d, address);
				if (!pud_none(*pud) && !pud_bad(*pud)) {
					pmd_t *pmd = pmd_offset(pud, address);
					if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
						pte_t *pte = pte_offset_map(pmd, address);
						if (!pte_none(*pte)) {
							struct page *pg = pte_page(*pte);
							unsigned long phys = page_to_phys(pg);
							unsigned long virt = (unsigned long)phys_to_virt(phys);
							printk("phy address = %lx, virt = %lx\r\n", phys, virt);
							jbp->jb.phys_addr = phys;
							jbp->jb.base = virt;
						}
						pte_unmap(pte);
					}
				}
			}
			ret = copy_to_user((void __user *)arg, &(jbp->jb), sizeof(jpudrv_buffer_t));
			if (ret)
			{
				kfree(jbp);
				ret = -EFAULT;
				up(&s_jpu_sem);
				break;
			}
			kfree(jbp);
			up(&s_jpu_sem);
		}
		DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_PHYSICAL_MEMORY\n");
	}
	break;
	case JDI_IOCTL_FREE_PHYSICALMEMORY:
	{
		jpudrv_buffer_pool_t *jbp, *n;
		jpudrv_buffer_t jb;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_FREE_PHYSICALMEMORY\n");
		if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
			ret = copy_from_user(&jb, (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
			if (ret) {
				up(&s_jpu_sem);
				return -EACCES;
			}
			if (jb.base)
				jpu_free_dma_buffer(&jb);
			spin_lock(&s_jpu_lock);
			list_for_each_entry_safe(jbp, n, &s_jbp_head, list) {
				if (jbp->jb.base == jb.base) {
					list_del(&jbp->list);
					kfree(jbp);
					break;
				}
			}
			spin_unlock(&s_jpu_lock);
			up(&s_jpu_sem);
		}
		DPRINTK("[JPUDRV][-]JDI_IOCTL_FREE_PHYSICALMEMORY\n");
	}
	break;
	case JDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
	{
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
		if (s_video_memory.base != 0) {
			ret = copy_to_user((void __user *)arg, &s_video_memory, sizeof(jpudrv_buffer_t));
			if (ret != 0)
				ret = -EFAULT;
		} else {
			ret = -EFAULT;
		}
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
	}
	break;
	case JDI_IOCTL_WAIT_INTERRUPT:
	{
		jpudrv_intr_info_t info;
		struct jpu_drv_context_t *dev = (struct jpu_drv_context_t *)filp->private_data;
		u32 instance_no;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_WAIT_INTERRUPT\n");
		ret = copy_from_user(&info, (jpudrv_intr_info_t *)arg, sizeof(jpudrv_intr_info_t));
		if (ret != 0)
			return -EFAULT;
		instance_no = info.inst_idx;
		DPRINTK("[JPUDRV] INSTANCE NO: %d\n", instance_no);
		ret = wait_event_interruptible_timeout(s_interrupt_wait_q[instance_no], s_interrupt_flag[instance_no] != 0, msecs_to_jiffies(info.timeout));
		if (!ret) {
			DPRINTK("[JPUDRV] INSTANCE NO: %d ETIME\n", instance_no);
			ret = -ETIME;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			DPRINTK("[JPUDRV] INSTANCE NO: %d ERESTARTSYS\n", instance_no);
			break;
		}
		DPRINTK("[JPUDRV] INST(%d) s_interrupt_flag(%d), reason(0x%08x)\n", instance_no, s_interrupt_flag[instance_no], dev->interrupt_reason[instance_no]);
		info.intr_reason = dev->interrupt_reason[instance_no];
		s_interrupt_flag[instance_no] = 0;
		dev->interrupt_reason[instance_no] = 0;
		ret = copy_to_user((void __user *)arg, &info, sizeof(jpudrv_intr_info_t));
#ifdef JPU_IRQ_CONTROL
		enable_irq(s_jpu_irq);
#endif
		DPRINTK("[JPUDRV][-]JDI_IOCTL_WAIT_INTERRUPT\n");
		if (ret != 0)
			return -EFAULT;
	}
	break;
	case JDI_IOCTL_SET_CLOCK_GATE:
	{
		u32 clkgate;
		if (get_user(clkgate, (u32 __user *)arg))
			return -EFAULT;
#ifdef JPU_SUPPORT_CLOCK_CONTROL
		if (clkgate)
			jpu_clk_enable(s_jpu_clk);
		else
			jpu_clk_disable(s_jpu_clk);
#endif /* JPU_SUPPORT_CLOCK_CONTROL */
	}
	break;
	case JDI_IOCTL_GET_INSTANCE_POOL:
		DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_INSTANCE_POOL\n");
		if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
			if (s_instance_pool.base != 0) {
				ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(jpudrv_buffer_t));
			} else {
				ret = copy_from_user(&s_instance_pool, (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
				if (ret == 0) {
					s_instance_pool.size = PAGE_ALIGN(s_instance_pool.size);
					s_instance_pool.base = (unsigned long)vmalloc(s_instance_pool.size);
					s_instance_pool.phys_addr = s_instance_pool.base;
					if (s_instance_pool.base != 0) {
						memset((void *)s_instance_pool.base, 0x0, s_instance_pool.size); /* clear the pool */
						ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(jpudrv_buffer_t));
						if (ret == 0) {
							/* successfully got memory for the instance pool */
							up(&s_jpu_sem);
							break;
						}
					}
					ret = -EFAULT;
				}
			}
			up(&s_jpu_sem);
		}
		DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_INSTANCE_POOL: %s base: %lx, size: %d\n",
			(ret == 0 ? "OK" : "NG"), s_instance_pool.base, s_instance_pool.size);
		break;
	case JDI_IOCTL_OPEN_INSTANCE:
	{
		jpudrv_inst_info_t inst_info;
		if (copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t)))
			return -EFAULT;
		spin_lock(&s_jpu_lock);
		s_jpu_open_ref_count++; /* counts how many instances are currently open */
		inst_info.inst_open_count = s_jpu_open_ref_count;
		spin_unlock(&s_jpu_lock);
		if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t))) {
			return -EFAULT;
		}
		DPRINTK("[JPUDRV] JDI_IOCTL_OPEN_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
			(int)inst_info.inst_idx, s_jpu_open_ref_count, inst_info.inst_open_count);
	}
	break;
	case JDI_IOCTL_CLOSE_INSTANCE:
	{
		jpudrv_inst_info_t inst_info;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_CLOSE_INSTANCE\n");
		if (copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t)))
			return -EFAULT;
		spin_lock(&s_jpu_lock);
		s_jpu_open_ref_count--; /* counts how many instances are currently open */
		inst_info.inst_open_count = s_jpu_open_ref_count;
		spin_unlock(&s_jpu_lock);
		if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t)))
			return -EFAULT;
		DPRINTK("[JPUDRV] JDI_IOCTL_CLOSE_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
			(int)inst_info.inst_idx, s_jpu_open_ref_count, inst_info.inst_open_count);
	}
	break;
	case JDI_IOCTL_GET_INSTANCE_NUM:
	{
		jpudrv_inst_info_t inst_info;
		DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_INSTANCE_NUM\n");
		ret = copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t));
		if (ret != 0)
			break;
		spin_lock(&s_jpu_lock);
		inst_info.inst_open_count = s_jpu_open_ref_count;
		spin_unlock(&s_jpu_lock);
		ret = copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t));
		DPRINTK("[JPUDRV] JDI_IOCTL_GET_INSTANCE_NUM inst_idx=%d, open_count=%d\n", (int)inst_info.inst_idx, inst_info.inst_open_count);
	}
	break;
	case JDI_IOCTL_RESET:
		jpu_hw_reset();
		break;
	case JDI_IOCTL_GET_REGISTER_INFO:
		DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_REGISTER_INFO\n");
		ret = copy_to_user((void __user *)arg, &s_jpu_register, sizeof(jpudrv_buffer_t));
		if (ret != 0)
			ret = -EFAULT;
		DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_REGISTER_INFO s_jpu_register.phys_addr=0x%lx, s_jpu_register.virt_addr=0x%lx, s_jpu_register.size=%d\n",
			s_jpu_register.phys_addr, s_jpu_register.virt_addr, s_jpu_register.size);
		break;
	case JDI_IOCTL_FLUSH_DCACHE:
	{
		jpudrv_flush_cache_t cache_info;
		//DPRINTK("[JPUDRV][+]JDI_IOCTL_FLUSH_DCACHE\n");
		ret = copy_from_user(&cache_info, (jpudrv_flush_cache_t *)arg, sizeof(jpudrv_flush_cache_t));
		if (ret != 0)
			ret = -EFAULT;
		if (cache_info.flag)
			starfive_flush_dcache(cache_info.start, cache_info.size);
		//DPRINTK("[JPUDRV][-]JDI_IOCTL_FLUSH_DCACHE\n");
		break;
	}
	default:
	{
		printk(KERN_ERR "No such IOCTL, cmd is %d\n", cmd);
	}
	break;
	}
	return ret;
}
static ssize_t jpu_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	return -1;
}
static ssize_t jpu_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	/* DPRINTK("[JPUDRV] jpu_write len=%d\n", (int)len); */
	if (!buf) {
		printk(KERN_ERR "[JPUDRV] jpu_write buf = NULL error\n");
		return -EFAULT;
	}
	return -1;
}
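/*
 * release() for the character device: free any DMA buffers and instances the
 * closing process left behind, drop the device reference count, free the
 * shared instance pool when the last user goes away, and release the runtime
 * PM reference taken in jpu_open().
 */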
static int jpu_release(struct inode *inode, struct file *filp)
{
	int ret = 0;
	u32 open_count;
	DPRINTK("[JPUDRV][+] jpu_release\n");
	if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
		/* find and free the buffers not released by the user application */
		jpu_free_buffers(filp);
		/* find and free the instances not closed by the user application */
		jpu_free_instances(filp);
		DPRINTK("[JPUDRV] open_count: %d\n", s_jpu_drv_context.open_count);
		spin_lock(&s_jpu_lock);
		s_jpu_drv_context.open_count--;
		open_count = s_jpu_drv_context.open_count;
		spin_unlock(&s_jpu_lock);
		if (open_count == 0) {
			if (s_instance_pool.base) {
				DPRINTK("[JPUDRV] free instance pool\n");
				vfree((const void *)s_instance_pool.base);
				s_instance_pool.base = 0;
			}
		}
		up(&s_jpu_sem); /* only release the semaphore when it was actually acquired */
	}
	DPRINTK("[JPUDRV][-] jpu_release\n");
	pm_runtime_put_sync(s_jpu_clk->dev);
	return 0;
}
static int jpu_fasync(int fd, struct file *filp, int mode)
{
	struct jpu_drv_context_t *dev = (struct jpu_drv_context_t *)filp->private_data;
	return fasync_helper(fd, filp, mode, &dev->async_queue);
}
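/*
 * mmap helpers. Register space and DMA buffers are remapped as non-cached I/O
 * memory with remap_pfn_range(); the vmalloc'ed instance pool has no
 * contiguous physical backing, so it is mapped page by page with
 * vmalloc_to_pfn().
 */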
static int jpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
{
	unsigned long pfn;
	vm->vm_flags |= VM_IO | VM_RESERVED;
	vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
	pfn = s_jpu_register.phys_addr >> PAGE_SHIFT;
	return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}
static int jpu_map_to_physical_memory(struct file *fp, struct vm_area_struct *vm)
{
	vm->vm_flags |= VM_IO | VM_RESERVED;
	vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
	return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end - vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}
static int jpu_map_to_instance_pool_memory(struct file *fp, struct vm_area_struct *vm)
{
	int ret;
	long length = vm->vm_end - vm->vm_start;
	unsigned long start = vm->vm_start;
	char *vmalloc_area_ptr = (char *)s_instance_pool.base;
	unsigned long pfn;
	vm->vm_flags |= VM_RESERVED;
	/* loop over all pages, mapping each one individually */
	while (length > 0) {
		pfn = vmalloc_to_pfn(vmalloc_area_ptr);
		if ((ret = remap_pfn_range(vm, start, pfn, PAGE_SIZE, PAGE_SHARED)) < 0) {
			return ret;
		}
		start += PAGE_SIZE;
		vmalloc_area_ptr += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
	return 0;
}
/*!
 * @brief memory map interface for jpu file operation
 * @return 0 on success or negative error code on error
 */
static int jpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
	if (vm->vm_pgoff == 0)
		return jpu_map_to_instance_pool_memory(fp, vm);
	if (vm->vm_pgoff == (s_jpu_register.phys_addr >> PAGE_SHIFT))
		return jpu_map_to_register(fp, vm);
	return jpu_map_to_physical_memory(fp, vm);
}
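/*
 * Note on the mmap offset convention (a sketch of how userspace presumably
 * drives this, inferred from the dispatch above rather than from the jdi
 * sources): an offset of 0 maps the shared instance pool, an offset equal to
 * the register physical address maps the register window, and any other
 * offset is taken as the physical address of a previously allocated DMA
 * buffer, e.g.:
 *
 *   regs = mmap(NULL, JPU_REG_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, JPU_REG_BASE_ADDR);        // register window
 *   buf  = mmap(NULL, jb.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *               fd, jb.phys_addr);             // DMA buffer by phys addr
 */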
struct file_operations jpu_fops = {
	.owner = THIS_MODULE,
	.open = jpu_open,
	.read = jpu_read,
	.write = jpu_write,
	.unlocked_ioctl = jpu_ioctl,
	.release = jpu_release,
	.fasync = jpu_fasync,
	.mmap = jpu_mmap,
};
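/*
 * Platform probe: map the register window (from the DT resource, or the
 * JPU_REG_BASE_ADDR fallback), register the character device and class,
 * acquire clocks/resets, enable runtime PM, request the interrupt and, when
 * JPU_SUPPORT_RESERVED_VIDEO_MEMORY is set, initialize the reserved video
 * memory allocator.
 */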
static int jpu_probe(struct platform_device *pdev)
{
	int err = 0;
	struct resource *res = NULL;
	struct device *devices;
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
	struct resource res_cma;
	struct device_node *node;
#endif
	DPRINTK("[JPUDRV] jpu_probe\n");
	if (pdev) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	}
	if (res) { /* if platform driver is implemented */
		s_jpu_register.phys_addr = res->start;
		s_jpu_register.virt_addr = (unsigned long)ioremap(res->start, resource_size(res));
		s_jpu_register.size = resource_size(res);
		DPRINTK("[JPUDRV] : jpu base address get from platform driver physical base addr==0x%lx, virtual base=0x%lx\n", s_jpu_register.phys_addr, s_jpu_register.virt_addr);
	} else {
		s_jpu_register.phys_addr = JPU_REG_BASE_ADDR;
		s_jpu_register.virt_addr = (unsigned long)ioremap(s_jpu_register.phys_addr, JPU_REG_SIZE);
		s_jpu_register.size = JPU_REG_SIZE;
		DPRINTK("[JPUDRV] : jpu base address get from defined value physical base addr==0x%lx, virtual base=0x%lx\n", s_jpu_register.phys_addr, s_jpu_register.virt_addr);
	}
	if (pdev) {
		jpu_dev = &pdev->dev;
		//jpu_dev->dma_ops = NULL;
		dev_info(jpu_dev, "init device.\n");
	}
	/* get the major number of the character device */
	if ((alloc_chrdev_region(&s_jpu_devt, 0, 1, JPU_DEV_NAME)) < 0) {
		err = -EBUSY;
		printk(KERN_ERR "could not allocate major number\n");
		goto ERROR_PROVE_DEVICE;
	}
	s_jpu_major = MAJOR(s_jpu_devt);
	/* initialize the device structure and register the device with the kernel */
	cdev_init(&s_jpu_cdev, &jpu_fops);
	if ((cdev_add(&s_jpu_cdev, s_jpu_devt, 1)) < 0) {
		err = -EBUSY;
		printk(KERN_ERR "could not allocate chrdev\n");
		goto ERROR_PROVE_DEVICE;
	}
	s_jpu_class = class_create(THIS_MODULE, JPU_DEV_NAME);
	if (IS_ERR(s_jpu_class)) {
		dev_err(jpu_dev, "class create error.\n");
		goto ERROR_CRART_CLASS;
	}
	devices = device_create(s_jpu_class, 0, MKDEV(s_jpu_major, 0),
				NULL, JPU_DEV_NAME);
	if (IS_ERR(devices)) {
		dev_err(jpu_dev, "device create error.\n");
		goto ERROR_CREAT_DEVICE;
	}
	if (pdev)
		s_jpu_clk = jpu_clk_get(pdev);
	else
		s_jpu_clk = jpu_clk_get(NULL);
	if (!s_jpu_clk) {
		printk(KERN_ERR "[JPUDRV] : not support clock controller.\n");
	} else {
		DPRINTK("[JPUDRV] : get clock controller s_jpu_clk=%p\n", s_jpu_clk);
	}
	jpu_pmu_enable(s_jpu_clk->dev);
	jpu_clk_enable(s_jpu_clk);
	reset_control_deassert(s_jpu_clk->resets);
#ifdef JPU_SUPPORT_ISR
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
	if (pdev)
		res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res) { /* if platform driver is implemented */
		s_jpu_irq = res->start;
		DPRINTK("[JPUDRV] : jpu irq number get from platform driver irq=0x%x\n", s_jpu_irq);
	} else {
		DPRINTK("[JPUDRV] : jpu irq number get from defined value irq=0x%x\n", s_jpu_irq);
	}
#else
	DPRINTK("[JPUDRV] : jpu irq number get from defined value irq=0x%x\n", s_jpu_irq);
#endif
	err = request_irq(s_jpu_irq, jpu_irq_handler, 0, "JPU_CODEC_IRQ", (void *)(&s_jpu_drv_context));
	if (err) {
		printk(KERN_ERR "[JPUDRV] : fail to register interrupt handler\n");
		goto ERROR_PROVE_DEVICE;
	}
#endif
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
	node = of_parse_phandle(jpu_dev->of_node, "memory-region", 0);
	if (node) {
		dev_info(jpu_dev, "Get mem from memory-region\n");
		of_address_to_resource(node, 0, &res_cma);
		s_video_memory.size = resource_size(&res_cma);
		s_video_memory.phys_addr = res_cma.start;
	} else {
		dev_info(jpu_dev, "Get mem from reserved memory failed. Please check the dts file.\n");
		return 0;
	}
	s_video_memory.base = (unsigned long)ioremap(MEM2SYS(s_video_memory.phys_addr), PAGE_ALIGN(s_video_memory.size));
	if (!s_video_memory.base) {
		printk(KERN_ERR "[JPUDRV] : fail to remap video memory physical phys_addr=0x%lx, base=0x%lx, size=%d\n", MEM2SYS(s_video_memory.phys_addr), s_video_memory.base, s_video_memory.size);
		goto ERROR_PROVE_DEVICE;
	}
	if (jmem_init(&s_jmem, s_video_memory.phys_addr, s_video_memory.size) < 0) {
		printk(KERN_ERR "[JPUDRV] : fail to init vmem system\n");
		goto ERROR_PROVE_DEVICE;
	}
	DPRINTK("[JPUDRV] success to probe jpu device with reserved video memory phys_addr=0x%lx, base=0x%lx\n", s_video_memory.phys_addr, s_video_memory.base);
#else
	DPRINTK("[JPUDRV] success to probe jpu device with non reserved video memory\n");
#endif
	return 0;
ERROR_CREAT_DEVICE:
	class_destroy(s_jpu_class);
ERROR_CRART_CLASS:
	cdev_del(&s_jpu_cdev);
ERROR_PROVE_DEVICE:
	if (s_jpu_major)
		unregister_chrdev_region(s_jpu_devt, 1);
	if (s_jpu_register.virt_addr)
		iounmap((void *)s_jpu_register.virt_addr);
	return err;
}
static int jpu_remove(struct platform_device *pdev)
{
	DPRINTK("[JPUDRV] jpu_remove\n");
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
	if (s_instance_pool.base) {
		vfree((const void *)s_instance_pool.base);
		s_instance_pool.base = 0;
	}
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
	if (s_video_memory.base) {
		iounmap((void *)s_video_memory.base);
		s_video_memory.base = 0;
		jmem_exit(&s_jmem);
	}
#endif
	if (s_jpu_major > 0) {
		device_destroy(s_jpu_class, MKDEV(s_jpu_major, 0));
		class_destroy(s_jpu_class);
		cdev_del(&s_jpu_cdev);
		unregister_chrdev_region(s_jpu_devt, 1);
		s_jpu_major = 0;
	}
#ifdef JPU_SUPPORT_ISR
	if (s_jpu_irq)
		free_irq(s_jpu_irq, &s_jpu_drv_context);
#endif
	if (s_jpu_register.virt_addr)
		iounmap((void *)s_jpu_register.virt_addr);
	jpu_clk_put(s_jpu_clk);
	jpu_pmu_disable(&pdev->dev);
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
	return 0;
}
#ifdef CONFIG_PM
static int __maybe_unused jpu_runtime_suspend(struct device *dev)
{
	reset_control_assert(s_jpu_clk->resets);
	jpu_clk_disable(s_jpu_clk);
	return 0;
}
static int __maybe_unused jpu_runtime_resume(struct device *dev)
{
	jpu_clk_enable(s_jpu_clk);
	return reset_control_deassert(s_jpu_clk->resets);
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
static int __maybe_unused jpu_suspend(struct device *dev)
{
	pm_runtime_force_suspend(dev);
	return 0;
}
static int __maybe_unused jpu_resume(struct device *dev)
{
	pm_runtime_force_resume(dev);
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops cm_jpu_pm_ops = {
	SET_RUNTIME_PM_OPS(jpu_runtime_suspend,
			   jpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
};
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
static const struct of_device_id jpu_of_id_table[] = {
	{ .compatible = "cm,codaj12-jpu-1" },
	{ .compatible = "starfive,jpu" },
	{}
};
MODULE_DEVICE_TABLE(of, jpu_of_id_table);
static struct platform_driver jpu_driver = {
	.driver = {
		.name = JPU_PLATFORM_DEVICE_NAME,
		.of_match_table = of_match_ptr(jpu_of_id_table),
		.pm = &cm_jpu_pm_ops,
	},
	.probe = jpu_probe,
	.remove = jpu_remove,
};
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
static int __init jpu_init(void)
{
	int res = 0;
	u32 i;
	DPRINTK("[JPUDRV] begin jpu_init\n");
	for (i = 0; i < MAX_NUM_INSTANCE; i++) {
		init_waitqueue_head(&s_interrupt_wait_q[i]);
	}
	s_instance_pool.base = 0;
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
	res = platform_driver_register(&jpu_driver);
#else
	res = jpu_probe(NULL);
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
	DPRINTK("[JPUDRV] end jpu_init result=0x%x\n", res);
	return res;
}
static void __exit jpu_exit(void)
{
	DPRINTK("[JPUDRV] [+]jpu_exit\n");
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
	platform_driver_unregister(&jpu_driver);
#else /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
#ifdef JPU_SUPPORT_CLOCK_CONTROL
#else
	jpu_clk_disable(s_jpu_clk);
#endif /* JPU_SUPPORT_CLOCK_CONTROL */
	jpu_clk_put(s_jpu_clk);
	if (s_instance_pool.base) {
		vfree((const void *)s_instance_pool.base);
		s_instance_pool.base = 0;
	}
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
	if (s_video_memory.base) {
		iounmap((void *)s_video_memory.base);
		s_video_memory.base = 0;
		jmem_exit(&s_jmem);
	}
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
	if (s_jpu_major > 0) {
		device_destroy(s_jpu_class, MKDEV(s_jpu_major, 0));
		class_destroy(s_jpu_class);
		cdev_del(&s_jpu_cdev);
		unregister_chrdev_region(s_jpu_devt, 1);
		s_jpu_major = 0;
	}
#ifdef JPU_SUPPORT_ISR
	if (s_jpu_irq)
		free_irq(s_jpu_irq, &s_jpu_drv_context);
#endif /* JPU_SUPPORT_ISR */
	if (s_jpu_register.virt_addr) {
		iounmap((void *)s_jpu_register.virt_addr);
		s_jpu_register.virt_addr = 0x00;
	}
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
	DPRINTK("[JPUDRV] [-]jpu_exit\n");
	return;
}
MODULE_AUTHOR("A customer using C&M JPU, Inc.");
MODULE_DESCRIPTION("JPU linux driver");
MODULE_LICENSE("Dual BSD/GPL");
module_init(jpu_init);
module_exit(jpu_exit);
static int jpu_pmu_enable(struct device *dev)
{
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
static void jpu_pmu_disable(struct device *dev)
{
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}
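/*
 * Two clock-control back ends follow. Without STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
 * the driver pokes the SoC clock/reset generator registers directly through an
 * ioremap of SAIF_BD_APBS_BASE; with it (the default above), the standard clk
 * bulk and reset controller APIs from the device tree are used instead.
 */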
#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
#define CLK_ENABLE_DATA 1
#define CLK_DISABLE_DATA 0
#define CLK_EN_SHIFT 31
#define CLK_EN_MASK 0x80000000U
#define SAIF_BD_APBS_BASE 0x13020000
#define CODAJ12_CLK_AXI_CTRL 0x108U
#define CODAJ12_CLK_APB_CTRL 0x110U
#define CODAJ12_CLK_CORE_CTRL 0x10cU
#define RSTGEN_SOFTWARE_RESET_ASSERT1 0x2FCU
#define RSTGEN_SOFTWARE_RESET_STATUS1 0x30CU
#define RSTN_AXI_MASK (0x1 << 12)
#define RSTN_CORE_MASK (0x1 << 13)
#define RSTN_APB_MASK (0x1 << 14)
static __maybe_unused uint32_t saif_get_reg(
	const volatile void __iomem *addr,
	uint32_t shift, uint32_t mask)
{
	u32 tmp;
	tmp = readl(addr);
	tmp = (tmp & mask) >> shift;
	return tmp;
}
static void saif_set_reg(volatile void __iomem *addr, uint32_t data,
			 uint32_t shift, uint32_t mask)
{
	uint32_t tmp;
	tmp = readl(addr);
	tmp &= ~mask;
	tmp |= (data << shift) & mask;
	writel(tmp, addr);
}
static void saif_assert_rst(volatile void __iomem *addr,
			    const volatile void __iomem *addr_status, uint32_t mask)
{
	uint32_t tmp;
	tmp = readl(addr);
	tmp |= mask;
	writel(tmp, addr);
	do {
		tmp = readl(addr_status);
	} while ((tmp & mask) != 0);
}
static void saif_clear_rst(volatile void __iomem *addr,
			   const volatile void __iomem *addr_status, uint32_t mask)
{
	uint32_t tmp;
	tmp = readl(addr);
	tmp &= ~mask;
	writel(tmp, addr);
	do {
		tmp = readl(addr_status);
	} while ((tmp & mask) != mask);
}
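/*
 * Gate the APB/AXI/core clocks and sequence the matching reset bits in the
 * reset generator. Enabling turns the clocks on and then de-asserts the
 * resets; disabling asserts the resets first and then gates the clocks, with
 * saif_assert_rst()/saif_clear_rst() polling the status register until the
 * reset state is visible.
 */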
static void jpu_clk_control(jpu_clk_t *clk, bool enable)
{
	if (enable) {
		/* enable */
		saif_set_reg(clk->apb_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);
		saif_set_reg(clk->axi_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);
		saif_set_reg(clk->core_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);
		/* clear reset */
		saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
		saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
		saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
	} else {
		/* assert reset */
		saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
		saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
		saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
		/* disable */
		saif_set_reg(clk->apb_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
		saif_set_reg(clk->axi_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
		saif_set_reg(clk->core_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
	}
}
static void jpu_clk_reset(jpu_clk_t *clk)
{
	/* assert reset */
	saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
	saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
	saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
	/* clear reset */
	saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
	saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
	saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
}
int jpu_hw_reset(void)
{
	if (!s_jpu_clk)
		return -1;
	jpu_clk_reset(s_jpu_clk);
	DPRINTK("[JPUDRV] reset jpu hardware.\n");
	return 0;
}
static int jpu_of_clk_get(struct platform_device *pdev, jpu_clk_t *jpu_clk)
{
	if (!pdev)
		return -ENXIO;
	jpu_clk->clkgen = ioremap(SAIF_BD_APBS_BASE, 0x400);
	if (!jpu_clk->clkgen) { /* ioremap() returns NULL on failure, not an ERR_PTR */
		dev_err(&pdev->dev, "ioremap clkgen failed.\n");
		return -ENOMEM;
	}
	/* clkgen define */
	jpu_clk->axi_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_AXI_CTRL;
	jpu_clk->apb_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_APB_CTRL;
	jpu_clk->core_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_CORE_CTRL;
	jpu_clk->en_mask = CLK_EN_MASK;
	jpu_clk->en_shift = CLK_EN_SHIFT;
	/* rstgen define */
	jpu_clk->rst_ctrl = jpu_clk->clkgen + RSTGEN_SOFTWARE_RESET_ASSERT1;
	jpu_clk->rst_status = jpu_clk->clkgen + RSTGEN_SOFTWARE_RESET_STATUS1;
	jpu_clk->axi_clk.rst_mask = RSTN_AXI_MASK;
	jpu_clk->apb_clk.rst_mask = RSTN_APB_MASK;
	jpu_clk->core_clk.rst_mask = RSTN_CORE_MASK;
	return 0;
}
static jpu_clk_t *jpu_clk_get(struct platform_device *pdev)
{
	jpu_clk_t *jpu_clk;
	jpu_clk = devm_kzalloc(&pdev->dev, sizeof(*jpu_clk), GFP_KERNEL);
	if (!jpu_clk)
		return NULL;
	if (jpu_of_clk_get(pdev, jpu_clk))
		goto err_get_clk;
	return jpu_clk;
err_get_clk:
	devm_kfree(&pdev->dev, jpu_clk);
	return NULL;
}
static void jpu_clk_put(jpu_clk_t *clk)
{
	if (clk->clkgen) {
		iounmap(clk->clkgen);
		clk->clkgen = NULL;
	}
}
static int jpu_clk_enable(jpu_clk_t *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -1;
	jpu_pmu_enable(clk->dev);
	jpu_clk_control(clk, true);
	DPRINTK("[JPUDRV] jpu_clk_enable\n");
	return 0;
}
static void jpu_clk_disable(jpu_clk_t *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;
	jpu_clk_control(clk, false);
	jpu_pmu_disable(clk->dev);
	DPRINTK("[JPUDRV] jpu_clk_disable\n");
}
#else /* STARFIVE_JPU_SUPPORT_CLOCK_CONTROL */
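/*
 * Clock-tree based implementation (STARFIVE_JPU_SUPPORT_CLOCK_CONTROL):
 * clocks come from the jpu_clks bulk table and resets from a shared reset
 * control array, both looked up through the device tree with devm helpers;
 * jpu_hw_reset() simply pulses the whole reset array.
 */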
static int jpu_hw_reset(void)
{
	return reset_control_reset(s_jpu_clk->resets);
}
static int jpu_of_clk_get(struct platform_device *pdev, jpu_clk_t *jpu_clk)
{
	struct device *dev = &pdev->dev;
	int ret;
	jpu_clk->dev = dev;
	jpu_clk->clks = jpu_clks;
	jpu_clk->nr_clks = ARRAY_SIZE(jpu_clks);
	jpu_clk->resets = devm_reset_control_array_get_shared(dev);
	if (IS_ERR(jpu_clk->resets)) {
		ret = PTR_ERR(jpu_clk->resets);
		dev_err(dev, "failed to get jpu reset controls\n");
	}
	ret = devm_clk_bulk_get(dev, jpu_clk->nr_clks, jpu_clk->clks);
	if (ret)
		dev_err(dev, "failed to get jpu clk controls\n");
	return 0;
}
static jpu_clk_t *jpu_clk_get(struct platform_device *pdev)
{
	jpu_clk_t *jpu_clk;
	if (!pdev)
		return NULL;
	jpu_clk = devm_kzalloc(&pdev->dev, sizeof(*jpu_clk), GFP_KERNEL);
	if (!jpu_clk)
		return NULL;
	if (jpu_of_clk_get(pdev, jpu_clk))
		goto err_of_clk_get;
	return jpu_clk;
err_of_clk_get:
	devm_kfree(&pdev->dev, jpu_clk);
	return NULL;
}
static void jpu_clk_put(jpu_clk_t *clk)
{
	clk_bulk_put(clk->nr_clks, clk->clks);
}
static int jpu_clk_enable(jpu_clk_t *clk)
{
	int ret;
	ret = clk_bulk_prepare_enable(clk->nr_clks, clk->clks);
	if (ret)
		dev_err(clk->dev, "enable clk error.\n");
	DPRINTK("[JPUDRV] jpu_clk_enable\n");
	return ret;
}
static void jpu_clk_disable(jpu_clk_t *clk)
{
	clk_bulk_disable_unprepare(clk->nr_clks, clk->clks);
}
#endif /* STARFIVE_JPU_SUPPORT_CLOCK_CONTROL */