//--=========================================================================--
//  This file is a Linux device driver for the JPU.
//-----------------------------------------------------------------------------
//
//  This confidential and proprietary software may be used only
//  as authorized by a licensing agreement from Chips&Media Inc.
//  In the event of publication, the following notice is applicable:
//
//  (C) COPYRIGHT 2006 - 2016 CHIPS&MEDIA INC.
//  ALL RIGHTS RESERVED
//
//  The entire notice above must be reproduced on all authorized
//  copies.
//
//--=========================================================================--
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/reset.h>
#include <linux/version.h>
#include <soc/sifive/sifive_l2_cache.h>

#include "../../../jpuapi/jpuconfig.h"
#include "jpu.h"

//#define ENABLE_DEBUG_MSG
#ifdef ENABLE_DEBUG_MSG
#define DPRINTK(args...)    printk(KERN_INFO args)
#else
#define DPRINTK(args...)
#endif
/* definitions to be changed as customer configuration */

/* if you want to have a clock gating scheme frame by frame */
//#define JPU_SUPPORT_CLOCK_CONTROL
#define JPU_SUPPORT_ISR
//#define JPU_IRQ_CONTROL

/* if the clock tree works, try this... */
#define STARFIVE_JPU_SUPPORT_CLOCK_CONTROL

/* if the platform driver knows the name of this driver */
/* JPU_PLATFORM_DEVICE_NAME */
#define JPU_SUPPORT_PLATFORM_DRIVER_REGISTER

/* if this driver knows the dedicated video memory address */
//#define JPU_SUPPORT_RESERVED_VIDEO_MEMORY

static void starfive_flush_dcache(unsigned long start, unsigned long len)
{
    sifive_l2_flush64_range(start, len);
}

#define JPU_PLATFORM_DEVICE_NAME    "cnm_jpu"
#define JPU_CLK_NAME                "jpege"
#define JPU_DEV_NAME                "jpu"

#define JPU_REG_BASE_ADDR           0x11900000
#define JPU_REG_SIZE                0x300

#ifdef JPU_SUPPORT_ISR
#define JPU_IRQ_NUM                 24
/* if the driver wants to disable and enable the IRQ whenever an interrupt is asserted */
/*#define JPU_IRQ_CONTROL*/
#endif

#ifndef VM_RESERVED /* for kernels where VM_RESERVED no longer exists (3.7.0 and later) */
#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
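/*
 * Driver-wide state.  A single character device is shared by every user of
 * the JPU: jpu_drv_context_t.open_count counts open file handles (not codec
 * instances), and interrupt_reason[] keeps the last interrupt status read
 * for each hardware instance so JDI_IOCTL_WAIT_INTERRUPT can report it back
 * to user space.
 */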
struct device *jpu_dev;

typedef struct jpu_drv_context_t {
    struct fasync_struct *async_queue;
    u32 open_count;                     /*!<< device reference count, not instance count */
    u32 interrupt_reason[MAX_NUM_INSTANCE];
} jpu_drv_context_t;

/* To track the allocated memory buffers */
typedef struct jpudrv_buffer_pool_t {
    struct list_head list;
    struct jpudrv_buffer_t jb;
    struct file *filp;
} jpudrv_buffer_pool_t;

/* To track the instance index and buffer in the instance pool */
typedef struct jpudrv_instance_list_t {
    struct list_head list;
    unsigned long inst_idx;
    struct file *filp;
} jpudrv_instance_list_t;

typedef struct jpudrv_instance_pool_t {
    unsigned char codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
} jpudrv_instance_pool_t;

#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
typedef struct jpu_clkgen_t {
    void __iomem *en_ctrl;
    uint32_t rst_mask;
} jpu_clkgen_t;
#endif

struct clk_bulk_data jpu_clks[] = {
    { .id = "axi_clk" },
    { .id = "core_clk" },
    { .id = "apb_clk" },
    { .id = "noc_bus" },
};

typedef struct jpu_clk_t {
#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
    void __iomem *clkgen;
    void __iomem *rst_ctrl;
    void __iomem *rst_status;
    uint32_t en_shift;
    uint32_t en_mask;
    jpu_clkgen_t apb_clk;
    jpu_clkgen_t axi_clk;
    jpu_clkgen_t core_clk;
#else
    struct clk_bulk_data *clks;
    struct reset_control *resets;
    int nr_clks;
#endif
    struct device *dev;
} jpu_clk_t;

#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
#include "jmm.h"
static jpu_mm_t s_jmem;
static jpudrv_buffer_t s_video_memory = {0};
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
static int jpu_hw_reset(void);
static void jpu_clk_disable(jpu_clk_t *clk);
static int jpu_clk_enable(jpu_clk_t *clk);
static jpu_clk_t *jpu_clk_get(struct platform_device *pdev);
static void jpu_clk_put(jpu_clk_t *clk);
static int jpu_pmu_enable(struct device *dev);
static void jpu_pmu_disable(struct device *dev);
// end customer definition

static jpudrv_buffer_t s_instance_pool = {0};
static jpu_drv_context_t s_jpu_drv_context;
static dev_t s_jpu_devt;
static int s_jpu_major;
static struct cdev s_jpu_cdev;
static struct class *s_jpu_class;
static jpu_clk_t *s_jpu_clk;
static int s_jpu_open_ref_count;
#ifdef JPU_SUPPORT_ISR
static int s_jpu_irq = JPU_IRQ_NUM;
#endif
static jpudrv_buffer_t s_jpu_register = {0};

static int s_interrupt_flag[MAX_NUM_INSTANCE];
static wait_queue_head_t s_interrupt_wait_q[MAX_NUM_INSTANCE];

static spinlock_t s_jpu_lock = __SPIN_LOCK_UNLOCKED(s_jpu_lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
static DECLARE_MUTEX(s_jpu_sem);
#else
static DEFINE_SEMAPHORE(s_jpu_sem);
#endif
static struct list_head s_jbp_head = LIST_HEAD_INIT(s_jbp_head);
static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);

#ifdef CONFIG_PM
/* implement power management functions here */
#endif

#define NPT_BASE                        0x0000
#define NPT_REG_SIZE                    0x300
#define MJPEG_PIC_STATUS_REG(_inst_no)  (NPT_BASE + ((_inst_no) * NPT_REG_SIZE) + 0x004)

#define ReadJpuRegister(addr)           (*(volatile unsigned int *)(s_jpu_register.virt_addr + (addr)))
#define WriteJpuRegister(addr, val)     (*(volatile unsigned int *)(s_jpu_register.virt_addr + (addr)) = (unsigned int)(val))
#define WriteJpu(addr, val)             (*(volatile unsigned int *)(addr) = (unsigned int)(val))
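/*
 * DMA buffer helpers.  With JPU_SUPPORT_RESERVED_VIDEO_MEMORY the buffer is
 * carved out of the dedicated video memory pool managed by jmem; otherwise
 * it comes from dma_alloc_coherent() and the SiFive L2 cache range is
 * flushed so the hardware sees a consistent view.  jb->phys_addr is what is
 * handed back to user space through the ioctls, while jb->base keeps the
 * kernel-side address needed to free the buffer later.
 */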
static int jpu_alloc_dma_buffer(jpudrv_buffer_t *jb)
{
    if (!jb)
        return -1;

#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
    jb->phys_addr = (unsigned long long)jmem_alloc(&s_jmem, jb->size, 0);
    if ((unsigned long)jb->phys_addr == (unsigned long)-1) {
        printk(KERN_ERR "[JPUDRV] Physical memory allocation error size=%d\n", jb->size);
        return -1;
    }
    jb->base = (unsigned long)(s_video_memory.base + (jb->phys_addr - s_video_memory.phys_addr));
#else
    jb->base = (unsigned long)dma_alloc_coherent(jpu_dev, PAGE_ALIGN(jb->size),
                                                 (dma_addr_t *)(&jb->phys_addr),
                                                 GFP_DMA | GFP_KERNEL);
    if ((void *)(jb->base) == NULL) {
        printk(KERN_ERR "[JPUDRV] Physical memory allocation error size=%d\n", jb->size);
        return -1;
    }
    starfive_flush_dcache(jb->phys_addr, PAGE_ALIGN(jb->size));
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
    return 0;
}

static void jpu_free_dma_buffer(jpudrv_buffer_t *jb)
{
    if (!jb)
        return;

    if (jb->base)
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
        jmem_free(&s_jmem, jb->phys_addr, 0);
#else
        dma_free_coherent(jpu_dev, PAGE_ALIGN(jb->size), (void *)jb->base, jb->phys_addr);
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
}
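/*
 * Called when a file handle is released.  Any codec instance that was opened
 * through this handle but never closed (for example because the application
 * crashed) is reclaimed here: the inUse word of the instance slot in the
 * shared instance pool is cleared and the jdi mutex handles stored at the
 * end of the pool are overwritten with a "destroyed" pattern so the next
 * user-space process does not block on a mutex owned by a dead process.
 */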
static int jpu_free_instances(struct file *filp)
{
    jpudrv_instance_list_t *vil, *n;
    jpudrv_instance_pool_t *vip;
    void *vip_base;
    int instance_pool_size_per_core;
    void *jdi_mutexes_base;
    const int PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc;

    DPRINTK("[JPUDRV] jpu_free_instances\n");

    /* s_instance_pool.size is set to the size for all cores when user space calls JDI_IOCTL_GET_INSTANCE_POOL. */
    instance_pool_size_per_core = (s_instance_pool.size / MAX_NUM_JPU_CORE);

    list_for_each_entry_safe(vil, n, &s_inst_list_head, list) {
        if (vil->filp == filp) {
            vip_base = (void *)(s_instance_pool.base + instance_pool_size_per_core);
            DPRINTK("[JPUDRV] jpu_free_instances detected instance crash instIdx=%d, vip_base=%p, instance_pool_size_per_core=%d\n",
                    (int)vil->inst_idx, vip_base, (int)instance_pool_size_per_core);
            vip = (jpudrv_instance_pool_t *)vip_base;
            if (vip) {
                /* only the first 4 bytes (inUse of CodecInst in jpuapi) matter for freeing the corresponding instance */
                memset(&vip->codecInstPool[vil->inst_idx], 0x00, 4);
#define PTHREAD_MUTEX_T_HANDLE_SIZE 4
                jdi_mutexes_base = (vip_base + (instance_pool_size_per_core - PTHREAD_MUTEX_T_HANDLE_SIZE * 4));
                DPRINTK("[JPUDRV] jpu_free_instances: force-destroy jdi_mutexes_base=%p in user space\n", jdi_mutexes_base);
                if (jdi_mutexes_base) {
                    int i;

                    for (i = 0; i < 4; i++) {
                        memcpy(jdi_mutexes_base, &PTHREAD_MUTEX_T_DESTROY_VALUE, PTHREAD_MUTEX_T_HANDLE_SIZE);
                        jdi_mutexes_base += PTHREAD_MUTEX_T_HANDLE_SIZE;
                    }
                }
            }
            s_jpu_open_ref_count--;
            list_del(&vil->list);
            kfree(vil);
        }
    }
    return 1;
}
static int jpu_free_buffers(struct file *filp)
{
    jpudrv_buffer_pool_t *pool, *n;
    jpudrv_buffer_t jb;

    DPRINTK("[JPUDRV] jpu_free_buffers\n");

    list_for_each_entry_safe(pool, n, &s_jbp_head, list) {
        if (pool->filp == filp) {
            jb = pool->jb;
            if (jb.base) {
                jpu_free_dma_buffer(&jb);
                list_del(&pool->list);
                kfree(pool);
            }
        }
    }
    return 0;
}
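/*
 * Interrupt handling: the handler scans MJPEG_PIC_STATUS_REG of every
 * instance, records the first non-zero status as the interrupt reason,
 * clears it in hardware (unless JPU_IRQ_CONTROL defers that to user space),
 * signals SIGIO to any fasync listener and wakes the waiter sleeping in
 * JDI_IOCTL_WAIT_INTERRUPT.
 */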
static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
{
    jpu_drv_context_t *dev = (jpu_drv_context_t *)dev_id;
    int i;
    u32 flag;

    DPRINTK("[JPUDRV][+]%s\n", __func__);

#ifdef JPU_IRQ_CONTROL
    disable_irq_nosync(s_jpu_irq);
#endif
    for (i = 0; i < MAX_NUM_INSTANCE; i++) {
        flag = ReadJpuRegister(MJPEG_PIC_STATUS_REG(i));
        if (flag != 0)
            break;
    }

    dev->interrupt_reason[i] = flag;
    s_interrupt_flag[i] = 1;
    DPRINTK("[JPUDRV][%d] INTERRUPT FLAG: %08x, %08x\n", i, dev->interrupt_reason[i], MJPEG_PIC_STATUS_REG(i));

    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN); /* notify user space of the interrupt */

#ifndef JPU_IRQ_CONTROL
    WriteJpuRegister(MJPEG_PIC_STATUS_REG(i), flag); /* clear the interrupt */
#endif
    wake_up_interruptible(&s_interrupt_wait_q[i]);

    DPRINTK("[JPUDRV][-]%s\n", __func__);
    return IRQ_HANDLED;
}
static int jpu_open(struct inode *inode, struct file *filp)
{
    DPRINTK("[JPUDRV][+] %s\n", __func__);

    jpu_clk_enable(s_jpu_clk);

    spin_lock(&s_jpu_lock);
    s_jpu_drv_context.open_count++;
    filp->private_data = (void *)(&s_jpu_drv_context);
    spin_unlock(&s_jpu_lock);

    DPRINTK("[JPUDRV][-] %s\n", __func__);
    return 0;
}
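/*
 * ioctl interface.  A typical user-space sequence looks roughly like the
 * illustrative sketch below (the buffer sizes and wrapper variables are
 * hypothetical and not part of this driver; the jdi layer in jpuapi is the
 * real caller):
 *
 *     int fd = open("/dev/jpu", O_RDWR);
 *     jpudrv_buffer_t pool = { .size = inst_pool_size };
 *     ioctl(fd, JDI_IOCTL_GET_INSTANCE_POOL, &pool);        // then mmap(fd) with pgoff 0
 *     jpudrv_buffer_t frame = { .size = frame_size };
 *     ioctl(fd, JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY, &frame); // DMA buffer for the codec
 *     jpudrv_intr_info_t irq = { .timeout = 100, .inst_idx = 0 };
 *     ioctl(fd, JDI_IOCTL_WAIT_INTERRUPT, &irq);             // block until PIC_STATUS != 0
 */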
static long jpu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
    int ret = 0;

    switch (cmd) {
    case JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
    {
        jpudrv_buffer_pool_t *jbp;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
        if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
            jbp = kzalloc(sizeof(jpudrv_buffer_pool_t), GFP_KERNEL);
            if (!jbp) {
                up(&s_jpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&(jbp->jb), (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
            if (ret) {
                kfree(jbp);
                up(&s_jpu_sem);
                return -EFAULT;
            }

            ret = jpu_alloc_dma_buffer(&(jbp->jb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(jbp);
                up(&s_jpu_sem);
                break;
            }

            ret = copy_to_user((void __user *)arg, &(jbp->jb), sizeof(jpudrv_buffer_t));
            if (ret) {
                kfree(jbp);
                ret = -EFAULT;
                up(&s_jpu_sem);
                break;
            }

            jbp->filp = filp;
            spin_lock(&s_jpu_lock);
            list_add(&jbp->list, &s_jbp_head);
            spin_unlock(&s_jpu_lock);

            up(&s_jpu_sem);
        }
        DPRINTK("[JPUDRV][-]JDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
    }
    break;
    case JDI_IOCTL_GET_PHYSICAL_MEMORY:
    {
        jpudrv_buffer_pool_t *jbp = NULL;
        void *user_address = NULL;
        struct task_struct *my_struct = NULL;
        struct mm_struct *mm = NULL;
        unsigned long address = 0;
        pgd_t *pgd = NULL;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_PHYSICAL_MEMORY\n");
        if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
            jbp = kzalloc(sizeof(jpudrv_buffer_pool_t), GFP_KERNEL);
            if (!jbp) {
                up(&s_jpu_sem);
                return -ENOMEM;
            }

            ret = copy_from_user(&(jbp->jb), (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
            if (ret) {
                kfree(jbp);
                up(&s_jpu_sem);
                return -EFAULT;
            }

            user_address = (void *)jbp->jb.virt_addr;
            my_struct = get_current();
            mm = my_struct->mm;
            address = (unsigned long)user_address;
            pgd = pgd_offset(mm, address);

            if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
                p4d_t *p4d = p4d_offset(pgd, address);
                pud_t *pud = pud_offset(p4d, address);

                if (!pud_none(*pud) && !pud_bad(*pud)) {
                    pmd_t *pmd = pmd_offset(pud, address);

                    if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
                        pte_t *pte = pte_offset_map(pmd, address);

                        if (!pte_none(*pte)) {
                            struct page *pg = pte_page(*pte);
                            unsigned long phys = page_to_phys(pg);
                            unsigned long virt = (unsigned long)phys_to_virt(phys);

                            printk("phys address = %lx, virt = %lx\r\n", phys, virt);
                            jbp->jb.phys_addr = phys;
                            jbp->jb.base = virt;
                        }
                        pte_unmap(pte);
                    }
                }
            }

            ret = copy_to_user((void __user *)arg, &(jbp->jb), sizeof(jpudrv_buffer_t));
            if (ret) {
                kfree(jbp);
                ret = -EFAULT;
                up(&s_jpu_sem);
                break;
            }

            kfree(jbp);
            up(&s_jpu_sem);
        }
        DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_PHYSICAL_MEMORY\n");
    }
    break;
    case JDI_IOCTL_FREE_PHYSICALMEMORY:
    {
        jpudrv_buffer_pool_t *jbp, *n;
        jpudrv_buffer_t jb;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_FREE_PHYSICALMEMORY\n");
        if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
            ret = copy_from_user(&jb, (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
            if (ret) {
                up(&s_jpu_sem);
                return -EACCES;
            }

            if (jb.base)
                jpu_free_dma_buffer(&jb);

            spin_lock(&s_jpu_lock);
            list_for_each_entry_safe(jbp, n, &s_jbp_head, list) {
                if (jbp->jb.base == jb.base) {
                    list_del(&jbp->list);
                    kfree(jbp);
                    break;
                }
            }
            spin_unlock(&s_jpu_lock);

            up(&s_jpu_sem);
        }
        DPRINTK("[JPUDRV][-]JDI_IOCTL_FREE_PHYSICALMEMORY\n");
    }
    break;
    case JDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
    {
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
        if (s_video_memory.base != 0) {
            ret = copy_to_user((void __user *)arg, &s_video_memory, sizeof(jpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
    }
    break;
    case JDI_IOCTL_WAIT_INTERRUPT:
    {
        jpudrv_intr_info_t info;
        struct jpu_drv_context_t *dev = (struct jpu_drv_context_t *)filp->private_data;
        u32 instance_no;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_WAIT_INTERRUPT\n");
        ret = copy_from_user(&info, (jpudrv_intr_info_t *)arg, sizeof(jpudrv_intr_info_t));
        if (ret != 0)
            return -EFAULT;

        instance_no = info.inst_idx;
        DPRINTK("[JPUDRV] INSTANCE NO: %d\n", instance_no);
        ret = wait_event_interruptible_timeout(s_interrupt_wait_q[instance_no],
                                               s_interrupt_flag[instance_no] != 0,
                                               msecs_to_jiffies(info.timeout));
        if (!ret) {
            DPRINTK("[JPUDRV] INSTANCE NO: %d ETIME\n", instance_no);
            ret = -ETIME;
            break;
        }

        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            DPRINTK("[JPUDRV] INSTANCE NO: %d ERESTARTSYS\n", instance_no);
            break;
        }

        DPRINTK("[JPUDRV] INST(%d) s_interrupt_flag(%d), reason(0x%08x)\n",
                instance_no, s_interrupt_flag[instance_no], dev->interrupt_reason[instance_no]);

        info.intr_reason = dev->interrupt_reason[instance_no];
        s_interrupt_flag[instance_no] = 0;
        dev->interrupt_reason[instance_no] = 0;
        ret = copy_to_user((void __user *)arg, &info, sizeof(jpudrv_intr_info_t));
#ifdef JPU_IRQ_CONTROL
        enable_irq(s_jpu_irq);
#endif
        DPRINTK("[JPUDRV][-]JDI_IOCTL_WAIT_INTERRUPT\n");
        if (ret != 0)
            return -EFAULT;
    }
    break;
    case JDI_IOCTL_SET_CLOCK_GATE:
    {
        u32 clkgate;

        if (get_user(clkgate, (u32 __user *)arg))
            return -EFAULT;
#ifdef JPU_SUPPORT_CLOCK_CONTROL
        if (clkgate)
            jpu_clk_enable(s_jpu_clk);
        else
            jpu_clk_disable(s_jpu_clk);
#endif /* JPU_SUPPORT_CLOCK_CONTROL */
    }
    break;
    case JDI_IOCTL_GET_INSTANCE_POOL:
        DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_INSTANCE_POOL\n");
        if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
            if (s_instance_pool.base != 0) {
                ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(jpudrv_buffer_t));
            } else {
                ret = copy_from_user(&s_instance_pool, (jpudrv_buffer_t *)arg, sizeof(jpudrv_buffer_t));
                if (ret == 0) {
                    s_instance_pool.size = PAGE_ALIGN(s_instance_pool.size);
                    s_instance_pool.base = (unsigned long)vmalloc(s_instance_pool.size);
                    s_instance_pool.phys_addr = s_instance_pool.base;

                    if (s_instance_pool.base != 0) {
                        memset((void *)s_instance_pool.base, 0x0, s_instance_pool.size); /* clear the pool */
                        ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(jpudrv_buffer_t));
                        if (ret == 0) {
                            /* successfully obtained memory for the instance pool */
                            up(&s_jpu_sem);
                            break;
                        }
                    }
                    ret = -EFAULT;
                }
            }
            up(&s_jpu_sem);
        }
        DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_INSTANCE_POOL: %s base: %lx, size: %d\n",
                (ret == 0 ? "OK" : "NG"), s_instance_pool.base, s_instance_pool.size);
        break;
    case JDI_IOCTL_OPEN_INSTANCE:
    {
        jpudrv_inst_info_t inst_info;

        if (copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t)))
            return -EFAULT;

        spin_lock(&s_jpu_lock);
        s_jpu_open_ref_count++; /* flag only tells whether the jpu is opened or closed */
        inst_info.inst_open_count = s_jpu_open_ref_count;
        spin_unlock(&s_jpu_lock);

        if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t)))
            return -EFAULT;

        DPRINTK("[JPUDRV] JDI_IOCTL_OPEN_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
                (int)inst_info.inst_idx, s_jpu_open_ref_count, inst_info.inst_open_count);
    }
    break;
    case JDI_IOCTL_CLOSE_INSTANCE:
    {
        jpudrv_inst_info_t inst_info;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_CLOSE_INSTANCE\n");
        if (copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t)))
            return -EFAULT;

        spin_lock(&s_jpu_lock);
        s_jpu_open_ref_count--; /* flag only tells whether the jpu is opened or closed */
        inst_info.inst_open_count = s_jpu_open_ref_count;
        spin_unlock(&s_jpu_lock);

        if (copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t)))
            return -EFAULT;

        DPRINTK("[JPUDRV] JDI_IOCTL_CLOSE_INSTANCE inst_idx=%d, s_jpu_open_ref_count=%d, inst_open_count=%d\n",
                (int)inst_info.inst_idx, s_jpu_open_ref_count, inst_info.inst_open_count);
    }
    break;
    case JDI_IOCTL_GET_INSTANCE_NUM:
    {
        jpudrv_inst_info_t inst_info;

        DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_INSTANCE_NUM\n");
        ret = copy_from_user(&inst_info, (jpudrv_inst_info_t *)arg, sizeof(jpudrv_inst_info_t));
        if (ret != 0)
            break;

        spin_lock(&s_jpu_lock);
        inst_info.inst_open_count = s_jpu_open_ref_count;
        spin_unlock(&s_jpu_lock);

        ret = copy_to_user((void __user *)arg, &inst_info, sizeof(jpudrv_inst_info_t));
        DPRINTK("[JPUDRV] JDI_IOCTL_GET_INSTANCE_NUM inst_idx=%d, open_count=%d\n",
                (int)inst_info.inst_idx, inst_info.inst_open_count);
    }
    break;
    case JDI_IOCTL_RESET:
        jpu_hw_reset();
        break;
    case JDI_IOCTL_GET_REGISTER_INFO:
        DPRINTK("[JPUDRV][+]JDI_IOCTL_GET_REGISTER_INFO\n");
        ret = copy_to_user((void __user *)arg, &s_jpu_register, sizeof(jpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        DPRINTK("[JPUDRV][-]JDI_IOCTL_GET_REGISTER_INFO s_jpu_register.phys_addr=0x%lx, s_jpu_register.virt_addr=0x%lx, s_jpu_register.size=%d\n",
                s_jpu_register.phys_addr, s_jpu_register.virt_addr, s_jpu_register.size);
        break;
    case JDI_IOCTL_FLUSH_DCACHE:
    {
        jpudrv_flush_cache_t cache_info;

        //DPRINTK("[JPUDRV][+]JDI_IOCTL_FLUSH_DCACHE\n");
        ret = copy_from_user(&cache_info, (jpudrv_flush_cache_t *)arg, sizeof(jpudrv_flush_cache_t));
        if (ret != 0)
            ret = -EFAULT;

        if (cache_info.flag)
            starfive_flush_dcache(cache_info.start, cache_info.size);
        //DPRINTK("[JPUDRV][-]JDI_IOCTL_FLUSH_DCACHE\n");
        break;
    }
    default:
        printk(KERN_ERR "No such IOCTL, cmd is %d\n", cmd);
        break;
    }
    return ret;
}
static ssize_t jpu_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
    return -1;
}

static ssize_t jpu_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
    /* DPRINTK("[JPUDRV] jpu_write len=%d\n", (int)len); */
    if (!buf) {
        printk(KERN_ERR "[JPUDRV] jpu_write buf = NULL error\n");
        return -EFAULT;
    }
    return -1;
}
static int jpu_release(struct inode *inode, struct file *filp)
{
    int ret = 0;
    u32 open_count;

    DPRINTK("[JPUDRV][+] jpu_release\n");
    if ((ret = down_interruptible(&s_jpu_sem)) == 0) {
        /* find and free any buffers not released by the user application */
        jpu_free_buffers(filp);
        /* find and free any instances not closed by the user application */
        jpu_free_instances(filp);
        DPRINTK("[JPUDRV] open_count: %d\n", s_jpu_drv_context.open_count);

        spin_lock(&s_jpu_lock);
        s_jpu_drv_context.open_count--;
        open_count = s_jpu_drv_context.open_count;
        spin_unlock(&s_jpu_lock);

        if (open_count == 0) {
            if (s_instance_pool.base) {
                DPRINTK("[JPUDRV] free instance pool\n");
                vfree((const void *)s_instance_pool.base);
                s_instance_pool.base = 0;
            }
        }
        up(&s_jpu_sem); /* release the semaphore only if it was actually taken */
    }
    DPRINTK("[JPUDRV][-] jpu_release\n");
    jpu_clk_disable(s_jpu_clk);
    return 0;
}
static int jpu_fasync(int fd, struct file *filp, int mode)
{
    struct jpu_drv_context_t *dev = (struct jpu_drv_context_t *)filp->private_data;

    return fasync_helper(fd, filp, mode, &dev->async_queue);
}
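/*
 * mmap backends.  jpu_mmap() dispatches on vm_pgoff: offset 0 maps the
 * vmalloc'ed instance pool page by page, an offset equal to the register
 * base pfn maps the JPU registers uncached, and any other offset is treated
 * as the pfn of a physical DMA buffer supplied by user space.
 */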
static int jpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
{
    unsigned long pfn;

    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
    pfn = s_jpu_register.phys_addr >> PAGE_SHIFT;

    return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
                           vm->vm_page_prot) ? -EAGAIN : 0;
}

static int jpu_map_to_physical_memory(struct file *fp, struct vm_area_struct *vm)
{
    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);

    return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end - vm->vm_start,
                           vm->vm_page_prot) ? -EAGAIN : 0;
}

static int jpu_map_to_instance_pool_memory(struct file *fp, struct vm_area_struct *vm)
{
    int ret;
    long length = vm->vm_end - vm->vm_start;
    unsigned long start = vm->vm_start;
    char *vmalloc_area_ptr = (char *)s_instance_pool.base;
    unsigned long pfn;

    vm->vm_flags |= VM_RESERVED;

    /* loop over all pages, map each page individually */
    while (length > 0) {
        pfn = vmalloc_to_pfn(vmalloc_area_ptr);
        if ((ret = remap_pfn_range(vm, start, pfn, PAGE_SIZE, PAGE_SHARED)) < 0)
            return ret;

        start += PAGE_SIZE;
        vmalloc_area_ptr += PAGE_SIZE;
        length -= PAGE_SIZE;
    }
    return 0;
}

/*!
 * @brief memory map interface for jpu file operation
 * @return 0 on success or a negative error code on error
 */
static int jpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
    if (vm->vm_pgoff == 0)
        return jpu_map_to_instance_pool_memory(fp, vm);

    if (vm->vm_pgoff == (s_jpu_register.phys_addr >> PAGE_SHIFT))
        return jpu_map_to_register(fp, vm);

    return jpu_map_to_physical_memory(fp, vm);
}

struct file_operations jpu_fops = {
    .owner = THIS_MODULE,
    .open = jpu_open,
    .read = jpu_read,
    .write = jpu_write,
    .unlocked_ioctl = jpu_ioctl,
    .release = jpu_release,
    .fasync = jpu_fasync,
    .mmap = jpu_mmap,
};
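/*
 * Probe path: map the register window (from the platform resource or the
 * JPU_REG_BASE_ADDR fallback), create the /dev/jpu character device, grab
 * the clock/reset controller, request the interrupt and, when reserved
 * video memory is configured, initialise the jmem allocator over the
 * memory-region node.
 */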
static int jpu_probe(struct platform_device *pdev)
{
    int err = 0;
    struct resource *res = NULL;
    struct device *devices;
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
    struct resource res_cma;
    struct device_node *node;
#endif

    DPRINTK("[JPUDRV] jpu_probe\n");
    if (pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (res) { /* if the platform driver is implemented */
        s_jpu_register.phys_addr = res->start;
        s_jpu_register.virt_addr = (unsigned long)ioremap(res->start, resource_size(res));
        s_jpu_register.size = resource_size(res);
        DPRINTK("[JPUDRV] : jpu base address taken from platform driver, physical base addr=0x%lx, virtual base=0x%lx\n",
                s_jpu_register.phys_addr, s_jpu_register.virt_addr);
    } else {
        s_jpu_register.phys_addr = JPU_REG_BASE_ADDR;
        s_jpu_register.virt_addr = (unsigned long)ioremap(s_jpu_register.phys_addr, JPU_REG_SIZE);
        s_jpu_register.size = JPU_REG_SIZE;
        DPRINTK("[JPUDRV] : jpu base address taken from the defined value, physical base addr=0x%lx, virtual base=0x%lx\n",
                s_jpu_register.phys_addr, s_jpu_register.virt_addr);
    }

    if (pdev) {
        jpu_dev = &pdev->dev;
        //jpu_dev->dma_ops = NULL;
        dev_info(jpu_dev, "init device.\n");
    }

    /* get the major number of the character device */
    if ((alloc_chrdev_region(&s_jpu_devt, 0, 1, JPU_DEV_NAME)) < 0) {
        err = -EBUSY;
        printk(KERN_ERR "could not allocate major number\n");
        goto ERROR_PROVE_DEVICE;
    }
    s_jpu_major = MAJOR(s_jpu_devt);

    /* initialize the device structure and register the device with the kernel */
    cdev_init(&s_jpu_cdev, &jpu_fops);
    if ((cdev_add(&s_jpu_cdev, s_jpu_devt, 1)) < 0) {
        err = -EBUSY;
        printk(KERN_ERR "could not allocate chrdev\n");
        goto ERROR_PROVE_DEVICE;
    }

    s_jpu_class = class_create(THIS_MODULE, JPU_DEV_NAME);
    if (IS_ERR(s_jpu_class)) {
        dev_err(jpu_dev, "class create error.\n");
        goto ERROR_CRART_CLASS;
    }

    devices = device_create(s_jpu_class, 0, MKDEV(s_jpu_major, 0), NULL, JPU_DEV_NAME);
    if (IS_ERR(devices)) {
        dev_err(jpu_dev, "device create error.\n");
        goto ERROR_CREAT_DEVICE;
    }

    if (pdev)
        s_jpu_clk = jpu_clk_get(pdev);
    else
        s_jpu_clk = jpu_clk_get(NULL);

    if (!s_jpu_clk)
        printk(KERN_ERR "[JPUDRV] : clock controller is not supported.\n");
    else
        DPRINTK("[JPUDRV] : got clock controller s_jpu_clk=%p\n", s_jpu_clk);

#ifdef JPU_SUPPORT_CLOCK_CONTROL
#else
    jpu_pmu_enable(s_jpu_clk->dev);
#endif

#ifdef JPU_SUPPORT_ISR
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    if (pdev)
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res) { /* if the platform driver is implemented */
        s_jpu_irq = res->start;
        DPRINTK("[JPUDRV] : jpu irq number taken from platform driver irq=0x%x\n", s_jpu_irq);
    } else {
        DPRINTK("[JPUDRV] : jpu irq number taken from the defined value irq=0x%x\n", s_jpu_irq);
    }
#else
    DPRINTK("[JPUDRV] : jpu irq number taken from the defined value irq=0x%x\n", s_jpu_irq);
#endif
    err = request_irq(s_jpu_irq, jpu_irq_handler, 0, "JPU_CODEC_IRQ", (void *)(&s_jpu_drv_context));
    if (err) {
        printk(KERN_ERR "[JPUDRV] : failed to register interrupt handler\n");
        goto ERROR_PROVE_DEVICE;
    }
#endif

#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
    node = of_parse_phandle(jpu_dev->of_node, "memory-region", 0);
    if (node) {
        dev_info(jpu_dev, "Get mem from memory-region\n");
        of_address_to_resource(node, 0, &res_cma);
        s_video_memory.size = resource_size(&res_cma);
        s_video_memory.phys_addr = res_cma.start;
    } else {
        dev_info(jpu_dev, "Getting mem from reserved memory failed. Please check the dts file.\n");
        return 0;
    }

    s_video_memory.base = (unsigned long)ioremap(MEM2SYS(s_video_memory.phys_addr),
                                                 PAGE_ALIGN(s_video_memory.size));
    if (!s_video_memory.base) {
        printk(KERN_ERR "[JPUDRV] : failed to remap video memory, physical phys_addr=0x%lx, base=0x%lx, size=%d\n",
               MEM2SYS(s_video_memory.phys_addr), s_video_memory.base, s_video_memory.size);
        goto ERROR_PROVE_DEVICE;
    }

    if (jmem_init(&s_jmem, s_video_memory.phys_addr, s_video_memory.size) < 0) {
        printk(KERN_ERR "[JPUDRV] : failed to init vmem system\n");
        goto ERROR_PROVE_DEVICE;
    }
    DPRINTK("[JPUDRV] successfully probed jpu device with reserved video memory phys_addr=0x%lx, base=0x%lx\n",
            s_video_memory.phys_addr, s_video_memory.base);
#else
    DPRINTK("[JPUDRV] successfully probed jpu device with non-reserved video memory\n");
#endif

    return 0;

ERROR_CREAT_DEVICE:
    class_destroy(s_jpu_class);
ERROR_CRART_CLASS:
    cdev_del(&s_jpu_cdev);
ERROR_PROVE_DEVICE:
    if (s_jpu_major)
        unregister_chrdev_region(s_jpu_devt, 1);
    if (s_jpu_register.virt_addr)
        iounmap((void *)s_jpu_register.virt_addr);
    return err;
}
static int jpu_remove(struct platform_device *pdev)
{
    DPRINTK("[JPUDRV] jpu_remove\n");
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    if (s_instance_pool.base) {
        vfree((const void *)s_instance_pool.base);
        s_instance_pool.base = 0;
    }
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
    if (s_video_memory.base) {
        iounmap((void *)s_video_memory.base);
        s_video_memory.base = 0;
        jmem_exit(&s_jmem);
    }
#endif
    if (s_jpu_major > 0) {
        device_destroy(s_jpu_class, MKDEV(s_jpu_major, 0));
        class_destroy(s_jpu_class);
        cdev_del(&s_jpu_cdev);
        unregister_chrdev_region(s_jpu_devt, 1);
        s_jpu_major = 0;
    }
#ifdef JPU_SUPPORT_ISR
    if (s_jpu_irq)
        free_irq(s_jpu_irq, &s_jpu_drv_context);
#endif
    if (s_jpu_register.virt_addr)
        iounmap((void *)s_jpu_register.virt_addr);

    jpu_pmu_disable(s_jpu_clk->dev);
    jpu_clk_put(s_jpu_clk);
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
    return 0;
}
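/* Legacy platform-bus PM callbacks: suspend simply gates the JPU clocks and resume re-enables them. */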
//#ifdef CONFIG_PM
#if 1
static int jpu_suspend(struct platform_device *pdev, pm_message_t state)
{
    jpu_clk_disable(s_jpu_clk);
    return 0;
}

static int jpu_resume(struct platform_device *pdev)
{
    jpu_clk_enable(s_jpu_clk);
    return 0;
}
#else
#define jpu_suspend NULL
#define jpu_resume  NULL
#endif /* !CONFIG_PM */

#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
static const struct of_device_id jpu_of_id_table[] = {
    { .compatible = "cm,codaj12-jpu-1" },
    { .compatible = "starfive,jpu" },
    {}
};
MODULE_DEVICE_TABLE(of, jpu_of_id_table);

static struct platform_driver jpu_driver = {
    .driver = {
        .name = JPU_PLATFORM_DEVICE_NAME,
        .of_match_table = of_match_ptr(jpu_of_id_table),
    },
    .probe = jpu_probe,
    .remove = jpu_remove,
    .suspend = jpu_suspend,
    .resume = jpu_resume,
};
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
static int __init jpu_init(void)
{
    int res = 0;
    u32 i;

    DPRINTK("[JPUDRV] begin jpu_init\n");
    for (i = 0; i < MAX_NUM_INSTANCE; i++)
        init_waitqueue_head(&s_interrupt_wait_q[i]);

    s_instance_pool.base = 0;
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    res = platform_driver_register(&jpu_driver);
#else
    res = jpu_probe(NULL);
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
    DPRINTK("[JPUDRV] end jpu_init result=0x%x\n", res);
    return res;
}

static void __exit jpu_exit(void)
{
    DPRINTK("[JPUDRV] [+]jpu_exit\n");
#ifdef JPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    platform_driver_unregister(&jpu_driver);
#else /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
#ifdef JPU_SUPPORT_CLOCK_CONTROL
#else
    jpu_clk_disable(s_jpu_clk);
#endif /* JPU_SUPPORT_CLOCK_CONTROL */
    jpu_clk_put(s_jpu_clk);

    if (s_instance_pool.base) {
        vfree((const void *)s_instance_pool.base);
        s_instance_pool.base = 0;
    }
#ifdef JPU_SUPPORT_RESERVED_VIDEO_MEMORY
    if (s_video_memory.base) {
        iounmap((void *)s_video_memory.base);
        s_video_memory.base = 0;
        jmem_exit(&s_jmem);
    }
#endif /* JPU_SUPPORT_RESERVED_VIDEO_MEMORY */
    if (s_jpu_major > 0) {
        device_destroy(s_jpu_class, MKDEV(s_jpu_major, 0));
        class_destroy(s_jpu_class);
        cdev_del(&s_jpu_cdev);
        unregister_chrdev_region(s_jpu_devt, 1);
        s_jpu_major = 0;
    }
#ifdef JPU_SUPPORT_ISR
    if (s_jpu_irq)
        free_irq(s_jpu_irq, &s_jpu_drv_context);
#endif /* JPU_SUPPORT_ISR */
    if (s_jpu_register.virt_addr) {
        iounmap((void *)s_jpu_register.virt_addr);
        s_jpu_register.virt_addr = 0x00;
    }
#endif /* JPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
    DPRINTK("[JPUDRV] [-]jpu_exit\n");
    return;
}

MODULE_AUTHOR("A customer using C&M JPU, Inc.");
MODULE_DESCRIPTION("JPU linux driver");
MODULE_LICENSE("GPL");

module_init(jpu_init);
module_exit(jpu_exit);
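/*
 * Runtime-PM helpers: the power domain is held with pm_runtime_get_sync()
 * for as long as the clock is enabled and released again in
 * jpu_pmu_disable().
 */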
static int jpu_pmu_enable(struct device *dev)
{
    int ret;

    pm_runtime_enable(dev);
    ret = pm_runtime_get_sync(dev);
    if (ret < 0)
        dev_err(dev, "failed to get pm runtime: %d\n", ret);

    return ret;
}

static void jpu_pmu_disable(struct device *dev)
{
    pm_runtime_put_sync(dev);
    pm_runtime_disable(dev);
}
#ifndef STARFIVE_JPU_SUPPORT_CLOCK_CONTROL
#define CLK_ENABLE_DATA                 1
#define CLK_DISABLE_DATA                0
#define CLK_EN_SHIFT                    31
#define CLK_EN_MASK                     0x80000000U

#define SAIF_BD_APBS_BASE               0x13020000
#define CODAJ12_CLK_AXI_CTRL            0x108U
#define CODAJ12_CLK_APB_CTRL            0x110U
#define CODAJ12_CLK_CORE_CTRL           0x10cU

#define RSTGEN_SOFTWARE_RESET_ASSERT1   0x2FCU
#define RSTGEN_SOFTWARE_RESET_STATUS1   0x30CU
#define RSTN_AXI_MASK                   (0x1 << 12)
#define RSTN_CORE_MASK                  (0x1 << 13)
#define RSTN_APB_MASK                   (0x1 << 14)

static __maybe_unused uint32_t saif_get_reg(const volatile void __iomem *addr,
                                            uint32_t shift, uint32_t mask)
{
    u32 tmp;

    tmp = readl(addr);
    tmp = (tmp & mask) >> shift;
    return tmp;
}

static void saif_set_reg(volatile void __iomem *addr, uint32_t data,
                         uint32_t shift, uint32_t mask)
{
    uint32_t tmp;

    tmp = readl(addr);
    tmp &= ~mask;
    tmp |= (data << shift) & mask;
    writel(tmp, addr);
}

static void saif_assert_rst(volatile void __iomem *addr,
                            const volatile void __iomem *addr_status, uint32_t mask)
{
    uint32_t tmp;

    tmp = readl(addr);
    tmp |= mask;
    writel(tmp, addr);
    do {
        tmp = readl(addr_status);
    } while ((tmp & mask) != 0);
}

static void saif_clear_rst(volatile void __iomem *addr,
                           const volatile void __iomem *addr_status, uint32_t mask)
{
    uint32_t tmp;

    tmp = readl(addr);
    tmp &= ~mask;
    writel(tmp, addr);
    do {
        tmp = readl(addr_status);
    } while ((tmp & mask) != mask);
}
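/*
 * Legacy (non-clk-framework) gating sequence: enable the APB/AXI/core
 * clocks first, then de-assert the matching resets; the disable path does
 * the reverse, asserting the resets before gating the clocks.
 */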
static void jpu_clk_control(jpu_clk_t *clk, bool enable)
{
    if (enable) {
        /* enable the clocks */
        saif_set_reg(clk->apb_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);
        saif_set_reg(clk->axi_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);
        saif_set_reg(clk->core_clk.en_ctrl, CLK_ENABLE_DATA, clk->en_shift, clk->en_mask);

        /* clear the resets */
        saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
        saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
        saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
    } else {
        /* assert the resets */
        saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
        saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
        saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);

        /* disable the clocks */
        saif_set_reg(clk->apb_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
        saif_set_reg(clk->axi_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
        saif_set_reg(clk->core_clk.en_ctrl, CLK_DISABLE_DATA, clk->en_shift, clk->en_mask);
    }
}

static void jpu_clk_reset(jpu_clk_t *clk)
{
    /* assert the resets */
    saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
    saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
    saif_assert_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);

    /* clear the resets */
    saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->apb_clk.rst_mask);
    saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->axi_clk.rst_mask);
    saif_clear_rst(clk->rst_ctrl, clk->rst_status, clk->core_clk.rst_mask);
}

int jpu_hw_reset(void)
{
    if (!s_jpu_clk)
        return -1;

    jpu_clk_reset(s_jpu_clk);
    DPRINTK("[JPUDRV] reset jpu hardware.\n");
    return 0;
}
static int jpu_of_clk_get(struct platform_device *pdev, jpu_clk_t *jpu_clk)
{
    if (!pdev)
        return -ENXIO;

    jpu_clk->clkgen = ioremap(SAIF_BD_APBS_BASE, 0x400);
    if (IS_ERR(jpu_clk->clkgen)) {
        dev_err(&pdev->dev, "ioremap clkgen failed.\n");
        return PTR_ERR(jpu_clk->clkgen);
    }

    /* clkgen registers */
    jpu_clk->axi_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_AXI_CTRL;
    jpu_clk->apb_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_APB_CTRL;
    jpu_clk->core_clk.en_ctrl = jpu_clk->clkgen + CODAJ12_CLK_CORE_CTRL;
    jpu_clk->en_mask = CLK_EN_MASK;
    jpu_clk->en_shift = CLK_EN_SHIFT;

    /* rstgen registers */
    jpu_clk->rst_ctrl = jpu_clk->clkgen + RSTGEN_SOFTWARE_RESET_ASSERT1;
    jpu_clk->rst_status = jpu_clk->clkgen + RSTGEN_SOFTWARE_RESET_STATUS1;
    jpu_clk->axi_clk.rst_mask = RSTN_AXI_MASK;
    jpu_clk->apb_clk.rst_mask = RSTN_APB_MASK;
    jpu_clk->core_clk.rst_mask = RSTN_CORE_MASK;

    return 0;
}

static jpu_clk_t *jpu_clk_get(struct platform_device *pdev)
{
    jpu_clk_t *jpu_clk;

    jpu_clk = devm_kzalloc(&pdev->dev, sizeof(*jpu_clk), GFP_KERNEL);
    if (!jpu_clk)
        return NULL;

    if (jpu_of_clk_get(pdev, jpu_clk))
        goto err_get_clk;

    return jpu_clk;

err_get_clk:
    devm_kfree(&pdev->dev, jpu_clk);
    return NULL;
}

static void jpu_clk_put(jpu_clk_t *clk)
{
    if (clk->clkgen) {
        iounmap(clk->clkgen);
        clk->clkgen = NULL;
    }
}

static int jpu_clk_enable(jpu_clk_t *clk)
{
    if (clk == NULL || IS_ERR(clk))
        return -1;

    jpu_pmu_enable(clk->dev);
    jpu_clk_control(clk, true);
    DPRINTK("[JPUDRV] jpu_clk_enable\n");
    return 0;
}

static void jpu_clk_disable(jpu_clk_t *clk)
{
    if (clk == NULL || IS_ERR(clk))
        return;

    jpu_clk_control(clk, false);
    jpu_pmu_disable(clk->dev);
    DPRINTK("[JPUDRV] jpu_clk_disable\n");
}
#else /* STARFIVE_JPU_SUPPORT_CLOCK_CONTROL */
static int jpu_hw_reset(void)
{
    return reset_control_reset(s_jpu_clk->resets);
}

static int jpu_of_clk_get(struct platform_device *pdev, jpu_clk_t *jpu_clk)
{
    struct device *dev = &pdev->dev;
    int ret;

    jpu_clk->dev = dev;
    jpu_clk->clks = jpu_clks;
    jpu_clk->nr_clks = ARRAY_SIZE(jpu_clks);

    jpu_clk->resets = devm_reset_control_array_get_shared(dev);
    if (IS_ERR(jpu_clk->resets)) {
        ret = PTR_ERR(jpu_clk->resets);
        dev_err(dev, "failed to get jpu reset controls\n");
    }

    ret = devm_clk_bulk_get(dev, jpu_clk->nr_clks, jpu_clk->clks);
    if (ret)
        dev_err(dev, "failed to get jpu clk controls\n");

    return 0;
}

static jpu_clk_t *jpu_clk_get(struct platform_device *pdev)
{
    jpu_clk_t *jpu_clk;

    if (!pdev)
        return NULL;

    jpu_clk = devm_kzalloc(&pdev->dev, sizeof(*jpu_clk), GFP_KERNEL);
    if (!jpu_clk)
        return NULL;

    if (jpu_of_clk_get(pdev, jpu_clk))
        goto err_of_clk_get;

    return jpu_clk;

err_of_clk_get:
    devm_kfree(&pdev->dev, jpu_clk);
    return NULL;
}

static void jpu_clk_put(jpu_clk_t *clk)
{
    clk_bulk_put(clk->nr_clks, clk->clks);
}

static int jpu_clk_enable(jpu_clk_t *clk)
{
    int ret;

    ret = clk_bulk_prepare_enable(clk->nr_clks, clk->clks);
    if (ret)
        dev_err(clk->dev, "enable clk error.\n");

    ret = reset_control_deassert(clk->resets);
    if (ret)
        dev_err(clk->dev, "deassert jpu error.\n");

    DPRINTK("[JPUDRV] jpu_clk_enable\n");
    return ret;
}

static void jpu_clk_disable(jpu_clk_t *clk)
{
    int ret;

    ret = reset_control_assert(clk->resets);
    if (ret)
        dev_err(clk->dev, "assert jpu error.\n");

    clk_bulk_disable_unprepare(clk->nr_clks, clk->clks);
}
#endif /* STARFIVE_JPU_SUPPORT_CLOCK_CONTROL */