#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <soc/sifive/sifive_l2_cache.h>
#include "../../../vpuapi/vpuconfig.h"
#include "vpu.h"
#include "venc-starfive.h"
#include <linux/of_device.h>
#include <linux/of_irq.h>

#define starfive_flush_dcache(start, len) \
    sifive_l2_flush64_range(start, len)

//#define ENABLE_DEBUG_MSG
#ifdef ENABLE_DEBUG_MSG
#define DPRINTK(args...) printk(KERN_INFO args);
#else
#define DPRINTK(args...)
#endif
/* definitions to be changed as customer configuration */
/* if the Linux version is 5.15 or later, the clock and reset framework can be used */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
#define VPU_SUPPORT_CLOCK_CONTROL
#endif

/* if the driver wants to use the interrupt service from the kernel ISR */
#define VPU_SUPPORT_ISR
#ifdef VPU_SUPPORT_ISR
/* if the driver wants to disable and enable the IRQ whenever an interrupt is asserted */
//#define VPU_IRQ_CONTROL
#endif

/* if the platform driver knows the name of this driver */
/* VPU_PLATFORM_DEVICE_NAME */
#define VPU_SUPPORT_PLATFORM_DRIVER_REGISTER

/* if this driver knows the dedicated video memory address */
//#define VPU_SUPPORT_RESERVED_VIDEO_MEMORY

#define VPU_PLATFORM_DEVICE_NAME "venc"
#define VPU_DEV_NAME "venc"

/* if the platform driver knows this driver, */
/* the definitions of VPU_REG_BASE_ADDR and VPU_REG_SIZE are not meaningful */
#define VPU_REG_BASE_ADDR 0x118E0000
#define VPU_REG_SIZE (0x4000*MAX_NUM_VPU_CORE)
#define VENC_IRQ_ADDR 0x18

#ifdef VPU_SUPPORT_ISR
#define VPU_IRQ_NUM (26)
#endif

/* this definition is only for the Chips&Media FPGA board environment, */
/* so it can be ignored in customers' SoC environments */
#ifndef VM_RESERVED /* for kernels up to version 3.7.0 */
# define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
struct device *vpu_dev;

typedef struct vpu_drv_context_t {
    struct fasync_struct *async_queue;
#ifdef SUPPORT_MULTI_INST_INTR
    unsigned long interrupt_reason[MAX_NUM_INSTANCE];
#else
    unsigned long interrupt_reason;
#endif
    u32 open_count; /*!<< device reference count. Not instance count */
} vpu_drv_context_t;

/* To track the allocated memory buffer */
typedef struct vpudrv_buffer_pool_t {
    struct list_head list;
    struct vpudrv_buffer_t vb;
    struct file *filp;
} vpudrv_buffer_pool_t;

/* To track the instance index and buffer in instance pool */
typedef struct vpudrv_instanace_list_t {
    struct list_head list;
    unsigned long inst_idx;
    unsigned long core_idx;
    struct file *filp;
} vpudrv_instanace_list_t;

typedef struct vpudrv_instance_pool_t {
    unsigned char codecInstPool[MAX_NUM_INSTANCE][MAX_INST_HANDLE_SIZE];
} vpudrv_instance_pool_t;

#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
#include "vmm.h"
static video_mm_t s_vmem;
static vpudrv_buffer_t s_video_memory = {0};
#endif /*VPU_SUPPORT_RESERVED_VIDEO_MEMORY*/

static int vpu_hw_reset(void);
static void vpu_clk_disable(void);
static int vpu_clk_enable(void);
/* end customer definition */

static vpudrv_buffer_t s_instance_pool = {0};
static vpudrv_buffer_t s_common_memory = {0};
static vpu_drv_context_t s_vpu_drv_context;
static int s_vpu_major;
static struct cdev s_vpu_cdev;
static int s_vpu_open_ref_count;
#ifdef VPU_SUPPORT_ISR
static int s_vpu_irq = VPU_IRQ_NUM;
#endif
static vpudrv_buffer_t s_vpu_register = {0};

#ifdef SUPPORT_MULTI_INST_INTR
static int s_interrupt_flag[MAX_NUM_INSTANCE];
static wait_queue_head_t s_interrupt_wait_q[MAX_NUM_INSTANCE];
typedef struct kfifo kfifo_t;
static kfifo_t s_interrupt_pending_q[MAX_NUM_INSTANCE];
static spinlock_t s_kfifo_lock = __SPIN_LOCK_UNLOCKED(s_kfifo_lock);
#else
static int s_interrupt_flag;
static wait_queue_head_t s_interrupt_wait_q;
#endif

static spinlock_t s_vpu_lock = __SPIN_LOCK_UNLOCKED(s_vpu_lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
static DECLARE_MUTEX(s_vpu_sem);
#else
static DEFINE_SEMAPHORE(s_vpu_sem);
#endif
static struct list_head s_vbp_head = LIST_HEAD_INIT(s_vbp_head);
static struct list_head s_inst_list_head = LIST_HEAD_INIT(s_inst_list_head);
static vpu_bit_firmware_info_t s_bit_firmware_info[MAX_NUM_VPU_CORE];
#ifdef CONFIG_PM
/* register definitions used to implement the power management functions */
#define BIT_BASE 0x0000
#define BIT_CODE_RUN (BIT_BASE + 0x000)
#define BIT_CODE_DOWN (BIT_BASE + 0x004)
#define BIT_INT_CLEAR (BIT_BASE + 0x00C)
#define BIT_INT_STS (BIT_BASE + 0x010)
#define BIT_CODE_RESET (BIT_BASE + 0x014)
#define BIT_INT_REASON (BIT_BASE + 0x174)
#define BIT_BUSY_FLAG (BIT_BASE + 0x160)
#define BIT_RUN_COMMAND (BIT_BASE + 0x164)
#define BIT_RUN_INDEX (BIT_BASE + 0x168)
#define BIT_RUN_COD_STD (BIT_BASE + 0x16C)

/* WAVE5 registers */
#define W5_REG_BASE 0x0000
#define W5_VPU_BUSY_STATUS (W5_REG_BASE + 0x0070)
#define W5_VPU_INT_REASON_CLEAR (W5_REG_BASE + 0x0034)
#define W5_VPU_VINT_CLEAR (W5_REG_BASE + 0x003C)
#define W5_VPU_VPU_INT_STS (W5_REG_BASE + 0x0044)
#define W5_VPU_INT_REASON (W5_REG_BASE + 0x004c)
#define W5_RET_FAIL_REASON (W5_REG_BASE + 0x010C)

#ifdef SUPPORT_MULTI_INST_INTR
#define W5_RET_BS_EMPTY_INST (W5_REG_BASE + 0x01E4)
#define W5_RET_QUEUE_CMD_DONE_INST (W5_REG_BASE + 0x01E8)
#define W5_RET_SEQ_DONE_INSTANCE_INFO (W5_REG_BASE + 0x01FC)
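/*
 * Interrupt reason bits reported in W5_VPU_INT_REASON. Note that the
 * hardware shares bit 8 between DEC_PIC and ENC_PIC and bit 15 between
 * BSBUF_EMPTY and BSBUF_FULL, so the same bit is decoded differently
 * depending on whether the instance is decoding or encoding.
 */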
typedef enum {
    INT_WAVE5_INIT_VPU = 0,
    INT_WAVE5_WAKEUP_VPU = 1,
    INT_WAVE5_SLEEP_VPU = 2,
    INT_WAVE5_CREATE_INSTANCE = 3,
    INT_WAVE5_FLUSH_INSTANCE = 4,
    INT_WAVE5_DESTORY_INSTANCE = 5,
    INT_WAVE5_INIT_SEQ = 6,
    INT_WAVE5_SET_FRAMEBUF = 7,
    INT_WAVE5_DEC_PIC = 8,
    INT_WAVE5_ENC_PIC = 8,
    INT_WAVE5_ENC_SET_PARAM = 9,
#ifdef SUPPORT_SOURCE_RELEASE_INTERRUPT
    INT_WAVE5_ENC_SRC_RELEASE = 10,
#endif
    INT_WAVE5_ENC_LOW_LATENCY = 13,
    INT_WAVE5_DEC_QUERY = 14,
    INT_WAVE5_BSBUF_EMPTY = 15,
    INT_WAVE5_BSBUF_FULL = 15,
} Wave5InterruptBit;
#endif /* SUPPORT_MULTI_INST_INTR */

/* WAVE5 INIT, WAKEUP */
#define W5_PO_CONF (W5_REG_BASE + 0x0000)
#define W5_VPU_VINT_ENABLE (W5_REG_BASE + 0x0048)
#define W5_VPU_RESET_REQ (W5_REG_BASE + 0x0050)
#define W5_VPU_RESET_STATUS (W5_REG_BASE + 0x0054)
#define W5_VPU_REMAP_CTRL (W5_REG_BASE + 0x0060)
#define W5_VPU_REMAP_VADDR (W5_REG_BASE + 0x0064)
#define W5_VPU_REMAP_PADDR (W5_REG_BASE + 0x0068)
#define W5_VPU_REMAP_CORE_START (W5_REG_BASE + 0x006C)
#define W5_REMAP_CODE_INDEX 0

/* WAVE5 registers */
#define W5_ADDR_CODE_BASE (W5_REG_BASE + 0x0110)
#define W5_CODE_SIZE (W5_REG_BASE + 0x0114)
#define W5_CODE_PARAM (W5_REG_BASE + 0x0118)
#define W5_INIT_VPU_TIME_OUT_CNT (W5_REG_BASE + 0x0130)
#define W5_HW_OPTION (W5_REG_BASE + 0x012C)
#define W5_RET_SUCCESS (W5_REG_BASE + 0x0108)
#define W5_COMMAND (W5_REG_BASE + 0x0100)
#define W5_VPU_HOST_INT_REQ (W5_REG_BASE + 0x0038)

/* Product register */
#define VPU_PRODUCT_CODE_REGISTER (BIT_BASE + 0x1044)

#if defined(VPU_SUPPORT_PLATFORM_DRIVER_REGISTER) && defined(CONFIG_PM)
static u32 s_vpu_reg_store[MAX_NUM_VPU_CORE][64];
#endif
#endif /* CONFIG_PM */
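/*
 * Register accessors. These macros dereference the ioremap()ed register
 * window directly and implicitly rely on a local variable named 'core'
 * being in scope at the call site, since each core's registers live at
 * reg_base_offset within the shared mapping.
 */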
#define ReadVpuRegister(addr) *(volatile unsigned int *)(s_vpu_register.virt_addr + s_bit_firmware_info[core].reg_base_offset + addr)
#define WriteVpuRegister(addr, val) *(volatile unsigned int *)(s_vpu_register.virt_addr + s_bit_firmware_info[core].reg_base_offset + addr) = (unsigned int)val
#define WriteVpu(addr, val) *(volatile unsigned int *)(addr) = (unsigned int)val;
#define vic_readl(addr) readl((void __iomem *)addr)
#define vic_writel(val,addr) writel(val,(void __iomem *)addr)

#define rstgen_Software_RESET_BASE_REG_ADDR 0x11840000
#define rstgen_Software_RESET_assert0_OFFSET (0x0)
#define rstgen_Software_RESET_status0_OFFSET (0x10)
#define NBIT_RSTN_VENC_BRG_MAIN 26
#define NBIT_RSTN_VENC_AXI 25
#define NBIT_RSTN_VENC_BCLK 27
#define NBIT_RSTN_VENC_CCLK 28
#define NBIT_RSTN_VENC_APB 29

#define clk_BASE_REG_ADDR 0x11800000
#define clk_venc_axi_ctrl_REG_OFFSET (0xe4)
#define clk_vencbrg_mainclk_ctrl_REG_OFFSET (0xe8)
#define clk_venc_bclk_ctrl_REG_OFFSET (0xec)
#define clk_venc_cclk_ctrl_REG_OFFSET (0xf0)
#define clk_venc_apb_ctrl_REG_OFFSET (0xf4)
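/*
 * Two build-time allocation paths: carve buffers out of the reserved
 * video memory pool (VPU_SUPPORT_RESERVED_VIDEO_MEMORY) or use
 * dma_alloc_coherent(). The L2 range flush after the coherent
 * allocation is presumably there to make the freshly allocated region
 * consistent for the encoder, which does not snoop the CPU caches on
 * this SoC.
 */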
static int vpu_alloc_dma_buffer(vpudrv_buffer_t *vb)
{
    if (!vb)
        return -1;
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    vb->phys_addr = (unsigned long)vmem_alloc(&s_vmem, vb->size, 0);
    if ((unsigned long)vb->phys_addr == (unsigned long)-1) {
        printk(KERN_ERR "[VPUDRV] Physical memory allocation error size=%d\n", vb->size);
        return -1;
    }
    vb->base = (unsigned long)(s_video_memory.base + (vb->phys_addr - s_video_memory.phys_addr));
#else
    vb->base = (unsigned long)dma_alloc_coherent(vpu_dev, PAGE_ALIGN(vb->size), (dma_addr_t *) (&vb->phys_addr), GFP_DMA | GFP_KERNEL);
    if ((void *)(vb->base) == NULL) {
        printk(KERN_ERR "[VPUDRV] Physical memory allocation error size=%d\n", vb->size);
        return -1;
    }
    starfive_flush_dcache(vb->phys_addr, PAGE_ALIGN(vb->size));
#endif
    return 0;
}

static void vpu_free_dma_buffer(vpudrv_buffer_t *vb)
{
    if (!vb)
        return;
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    if (vb->base)
        vmem_free(&s_vmem, vb->phys_addr, 0);
#else
    if (vb->base)
        dma_free_coherent(vpu_dev, PAGE_ALIGN(vb->size), (void *)vb->base, vb->phys_addr);
#endif
}
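/*
 * Clean up instances that a crashed or exiting process left behind:
 * clear the inUse word of each owned CodecInst in the shared instance
 * pool and stamp the vdi mutexes with a "destroyed" marker so that
 * surviving user-space processes do not deadlock on a mutex held by
 * the dead owner.
 */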
static int vpu_free_instances(struct file *filp)
{
    vpudrv_instanace_list_t *vil, *n;
    vpudrv_instance_pool_t *vip;
    void *vip_base;
    int instance_pool_size_per_core;
    void *vdi_mutexes_base;
    const int PTHREAD_MUTEX_T_DESTROY_VALUE = 0xdead10cc;

    DPRINTK("[VPUDRV] vpu_free_instances\n");
    /* s_instance_pool.size is assigned the size for all cores when user space calls VDI_IOCTL_GET_INSTANCE_POOL. */
    instance_pool_size_per_core = (s_instance_pool.size/MAX_NUM_VPU_CORE);
    list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
    {
        if (vil->filp == filp) {
            vip_base = (void *)(s_instance_pool.base + (instance_pool_size_per_core*vil->core_idx));
            DPRINTK("[VPUDRV] vpu_free_instances detect instance crash instIdx=%d, coreIdx=%d, vip_base=%p, instance_pool_size_per_core=%d\n", (int)vil->inst_idx, (int)vil->core_idx, vip_base, (int)instance_pool_size_per_core);
            vip = (vpudrv_instance_pool_t *)vip_base;
            if (vip) {
                /* only the first 4 bytes (inUse of CodecInst in vpuapi) matter for freeing the corresponding instance. */
                memset(&vip->codecInstPool[vil->inst_idx], 0x00, 4);
#define PTHREAD_MUTEX_T_HANDLE_SIZE 4
                vdi_mutexes_base = (vip_base + (instance_pool_size_per_core - PTHREAD_MUTEX_T_HANDLE_SIZE*4));
                DPRINTK("[VPUDRV] vpu_free_instances : force to destroy vdi_mutexes_base=%p in userspace \n", vdi_mutexes_base);
                if (vdi_mutexes_base) {
                    int i;
                    for (i = 0; i < 4; i++) {
                        memcpy(vdi_mutexes_base, &PTHREAD_MUTEX_T_DESTROY_VALUE, PTHREAD_MUTEX_T_HANDLE_SIZE);
                        vdi_mutexes_base += PTHREAD_MUTEX_T_HANDLE_SIZE;
                    }
                }
            }
            s_vpu_open_ref_count--;
            list_del(&vil->list);
            kfree(vil);
        }
    }
    return 1;
}

static int vpu_free_buffers(struct file *filp)
{
    vpudrv_buffer_pool_t *pool, *n;
    vpudrv_buffer_t vb;

    DPRINTK("[VPUDRV] vpu_free_buffers\n");
    list_for_each_entry_safe(pool, n, &s_vbp_head, list)
    {
        if (pool->filp == filp) {
            vb = pool->vb;
            if (vb.base) {
                vpu_free_dma_buffer(&vb);
                list_del(&pool->list);
                kfree(pool);
            }
        }
    }
    return 0;
}

#ifdef SUPPORT_MULTI_INST_INTR
static inline u32 get_inst_idx(u32 reg_val)
{
    u32 inst_idx;
    int i;
    /* return the index of the lowest set bit */
    for (i = 0; i < MAX_NUM_INSTANCE; i++)
    {
        if (((reg_val >> i) & 0x01) == 1)
            break;
    }
    inst_idx = i;
    return inst_idx;
}
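/*
 * Map an interrupt to the instance that raised it. The W5 status
 * registers carry one bit per instance; the reasons are tested in a
 * fixed priority order (BSBUF_EMPTY, INIT_SEQ, DEC/ENC_PIC, ...) and
 * *reason is narrowed to the single reason that was matched.
 */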
static s32 get_vpu_inst_idx(vpu_drv_context_t *dev, u32 *reason, u32 empty_inst, u32 done_inst, u32 seq_inst)
{
    s32 inst_idx;
    u32 reg_val;
    u32 int_reason;

    int_reason = *reason;
    DPRINTK("[VPUDRV][+]%s, int_reason=0x%x, empty_inst=0x%x, done_inst=0x%x\n", __func__, int_reason, empty_inst, done_inst);
    //printk(KERN_ERR "[VPUDRV][+]%s, int_reason=0x%x, empty_inst=0x%x, done_inst=0x%x\n", __func__, int_reason, empty_inst, done_inst);
    if (int_reason & (1 << INT_WAVE5_BSBUF_EMPTY))
    {
        reg_val = (empty_inst & 0xffff);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_BSBUF_EMPTY);
        DPRINTK("[VPUDRV] %s, W5_RET_BS_EMPTY_INST reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        goto GET_VPU_INST_IDX_HANDLED;
    }
    if (int_reason & (1 << INT_WAVE5_INIT_SEQ))
    {
        reg_val = (seq_inst & 0xffff);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_INIT_SEQ);
        DPRINTK("[VPUDRV] %s, RET_SEQ_DONE_INSTANCE_INFO INIT_SEQ reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        goto GET_VPU_INST_IDX_HANDLED;
    }
    if (int_reason & (1 << INT_WAVE5_DEC_PIC))
    {
        reg_val = (done_inst & 0xffff);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_DEC_PIC);
        DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST DEC_PIC reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        if (int_reason & (1 << INT_WAVE5_ENC_LOW_LATENCY))
        {
            u32 ll_inst_idx;
            reg_val = (done_inst >> 16);
            ll_inst_idx = get_inst_idx(reg_val);
            if (ll_inst_idx == inst_idx)
                *reason = ((1 << INT_WAVE5_DEC_PIC) | (1 << INT_WAVE5_ENC_LOW_LATENCY));
            DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST DEC_PIC and ENC_LOW_LATENCY reg_val=0x%x, inst_idx=%d, ll_inst_idx=%d\n", __func__, reg_val, inst_idx, ll_inst_idx);
        }
        goto GET_VPU_INST_IDX_HANDLED;
    }
    if (int_reason & (1 << INT_WAVE5_ENC_SET_PARAM))
    {
        reg_val = (seq_inst & 0xffff);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_ENC_SET_PARAM);
        DPRINTK("[VPUDRV] %s, RET_SEQ_DONE_INSTANCE_INFO ENC_SET_PARAM reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        goto GET_VPU_INST_IDX_HANDLED;
    }
#ifdef SUPPORT_SOURCE_RELEASE_INTERRUPT
    if (int_reason & (1 << INT_WAVE5_ENC_SRC_RELEASE))
    {
        reg_val = (done_inst & 0xffff);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_ENC_SRC_RELEASE);
        DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST ENC_SRC_RELEASE reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        goto GET_VPU_INST_IDX_HANDLED;
    }
#endif
    if (int_reason & (1 << INT_WAVE5_ENC_LOW_LATENCY))
    {
        reg_val = (done_inst >> 16);
        inst_idx = get_inst_idx(reg_val);
        *reason = (1 << INT_WAVE5_ENC_LOW_LATENCY);
        DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST ENC_LOW_LATENCY reg_val=0x%x, inst_idx=%d\n", __func__, reg_val, inst_idx);
        goto GET_VPU_INST_IDX_HANDLED;
    }
    inst_idx = -1;
    *reason = 0;
    DPRINTK("[VPUDRV] %s, UNKNOWN INTERRUPT REASON: %08x\n", __func__, int_reason);
GET_VPU_INST_IDX_HANDLED:
    DPRINTK("[VPUDRV][-]%s, inst_idx=%d. *reason=0x%x\n", __func__, inst_idx, *reason);
    return inst_idx;
}
#endif
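/*
 * Top-half interrupt handler. For WAVE5 parts it drains all pending
 * per-instance reasons into the per-instance kfifos (multi-instance
 * build) or latches the reason in the driver context, acknowledges the
 * interrupt, and then wakes any waiter blocked in
 * VDI_IOCTL_WAIT_INTERRUPT.
 */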
static irqreturn_t vpu_irq_handler(int irq, void *dev_id)
{
    vpu_drv_context_t *dev = (vpu_drv_context_t *)dev_id;
    /* this check can be removed; it also works in the VPU_WaitInterrupt API function */
    int core;
    int product_code;
#ifdef SUPPORT_MULTI_INST_INTR
    u32 intr_reason;
    s32 intr_inst_index;
#endif
    DPRINTK("[VPUDRV][+]%s\n", __func__);
#ifdef VPU_IRQ_CONTROL
    disable_irq_nosync(s_vpu_irq);
#endif
#ifdef SUPPORT_MULTI_INST_INTR
    intr_inst_index = 0;
    intr_reason = 0;
#endif
    for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
        if (s_bit_firmware_info[core].size == 0) {
            /* the API layer has not provided firmware information for this core; no core is activated. */
            printk(KERN_ERR "[VPUDRV] : s_bit_firmware_info[core].size is zero\n");
            continue;
        }
        product_code = ReadVpuRegister(VPU_PRODUCT_CODE_REGISTER);
        if (PRODUCT_CODE_W_SERIES(product_code)) {
            if (ReadVpuRegister(W5_VPU_VPU_INT_STS)) {
#ifdef SUPPORT_MULTI_INST_INTR
                u32 empty_inst;
                u32 done_inst;
                u32 seq_inst;
                u32 i, reason, reason_clr;

                reason = ReadVpuRegister(W5_VPU_INT_REASON);
                empty_inst = ReadVpuRegister(W5_RET_BS_EMPTY_INST);
                done_inst = ReadVpuRegister(W5_RET_QUEUE_CMD_DONE_INST);
                seq_inst = ReadVpuRegister(W5_RET_SEQ_DONE_INSTANCE_INFO);
                reason_clr = reason;
                DPRINTK("[VPUDRV] vpu_irq_handler reason=0x%x, empty_inst=0x%x, done_inst=0x%x, seq_inst=0x%x \n", reason, empty_inst, done_inst, seq_inst);
                for (i = 0; i < MAX_NUM_INSTANCE; i++) {
                    if (0 == empty_inst && 0 == done_inst && 0 == seq_inst)
                        break;
                    intr_reason = reason;
                    intr_inst_index = get_vpu_inst_idx(dev, &intr_reason, empty_inst, done_inst, seq_inst);
                    DPRINTK("[VPUDRV] > instance_index: %d, intr_reason: %08x empty_inst: %08x done_inst: %08x seq_inst: %08x\n", intr_inst_index, intr_reason, empty_inst, done_inst, seq_inst);
                    if (intr_inst_index >= 0 && intr_inst_index < MAX_NUM_INSTANCE) {
                        if (intr_reason == (1 << INT_WAVE5_BSBUF_EMPTY)) {
                            empty_inst = empty_inst & ~(1 << intr_inst_index);
                            WriteVpuRegister(W5_RET_BS_EMPTY_INST, empty_inst);
                            if (0 == empty_inst) {
                                reason &= ~(1 << INT_WAVE5_BSBUF_EMPTY);
                            }
                            DPRINTK("[VPUDRV] %s, W5_RET_BS_EMPTY_INST Clear empty_inst=0x%x, intr_inst_index=%d\n", __func__, empty_inst, intr_inst_index);
                        }
                        if (intr_reason == (1 << INT_WAVE5_DEC_PIC))
                        {
                            done_inst = done_inst & ~(1 << intr_inst_index);
                            WriteVpuRegister(W5_RET_QUEUE_CMD_DONE_INST, done_inst);
                            if (0 == done_inst) {
                                reason &= ~(1 << INT_WAVE5_DEC_PIC);
                            }
                            DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST Clear done_inst=0x%x, intr_inst_index=%d\n", __func__, done_inst, intr_inst_index);
                        }
                        if ((intr_reason == (1 << INT_WAVE5_INIT_SEQ)) || (intr_reason == (1 << INT_WAVE5_ENC_SET_PARAM)))
                        {
                            seq_inst = seq_inst & ~(1 << intr_inst_index);
                            WriteVpuRegister(W5_RET_SEQ_DONE_INSTANCE_INFO, seq_inst);
                            if (0 == seq_inst) {
                                reason &= ~(1 << INT_WAVE5_INIT_SEQ | 1 << INT_WAVE5_ENC_SET_PARAM);
                            }
                            DPRINTK("[VPUDRV] %s, W5_RET_SEQ_DONE_INSTANCE_INFO Clear seq_inst=0x%x, intr_inst_index=%d\n", __func__, seq_inst, intr_inst_index);
                        }
                        if ((intr_reason == (1 << INT_WAVE5_ENC_LOW_LATENCY)))
                        {
                            done_inst = (done_inst >> 16);
                            done_inst = done_inst & ~(1 << intr_inst_index);
                            done_inst = (done_inst << 16);
                            WriteVpuRegister(W5_RET_QUEUE_CMD_DONE_INST, done_inst);
                            if (0 == done_inst) {
                                reason &= ~(1 << INT_WAVE5_ENC_LOW_LATENCY);
                            }
                            DPRINTK("[VPUDRV] %s, W5_RET_QUEUE_CMD_DONE_INST INT_WAVE5_ENC_LOW_LATENCY Clear done_inst=0x%x, intr_inst_index=%d\n", __func__, done_inst, intr_inst_index);
                        }
                        if (!kfifo_is_full(&s_interrupt_pending_q[intr_inst_index])) {
                            if (intr_reason == ((1 << INT_WAVE5_ENC_PIC) | (1 << INT_WAVE5_ENC_LOW_LATENCY))) {
                                u32 ll_intr_reason = (1 << INT_WAVE5_ENC_PIC);
                                kfifo_in_spinlocked(&s_interrupt_pending_q[intr_inst_index], &ll_intr_reason, sizeof(u32), &s_kfifo_lock);
                            }
                            else
                                kfifo_in_spinlocked(&s_interrupt_pending_q[intr_inst_index], &intr_reason, sizeof(u32), &s_kfifo_lock);
                        }
                        else {
                            printk(KERN_ERR "[VPUDRV] : kfifo_is_full kfifo_count=%d \n", kfifo_len(&s_interrupt_pending_q[intr_inst_index]));
                        }
                    }
                    else {
                        printk(KERN_ERR "[VPUDRV] : intr_inst_index is wrong intr_inst_index=%d \n", intr_inst_index);
                    }
                }
                if (0 != reason)
                    printk(KERN_ERR "INTERRUPT REASON REMAINED: %08x\n", reason);
                WriteVpuRegister(W5_VPU_INT_REASON_CLEAR, reason_clr);
#else
                dev->interrupt_reason = ReadVpuRegister(W5_VPU_INT_REASON);
                WriteVpuRegister(W5_VPU_INT_REASON_CLEAR, dev->interrupt_reason);
#endif
                WriteVpuRegister(W5_VPU_VINT_CLEAR, 0x1);
            }
        }
        else if (PRODUCT_CODE_NOT_W_SERIES(product_code)) {
            if (ReadVpuRegister(BIT_INT_STS)) {
#ifdef SUPPORT_MULTI_INST_INTR
                intr_reason = ReadVpuRegister(BIT_INT_REASON);
                intr_inst_index = 0; /* for the CODA series, intr_inst_index is always 0 */
                kfifo_in_spinlocked(&s_interrupt_pending_q[intr_inst_index], &intr_reason, sizeof(u32), &s_kfifo_lock);
#else
                dev->interrupt_reason = ReadVpuRegister(BIT_INT_REASON);
#endif
                WriteVpuRegister(BIT_INT_CLEAR, 0x1);
            }
        }
        else {
            DPRINTK("[VPUDRV] Unknown product id : %08x\n", product_code);
            continue;
        }
#ifdef SUPPORT_MULTI_INST_INTR
        DPRINTK("[VPUDRV] product: 0x%08x intr_reason: 0x%08x\n\n", product_code, intr_reason);
#else
        DPRINTK("[VPUDRV] product: 0x%08x intr_reason: 0x%08x\n", product_code, dev->interrupt_reason);
#endif
    }
    if (dev->async_queue)
        kill_fasync(&dev->async_queue, SIGIO, POLL_IN); /* notify the interrupt to user space */
#ifdef SUPPORT_MULTI_INST_INTR
    if (intr_inst_index >= 0 && intr_inst_index < MAX_NUM_INSTANCE) {
        s_interrupt_flag[intr_inst_index] = 1;
        wake_up_interruptible(&s_interrupt_wait_q[intr_inst_index]);
    }
#else
    s_interrupt_flag = 1;
    wake_up_interruptible(&s_interrupt_wait_q);
#endif
    DPRINTK("[VPUDRV][-]%s\n", __func__);
    return IRQ_HANDLED;
}
static int vpu_open(struct inode *inode, struct file *filp)
{
    DPRINTK("[VPUDRV][+] %s\n", __func__);
    spin_lock(&s_vpu_lock);
    s_vpu_drv_context.open_count++;
    filp->private_data = (void *)(&s_vpu_drv_context);
    spin_unlock(&s_vpu_lock);
    DPRINTK("[VPUDRV][-] %s\n", __func__);
    return 0;
}

/*static int vpu_ioctl(struct inode *inode, struct file *filp, u_int cmd, u_long arg) // for kernel 2.6.9 of C&M*/
static long vpu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
    int ret = 0;
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)filp->private_data;

    switch (cmd) {
    case VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY:
    {
        vpudrv_buffer_pool_t *vbp;

        DPRINTK("[VPUDRV][+]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
        if ((ret = down_interruptible(&s_vpu_sem)) == 0) {
            vbp = kzalloc(sizeof(*vbp), GFP_KERNEL);
            if (!vbp) {
                up(&s_vpu_sem);
                return -ENOMEM;
            }
            ret = copy_from_user(&(vbp->vb), (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
            if (ret) {
                kfree(vbp);
                up(&s_vpu_sem);
                return -EFAULT;
            }
            ret = vpu_alloc_dma_buffer(&(vbp->vb));
            if (ret == -1) {
                ret = -ENOMEM;
                kfree(vbp);
                up(&s_vpu_sem);
                break;
            }
            ret = copy_to_user((void __user *)arg, &(vbp->vb), sizeof(vpudrv_buffer_t));
            if (ret) {
                vpu_free_dma_buffer(&(vbp->vb)); /* free the just-allocated buffer; user space never saw its address */
                kfree(vbp);
                ret = -EFAULT;
                up(&s_vpu_sem);
                break;
            }
            vbp->filp = filp;
            spin_lock(&s_vpu_lock);
            list_add(&vbp->list, &s_vbp_head);
            spin_unlock(&s_vpu_lock);
            up(&s_vpu_sem);
        }
        DPRINTK("[VPUDRV][-]VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY\n");
    }
    break;
    case VDI_IOCTL_FREE_PHYSICALMEMORY:
    {
        vpudrv_buffer_pool_t *vbp, *n;
        vpudrv_buffer_t vb;

        DPRINTK("[VPUDRV][+]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
        if ((ret = down_interruptible(&s_vpu_sem)) == 0) {
            ret = copy_from_user(&vb, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
            if (ret) {
                up(&s_vpu_sem);
                return -EFAULT;
            }
            if (vb.base)
                vpu_free_dma_buffer(&vb);
            spin_lock(&s_vpu_lock);
            list_for_each_entry_safe(vbp, n, &s_vbp_head, list)
            {
                if (vbp->vb.base == vb.base) {
                    list_del(&vbp->list);
                    kfree(vbp);
                    break;
                }
            }
            spin_unlock(&s_vpu_lock);
            up(&s_vpu_sem);
        }
        DPRINTK("[VPUDRV][-]VDI_IOCTL_FREE_PHYSICALMEMORY\n");
    }
    break;
    case VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO:
    {
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
        DPRINTK("[VPUDRV][+]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
        if (s_video_memory.base != 0) {
            ret = copy_to_user((void __user *)arg, &s_video_memory, sizeof(vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = -EFAULT;
        }
        DPRINTK("[VPUDRV][-]VDI_IOCTL_GET_RESERVED_VIDEO_MEMORY_INFO\n");
#endif
    }
    break;
    case VDI_IOCTL_WAIT_INTERRUPT:
    {
        vpudrv_intr_info_t info;
#ifdef SUPPORT_MULTI_INST_INTR
        u32 intr_inst_index;
        u32 intr_reason_in_q;
        u32 interrupt_flag_in_q;
#endif
#ifdef SUPPORT_TIMEOUT_RESOLUTION
        ktime_t kt; /* declaration added; the original used 'kt' below without declaring it */
#endif
        DPRINTK("[VPUDRV][+]VDI_IOCTL_WAIT_INTERRUPT\n");
        ret = copy_from_user(&info, (vpudrv_intr_info_t *)arg, sizeof(vpudrv_intr_info_t));
        if (ret != 0)
        {
            return -EFAULT;
        }
#ifdef SUPPORT_MULTI_INST_INTR
        intr_inst_index = info.intr_inst_index;
        intr_reason_in_q = 0;
        interrupt_flag_in_q = kfifo_out_spinlocked(&s_interrupt_pending_q[intr_inst_index], &intr_reason_in_q, sizeof(u32), &s_kfifo_lock);
        if (interrupt_flag_in_q > 0)
        {
            dev->interrupt_reason[intr_inst_index] = intr_reason_in_q;
            DPRINTK("[VPUDRV] Interrupt Remain : intr_inst_index=%d, intr_reason_in_q=0x%x, interrupt_flag_in_q=%d\n", intr_inst_index, intr_reason_in_q, interrupt_flag_in_q);
            goto INTERRUPT_REMAIN_IN_QUEUE;
        }
#endif
#ifdef SUPPORT_MULTI_INST_INTR
#ifdef SUPPORT_TIMEOUT_RESOLUTION
        kt = ktime_set(0, info.timeout*1000*1000);
        ret = wait_event_interruptible_hrtimeout(s_interrupt_wait_q[intr_inst_index], s_interrupt_flag[intr_inst_index] != 0, kt);
#else
        ret = wait_event_interruptible_timeout(s_interrupt_wait_q[intr_inst_index], s_interrupt_flag[intr_inst_index] != 0, msecs_to_jiffies(info.timeout));
#endif
#else
        ret = wait_event_interruptible_timeout(s_interrupt_wait_q, s_interrupt_flag != 0, msecs_to_jiffies(info.timeout));
#endif
#ifdef SUPPORT_TIMEOUT_RESOLUTION
        if (ret == -ETIME) {
            //DPRINTK("[VPUDRV][-]VDI_IOCTL_WAIT_INTERRUPT timeout = %d \n", info.timeout);
            break;
        }
#endif
        if (!ret) {
            ret = -ETIME;
            break;
        }
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
#ifdef SUPPORT_MULTI_INST_INTR
        intr_reason_in_q = 0;
        interrupt_flag_in_q = kfifo_out_spinlocked(&s_interrupt_pending_q[intr_inst_index], &intr_reason_in_q, sizeof(u32), &s_kfifo_lock);
        if (interrupt_flag_in_q > 0) {
            dev->interrupt_reason[intr_inst_index] = intr_reason_in_q;
        }
        else {
            dev->interrupt_reason[intr_inst_index] = 0;
        }
#endif
#ifdef SUPPORT_MULTI_INST_INTR
        DPRINTK("[VPUDRV] inst_index(%d), s_interrupt_flag(%d), reason(0x%08lx)\n", intr_inst_index, s_interrupt_flag[intr_inst_index], dev->interrupt_reason[intr_inst_index]);
#else
        DPRINTK("[VPUDRV] s_interrupt_flag(%d), reason(0x%08lx)\n", s_interrupt_flag, dev->interrupt_reason);
#endif
#ifdef SUPPORT_MULTI_INST_INTR
INTERRUPT_REMAIN_IN_QUEUE:
        info.intr_reason = dev->interrupt_reason[intr_inst_index];
        s_interrupt_flag[intr_inst_index] = 0;
        dev->interrupt_reason[intr_inst_index] = 0;
#else
        info.intr_reason = dev->interrupt_reason;
        s_interrupt_flag = 0;
        dev->interrupt_reason = 0;
#endif
#ifdef VPU_IRQ_CONTROL
        enable_irq(s_vpu_irq);
#endif
        ret = copy_to_user((void __user *)arg, &info, sizeof(vpudrv_intr_info_t));
        DPRINTK("[VPUDRV][-]VDI_IOCTL_WAIT_INTERRUPT\n");
        if (ret != 0)
        {
            return -EFAULT;
        }
    }
    break;
    case VDI_IOCTL_SET_CLOCK_GATE:
    {
        // u32 clkgate;
        // DPRINTK("[VPUDRV][+]VDI_IOCTL_SET_CLOCK_GATE\n");
        // if (get_user(clkgate, (u32 __user *) arg))
        //     return -EFAULT;
        // #ifdef VPU_SUPPORT_CLOCK_CONTROL
        // if (clkgate)
        //     //vpu_clk_enable(s_vpu_clk);
        // else
        //     //vpu_clk_disable(s_vpu_clk);
        // #endif
        // DPRINTK("[VPUDRV][-]VDI_IOCTL_SET_CLOCK_GATE\n");
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_POOL:
    {
        DPRINTK("[VPUDRV][+]VDI_IOCTL_GET_INSTANCE_POOL\n");
        if ((ret = down_interruptible(&s_vpu_sem)) == 0) {
            if (s_instance_pool.base != 0) {
                ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(vpudrv_buffer_t));
                if (ret != 0)
                    ret = -EFAULT;
            } else {
                ret = copy_from_user(&s_instance_pool, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
                if (ret == 0) {
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
                    s_instance_pool.size = PAGE_ALIGN(s_instance_pool.size);
                    s_instance_pool.base = (unsigned long)vmalloc(s_instance_pool.size);
                    s_instance_pool.phys_addr = s_instance_pool.base;
                    if (s_instance_pool.base != 0)
#else
                    if (vpu_alloc_dma_buffer(&s_instance_pool) != -1)
#endif
                    {
                        memset((void *)s_instance_pool.base, 0x0, s_instance_pool.size); /* clear the pool */
                        ret = copy_to_user((void __user *)arg, &s_instance_pool, sizeof(vpudrv_buffer_t));
                        if (ret == 0) {
                            /* successfully allocated memory for the instance pool */
                            up(&s_vpu_sem);
                            break;
                        }
                    }
                }
                ret = -EFAULT;
            }
            up(&s_vpu_sem);
        }
        DPRINTK("[VPUDRV][-]VDI_IOCTL_GET_INSTANCE_POOL\n");
    }
    break;
    case VDI_IOCTL_GET_COMMON_MEMORY:
    {
        DPRINTK("[VPUDRV][+]VDI_IOCTL_GET_COMMON_MEMORY\n");
        if (s_common_memory.base != 0) {
            ret = copy_to_user((void __user *)arg, &s_common_memory, sizeof(vpudrv_buffer_t));
            if (ret != 0)
                ret = -EFAULT;
        } else {
            ret = copy_from_user(&s_common_memory, (vpudrv_buffer_t *)arg, sizeof(vpudrv_buffer_t));
            if (ret == 0) {
                if (vpu_alloc_dma_buffer(&s_common_memory) != -1) {
                    ret = copy_to_user((void __user *)arg, &s_common_memory, sizeof(vpudrv_buffer_t));
                    if (ret == 0) {
                        /* successfully allocated the common memory */
                        break;
                    }
                }
            }
            ret = -EFAULT;
        }
        DPRINTK("[VPUDRV][-]VDI_IOCTL_GET_COMMON_MEMORY\n");
    }
    break;
    case VDI_IOCTL_OPEN_INSTANCE:
    {
        vpudrv_inst_info_t inst_info;
        vpudrv_instanace_list_t *vil, *entry, *n;

        vil = kzalloc(sizeof(*vil), GFP_KERNEL);
        if (!vil)
            return -ENOMEM;
        if (copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t))) {
            kfree(vil); /* do not leak the list node on a failed copy */
            return -EFAULT;
        }
        vil->inst_idx = inst_info.inst_idx;
        vil->core_idx = inst_info.core_idx;
        vil->filp = filp;
        spin_lock(&s_vpu_lock);
        list_add(&vil->list, &s_inst_list_head);
        inst_info.inst_open_count = 0; /* count the currently open instances */
        /* iterate with a separate cursor so that 'vil' keeps pointing at the node just added */
        list_for_each_entry_safe(entry, n, &s_inst_list_head, list)
        {
            if (entry->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }
#ifdef SUPPORT_MULTI_INST_INTR
        kfifo_reset(&s_interrupt_pending_q[inst_info.inst_idx]);
#endif
        spin_unlock(&s_vpu_lock);
        s_vpu_open_ref_count++; /* tracks whether the vpu is opened or closed */
        if (copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t))) {
            spin_lock(&s_vpu_lock);
            list_del(&vil->list); /* unlink before freeing so the list never holds a dangling node */
            spin_unlock(&s_vpu_lock);
            s_vpu_open_ref_count--;
            kfree(vil);
            return -EFAULT;
        }
        DPRINTK("[VPUDRV] VDI_IOCTL_OPEN_INSTANCE core_idx=%d, inst_idx=%d, s_vpu_open_ref_count=%d, inst_open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, s_vpu_open_ref_count, inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_CLOSE_INSTANCE:
    {
        vpudrv_inst_info_t inst_info;
        vpudrv_instanace_list_t *vil, *n;
        u32 found = 0;

        DPRINTK("[VPUDRV][+]VDI_IOCTL_CLOSE_INSTANCE\n");
        if (copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t)))
            return -EFAULT;
        spin_lock(&s_vpu_lock);
        list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
        {
            if (vil->inst_idx == inst_info.inst_idx && vil->core_idx == inst_info.core_idx) {
                list_del(&vil->list);
                kfree(vil);
                found = 1;
                break;
            }
        }
        if (0 == found) {
            spin_unlock(&s_vpu_lock);
            return -EINVAL;
        }
        inst_info.inst_open_count = 0; /* count the currently open instances */
        list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
        {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }
#ifdef SUPPORT_MULTI_INST_INTR
        kfifo_reset(&s_interrupt_pending_q[inst_info.inst_idx]);
#endif
        spin_unlock(&s_vpu_lock);
        s_vpu_open_ref_count--; /* tracks whether the vpu is opened or closed */
        if (copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t)))
            return -EFAULT;
        DPRINTK("[VPUDRV] VDI_IOCTL_CLOSE_INSTANCE core_idx=%d, inst_idx=%d, s_vpu_open_ref_count=%d, inst_open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, s_vpu_open_ref_count, inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_GET_INSTANCE_NUM:
    {
        vpudrv_inst_info_t inst_info;
        vpudrv_instanace_list_t *vil, *n;

        DPRINTK("[VPUDRV][+]VDI_IOCTL_GET_INSTANCE_NUM\n");
        ret = copy_from_user(&inst_info, (vpudrv_inst_info_t *)arg, sizeof(vpudrv_inst_info_t));
        if (ret != 0)
            break;
        spin_lock(&s_vpu_lock);
        inst_info.inst_open_count = 0;
        list_for_each_entry_safe(vil, n, &s_inst_list_head, list)
        {
            if (vil->core_idx == inst_info.core_idx)
                inst_info.inst_open_count++;
        }
        spin_unlock(&s_vpu_lock);
        ret = copy_to_user((void __user *)arg, &inst_info, sizeof(vpudrv_inst_info_t));
        DPRINTK("[VPUDRV] VDI_IOCTL_GET_INSTANCE_NUM core_idx=%d, inst_idx=%d, open_count=%d\n", (int)inst_info.core_idx, (int)inst_info.inst_idx, inst_info.inst_open_count);
    }
    break;
    case VDI_IOCTL_RESET:
    {
        //vpu_hw_reset();
    }
    break;
    case VDI_IOCTL_GET_REGISTER_INFO:
    {
        DPRINTK("[VPUDRV][+]VDI_IOCTL_GET_REGISTER_INFO\n");
        ret = copy_to_user((void __user *)arg, &s_vpu_register, sizeof(vpudrv_buffer_t));
        if (ret != 0)
            ret = -EFAULT;
        DPRINTK("[VPUDRV][-]VDI_IOCTL_GET_REGISTER_INFO s_vpu_register.phys_addr==0x%lx, s_vpu_register.virt_addr=0x%lx, s_vpu_register.size=%d\n", s_vpu_register.phys_addr, s_vpu_register.virt_addr, s_vpu_register.size);
    }
    break;
    case VDI_IOCTL_FLUSH_DCACHE:
    {
        vpudrv_flush_cache_t cache_info;

        //DPRINTK("[VPUDRV][+]VDI_IOCTL_FLUSH_DCACHE\n");
        ret = copy_from_user(&cache_info, (vpudrv_flush_cache_t *)arg, sizeof(vpudrv_flush_cache_t));
        if (ret != 0) {
            ret = -EFAULT;
            break; /* do not flush with an uninitialized cache_info */
        }
        if (cache_info.flag)
            starfive_flush_dcache(cache_info.start, cache_info.size);
        //DPRINTK("[VPUDRV][-]VDI_IOCTL_FLUSH_DCACHE\n");
        break;
    }
    default:
    {
        printk(KERN_ERR "[VPUDRV] No such IOCTL, cmd is %d\n", cmd);
    }
    break;
    }
    return ret;
}
static ssize_t vpu_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
    return -1;
}

static ssize_t vpu_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
    /* DPRINTK("[VPUDRV] vpu_write len=%d\n", (int)len); */
    if (!buf) {
        printk(KERN_ERR "[VPUDRV] vpu_write buf = NULL error \n");
        return -EFAULT;
    }
    if (len == sizeof(vpu_bit_firmware_info_t)) {
        vpu_bit_firmware_info_t *bit_firmware_info;

        bit_firmware_info = kmalloc(sizeof(vpu_bit_firmware_info_t), GFP_KERNEL);
        if (!bit_firmware_info) {
            printk(KERN_ERR "[VPUDRV] vpu_write bit_firmware_info allocation error \n");
            return -EFAULT;
        }
        if (copy_from_user(bit_firmware_info, buf, len)) {
            printk(KERN_ERR "[VPUDRV] vpu_write copy_from_user error for bit_firmware_info\n");
            kfree(bit_firmware_info); /* do not leak the allocation on a failed copy */
            return -EFAULT;
        }
        if (bit_firmware_info->size == sizeof(vpu_bit_firmware_info_t)) {
            DPRINTK("[VPUDRV] vpu_write set bit_firmware_info coreIdx=0x%x, reg_base_offset=0x%x size=0x%x, bit_code[0]=0x%x\n",
                bit_firmware_info->core_idx, (int)bit_firmware_info->reg_base_offset, bit_firmware_info->size, bit_firmware_info->bit_code[0]);
            if (bit_firmware_info->core_idx >= MAX_NUM_VPU_CORE) { /* >= keeps the index within s_bit_firmware_info[] */
                printk(KERN_ERR "[VPUDRV] vpu_write coreIdx[%d] exceeds MAX_NUM_VPU_CORE[%d]\n", bit_firmware_info->core_idx, MAX_NUM_VPU_CORE);
                kfree(bit_firmware_info);
                return -ENODEV;
            }
            memcpy((void *)&s_bit_firmware_info[bit_firmware_info->core_idx], bit_firmware_info, sizeof(vpu_bit_firmware_info_t));
            kfree(bit_firmware_info);
            return len;
        }
        kfree(bit_firmware_info);
    }
    return -1;
}
static int vpu_release(struct inode *inode, struct file *filp)
{
    int ret = 0;
    u32 open_count;
#ifdef SUPPORT_MULTI_INST_INTR
    int i;
#endif
    DPRINTK("[VPUDRV] vpu_release\n");
    if ((ret = down_interruptible(&s_vpu_sem)) == 0) {
        /* find and free buffers that user applications did not release */
        vpu_free_buffers(filp);
        /* find and free instances that user applications did not close */
        vpu_free_instances(filp);
        spin_lock(&s_vpu_lock);
        s_vpu_drv_context.open_count--;
        open_count = s_vpu_drv_context.open_count;
        spin_unlock(&s_vpu_lock);
        if (open_count == 0) {
#ifdef SUPPORT_MULTI_INST_INTR
            for (i = 0; i < MAX_NUM_INSTANCE; i++) {
                kfifo_reset(&s_interrupt_pending_q[i]);
            }
#endif
            if (s_instance_pool.base) {
                DPRINTK("[VPUDRV] free instance pool\n");
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
                vfree((const void *)s_instance_pool.base);
#else
                vpu_free_dma_buffer(&s_instance_pool);
#endif
                s_instance_pool.base = 0;
            }
        }
        up(&s_vpu_sem); /* only release the semaphore if it was actually taken */
    }
    return 0;
}
static int vpu_fasync(int fd, struct file *filp, int mode)
{
    struct vpu_drv_context_t *dev = (struct vpu_drv_context_t *)filp->private_data;
    return fasync_helper(fd, filp, mode, &dev->async_queue);
}

static int vpu_map_to_register(struct file *fp, struct vm_area_struct *vm)
{
    unsigned long pfn;

    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
    pfn = s_vpu_register.phys_addr >> PAGE_SHIFT;
    return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}

static int vpu_map_to_physical_memory(struct file *fp, struct vm_area_struct *vm)
{
    vm->vm_flags |= VM_IO | VM_RESERVED;
    vm->vm_page_prot = pgprot_noncached(vm->vm_page_prot);
    return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
}
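/*
 * The instance pool may be vmalloc()ed, in which case it is not
 * physically contiguous and must be remapped into user space one page
 * at a time via vmalloc_to_pfn(); the DMA-buffer variant can be mapped
 * with a single remap_pfn_range() call.
 */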
static int vpu_map_to_instance_pool_memory(struct file *fp, struct vm_area_struct *vm)
{
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
    int ret;
    long length = vm->vm_end - vm->vm_start;
    unsigned long start = vm->vm_start;
    char *vmalloc_area_ptr = (char *)s_instance_pool.base;
    unsigned long pfn;

    vm->vm_flags |= VM_RESERVED;
    /* loop over all pages, mapping each page individually */
    while (length > 0)
    {
        pfn = vmalloc_to_pfn(vmalloc_area_ptr);
        if ((ret = remap_pfn_range(vm, start, pfn, PAGE_SIZE, PAGE_SHARED)) < 0) {
            return ret;
        }
        start += PAGE_SIZE;
        vmalloc_area_ptr += PAGE_SIZE;
        length -= PAGE_SIZE;
    }
    return 0;
#else
    vm->vm_flags |= VM_RESERVED;
    return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff, vm->vm_end-vm->vm_start, vm->vm_page_prot) ? -EAGAIN : 0;
#endif
}

/*!
 * @brief memory map interface for vpu file operation
 * @return 0 on success or negative error code on error
 */
static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
{
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
    if (vm->vm_pgoff == 0)
        return vpu_map_to_instance_pool_memory(fp, vm);
    if (vm->vm_pgoff == (s_vpu_register.phys_addr>>PAGE_SHIFT))
        return vpu_map_to_register(fp, vm);
    return vpu_map_to_physical_memory(fp, vm);
#else
    if (vm->vm_pgoff) {
        if (vm->vm_pgoff == (s_instance_pool.phys_addr>>PAGE_SHIFT))
            return vpu_map_to_instance_pool_memory(fp, vm);
        return vpu_map_to_physical_memory(fp, vm);
    } else {
        return vpu_map_to_register(fp, vm);
    }
#endif
}
struct file_operations vpu_fops = {
    .owner = THIS_MODULE,
    .open = vpu_open,
    .read = vpu_read,
    .write = vpu_write,
    /*.ioctl = vpu_ioctl, // for kernel 2.6.9 of C&M*/
    .unlocked_ioctl = vpu_ioctl,
    .release = vpu_release,
    .fasync = vpu_fasync,
    .mmap = vpu_mmap,
};

static struct resource venc_resource[] = {
    [0] = {
        .start = VPU_REG_BASE_ADDR,
        .end = VPU_REG_BASE_ADDR + VPU_REG_SIZE - 1,
        .flags = IORESOURCE_MEM,
    },
    [1] = {
        .start = VENC_IRQ_ADDR,
        .end = VENC_IRQ_ADDR,
        .flags = IORESOURCE_IRQ,
    },
};
static int vpu_probe(struct platform_device *pdev)
{
    int err = 0;
    struct resource *res = NULL;
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    struct resource res_cma;
    struct device_node *node;
#endif
    DPRINTK("[VPUDRV] vpu_probe\n");
    if (pdev) {
        vpu_dev = &pdev->dev;
        vpu_dev->coherent_dma_mask = 0xffffffff;
        //vpu_dev->dma_ops = NULL;
        dev_info(vpu_dev, "device init.\n");
    }
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    vpu_dev->of_node = of_find_node_by_name(NULL, "vpu_enc");
    if (!(vpu_dev->of_node))
        printk("The vpu_enc node was not found in the device tree.\n");
    err = of_address_to_resource(vpu_dev->of_node, 0, &venc_resource[0]);
    if (err) {
        printk(KERN_ERR "could not find the venc register address\n");
        goto ERROR_PROBE_DEVICE;
    }
    venc_resource[1].start = irq_of_parse_and_map(vpu_dev->of_node, 0);
    venc_resource[1].end = venc_resource[1].start;
    err = platform_device_add_resources(pdev, venc_resource, 2);
    if (err) {
        printk(KERN_ERR "could not add the venc resources\n");
        goto ERROR_PROBE_DEVICE;
    }
#endif
    if (pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res) { /* if the platform driver is implemented */
        s_vpu_register.phys_addr = res->start;
        s_vpu_register.virt_addr = (unsigned long)ioremap_nocache(res->start, resource_size(res));
        s_vpu_register.size = resource_size(res); /* resource ranges are inclusive, so use resource_size() */
        DPRINTK("[VPUDRV] : vpu base address get from platform driver physical base addr==0x%lx, virtual base=0x%lx\n", s_vpu_register.phys_addr, s_vpu_register.virt_addr);
    } else {
        s_vpu_register.phys_addr = VPU_REG_BASE_ADDR;
        s_vpu_register.virt_addr = (unsigned long)ioremap_nocache(s_vpu_register.phys_addr, VPU_REG_SIZE);
        s_vpu_register.size = VPU_REG_SIZE;
        DPRINTK("[VPUDRV] : vpu base address get from defined value physical base addr==0x%lx, virtual base=0x%lx\n", s_vpu_register.phys_addr, s_vpu_register.virt_addr);
    }
    /* get the major number of the character device */
    if ((alloc_chrdev_region(&s_vpu_major, 0, 1, VPU_DEV_NAME)) < 0) {
        err = -EBUSY;
        printk(KERN_ERR "could not allocate major number\n");
        goto ERROR_PROBE_DEVICE;
    }
    printk(KERN_INFO "SUCCESS alloc_chrdev_region\n");
    /* initialize the device structure and register the device with the kernel */
    cdev_init(&s_vpu_cdev, &vpu_fops);
    if ((cdev_add(&s_vpu_cdev, s_vpu_major, 1)) < 0) {
        err = -EBUSY;
        printk(KERN_ERR "could not allocate chrdev\n");
        goto ERROR_PROBE_DEVICE;
    }
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    err = starfive_venc_clk_rst_init(pdev);
    if (err) {
        goto ERROR_PROBE_DEVICE;
    }
#else
    vpu_clk_enable();
    vpu_hw_reset();
#endif
#ifdef VPU_SUPPORT_ISR
#ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    if (pdev)
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res) { /* if the platform driver is implemented */
        s_vpu_irq = res->start;
        DPRINTK("[VPUDRV] : vpu irq number get from platform driver irq=0x%x\n", s_vpu_irq);
    } else {
        DPRINTK("[VPUDRV] : vpu irq number get from defined value irq=0x%x\n", s_vpu_irq);
    }
#else
    DPRINTK("[VPUDRV] : vpu irq number get from defined value irq=0x%x\n", s_vpu_irq);
#endif
    err = request_irq(s_vpu_irq, vpu_irq_handler, 0, pdev->name, (void *)(&s_vpu_drv_context));
    if (err) {
        printk(KERN_ERR "[VPUDRV] : fail to register interrupt handler\n");
        goto ERROR_PROBE_DEVICE;
    }
#endif
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    printk("start memory-region: of_node:%#lx \n", (unsigned long)vpu_dev->of_node);
    node = of_parse_phandle(vpu_dev->of_node, "memory-region", 0);
    if (node) {
        dev_info(vpu_dev, "Got memory from memory-region\n");
        of_address_to_resource(node, 0, &res_cma);
        s_video_memory.size = resource_size(&res_cma);
        s_video_memory.phys_addr = res_cma.start;
    } else {
        dev_info(vpu_dev, "Getting memory from memory-region failed; please check the dts file.\n");
        //dev_info(vpu_dev, "Using default cma reserved space..\n");
        //s_video_memory.phys_addr = 0xa3000000;
        //s_video_memory.size = 0x10000000;
        return 0;
    }
    s_video_memory.base = (unsigned long)ioremap_nocache(DRAM_MEM2SYS(s_video_memory.phys_addr), PAGE_ALIGN(s_video_memory.size));
    if (!s_video_memory.base) {
        printk(KERN_ERR "[VPUDRV] : fail to remap video memory physical phys_addr==0x%lx, base==0x%lx, size=%d\n", s_video_memory.phys_addr, s_video_memory.base, (int)s_video_memory.size);
        goto ERROR_PROBE_DEVICE;
    }
    if (vmem_init(&s_vmem, s_video_memory.phys_addr, s_video_memory.size) < 0) {
        printk(KERN_ERR "[VPUDRV] : fail to init vmem system\n");
        goto ERROR_PROBE_DEVICE;
    }
    DPRINTK("[VPUDRV] success to probe vpu device with reserved video memory phys_addr==0x%lx, base==0x%lx\n", s_video_memory.phys_addr, s_video_memory.base);
#else
    DPRINTK("[VPUDRV] success to probe vpu device with non reserved video memory\n");
#endif
    return 0;

ERROR_PROBE_DEVICE:
    if (s_vpu_major)
        unregister_chrdev_region(s_vpu_major, 1);
    if (s_vpu_register.virt_addr)
        iounmap((void *)s_vpu_register.virt_addr);
    return err;
}
#ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
static int vpu_remove(struct platform_device *pdev)
{
    DPRINTK("[VPUDRV] vpu_remove\n");
    if (s_instance_pool.base) {
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
        vfree((const void *)s_instance_pool.base);
#else
        vpu_free_dma_buffer(&s_instance_pool);
#endif
        s_instance_pool.base = 0;
    }
    if (s_common_memory.base) {
        vpu_free_dma_buffer(&s_common_memory);
        s_common_memory.base = 0;
    }
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    if (s_video_memory.base) {
        iounmap((void *)s_video_memory.base);
        s_video_memory.base = 0;
        vmem_exit(&s_vmem);
    }
#endif
    if (s_vpu_major > 0) {
        cdev_del(&s_vpu_cdev);
        unregister_chrdev_region(s_vpu_major, 1);
        s_vpu_major = 0;
    }
#ifdef VPU_SUPPORT_ISR
    if (s_vpu_irq)
        free_irq(s_vpu_irq, &s_vpu_drv_context);
#endif
    if (s_vpu_register.virt_addr)
        iounmap((void *)s_vpu_register.virt_addr);
    return 0;
}
#endif /*VPU_SUPPORT_PLATFORM_DRIVER_REGISTER*/
#if defined(VPU_SUPPORT_PLATFORM_DRIVER_REGISTER) && defined(CONFIG_PM)
#define W5_MAX_CODE_BUF_SIZE    (512*1024)
#define W5_CMD_INIT_VPU         (0x0001)
#define W5_CMD_WAKEUP_VPU       (0x0002)
#define W5_CMD_SLEEP_VPU        (0x0004)

static void Wave5BitIssueCommand(int core, u32 cmd)
{
    WriteVpuRegister(W5_VPU_BUSY_STATUS, 1);
    WriteVpuRegister(W5_COMMAND, cmd);
    WriteVpuRegister(W5_VPU_HOST_INT_REQ, 1);
}
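/*
 * Host-to-firmware handshake (as relied on by the polling loops below):
 * the host pre-sets W5_VPU_BUSY_STATUS to 1, writes the command word, then
 * rings the host interrupt request doorbell; the firmware clears the busy
 * flag once the command completes and reports the outcome through
 * W5_RET_SUCCESS / W5_RET_FAIL_REASON.
 */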
static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
{
    int i;
    int core;
    unsigned long timeout = jiffies + HZ;   /* wait at most 1 s for the vpu */
    int product_code;

    DPRINTK("[VPUDRV] vpu_suspend\n");
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    starfive_venc_clk_enable(&pdev->dev);
#else
    vpu_clk_enable();
#endif
    if (s_vpu_open_ref_count > 0) {
        for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
            if (s_bit_firmware_info[core].size == 0)
                continue;
            product_code = ReadVpuRegister(VPU_PRODUCT_CODE_REGISTER);
            if (PRODUCT_CODE_W_SERIES(product_code)) {
                while (ReadVpuRegister(W5_VPU_BUSY_STATUS)) {
                    if (time_after(jiffies, timeout)) {
                        DPRINTK("SLEEP_VPU BUSY timeout\n");
                        goto DONE_SUSPEND;
                    }
                }
                Wave5BitIssueCommand(core, W5_CMD_SLEEP_VPU);
                while (ReadVpuRegister(W5_VPU_BUSY_STATUS)) {
                    if (time_after(jiffies, timeout)) {
                        DPRINTK("SLEEP_VPU BUSY timeout\n");
                        goto DONE_SUSPEND;
                    }
                }
                if (ReadVpuRegister(W5_RET_SUCCESS) == 0) {
                    DPRINTK("SLEEP_VPU failed [0x%x]\n", ReadVpuRegister(W5_RET_FAIL_REASON));
                    goto DONE_SUSPEND;
                }
            } else if (PRODUCT_CODE_NOT_W_SERIES(product_code)) {
                while (ReadVpuRegister(BIT_BUSY_FLAG)) {
                    if (time_after(jiffies, timeout))
                        goto DONE_SUSPEND;
                }
                /* save the BIT-series working registers (0x100..0x1FC) */
                for (i = 0; i < 64; i++)
                    s_vpu_reg_store[core][i] = ReadVpuRegister(BIT_BASE + (0x100 + (i * 4)));
            } else {
                DPRINTK("[VPUDRV] Unknown product id : %08x\n", product_code);
                goto DONE_SUSPEND;
            }
        }
    }
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    starfive_venc_clk_disable(&pdev->dev);
#else
    vpu_clk_disable();
#endif
    return 0;

DONE_SUSPEND:
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    starfive_venc_clk_disable(&pdev->dev);
#else
    vpu_clk_disable();
#endif
    return -EAGAIN;
}
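/*
 * The suspend strategy differs per family: W-series (Wave5) firmware is
 * told to save its own state with W5_CMD_SLEEP_VPU, while for the older
 * BIT-series cores the host snapshots 64 working registers into
 * s_vpu_reg_store and restores them (together with the boot code) in
 * vpu_resume() below.
 */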
static int vpu_resume(struct platform_device *pdev)
{
    int i;
    int core;
    u32 val;
    unsigned long timeout = jiffies + HZ;   /* wait at most 1 s for the vpu */
    int product_code;
    unsigned long code_base;
    u32 code_size;
    u32 remap_size;
    int regVal;
    u32 hwOption = 0;

    DPRINTK("[VPUDRV] vpu_resume\n");
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    starfive_venc_clk_enable(&pdev->dev);
#else
    vpu_clk_enable();
#endif
    for (core = 0; core < MAX_NUM_VPU_CORE; core++) {
        if (s_bit_firmware_info[core].size == 0)
            continue;
        product_code = ReadVpuRegister(VPU_PRODUCT_CODE_REGISTER);
        if (PRODUCT_CODE_W_SERIES(product_code)) {
            code_base = s_common_memory.phys_addr;
            /* align the code buffer size down to 4 KB */
            code_size = (W5_MAX_CODE_BUF_SIZE & ~0xfff);
            if (code_size < s_bit_firmware_info[core].size * 2)
                goto DONE_WAKEUP;

            regVal = 0;
            WriteVpuRegister(W5_PO_CONF, regVal);

            /* reset all blocks, then wait for the reset to complete */
            regVal = 0x7ffffff;
            WriteVpuRegister(W5_VPU_RESET_REQ, regVal);
            while (ReadVpuRegister(W5_VPU_RESET_STATUS)) {
                if (time_after(jiffies, timeout))
                    goto DONE_WAKEUP;
            }
            WriteVpuRegister(W5_VPU_RESET_REQ, 0);

            /* remap size is in units of 4 KB pages */
            remap_size = (code_size >> 12) & 0x1ff;
            regVal = 0x80000000 | (W5_REMAP_CODE_INDEX << 12) | (0 << 16) | (1 << 11) | remap_size;
            WriteVpuRegister(W5_VPU_REMAP_CTRL, regVal);
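            /*
             * Worked example of the remap control word (bit-field layout
             * inferred from the shifts above, not from a datasheet): with
             * code_size = 512 KB, 512*1024 >> 12 = 0x80 pages, so
             * regVal = 0x80000000                  (enable)
             *        | (W5_REMAP_CODE_INDEX << 12) (remap region index)
             *        | (1 << 11)
             *        | 0x080                       (size in 4 KB pages)
             */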
            WriteVpuRegister(W5_VPU_REMAP_VADDR, 0x00000000);   /* DO NOT CHANGE! */
            WriteVpuRegister(W5_VPU_REMAP_PADDR, code_base);
            WriteVpuRegister(W5_ADDR_CODE_BASE, code_base);
            WriteVpuRegister(W5_CODE_SIZE, code_size);
            WriteVpuRegister(W5_CODE_PARAM, 0);
            /* note: this writes a jiffies-based deadline, not a cycle
             * count, into the hardware timeout register; kept as-is from
             * the original code */
            WriteVpuRegister(W5_INIT_VPU_TIME_OUT_CNT, timeout);
            WriteVpuRegister(W5_HW_OPTION, hwOption);

            /* interrupt enables */
            if (product_code == WAVE521_CODE || product_code == WAVE521C_CODE) {
                regVal = (1 << INT_WAVE5_ENC_SET_PARAM);
                regVal |= (1 << INT_WAVE5_ENC_PIC);
                regVal |= (1 << INT_WAVE5_INIT_SEQ);
                regVal |= (1 << INT_WAVE5_DEC_PIC);
                regVal |= (1 << INT_WAVE5_BSBUF_EMPTY);
            } else {
                /* decoder */
                regVal = (1 << INT_WAVE5_INIT_SEQ);
                regVal |= (1 << INT_WAVE5_DEC_PIC);
                regVal |= (1 << INT_WAVE5_BSBUF_EMPTY);
            }
            WriteVpuRegister(W5_VPU_VINT_ENABLE, regVal);

            Wave5BitIssueCommand(core, W5_CMD_INIT_VPU);
            WriteVpuRegister(W5_VPU_REMAP_CORE_START, 1);
            while (ReadVpuRegister(W5_VPU_BUSY_STATUS)) {
                if (time_after(jiffies, timeout))
                    goto DONE_WAKEUP;
            }
            if (ReadVpuRegister(W5_RET_SUCCESS) == 0) {
                DPRINTK("WAKEUP_VPU failed [0x%x]\n", ReadVpuRegister(W5_RET_FAIL_REASON));
                goto DONE_WAKEUP;
            }
        } else if (PRODUCT_CODE_NOT_W_SERIES(product_code)) {
            WriteVpuRegister(BIT_CODE_RUN, 0);
            /* load the boot code */
            for (i = 0; i < 512; i++) {
                val = s_bit_firmware_info[core].bit_code[i];
                WriteVpuRegister(BIT_CODE_DOWN, ((i << 16) | val));
            }
            /* restore the registers saved in vpu_suspend() */
            for (i = 0; i < 64; i++)
                WriteVpuRegister(BIT_BASE + (0x100 + (i * 4)), s_vpu_reg_store[core][i]);
            WriteVpuRegister(BIT_BUSY_FLAG, 1);
            WriteVpuRegister(BIT_CODE_RESET, 1);
            WriteVpuRegister(BIT_CODE_RUN, 1);
            while (ReadVpuRegister(BIT_BUSY_FLAG)) {
                if (time_after(jiffies, timeout))
                    goto DONE_WAKEUP;
            }
        } else {
            DPRINTK("[VPUDRV] Unknown product id : %08x\n", product_code);
            goto DONE_WAKEUP;
        }
    }
    if (s_vpu_open_ref_count == 0) {
#ifdef VPU_SUPPORT_CLOCK_CONTROL
        starfive_venc_clk_disable(&pdev->dev);
#else
        vpu_clk_disable();
#endif
    }
    /* the success path falls through into DONE_WAKEUP on purpose */
DONE_WAKEUP:
    if (s_vpu_open_ref_count > 0) {
#ifdef VPU_SUPPORT_CLOCK_CONTROL
        starfive_venc_clk_enable(&pdev->dev);
#else
        vpu_clk_enable();
#endif
    }
    return 0;
}
#else
#define vpu_suspend NULL
#define vpu_resume NULL
#endif /* !CONFIG_PM */

#ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
static const struct of_device_id vpu_of_id_table[] = {
    { .compatible = "cm,cm521-vpu" },
    {}
};

static struct platform_driver vpu_driver = {
    .driver = {
        .name = VPU_PLATFORM_DEVICE_NAME,
        .of_match_table = of_match_ptr(vpu_of_id_table),
    },
    .probe = vpu_probe,
    .remove = vpu_remove,
    .suspend = vpu_suspend,
    .resume = vpu_resume,
};
#endif /* VPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
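/*
 * The .suspend/.resume members above are the legacy platform-bus PM
 * callbacks; on recent kernels the preferred form would be a struct
 * dev_pm_ops wired up through .driver.pm, but the legacy hooks still work
 * and are kept as-is.
 */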
static int __init vpu_init(void)
{
    int res;
#ifdef SUPPORT_MULTI_INST_INTR
    int i;
#endif
    DPRINTK("[VPUDRV] begin vpu_init\n");
#ifdef SUPPORT_MULTI_INST_INTR
    for (i = 0; i < MAX_NUM_INSTANCE; i++) {
        init_waitqueue_head(&s_interrupt_wait_q[i]);
    }
#define MAX_INTERRUPT_QUEUE (16*MAX_NUM_INSTANCE)
    for (i = 0; i < MAX_NUM_INSTANCE; i++) {
        res = kfifo_alloc(&s_interrupt_pending_q[i], MAX_INTERRUPT_QUEUE * sizeof(u32), GFP_KERNEL);
        if (res) {
            /* allocation failure is only logged here, not propagated */
            DPRINTK("[VPUDRV] kfifo_alloc failed 0x%x\n", res);
        }
    }
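    /*
     * For a plain struct kfifo, kfifo_alloc() takes the size in bytes and
     * rounds it up to a power of two, so each queue holds at least
     * 16*MAX_NUM_INSTANCE u32 interrupt-reason entries.
     */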
#else
    init_waitqueue_head(&s_interrupt_wait_q);
#endif
    s_common_memory.base = 0;
    s_instance_pool.base = 0;
#ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    res = platform_driver_register(&vpu_driver);
#else
    res = vpu_probe(NULL);
#endif /* VPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
    DPRINTK("[VPUDRV] end vpu_init result=0x%x\n", res);
    return res;
}
static void __exit vpu_exit(void)
{
    DPRINTK("[VPUDRV] vpu_exit\n");
#ifdef VPU_SUPPORT_CLOCK_CONTROL
    starfive_venc_clk_disable(vpu_dev);
    starfive_venc_rst_assert(vpu_dev);
#else
    vpu_clk_disable();
#endif
#ifdef VPU_SUPPORT_PLATFORM_DRIVER_REGISTER
    platform_driver_unregister(&vpu_driver);
    /* note: in this build the kfifos allocated in vpu_init() are not freed
     * here, and vpu_remove() does not free them either */
#else /* VPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
    if (s_instance_pool.base) {
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
        vfree((const void *)s_instance_pool.base);
#else
        vpu_free_dma_buffer(&s_instance_pool);
#endif
        s_instance_pool.base = 0;
    }
    if (s_common_memory.base) {
        vpu_free_dma_buffer(&s_common_memory);
        s_common_memory.base = 0;
    }
#ifdef VPU_SUPPORT_RESERVED_VIDEO_MEMORY
    if (s_video_memory.base) {
        iounmap((void *)s_video_memory.base);
        s_video_memory.base = 0;
        vmem_exit(&s_vmem);
    }
#endif
    if (s_vpu_major > 0) {
        cdev_del(&s_vpu_cdev);
        unregister_chrdev_region(s_vpu_major, 1);
        s_vpu_major = 0;
    }
#ifdef VPU_SUPPORT_ISR
    if (s_vpu_irq)
        free_irq(s_vpu_irq, &s_vpu_drv_context);
#endif
#ifdef SUPPORT_MULTI_INST_INTR
    {
        int i;
        for (i = 0; i < MAX_NUM_INSTANCE; i++)
            kfifo_free(&s_interrupt_pending_q[i]);
    }
#endif
    if (s_vpu_register.virt_addr) {
        iounmap((void *)s_vpu_register.virt_addr);
        s_vpu_register.virt_addr = 0x00;
    }
#endif /* VPU_SUPPORT_PLATFORM_DRIVER_REGISTER */
}
MODULE_AUTHOR("A customer using C&M VPU, Inc.");
MODULE_DESCRIPTION("VPU linux driver");
MODULE_LICENSE("GPL");

module_init(vpu_init);
module_exit(vpu_exit);
/* set bit 'ibit' in the reset assert register (asserts that reset line);
 * p_status_reg is accepted for symmetry but not polled here */
static void _set_reset(unsigned long p_assert_reg, unsigned long p_status_reg, int ibit)
{
    unsigned int read_value;

    read_value = vic_readl(p_assert_reg);
    read_value |= (0x1 << ibit);
    vic_writel(read_value, p_assert_reg);
}

/* clear bit 'ibit' in the reset assert register (de-asserts that reset line) */
static void _clr_reset(unsigned long p_assert_reg, unsigned long p_status_reg, int ibit)
{
    unsigned int read_value;

    read_value = vic_readl(p_assert_reg);
    read_value &= ~(0x1 << ibit);
    vic_writel(read_value, p_assert_reg);
}

/* set bit 'ibit' in a clock control register (gates the clock on) */
static void _enable_clk(unsigned long p_reg, int ibit)
{
    unsigned int read_value;

    read_value = vic_readl(p_reg);
    read_value |= (0x1 << ibit);
    vic_writel(read_value, p_reg);
}

/* clear bit 'ibit' in a clock control register (gates the clock off) */
static void _disable_clk(unsigned long p_reg, int ibit)
{
    unsigned int read_value;

    read_value = vic_readl(p_reg);
    read_value &= ~(0x1 << ibit);
    vic_writel(read_value, p_reg);
}
static void _reset_assert(unsigned long p_assert_reg, unsigned long p_status_reg)
{
    /* note: NBIT_RSTN_VENC_BRG_MAIN is not asserted here, although
     * _reset_clear() below does de-assert it */
    _set_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_APB);
    _set_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_AXI);
    _set_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_BCLK);
    _set_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_CCLK);
}

static void _reset_clear(unsigned long p_assert_reg, unsigned long p_status_reg)
{
    _clr_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_BRG_MAIN);
    _clr_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_AXI);
    _clr_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_BCLK);
    _clr_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_CCLK);
    _clr_reset(p_assert_reg, p_status_reg, NBIT_RSTN_VENC_APB);
}
static int _reset(void)
{
    unsigned long p_breg = (unsigned long)ioremap_nocache(rstgen_Software_RESET_BASE_REG_ADDR, 0x20);

    if (!p_breg)
        return -ENOMEM;

    _reset_assert(p_breg + rstgen_Software_RESET_assert0_OFFSET, p_breg + rstgen_Software_RESET_status0_OFFSET);
    mdelay(1);  /* hold the resets asserted for 1 ms */
    _reset_clear(p_breg + rstgen_Software_RESET_assert0_OFFSET, p_breg + rstgen_Software_RESET_status0_OFFSET);
    iounmap((void *)p_breg);
    return 0;
}
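/*
 * The reset pulse is assert -> 1 ms delay -> de-assert. The status register
 * offset is passed down but never polled, so completion is assumed rather
 * than verified; a stricter implementation could spin on the status bits
 * with a timeout.
 */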
static int _clk_control(int enable)
{
    unsigned long p_breg = (unsigned long)ioremap_nocache(clk_BASE_REG_ADDR, 0x100);

    if (!p_breg)
        return -ENOMEM;

    if (enable) {
        _enable_clk(p_breg + clk_venc_axi_ctrl_REG_OFFSET, 31);
        _enable_clk(p_breg + clk_vencbrg_mainclk_ctrl_REG_OFFSET, 31);
        _enable_clk(p_breg + clk_venc_bclk_ctrl_REG_OFFSET, 31);
        _enable_clk(p_breg + clk_venc_cclk_ctrl_REG_OFFSET, 31);
        _enable_clk(p_breg + clk_venc_apb_ctrl_REG_OFFSET, 31);
    } else {
        _disable_clk(p_breg + clk_venc_axi_ctrl_REG_OFFSET, 31);
        /* note: the bridge main clock (vencbrg) is not gated off here */
        _disable_clk(p_breg + clk_venc_bclk_ctrl_REG_OFFSET, 31);
        _disable_clk(p_breg + clk_venc_cclk_ctrl_REG_OFFSET, 31);
        _disable_clk(p_breg + clk_venc_apb_ctrl_REG_OFFSET, 31);
    }
    iounmap((void *)p_breg);
    return 0;
}
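/*
 * Bit 31 of each clk_*_ctrl register is treated as the clock-enable bit
 * (inferred from the calls above rather than from a datasheet). The clock
 * generator block is remapped and unmapped around every toggle, which is
 * simple but costs an ioremap per call.
 */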
int vpu_hw_reset(void)
{
    int ret;

    ret = _reset();
    DPRINTK("[VPUDRV] vpu hw reset %s\n", ret ? "failed" : "done");
    return ret;
}

int vpu_clk_enable(void)
{
    return _clk_control(1);
}

void vpu_clk_disable(void)
{
    _clk_control(0);
}