/*
 * Copyright (c) 2019, Chips&Media
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#if defined(linux) || defined(__linux) || defined(ANDROID)
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef _KERNEL_
#include <linux/delay.h>
#endif
#include <signal.h>     /* SIGIO */
#include <fcntl.h>      /* fcntl */
#include <pthread.h>
#include <sys/mman.h>   /* mmap */
#include <sys/ioctl.h>  /* ioctl */
#include <sys/errno.h>  /* errno */
#include <sys/types.h>
#include <sys/time.h>
#include "driver/vpu.h"
#include "../vdi.h"
#include "../vdi_osal.h"
#include "coda9/coda9_regdefine.h"
#include "wave/wave5_regdefine.h"
#include "main_helper.h"
#include "misc/debug.h"

#define VPU_DEVICE_NAME "/dev/vdec"

typedef pthread_mutex_t MUTEX_HANDLE;

#define SUPPORT_INTERRUPT
#define VDI_SRAM_BASE_ADDR 0x00000000 // If the SRAM address in the SoC is known to the VDI layer, it can be set here directly without an allocation from the driver.
#define VDI_SYSTEM_ENDIAN VDI_LITTLE_ENDIAN
#define VDI_128BIT_BUS_SYSTEM_ENDIAN VDI_128BIT_LITTLE_ENDIAN
#define VPU_BIT_REG_SIZE (0x4000*MAX_NUM_VPU_CORE)
#define VDI_CODA9_SRAM_SIZE 0x34600    /* 4K MAX size: 0x34600, FHD MAX size: 0x17D00 */
#define VDI_WAVE511_SRAM_SIZE 0x2D000  /* H.265 Main10 : 8Kx4K -> 184320, 4Kx2K -> 92160
                                        * H.265 Main   : 8Kx4K -> 155648, 4Kx2K -> 77824
                                        */
#define VDI_WAVE521_SRAM_SIZE 0x20400  /* 10bit profile : 8Kx8K -> 132096, 4Kx2K -> 66560
                                        * 8bit profile  : 8Kx8K -> 99328,  4Kx2K -> 51176
                                        */
#define VDI_WAVE521C_SRAM_SIZE 0x2D000 /* H.265 Main10 : 8Kx4K -> 184320, 4Kx2K -> 92160
                                        * H.265 Main   : 8Kx4K -> 155648, 4Kx2K -> 77824
                                        * NOTE: Decoder > Encoder
                                        */
#define VDI_NUM_LOCK_HANDLES 4

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
#define VPU_CORE_BASE_OFFSET 0x4000
#endif
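/*
 * Typical call sequence through this layer (a sketch inferred from the
 * functions below, not from official documentation): vdi_init() opens
 * VPU_DEVICE_NAME, maps the register window and the shared instance pool,
 * and allocates the common memory; per-command work is bracketed by
 * vdi_lock()/vdi_unlock() around vdi_write_register()/vdi_read_register()
 * and the vdi_wait_*() helpers; vdi_release() drops the task reference and
 * unmaps everything when the last user goes away.
 */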
typedef struct vpudrv_buffer_pool_t
{
    vpudrv_buffer_t vdb;
    int             inuse;
} vpudrv_buffer_pool_t;

typedef struct {
    unsigned long        core_idx;
    unsigned int         product_code;
    int                  vpu_fd;
    vpu_instance_pool_t *pvip;
    int                  task_num;
    int                  clock_state;
    vpudrv_buffer_t      vdb_register;
    vpu_buffer_t         vpu_common_memory;
    vpudrv_buffer_pool_t vpu_buffer_pool[MAX_VPU_BUFFER_POOL];
    int                  vpu_buffer_pool_count;
    void* vpu_mutex;
    void* vpu_omx_mutex;
    void* vpu_disp_mutex;
    void* vmem_mutex;
} vdi_info_t;

static vdi_info_t s_vdi_info[MAX_NUM_VPU_CORE];

static int swap_endian(unsigned long core_idx, unsigned char *data, int len, int endian);

void vdi_flush_ddr(unsigned long core_idx, unsigned long start, unsigned long size, unsigned char flag)
{
    vdi_info_t *vdi;
    vpudrv_flush_cache_t cache_info;

    vdi = &s_vdi_info[core_idx];
    cache_info.start = start;
    cache_info.size  = size;
    cache_info.flag  = flag;
    ioctl(vdi->vpu_fd, VDI_IOCTL_FLUSH_DCACHE, &cache_info);
}
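/*
 * The instance-pool mutexes live in memory shared across processes. When a
 * process dies while holding one, the kernel driver appears to stamp the
 * mutex with the sentinel value 0xdead10cc ("dead lock"); the helper below
 * detects that sentinel and re-initializes the mutex as process-shared so
 * the next owner can lock it without deadlocking. On platforms that provide
 * PTHREAD_MUTEX_ROBUST_NP, robust mutexes make this recovery unnecessary.
 */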
static void restore_mutex_in_dead(MUTEX_HANDLE *mutex)
{
    int mutex_value;

    if (!mutex)
        return;
#if defined(ANDROID)
    mutex_value = mutex->value;
#else
    memcpy(&mutex_value, mutex, sizeof(mutex_value));
#endif
    if (mutex_value == (int)0xdead10cc) // destroyed by the device driver
    {
        pthread_mutexattr_t mutexattr;
        pthread_mutexattr_init(&mutexattr);
        pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(mutex, &mutexattr);
    }
}

static void vmem_lock(vdi_info_t* vdi)
{
#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
#else
    const int MUTEX_TIMEOUT = 0x7fffffff;
#endif
#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
    restore_mutex_in_dead((MUTEX_HANDLE *)vdi->vmem_mutex);
    pthread_mutex_lock((MUTEX_HANDLE *)vdi->vmem_mutex);
#else
    if (pthread_mutex_lock((MUTEX_HANDLE *)vdi->vmem_mutex) != 0) {
        VLOG(ERR, "%s:%d failed to pthread_mutex_lock\n", __FUNCTION__, __LINE__);
    }
#endif
    return; //lint !e454
}

static void vmem_unlock(vdi_info_t* vdi)
{
    pthread_mutex_unlock((MUTEX_HANDLE *)vdi->vmem_mutex); //lint !e455
}
int vdi_probe(unsigned long core_idx)
{
    int ret;

    ret = vdi_init(core_idx);
    vdi_release(core_idx);
    return ret;
}

int vdi_init(unsigned long core_idx)
{
    vdi_info_t *vdi;
    int i;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return 0;

    vdi = &s_vdi_info[core_idx];
    if (vdi->vpu_fd != -1 && vdi->vpu_fd != 0x00)
    {
        vdi->task_num++;
        return 0;
    }

    vdi->vpu_fd = open(VPU_DEVICE_NAME, O_RDWR); // To support parallel processing on multiple VPUs through this API, the driver must allow being opened multiple times.
    if (vdi->vpu_fd < 0) {
        VLOG(ERR, "[VDI] Can't open vpu driver. [error=%s]. try to run vdi/linux/driver/load.sh script \n", strerror(errno));
        return -1;
    }

    memset(vdi->vpu_buffer_pool, 0x00, sizeof(vpudrv_buffer_pool_t)*MAX_VPU_BUFFER_POOL);

    if (!vdi_get_instance_pool(core_idx))
    {
        VLOG(INFO, "[VDI] fail to create shared info for saving context \n");
        goto ERR_VDI_INIT;
    }

    if (vdi->pvip->instance_pool_inited == FALSE)
    {
        int* pCodecInst;
        pthread_mutexattr_t mutexattr;

        pthread_mutexattr_init(&mutexattr);
        pthread_mutexattr_setpshared(&mutexattr, PTHREAD_PROCESS_SHARED);
#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
#else
        /* If a process or a thread terminates abnormally,
         * pthread_mutexattr_setrobust_np(attr, PTHREAD_MUTEX_ROBUST_NP) lets
         * the next owner call pthread_mutex_lock() without deadlocking.
         */
        pthread_mutexattr_setrobust_np(&mutexattr, PTHREAD_MUTEX_ROBUST_NP);
#endif
        pthread_mutex_init((MUTEX_HANDLE *)vdi->vpu_mutex, &mutexattr);
        pthread_mutex_init((MUTEX_HANDLE *)vdi->vpu_disp_mutex, &mutexattr);
        pthread_mutex_init((MUTEX_HANDLE *)vdi->vmem_mutex, &mutexattr);

        for (i = 0; i < MAX_NUM_INSTANCE; i++) {
            pCodecInst = (int *)vdi->pvip->codecInstPool[i];
            pCodecInst[1] = i; // instIndex of CodecInst
            pCodecInst[0] = 0; // inUse flag of CodecInst
        }
        vdi->pvip->instance_pool_inited = TRUE;
    }

#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_GET_REGISTER_INFO, &vdi->vdb_register) < 0)
    {
        VLOG(ERR, "[VDI] fail to get host interface register\n");
        goto ERR_VDI_INIT;
    }
    vdi->vdb_register.virt_addr = (unsigned long)mmap(NULL, vdi->vdb_register.size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, vdi->vdb_register.phys_addr);
#else
    vdi->vdb_register.size = VPU_BIT_REG_SIZE;
    vdi->vdb_register.virt_addr = (unsigned long)mmap(NULL, vdi->vdb_register.size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, 0);
#endif
    if ((void *)vdi->vdb_register.virt_addr == MAP_FAILED)
    {
        VLOG(ERR, "[VDI] fail to map vpu registers \n");
        goto ERR_VDI_INIT;
    }
    VLOG(INFO, "[VDI] map vdb_register core_idx=%d, virtaddr=0x%lx, size=%d\n", core_idx, vdi->vdb_register.virt_addr, vdi->vdb_register.size);

    if (vdi_lock(core_idx) < 0)
    {
        VLOG(ERR, "[VDI] fail to handle lock function\n");
        goto ERR_VDI_INIT;
    }

    vdi_set_clock_gate(core_idx, 1);
    vdi->product_code = vdi_read_register(core_idx, VPU_PRODUCT_CODE_REGISTER);

    if (vdi_allocate_common_memory(core_idx) < 0)
    {
        VLOG(ERR, "[VDI] fail to get vpu common buffer from driver\n");
        goto ERR_VDI_INIT;
    }

    vdi->core_idx = core_idx;
    vdi->task_num++;
    vdi_set_clock_gate(core_idx, 0);
    vdi_unlock(core_idx);

    VLOG(INFO, "[VDI] driver initialized successfully \n");
    return 0;

ERR_VDI_INIT:
    vdi_unlock(core_idx);
    vdi_release(core_idx);
    return -1;
}
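/*
 * Firmware download: the 512-halfword microcode image is copied into a
 * vpu_bit_firmware_info_t and handed to the kernel driver through write();
 * the driver presumably keeps the image so it can reload the BIT processor
 * after suspend/resume as well as at initial boot.
 */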
int vdi_set_bit_firmware_to_pm(unsigned long core_idx, const unsigned short *code)
{
    int i;
    vpu_bit_firmware_info_t bit_firmware_info;
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return 0;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return 0;

    bit_firmware_info.size = sizeof(vpu_bit_firmware_info_t);
    bit_firmware_info.core_idx = core_idx;
#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    bit_firmware_info.reg_base_offset = (core_idx*VPU_CORE_BASE_OFFSET);
#else
    bit_firmware_info.reg_base_offset = 0;
#endif
    for (i = 0; i < 512; i++)
        bit_firmware_info.bit_code[i] = code[i];

    if (write(vdi->vpu_fd, &bit_firmware_info, bit_firmware_info.size) < 0)
    {
        VLOG(ERR, "[VDI] fail to vdi_set_bit_firmware core=%d\n", bit_firmware_info.core_idx);
        return -1;
    }
    return 0;
}
#if defined(SUPPORT_SW_UART) || defined(SUPPORT_SW_UART_V2)
int vdi_get_task_num(unsigned long core_idx)
{
    vdi_info_t *vdi;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    return vdi->task_num;
}
#endif

int vdi_release(unsigned long core_idx)
{
    int i;
    vpudrv_buffer_t vdb;
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return 0;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return 0;

    if (vdi_lock(core_idx) < 0)
    {
        VLOG(ERR, "[VDI] fail to handle lock function\n");
        return -1;
    }

    if (vdi->task_num > 1) // other opened instances still hold this core
    {
        vdi->task_num--;
        vdi_unlock(core_idx);
        return 0;
    }

    if (vdi->vdb_register.virt_addr)
        munmap((void *)vdi->vdb_register.virt_addr, vdi->vdb_register.size);
    osal_memset(&vdi->vdb_register, 0x00, sizeof(vpudrv_buffer_t));

    vdb.size = 0;
    // look up the common memory buffer so its virtual mapping can be freed
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_common_memory.phys_addr >= vdi->vpu_buffer_pool[i].vdb.phys_addr &&
            vdi->vpu_common_memory.phys_addr < (vdi->vpu_buffer_pool[i].vdb.phys_addr + vdi->vpu_buffer_pool[i].vdb.size))
        {
            vdi->vpu_buffer_pool[i].inuse = 0;
            vdi->vpu_buffer_pool_count--;
            vdb = vdi->vpu_buffer_pool[i].vdb;
            break;
        }
    }
    vdi_unlock(core_idx);

    if (vdb.size > 0)
    {
        munmap((void *)vdb.virt_addr, vdb.size);
        memset(&vdi->vpu_common_memory, 0x00, sizeof(vpu_buffer_t));
    }

    vdi->task_num--;
    if (vdi->vpu_fd != -1 && vdi->vpu_fd != 0x00)
    {
        close(vdi->vpu_fd);
        vdi->vpu_fd = -1;
    }
    memset(vdi, 0x00, sizeof(vdi_info_t));
    return 0;
}
int vdi_get_common_memory(unsigned long core_idx, vpu_buffer_t *vb)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memcpy(vb, &vdi->vpu_common_memory, sizeof(vpu_buffer_t));
    return 0;
}

int vdi_allocate_common_memory(unsigned long core_idx)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;
    int i;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx]; // index only after the bounds check above
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    vdb.size = SIZE_COMMON*MAX_NUM_VPU_CORE;
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_GET_COMMON_MEMORY, &vdb) < 0)
    {
        VLOG(ERR, "[VDI] fail to vdi_allocate_dma_memory size=%d\n", vdb.size);
        return -1;
    }

    vdb.virt_addr = (unsigned long)mmap(NULL, vdb.size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, DRAM_MEM2SYS(vdb.phys_addr));
    if ((void *)vdb.virt_addr == MAP_FAILED)
    {
        VLOG(ERR, "[VDI] fail to map common memory phyaddr=%#lx, size = %d\n", vdb.phys_addr, vdb.size);
        return -1;
    }
    VLOG(INFO, "[VDI] vdi_allocate_common_memory, physaddr=%#lx, virtaddr=%#lx\n", vdb.phys_addr, vdb.virt_addr);

    // convert the OS driver buffer type to the vpu buffer type
#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    vdi->pvip->vpu_common_buffer.size      = SIZE_COMMON;
    vdi->pvip->vpu_common_buffer.phys_addr = (unsigned long)(vdb.phys_addr + (core_idx*SIZE_COMMON));
    vdi->pvip->vpu_common_buffer.base      = (unsigned long)(vdb.base + (core_idx*SIZE_COMMON));
    vdi->pvip->vpu_common_buffer.virt_addr = (unsigned long)(vdb.virt_addr + (core_idx*SIZE_COMMON));
#else
    vdi->pvip->vpu_common_buffer.size      = SIZE_COMMON;
    vdi->pvip->vpu_common_buffer.phys_addr = (unsigned long)(vdb.phys_addr);
    vdi->pvip->vpu_common_buffer.base      = (unsigned long)(vdb.base);
    vdi->pvip->vpu_common_buffer.virt_addr = (unsigned long)(vdb.virt_addr);
#endif
    osal_memcpy(&vdi->vpu_common_memory, &vdi->pvip->vpu_common_buffer, sizeof(vpu_buffer_t));

    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 0)
        {
            vdi->vpu_buffer_pool[i].vdb = vdb;
            vdi->vpu_buffer_pool_count++;
            vdi->vpu_buffer_pool[i].inuse = 1;
            break;
        }
    }

    VLOG(INFO, "[VDI] vdi_allocate_common_memory physaddr=0x%lx, size=%d, virtaddr=0x%lx\n", vdi->vpu_common_memory.phys_addr, (int)vdi->vpu_common_memory.size, vdi->vpu_common_memory.virt_addr);
    return 0;
}
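/*
 * Shared instance-pool layout, as reconstructed from the pointer arithmetic
 * below (not from a spec): one block holds the vpu_instance_pool_t followed
 * by VDI_NUM_LOCK_HANDLES process-shared mutexes:
 *
 *   [vpu_instance_pool_t][vpu_mutex][vpu_disp_mutex][vmem_mutex][spare]
 *
 * With SUPPORT_MULTI_CORE_IN_ONE_DRIVER, one such block is laid out per core
 * inside a single allocation.
 */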
vpu_instance_pool_t *vdi_get_instance_pool(unsigned long core_idx)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return NULL;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return NULL;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    if (!vdi->pvip)
    {
        vdb.size = sizeof(vpu_instance_pool_t) + sizeof(MUTEX_HANDLE)*VDI_NUM_LOCK_HANDLES;
#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
        vdb.size *= MAX_NUM_VPU_CORE;
#endif
        if (ioctl(vdi->vpu_fd, VDI_IOCTL_GET_INSTANCE_POOL, &vdb) < 0)
        {
            VLOG(ERR, "[VDI] fail to allocate instance pool physical space=%d\n", (int)vdb.size);
            return NULL;
        }
#ifdef USE_VMALLOC_FOR_INSTANCE_POOL_MEMORY
        vdb.virt_addr = (unsigned long)mmap(NULL, vdb.size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, 0);
#else
        vdb.virt_addr = (unsigned long)mmap(NULL, vdb.size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, vdb.phys_addr);
#endif
        if ((void *)vdb.virt_addr == MAP_FAILED)
        {
            VLOG(ERR, "[VDI] fail to map instance pool phyaddr=0x%lx, size = %d\n", vdb.phys_addr, (int)vdb.size);
            return NULL;
        }
#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
        vdi->pvip = (vpu_instance_pool_t *)(vdb.virt_addr + (core_idx*(sizeof(vpu_instance_pool_t) + sizeof(MUTEX_HANDLE)*VDI_NUM_LOCK_HANDLES)));
#else
        vdi->pvip = (vpu_instance_pool_t *)(vdb.virt_addr);
#endif
        // The lock handles live right behind the vpu_instance_pool_t in the
        // shared allocation, so point the mutex pointers at that area.
        vdi->vpu_mutex      = (void *)((unsigned long)vdi->pvip + sizeof(vpu_instance_pool_t));
        vdi->vpu_disp_mutex = (void *)((unsigned long)vdi->pvip + sizeof(vpu_instance_pool_t) + sizeof(MUTEX_HANDLE));
        vdi->vmem_mutex     = (void *)((unsigned long)vdi->pvip + sizeof(vpu_instance_pool_t) + 2*sizeof(MUTEX_HANDLE));
        VLOG(INFO, "[VDI] instance pool physaddr=0x%lx, virtaddr=0x%lx, base=0x%lx, size=%d\n", vdb.phys_addr, vdb.virt_addr, vdb.base, (int)vdb.size);
    }
    return (vpu_instance_pool_t *)vdi->pvip;
}
int vdi_open_instance(unsigned long core_idx, unsigned long inst_idx)
{
    vdi_info_t *vdi;
    vpudrv_inst_info_t inst_info;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    inst_info.core_idx = core_idx;
    inst_info.inst_idx = inst_idx;
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_OPEN_INSTANCE, &inst_info) < 0)
    {
        VLOG(ERR, "[VDI] fail to deliver open instance num inst_idx=%d\n", (int)inst_idx);
        return -1;
    }
    vdi->pvip->vpu_instance_num = inst_info.inst_open_count;
    return 0;
}

int vdi_close_instance(unsigned long core_idx, unsigned long inst_idx)
{
    vdi_info_t *vdi;
    vpudrv_inst_info_t inst_info = {0, };

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    inst_info.core_idx = core_idx;
    inst_info.inst_idx = inst_idx;
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_CLOSE_INSTANCE, &inst_info) < 0)
    {
        VLOG(ERR, "[VDI] fail to deliver close instance num inst_idx=%d\n", (int)inst_idx);
        return -1;
    }
    vdi->pvip->vpu_instance_num = inst_info.inst_open_count;
    return 0;
}
int vdi_get_instance_num(unsigned long core_idx)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    return vdi->pvip->vpu_instance_num;
}

int vdi_hw_reset(unsigned long core_idx) // DEVICE_ADDR_SW_RESET
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    return ioctl(vdi->vpu_fd, VDI_IOCTL_RESET, 0);
}
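/*
 * Locking discipline (inferred from the call sites in this file): vdi_lock()
 * serializes command submission to a core, vdi_disp_lock() serializes
 * display-buffer bookkeeping, and vmem_lock() guards the vpu_buffer_pool
 * table. All three are process-shared mutexes in the instance pool; on
 * platforms without robust mutexes, restore_mutex_in_dead() runs before
 * each lock attempt.
 */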
int vdi_lock(unsigned long core_idx)
{
    vdi_info_t *vdi;
#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
#else
    const int MUTEX_TIMEOUT = 0x7fffffff;
#endif

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
    restore_mutex_in_dead((MUTEX_HANDLE *)vdi->vpu_mutex);
    pthread_mutex_lock((MUTEX_HANDLE *)vdi->vpu_mutex);
#else
    if (pthread_mutex_lock((MUTEX_HANDLE *)vdi->vpu_mutex) != 0) {
        VLOG(ERR, "%s:%d failed to pthread_mutex_lock\n", __FUNCTION__, __LINE__);
        return -1;
    }
#endif
    return 0; //lint !e454
}

void vdi_unlock(unsigned long core_idx)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return;

    pthread_mutex_unlock((MUTEX_HANDLE *)vdi->vpu_mutex); //lint !e455
}

int vdi_disp_lock(unsigned long core_idx)
{
    vdi_info_t *vdi;
#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
#else
    const int MUTEX_TIMEOUT = 5000; // ms
#endif

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

#if defined(ANDROID) || !defined(PTHREAD_MUTEX_ROBUST_NP)
    restore_mutex_in_dead((MUTEX_HANDLE *)vdi->vpu_disp_mutex);
    pthread_mutex_lock((MUTEX_HANDLE *)vdi->vpu_disp_mutex);
#else
    if (pthread_mutex_lock((MUTEX_HANDLE *)vdi->vpu_disp_mutex) != 0) {
        VLOG(ERR, "%s:%d failed to pthread_mutex_lock\n", __FUNCTION__, __LINE__);
        return -1;
    }
#endif /* ANDROID */
    return 0; //lint !e454
}

void vdi_disp_unlock(unsigned long core_idx)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return;

    pthread_mutex_unlock((MUTEX_HANDLE *)vdi->vpu_disp_mutex); //lint !e455
}
void vdi_write_register(unsigned long core_idx, unsigned int addr, unsigned int data)
{
    vdi_info_t *vdi;
    unsigned long *reg_addr;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    reg_addr = (unsigned long *)(addr + (unsigned long)vdi->vdb_register.virt_addr + (core_idx*VPU_CORE_BASE_OFFSET));
#else
    reg_addr = (unsigned long *)(addr + (unsigned long)vdi->vdb_register.virt_addr);
#endif
    *(volatile unsigned int *)reg_addr = data;
}

unsigned int vdi_read_register(unsigned long core_idx, unsigned int addr)
{
    vdi_info_t *vdi;
    unsigned long *reg_addr;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return (unsigned int)-1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return (unsigned int)-1;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    reg_addr = (unsigned long *)(addr + (unsigned long)vdi->vdb_register.virt_addr + (core_idx*VPU_CORE_BASE_OFFSET));
#else
    reg_addr = (unsigned long *)(addr + (unsigned long)vdi->vdb_register.virt_addr);
#endif
    return *(volatile unsigned int *)reg_addr;
}
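/*
 * FIO (fast I/O) indirect register access, as implied by the two helpers
 * below: the low 16 bits of W5_VPU_FIO_CTRL_ADDR carry the target address,
 * bit 16 selects read (0) or write (1), and the VPU sets bit 31 when the
 * transaction has completed. The helpers poll that done bit up to
 * FIO_TIMEOUT times; a read that never completes returns 0xffffffff.
 */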
#define FIO_TIMEOUT 100

unsigned int vdi_fio_read_register(unsigned long core_idx, unsigned int addr)
{
    unsigned int ctrl;
    unsigned int count = 0;
    unsigned int data  = 0xffffffff;

    ctrl  = (addr&0xffff);
    ctrl |= (0<<16);    /* read operation */
    vdi_write_register(core_idx, W5_VPU_FIO_CTRL_ADDR, ctrl);
    count = FIO_TIMEOUT;
    while (count--) {
        ctrl = vdi_read_register(core_idx, W5_VPU_FIO_CTRL_ADDR);
        if (ctrl & 0x80000000) {
            data = vdi_read_register(core_idx, W5_VPU_FIO_DATA);
            break;
        }
    }
    return data;
}

void vdi_fio_write_register(unsigned long core_idx, unsigned int addr, unsigned int data)
{
    unsigned int ctrl;
    unsigned int count = 0;

    vdi_write_register(core_idx, W5_VPU_FIO_DATA, data);
    ctrl  = (addr&0xffff);
    ctrl |= (1<<16);    /* write operation */
    vdi_write_register(core_idx, W5_VPU_FIO_CTRL_ADDR, ctrl);
    count = FIO_TIMEOUT;
    while (count--) {
        ctrl = vdi_read_register(core_idx, W5_VPU_FIO_CTRL_ADDR);
        if (ctrl & 0x80000000) {
            break;
        }
    }
}
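/*
 * The memory helpers below all follow the same pattern: walk
 * vpu_buffer_pool[] for the in-use buffer whose physical range contains
 * addr, translate addr to an offset inside that buffer's mmap'ed virtual
 * mapping, touch the bytes there, and flush the DDR cache range so the VPU
 * observes the change.
 */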
int vdi_clear_memory(unsigned long core_idx, PhysicalAddress addr, int len, int endian)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;
    unsigned long offset;
    int i;
    Uint8* zero;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 1)
        {
            vdb = vdi->vpu_buffer_pool[i].vdb;
            if (addr >= vdb.phys_addr && addr < (vdb.phys_addr + vdb.size))
                break;
        }
    }
    if (!vdb.size) {
        VLOG(ERR, "address 0x%08x is not a mapped address!!!\n", (int)addr);
        return -1;
    }

    zero = (Uint8*)osal_malloc(len);
    osal_memset((void*)zero, 0x00, len);
    offset = addr - (unsigned long)vdb.phys_addr;
    osal_memcpy((void *)((unsigned long)vdb.virt_addr+offset), zero, len);
    vdi_flush_ddr(core_idx, (unsigned long)(vdb.phys_addr+offset), len, 1);
    osal_free(zero);
    return len;
}
int vdi_write_memory(unsigned long core_idx, PhysicalAddress addr, unsigned char *data, int len, int endian)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;
    unsigned long offset;
    int i;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    if (!data)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 1)
        {
            vdb = vdi->vpu_buffer_pool[i].vdb;
            if (addr >= vdb.phys_addr && addr < (vdb.phys_addr + vdb.size)) {
                break;
            }
        }
    }
    if (!vdb.size) {
        VLOG(ERR, "address 0x%08x is not a mapped address!!!\n", (int)addr);
        return -1;
    }

#if 0 //def CNM_FPGA_PLATFORM
    // guard against writes into the common buffer while the VCPU is running
    if (vdb.phys_addr >= vdi->vpu_common_memory.phys_addr &&
        vdb.phys_addr < (vdi->vpu_common_memory.phys_addr + SIZE_COMMON))
    {
        if (PRODUCT_CODE_W_SERIES(vdi->product_code))
        {
            if (vdi_read_register(core_idx, W5_VCPU_CUR_PC) != 0)
            {
                VLOG(ERR, "not permitted to write the common buffer addr=%lx, size=%d\n", vdb.phys_addr, vdb.size);
                VLOG(ERR, "this process will exit\n");
                exit(-1);
            }
        }
        else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
        }
        else {
            VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
            return -1;
        }
    }
#endif
    offset = addr - (unsigned long)vdb.phys_addr;
    swap_endian(core_idx, data, len, endian);
    osal_memcpy((void *)((unsigned long)vdb.virt_addr+offset), data, len);
    vdi_flush_ddr(core_idx, (unsigned long)(vdb.phys_addr+offset), len, 1);
    return len;
}
int vdi_read_memory(unsigned long core_idx, PhysicalAddress addr, unsigned char *data, int len, int endian)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;
    unsigned long offset;
    int i;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 1)
        {
            vdb = vdi->vpu_buffer_pool[i].vdb;
            if (addr >= vdb.phys_addr && addr < (vdb.phys_addr + vdb.size))
                break;
        }
    }
    if (!vdb.size)
        return -1;

    offset = addr - (unsigned long)vdb.phys_addr;
    vdi_flush_ddr(core_idx, (unsigned long)(vdb.phys_addr+offset), len, 1);
    osal_memcpy(data, (const void *)((unsigned long)vdb.virt_addr+offset), len);
    swap_endian(core_idx, data, len, endian);
    return len;
}

// Returns a pointer into the buffer's virtual mapping in *ppdata instead of copying the data out.
int vdi_read_memory2(unsigned long core_idx, PhysicalAddress addr, unsigned char **ppdata, int len, int endian)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;
    unsigned long offset;
    int i;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 1)
        {
            vdb = vdi->vpu_buffer_pool[i].vdb;
            if (addr >= vdb.phys_addr && addr < (vdb.phys_addr + vdb.size))
                break;
        }
    }
    if (!vdb.size)
        return -1;

    offset = addr - (unsigned long)vdb.phys_addr;
    vdi_flush_ddr(core_idx, (unsigned long)(vdb.phys_addr+offset), len, 1);
    *ppdata = (unsigned char *)(vdb.virt_addr + offset);
    swap_endian(core_idx, *ppdata, len, endian);
    return len;
}

void* vdi_map_virt2(unsigned long core_idx, int size, PhysicalAddress bufY)
{
    vdi_info_t *vdi = &s_vdi_info[core_idx];
    void *virt_addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vdi->vpu_fd, bufY);
    return virt_addr;
}
int vdi_virt_to_phys(unsigned long core_idx, vpu_buffer_t *vb)
{
    vdi_info_t *vdi;
    vpudrv_buffer_t vdb;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    vdb.virt_addr = vb->virt_addr;
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_GET_PHYSICAL_MEMORY, &vdb) < 0)
    {
        VLOG(ERR, "[VDI] fail to GET_PHYSICAL_MEMORY\n");
        return -1;
    }
    vb->phys_addr = (unsigned long)vdb.phys_addr;
    vb->base      = (unsigned long)vdb.base;
    VLOG(INFO, "get phy = %#lx vb->phy = %#lx, base=%lx\n", vdb.phys_addr, vb->phys_addr, vb->base);
    return 0;
}
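/*
 * DMA buffer allocation flow (a summary of the code below, not of driver
 * internals): the size request goes to the kernel through
 * VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY, the returned physical range is
 * mmap'ed into this process, and the resulting vpudrv_buffer_t is recorded
 * in vpu_buffer_pool[] under vmem_lock() so later reads, writes, and frees
 * can find it by physical address.
 */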
int vdi_allocate_dma_memory(unsigned long core_idx, vpu_buffer_t *vb, int memTypes, int instIndex)
{
    vdi_info_t *vdi;
    int i;
    vpudrv_buffer_t vdb;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    vdb.size = vb->size;
    if (ioctl(vdi->vpu_fd, VDI_IOCTL_ALLOCATE_PHYSICAL_MEMORY, &vdb) < 0)
    {
        VLOG(ERR, "[VDI] fail to vdi_allocate_dma_memory size=%d\n", vb->size);
        return -1;
    }
    vb->phys_addr = (unsigned long)vdb.phys_addr;
    vb->base      = (unsigned long)vdb.base;

    // map the physical range into this process
    vdb.virt_addr = (unsigned long)mmap(NULL, vdb.size, PROT_READ | PROT_WRITE,
                                        MAP_SHARED, vdi->vpu_fd, DRAM_MEM2SYS(vdb.phys_addr));
    if ((void *)vdb.virt_addr == MAP_FAILED)
    {
        memset(vb, 0x00, sizeof(vpu_buffer_t));
        return -1;
    }
    vb->virt_addr = vdb.virt_addr;

    vmem_lock(vdi);
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].inuse == 0)
        {
            vdi->vpu_buffer_pool[i].vdb = vdb;
            vdi->vpu_buffer_pool_count++;
            vdi->vpu_buffer_pool[i].inuse = 1;
            break;
        }
    }
    vmem_unlock(vdi);

    VLOG(INFO, "[VDI] vdi_allocate_dma_memory, physaddr=%#lx, virtaddr=%#lx~%#lx, size=%d, memType=%d\n",
         vb->phys_addr, vb->virt_addr, vb->virt_addr + vb->size, vb->size, memTypes);
    return 0;
}
unsigned long vdi_get_dma_memory_free_size(unsigned long coreIdx)
{
    vdi_info_t *vdi;
    int size = 0;

    vdi = &s_vdi_info[coreIdx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return (unsigned long)-1;

    if (ioctl(vdi->vpu_fd, VDI_IOCTL_GET_FREE_MEM_SIZE, &size) < 0) {
        VLOG(ERR, "[VDI] fail VDI_IOCTL_GET_FREE_MEM_SIZE size=%d\n", size);
        return 0;
    }
    return size;
}

int vdi_attach_dma_memory(unsigned long core_idx, vpu_buffer_t *vb)
{
    vdi_info_t *vdi;
    int i;
    vpudrv_buffer_t vdb;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    vdb.size      = vb->size;
    vdb.phys_addr = vb->phys_addr;
    vdb.base      = vb->base;
    vdb.virt_addr = vb->virt_addr;

    vmem_lock(vdi);
    // reuse the entry that already tracks this physical address, else take a free slot
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].vdb.phys_addr == vb->phys_addr)
        {
            vdi->vpu_buffer_pool[i].vdb   = vdb;
            vdi->vpu_buffer_pool[i].inuse = 1;
            break;
        }
        else
        {
            if (vdi->vpu_buffer_pool[i].inuse == 0)
            {
                vdi->vpu_buffer_pool[i].vdb = vdb;
                vdi->vpu_buffer_pool_count++;
                vdi->vpu_buffer_pool[i].inuse = 1;
                break;
            }
        }
    }
    vmem_unlock(vdi);

    VLOG(INFO, "[VDI] vdi_attach_dma_memory, physaddr=%#lx, virtaddr=0x%lx, size=%d, index=%d\n", vb->phys_addr, vb->virt_addr, vb->size, i);
    return 0;
}
int vdi_dettach_dma_memory(unsigned long core_idx, vpu_buffer_t *vb)
{
    vdi_info_t *vdi;
    int i;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vb || !vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    if (vb->size == 0)
        return -1;

    vmem_lock(vdi);
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        if (vdi->vpu_buffer_pool[i].vdb.phys_addr == vb->phys_addr)
        {
            vdi->vpu_buffer_pool[i].inuse = 0;
            vdi->vpu_buffer_pool_count--;
            break;
        }
    }
    vmem_unlock(vdi);
    return 0;
}

void vdi_free_dma_memory(unsigned long core_idx, vpu_buffer_t *vb, int memTypes, int instIndex)
{
    vdi_info_t *vdi;
    int i;
    vpudrv_buffer_t vdb;

#ifdef SUPPORT_MULTI_CORE_IN_ONE_DRIVER
    core_idx = 0;
#endif
    if (core_idx >= MAX_NUM_VPU_CORE)
        return;

    vdi = &s_vdi_info[core_idx];
    if (!vb || !vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return;

    if (vb->size == 0)
        return;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    vmem_lock(vdi);
    for (i = 0; i < MAX_VPU_BUFFER_POOL; i++)
    {
        /* match physical address, size, and virtual address to find the exact buffer */
        if ((vdi->vpu_buffer_pool[i].vdb.phys_addr == vb->phys_addr)
            && (vdi->vpu_buffer_pool[i].vdb.size == vb->size)
            && (vdi->vpu_buffer_pool[i].vdb.virt_addr == vb->virt_addr))
        {
            vdi->vpu_buffer_pool[i].inuse = 0;
            vdi->vpu_buffer_pool_count--;
            vdb = vdi->vpu_buffer_pool[i].vdb;
            break;
        }
    }
    vmem_unlock(vdi);

    if (!vdb.size)
    {
        VLOG(ERR, "[VDI] invalid buffer to free address = 0x%lx\n", vdb.virt_addr);
        return;
    }

    ioctl(vdi->vpu_fd, VDI_IOCTL_FREE_PHYSICALMEMORY, &vdb);
    if (munmap((void *)vdb.virt_addr, vdb.size) != 0)
    {
        VLOG(ERR, "[VDI] fail to vdi_free_dma_memory virtual address = 0x%lx\n", vdb.virt_addr);
    }
    osal_memset(vb, 0, sizeof(vpu_buffer_t));
}
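/*
 * SRAM here is on-chip working memory the codec can use instead of DDR for
 * line buffers. The size reserved below depends on the product code; the
 * comments at the VDI_*_SRAM_SIZE defines give the per-resolution
 * requirements. The base address is VDI_SRAM_BASE_ADDR plus a per-core
 * slice of that size.
 */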
int vdi_get_sram_memory(unsigned long core_idx, vpu_buffer_t *vb)
{
    vdi_info_t *vdi = NULL;
    vpudrv_buffer_t vdb;
    unsigned int sram_size = 0;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vb || !vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    osal_memset(&vdb, 0x00, sizeof(vpudrv_buffer_t));
    switch (vdi->product_code) {
    case BODA950_CODE:
    case CODA960_CODE:
    case CODA980_CODE:
        sram_size = VDI_CODA9_SRAM_SIZE; break;
    case WAVE511_CODE:
        sram_size = VDI_WAVE511_SRAM_SIZE; break;
    case WAVE521_CODE:
        sram_size = VDI_WAVE521_SRAM_SIZE; break;
    case WAVE521C_CODE:
    case WAVE521C_DUAL_CODE:
    case WAVE517_CODE:
        sram_size = VDI_WAVE521C_SRAM_SIZE; break;
    default:
        VLOG(ERR, "[VDI] check product_code(%x)\n", vdi->product_code);
        break;
    }

    if (sram_size > 0) // if the SRAM address is known to the VDI layer, use it directly for the SRAM buffer
    {
        vb->phys_addr = VDI_SRAM_BASE_ADDR + (core_idx*sram_size); // HOST can set the DRAM base addr to VDI_SRAM_BASE_ADDR.
        vb->size      = sram_size;
    }
    return 0;
}
int vdi_set_clock_gate(unsigned long core_idx, int enable)
{
    vdi_info_t *vdi = NULL;
    int ret;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    // Host-side clock gating is skipped for the WAVE5xx parts.
    if (vdi->product_code == WAVE512_CODE || vdi->product_code == WAVE515_CODE || vdi->product_code == WAVE517_CODE ||
        vdi->product_code == WAVE521_CODE || vdi->product_code == WAVE521C_CODE || vdi->product_code == WAVE511_CODE || vdi->product_code == WAVE521C_DUAL_CODE) {
        return 0;
    }

    vdi->clock_state = enable;
    ret = ioctl(vdi->vpu_fd, VDI_IOCTL_SET_CLOCK_GATE, &enable);
    return ret;
}

int vdi_get_clock_gate(unsigned long core_idx)
{
    vdi_info_t *vdi;
    int ret;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    ret = vdi->clock_state;
    return ret;
}
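/*
 * Busy-wait helpers. get_pc_addr() picks the program-counter register for
 * the product family so a timeout can report where the firmware is stuck.
 * The vdi_wait_*() loops poll a status register until it reaches the idle
 * value for the family (the magic constants below), giving up after
 * `timeout` ms as measured by osal_gettime().
 */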
static int get_pc_addr(Uint32 product_code)
{
    if (PRODUCT_CODE_W_SERIES(product_code)) {
        return W5_VCPU_CUR_PC;
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(product_code)) {
        return BIT_CUR_PC;
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", product_code);
        return -1;
    }
}

int vdi_wait_bus_busy(unsigned long core_idx, int timeout, unsigned int gdi_busy_flag)
{
    Uint64 elapse, cur;
    Uint32 pc;
    vdi_info_t *vdi;

    vdi = &s_vdi_info[core_idx];
    elapse = osal_gettime();
    pc = get_pc_addr(vdi->product_code);
    while (1)
    {
        if (vdi->product_code == WAVE521_CODE || vdi->product_code == WAVE521C_CODE || vdi->product_code == WAVE511_CODE || vdi->product_code == WAVE521C_DUAL_CODE ||
            vdi->product_code == WAVE517_CODE) {
            if (vdi_fio_read_register(core_idx, gdi_busy_flag) == 0x3f) break;
        }
        else if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
            if (vdi_fio_read_register(core_idx, gdi_busy_flag) == 0x738) break;
        }
        else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
            if (vdi_read_register(core_idx, gdi_busy_flag) == 0x77) break;
        }
        else {
            VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
            return -1;
        }
        if (timeout > 0) {
            cur = osal_gettime();
            if ((cur - elapse) > timeout) {
                print_busy_timeout_status(core_idx, vdi->product_code, pc);
                return -1;
            }
        }
    }
    return 0;
}
int vdi_wait_vpu_busy(unsigned long core_idx, int timeout, unsigned int addr_bit_busy_flag)
{
    Uint64 elapse, cur;
    Uint32 pc;
    Uint32 normalReg = TRUE;
    vdi_info_t *vdi;

    vdi = &s_vdi_info[core_idx];
    elapse = osal_gettime();
    pc = get_pc_addr(vdi->product_code);
    // On W-series, busy flags at addresses with bit 15 set are read through the FIO port.
    if (PRODUCT_CODE_W_SERIES(vdi->product_code) && (addr_bit_busy_flag&0x8000)) {
        normalReg = FALSE;
    }
    while (1)
    {
        if (normalReg == TRUE) {
            if (vdi_read_register(core_idx, addr_bit_busy_flag) == 0) break;
        }
        else {
            if (vdi_fio_read_register(core_idx, addr_bit_busy_flag) == 0) break;
        }
        if (timeout > 0) {
            cur = osal_gettime();
            if ((cur - elapse) > timeout) {
                print_busy_timeout_status(core_idx, vdi->product_code, pc);
                return -1;
            }
        }
    }
    return 0;
}

int vdi_wait_vcpu_bus_busy(unsigned long core_idx, int timeout, unsigned int addr_bit_busy_flag)
{
    Uint64 elapse, cur;
    Uint32 pc;
    Uint32 normalReg = TRUE;
    vdi_info_t *vdi;

    vdi = &s_vdi_info[core_idx];
    elapse = osal_gettime();
    pc = get_pc_addr(vdi->product_code);
    if (PRODUCT_CODE_W_SERIES(vdi->product_code) && (addr_bit_busy_flag&0x8000)) {
        normalReg = FALSE;
    }
    while (1)
    {
        if (normalReg == TRUE) {
            if (vdi_read_register(core_idx, addr_bit_busy_flag) == 0x40) break;
        }
        else {
            if (vdi_fio_read_register(core_idx, addr_bit_busy_flag) == 0x40) break;
        }
        if (timeout > 0) {
            cur = osal_gettime();
            if ((cur - elapse) > timeout) {
                print_busy_timeout_status(core_idx, vdi->product_code, pc);
                return -1;
            }
        }
    }
    return 0;
}
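/*
 * Interrupt wait: with SUPPORT_INTERRUPT the wait is delegated to the kernel
 * through VDI_IOCTL_WAIT_INTERRUPT, which blocks until the VPU raises an
 * interrupt or the timeout expires and reports the reason bits back. The
 * fallback path polls the interrupt status register from user space and
 * clears/acknowledges the reason itself.
 */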
#ifdef SUPPORT_MULTI_INST_INTR
int vdi_wait_interrupt(unsigned long coreIdx, unsigned int instIdx, int timeout)
#else
int vdi_wait_interrupt(unsigned long coreIdx, int timeout)
#endif
{
    int intr_reason = 0;
#ifdef SUPPORT_INTERRUPT
    int ret;
#endif
    vdi_info_t *vdi;
    vpudrv_intr_info_t intr_info;

    if (coreIdx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[coreIdx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

#ifdef SUPPORT_INTERRUPT
    intr_info.timeout     = timeout;
    intr_info.intr_reason = 0;
#ifdef SUPPORT_MULTI_INST_INTR
    intr_info.intr_inst_index = instIdx;
#endif
    ret = ioctl(vdi->vpu_fd, VDI_IOCTL_WAIT_INTERRUPT, (void*)&intr_info);
    if (ret != 0)
        return -1;
    intr_reason = intr_info.intr_reason;
#else
    struct timeval tv = {0};
    Uint32 int_sts_reg;
    Uint32 int_reason_reg;
    Uint64 startTime, endTime;

    UNREFERENCED_PARAMETER(intr_info);

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
        int_sts_reg = W5_VPU_VPU_INT_STS;
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
        int_sts_reg = BIT_INT_STS;
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
        return -1;
    }

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
        int_reason_reg = W5_VPU_VINT_REASON;
    }
    else {
        int_reason_reg = BIT_INT_REASON;
    }

    startTime = osal_gettime();
    while (TRUE) {
        if (vdi_read_register(coreIdx, int_sts_reg)) {
            if ((intr_reason=vdi_read_register(coreIdx, int_reason_reg))) {
                if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
                    vdi_write_register(coreIdx, W5_VPU_VINT_REASON_CLR, intr_reason);
                    vdi_write_register(coreIdx, W5_VPU_VINT_CLEAR, 0x1);
                }
                else {
                    vdi_write_register(coreIdx, BIT_INT_CLEAR, 0x1);
                }
                break;
            }
        }
        endTime = osal_gettime();
        if (timeout > 0 && (endTime-startTime) >= timeout) {
            return -1;
        }
    }
#endif
    return intr_reason;
}
//------------------------------------------------------------------------------
// LOG & ENDIAN functions
//------------------------------------------------------------------------------
int vdi_get_system_endian(unsigned long core_idx)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
        return VDI_128BIT_BUS_SYSTEM_ENDIAN;
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
        return VDI_SYSTEM_ENDIAN;
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
        return -1;
    }
}

int vdi_convert_endian(unsigned long core_idx, unsigned int endian)
{
    vdi_info_t *vdi;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
        switch (endian) {
        case VDI_LITTLE_ENDIAN:       endian = 0x00; break;
        case VDI_BIG_ENDIAN:          endian = 0x0f; break;
        case VDI_32BIT_LITTLE_ENDIAN: endian = 0x04; break;
        case VDI_32BIT_BIG_ENDIAN:    endian = 0x03; break;
        }
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
        return -1;
    }
    return (endian&0x0f);
}
static Uint32 convert_endian_coda9_to_wave4(Uint32 endian)
{
    Uint32 converted_endian = endian;
    switch (endian) {
    case VDI_LITTLE_ENDIAN:       converted_endian = 0; break;
    case VDI_BIG_ENDIAN:          converted_endian = 7; break;
    case VDI_32BIT_LITTLE_ENDIAN: converted_endian = 4; break;
    case VDI_32BIT_BIG_ENDIAN:    converted_endian = 3; break;
    }
    return converted_endian;
}
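/*
 * swap_endian(): the 4-bit endian codes appear to encode which swap stages
 * apply, so XOR-ing the requested endianness with the system endianness
 * yields the set of swaps needed (bit 0: byte swap, bit 1: 16-bit word swap,
 * bit 2: 32-bit dword swap, bit 3: 64-bit lword swap). Each set bit triggers
 * the corresponding *_swap() helper over the buffer.
 */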
int swap_endian(unsigned long core_idx, unsigned char *data, int len, int endian)
{
    vdi_info_t *vdi;
    int changes;
    int sys_endian;
    BOOL byteChange, wordChange, dwordChange, lwordChange;

    if (core_idx >= MAX_NUM_VPU_CORE)
        return -1;

    vdi = &s_vdi_info[core_idx];
    if (!vdi || vdi->vpu_fd == -1 || vdi->vpu_fd == 0x00)
        return -1;

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
        sys_endian = VDI_128BIT_BUS_SYSTEM_ENDIAN;
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
        sys_endian = VDI_SYSTEM_ENDIAN;
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
        return -1;
    }

    endian     = vdi_convert_endian(core_idx, endian);
    sys_endian = vdi_convert_endian(core_idx, sys_endian);
    if (endian == sys_endian)
        return 0;

    if (PRODUCT_CODE_W_SERIES(vdi->product_code)) {
    }
    else if (PRODUCT_CODE_NOT_W_SERIES(vdi->product_code)) {
        endian     = convert_endian_coda9_to_wave4(endian);
        sys_endian = convert_endian_coda9_to_wave4(sys_endian);
    }
    else {
        VLOG(ERR, "Unknown product id : %08x\n", vdi->product_code);
        return -1;
    }

    changes     = endian ^ sys_endian;
    byteChange  = changes&0x01;
    wordChange  = ((changes&0x02) == 0x02);
    dwordChange = ((changes&0x04) == 0x04);
    lwordChange = ((changes&0x08) == 0x08);

    if (byteChange)  byte_swap(data, len);
    if (wordChange)  word_swap(data, len);
    if (dwordChange) dword_swap(data, len);
    if (lwordChange) lword_swap(data, len);

    return 1;
}
#endif //#if defined(linux) || defined(__linux) || defined(ANDROID)