vc8000_normal_driver.c
/****************************************************************************
*
* The MIT License (MIT)
*
* Copyright (c) 2014 - 2021 VERISILICON
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************
*
* The GPL License (GPL)
*
* Copyright (C) 2014 - 2021 VERISILICON
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*****************************************************************************
*
* Note: This software is released under dual MIT and GPL licenses. A
* recipient may use this file under the terms of either the MIT license or
* GPL License. If you wish to use only one license not the other, you can
* indicate your decision by deleting one of the above license notices in your
* version of this file.
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
/* needed for __init, __exit directives */
#include <linux/init.h>
/* needed for remap_page_range, SetPageReserved, ClearPageReserved */
#include <linux/mm.h>
/* obviously, for kmalloc */
#include <linux/slab.h>
/* for struct file_operations, register_chrdev() */
#include <linux/fs.h>
/* standard error codes */
#include <linux/errno.h>
#include <linux/moduleparam.h>
/* request_irq(), free_irq() */
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
/* needed for virt_to_phys() */
#include <asm/io.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <linux/ioport.h>
#include <asm/irq.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of.h>
/* our own stuff */
#include "vc8000_driver.h"
unsigned long gBaseDDRHw = 0;
unsigned int pcie = 0; /* used in hantro_mmu.c */

//#define MULTI_THR_TEST
#ifdef MULTI_THR_TEST

#define WAIT_NODE_NUM 32
struct wait_list_node
{
    u32 node_id;                /* index of the node */
    u32 used_flag;              /* 1: the node has been inserted into the wait queue list */
    u32 sem_used;               /* 1: the resource is released and the semaphore has been upped */
    struct semaphore wait_sem;  /* the unique semaphore for each reserve_encoder thread */
    u32 wait_cond;              /* the condition to wait for; equal to the "core_info" */
    struct list_head wait_list; /* list node */
};
static struct list_head reserve_header;
static struct wait_list_node res_wait_node[WAIT_NODE_NUM];

static void wait_delay(unsigned int delay) {
    if (delay > 0) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        ktime_t dl = ktime_set((delay / MSEC_PER_SEC),
                               (delay % MSEC_PER_SEC) * NSEC_PER_MSEC);
        __set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_hrtimeout(&dl, HRTIMER_MODE_REL);
#else
        msleep(delay);
#endif
    }
}

static u32 request_wait_node(struct wait_list_node **node, u32 start_id)
{
    u32 i;
    struct wait_list_node *temp_node;
    while (1)
    {
        for (i = start_id; i < WAIT_NODE_NUM; i++)
        {
            temp_node = &res_wait_node[i];
            if (temp_node->used_flag == 0)
            {
                temp_node->used_flag = 1;
                *node = temp_node;
                return i;
            }
        }
        wait_delay(10);
    }
}

static void request_wait_sema(struct wait_list_node **node)
{
    u32 i;
    struct wait_list_node *temp_node;
    while (1)
    {
        for (i = 0; i < WAIT_NODE_NUM; i++)
        {
            temp_node = &res_wait_node[i];
            if ((temp_node->used_flag == 0) && (temp_node->sem_used == 0))
            {
                temp_node->sem_used = 1;
                *node = temp_node;
                return;
            }
        }
        wait_delay(10);
    }
}

static void init_wait_node(struct wait_list_node *node, u32 cond, u32 sem_flag)
{
    node->used_flag = 0;
    node->wait_cond = cond;
    sema_init(&node->wait_sem, sem_flag);
    INIT_LIST_HEAD(&node->wait_list);
    if (sem_flag > 0)
    {
        node->sem_used = 1;
    }
}

static void init_reserve_wait(u32 dev_num)
{
    u32 i;
    u32 cond = 0x80000001;
    u32 sem_flag = 0;
    struct wait_list_node *node;
    // printk("%s,%d, dev_num %d\n", __FUNCTION__, __LINE__, dev_num);
    INIT_LIST_HEAD(&reserve_header);
    for (i = 0; i < WAIT_NODE_NUM; i++)
    {
        if (i < dev_num)
            sem_flag = 1;
        else
            sem_flag = 0;
        node = &res_wait_node[i];
        node->node_id = i;
        init_wait_node(node, cond, sem_flag);
    }
}

void release_reserve_wait(u32 dev_num)
{
}

#endif
/******** variable declarations related to race conditions **********/
struct semaphore enc_core_sem;
DECLARE_WAIT_QUEUE_HEAD(hw_queue);
DEFINE_SPINLOCK(owner_lock);
DECLARE_WAIT_QUEUE_HEAD(enc_wait_queue);

/*------------------------------------------------------------------------
***************************** PORTING LAYER ********************************
-------------------------------------------------------------------------*/
#define RESOURCE_SHARED_INTER_SUBSYS 0 /* 0: no resource sharing between subsystems; 1: resource sharing exists */
#define SUBSYS_0_IO_ADDR 0x90000       /* customer-specific: set according to your platform */
#define SUBSYS_0_IO_SIZE (1024 * 4)    /* bytes */
#define SUBSYS_1_IO_ADDR 0xA0000       /* customer-specific: set according to your platform */
#define SUBSYS_1_IO_SIZE (20000 * 4)   /* bytes */
#define INT_PIN_SUBSYS_0_VC8000E -1
#define INT_PIN_SUBSYS_0_CUTREE -1
#define INT_PIN_SUBSYS_0_DEC400 -1
#define INT_PIN_SUBSYS_0_L2CACHE -1
#define INT_PIN_SUBSYS_1_VC8000E -1
#define INT_PIN_SUBSYS_1_CUTREE -1
#define INT_PIN_SUBSYS_1_DEC400 -1

/* For every subsystem, the subsys info should be listed here for subsequent use. */
/* base_addr, iosize, resource_shared */
SUBSYS_CONFIG subsys_array[] = {
    {SUBSYS_0_IO_ADDR, SUBSYS_0_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_0
    //{SUBSYS_1_IO_ADDR, SUBSYS_1_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_1
};

/* Configure every core of every subsystem here. */
/* NOTE: no matter which formats (HEVC/H264/JPEG/AV1/...) a VC8000E supports, just use [CORE_VC8000E] to indicate it is a VC8000E core. */
/* CUTREE can work standalone, so it can be its own subsystem or just one core of a subsystem. */
/* subsys_idx, core_type, offset, reg_size, irq */
CORE_CONFIG core_array[] = {
    {0, CORE_VC8000E, 0x1000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_VC8000E
    //{0, CORE_MMU, 0x2000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_MMU
    //{0, CORE_AXIFE, 0x3000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_AXIFE
    //{0, CORE_MMU_1, 0x4000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_MMU_1
    //{0, CORE_AXIFE_1, 0x5000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_AXIFE_1
    //{0, CORE_DEC400, 0x6000, 1600 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_DEC400
    //{0, CORE_L2CACHE, 0xc000, 500 * 4, INT_PIN_SUBSYS_0_L2CACHE}, //subsys_0_l2cache
    //{0, CORE_CUTREE, 0xd000, 500 * 4, INT_PIN_SUBSYS_0_L2CACHE}, //subsys_0_CUTREE
    //{1, CORE_CUTREE, 0x1000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //CUTREE
    //{1, CORE_MMU, 0x2000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //subsys_1_MMU
    //{1, CORE_AXIFE, 0x3000, 500 * 4, INT_PIN_SUBSYS_0_CUTREE}, //subsys_1_AXIFE
};
/*------------------------------ END -------------------------------------*/
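
/* A porting sketch (illustrative, not an active configuration): to bring up
 * the second subsystem, an integrator would add the SUBSYS_1 row to
 * subsys_array and give it its own cores in core_array, e.g. a standalone
 * CUTREE. All macro names below are defined above; pairing subsys 1 with
 * INT_PIN_SUBSYS_1_CUTREE is an assumption made for this example.
 */
#if 0
SUBSYS_CONFIG subsys_array[] = {
    {SUBSYS_0_IO_ADDR, SUBSYS_0_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_0
    {SUBSYS_1_IO_ADDR, SUBSYS_1_IO_SIZE, RESOURCE_SHARED_INTER_SUBSYS}, //subsys_1
};
CORE_CONFIG core_array[] = {
    {0, CORE_VC8000E, 0x1000, 500 * 4, INT_PIN_SUBSYS_0_VC8000E}, //subsys_0_VC8000E
    {1, CORE_CUTREE,  0x1000, 500 * 4, INT_PIN_SUBSYS_1_CUTREE},  //subsys_1_CUTREE
};
#endif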
/*************************** TYPE AND FUNCTION DECLARATION ****************/
/* here's all the must-remember stuff */
typedef struct
{
    SUBSYS_DATA subsys_data;    /* config of each subsys, such as base addr, iosize, etc. */
    u32 hw_id;                  /* VC8000E/VC8000EJ hw id identifying the project */
    u32 subsys_id;              /* subsys id for driver and sw internal use */
    u32 is_valid;               /* whether this subsys is a hantro core or not */
    int pid[CORE_MAX];          /* which process is occupying the subsys */
    u32 is_reserved[CORE_MAX];  /* whether this subsys is occupied by a user or not */
    u32 irq_received[CORE_MAX]; /* which core has received an irq */
    u32 irq_status[CORE_MAX];   /* IRQ status of each core */
    u32 job_id[CORE_MAX];
    char *buffer;
    unsigned int buffsize;
    volatile u8 *hwregs;
    struct fasync_struct *async_queue;
} hantroenc_t;

static int ReserveIO(void);
static void ReleaseIO(void);
//static void ResetAsic(hantroenc_t *dev);
#ifdef hantroenc_DEBUG
static void dump_regs(unsigned long data);
#endif

/* IRQ handler */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
static irqreturn_t hantroenc_isr(int irq, void *dev_id, struct pt_regs *regs);
#else
static irqreturn_t hantroenc_isr(int irq, void *dev_id);
#endif

/********************* local variable declarations *****************/
unsigned long sram_base = 0;
unsigned int sram_size = 0;
/* and this is our MAJOR; use 0 for dynamic allocation (recommended) */
static int hantroenc_major = 0;
static int total_subsys_num = 0;
static int total_core_num = 0;
volatile unsigned int asic_status = 0;
/* dynamic allocation */
static hantroenc_t *hantroenc_data = NULL;
#ifdef IRQ_SIMULATION
struct timer_list timer0;
struct timer_list timer1;
#endif
/******************************************************************************/
static int CheckEncIrq(hantroenc_t *dev, u32 *core_info, u32 *irq_status, u32 *job_id)
{
    unsigned long flags;
    int rdy = 0;
    u8 core_type = 0;
    u8 subsys_idx = 0;
    core_type = (u8)(*core_info & 0x0F);
    subsys_idx = (u8)(*core_info >> 4);
    if (subsys_idx > total_subsys_num - 1)
    {
        *core_info = -1;
        *irq_status = 0;
        return 1;
    }
    spin_lock_irqsave(&owner_lock, flags);
    if (dev[subsys_idx].irq_received[core_type])
    {
        /* reset the wait condition(s) */
        PDEBUG("check subsys[%d][%d] irq ready\n", subsys_idx, core_type);
        //dev[subsys_idx].irq_received[core_type] = 0;
        rdy = 1;
        *core_info = subsys_idx;
        *irq_status = dev[subsys_idx].irq_status[core_type];
        if (job_id != NULL)
            *job_id = dev[subsys_idx].job_id[core_type];
    }
    spin_unlock_irqrestore(&owner_lock, flags);
    return rdy;
}

static unsigned int WaitEncReady(hantroenc_t *dev, u32 *core_info, u32 *irq_status)
{
    PDEBUG("WaitEncReady\n");
    if (wait_event_interruptible(enc_wait_queue, CheckEncIrq(dev, core_info, irq_status, NULL)))
    {
        PDEBUG("ENC wait_event_interruptible interrupted\n");
        return -ERESTARTSYS;
    }
    return 0;
}
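
/* A minimal sketch of how user space is expected to pack *core_info for
 * HANTRO_IOCG_CORE_WAIT, inferred from the decoding in CheckEncIrq() above
 * (low nibble = core type, upper bits = subsys index). The helper name
 * pack_wait_core_info is hypothetical.
 */
#if 0
static inline u32 pack_wait_core_info(u32 subsys_idx, u32 core_type)
{
    return (subsys_idx << 4) | (core_type & 0x0F);
}
/* e.g. wait for the VC8000E core of subsys 0: */
u32 core_info = pack_wait_core_info(0, CORE_VC8000E);
#endif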
static int CheckEncIrqbyPolling(hantroenc_t *dev, u32 *core_info, u32 *irq_status, u32 *job_id)
{
    unsigned long flags;
    int rdy = 0;
    u8 core_type = 0;
    u8 subsys_idx = 0;
    u32 irq, hwId, majorId, wClr;
    unsigned long reg_offset = 0;
    u32 loop = 30;
    u32 interval = 100;
    u32 enable_status = 0;
    core_type = (u8)(*core_info & 0x0F);
    subsys_idx = (u8)(*core_info >> 4);
    if (subsys_idx > total_subsys_num - 1)
    {
        *core_info = -1;
        *irq_status = 0;
        return 1;
    }
    do
    {
        spin_lock_irqsave(&owner_lock, flags);
        if (dev[subsys_idx].is_reserved[core_type] == 0)
        {
            //printk(KERN_DEBUG "subsys[%d][%d] is not reserved\n", subsys_idx, core_type);
            goto end_1;
        }
        else if (dev[subsys_idx].irq_received[core_type] &&
                 (dev[subsys_idx].irq_status[core_type] & (ASIC_STATUS_FUSE_ERROR | ASIC_STATUS_HW_TIMEOUT | ASIC_STATUS_BUFF_FULL |
                  ASIC_STATUS_HW_RESET | ASIC_STATUS_ERROR | ASIC_STATUS_FRAME_READY)))
        {
            rdy = 1;
            *core_info = subsys_idx;
            *irq_status = dev[subsys_idx].irq_status[core_type];
            *job_id = dev[subsys_idx].job_id[core_type];
            goto end_1;
        }
        reg_offset = dev[subsys_idx].subsys_data.core_info.offset[core_type];
        irq = (u32)ioread32((void *)(dev[subsys_idx].hwregs + reg_offset + 0x04));
        enable_status = (u32)ioread32((void *)(dev[subsys_idx].hwregs + reg_offset + 20));
        if (irq & ASIC_STATUS_ALL)
        {
            PDEBUG("check subsys[%d][%d] irq ready\n", subsys_idx, core_type);
            if (irq & 0x20)
                iowrite32(0, (void *)(dev[subsys_idx].hwregs + reg_offset + 0x14));
            /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writing 1 */
            hwId = ioread32((void *)dev[subsys_idx].hwregs + reg_offset);
            majorId = (hwId & 0x0000FF00) >> 8;
            wClr = (majorId >= 0x61) ? irq : (irq & (~0x1FD));
            iowrite32(wClr, (void *)(dev[subsys_idx].hwregs + reg_offset + 0x04));
            rdy = 1;
            *core_info = subsys_idx;
            *irq_status = irq;
            dev[subsys_idx].irq_received[core_type] = 1;
            dev[subsys_idx].irq_status[core_type] = irq;
            *job_id = dev[subsys_idx].job_id[core_type];
            goto end_1;
        }
        spin_unlock_irqrestore(&owner_lock, flags);
        mdelay(interval);
    } while (loop--);
    goto end_2;
end_1:
    spin_unlock_irqrestore(&owner_lock, flags);
end_2:
    return rdy;
}
static int CheckEncAnyIrq(hantroenc_t *dev, CORE_WAIT_OUT *out)
{
    u32 i;
    int rdy = 1;
    u32 core_info, irq_status, job_id;
    u32 core_type = CORE_VC8000E;
    for (i = 0; i < total_subsys_num; i++)
    {
        if (!(dev[i].subsys_data.core_info.type_info & (1 << core_type)))
            continue;
        core_info = ((i << 4) | core_type);
        if ((1 == CheckEncIrqbyPolling(dev, &core_info, &irq_status, &job_id)) && (core_info == i))
        {
            out->job_id[out->irq_num] = job_id;
            out->irq_status[out->irq_num] = irq_status;
            //printk(KERN_DEBUG "irq_status of subsys %d job_id %d is:%x\n", i, job_id, irq_status);
            out->irq_num++;
            rdy = 1;
        }
    }
    return rdy;
}

static unsigned int WaitEncAnyReady(hantroenc_t *dev, CORE_WAIT_OUT *out)
{
    if (wait_event_interruptible(enc_wait_queue, CheckEncAnyIrq(dev, out)))
    {
        PDEBUG("ENC wait_event_interruptible interrupted\n");
        return -ERESTARTSYS;
    }
    return 0;
}
static int CheckCoreOccupation(hantroenc_t *dev, u8 core_type)
{
    int ret = 0;
    unsigned long flags;
    core_type = (core_type == CORE_VC8000EJ ? CORE_VC8000E : core_type);
    spin_lock_irqsave(&owner_lock, flags);
    if (!dev->is_reserved[core_type]) {
        dev->is_reserved[core_type] = 1;
#ifndef MULTI_THR_TEST
        dev->pid[core_type] = current->pid;
#endif
        ret = 1;
        PDEBUG("CheckCoreOccupation pid=%d\n", dev->pid[core_type]);
    }
    spin_unlock_irqrestore(&owner_lock, flags);
    return ret;
}
static int GetWorkableCore(hantroenc_t *dev, u32 *core_info, u32 *core_info_tmp)
{
    int ret = 0;
    u32 i = 0;
    u32 cores;
    u8 core_type = 0;
    u32 required_num = 0;
    static u32 reserved_job_id = 0;
    unsigned long flags;
    /* input core_info[32 bit]: mode[1bit](1: all, 0: specified) + amount[3bit](the required amount - 1) + reserved + core_type[8bit]
     * output core_info[32 bit]: the reserved core info returned to user space, defined as below:
     * mode[1bit](1: all, 0: specified) + amount[3bit](total number of reserved cores) + reserved + subsys_mapping[8bit]
     */
    cores = *core_info;
    required_num = ((cores >> CORE_INFO_AMOUNT_OFFSET) & 0x7) + 1;
    core_type = (u8)(cores & 0xFF);
    if (*core_info_tmp == 0)
        *core_info_tmp = required_num << CORE_INFO_AMOUNT_OFFSET;
    else
        required_num = (*core_info_tmp >> CORE_INFO_AMOUNT_OFFSET);
    PDEBUG("GetWorkableCore: required_num=%d, core_info=%x\n", required_num, *core_info);
    if (required_num)
    {
        /* a valid free core with the specified core type */
        for (i = 0; i < total_subsys_num; i++)
        {
            if (dev[i].subsys_data.core_info.type_info & (1 << core_type))
            {
                core_type = (core_type == CORE_VC8000EJ ? CORE_VC8000E : core_type);
                if (dev[i].is_valid && CheckCoreOccupation(&dev[i], core_type))
                {
                    *core_info_tmp = ((((*core_info_tmp >> CORE_INFO_AMOUNT_OFFSET) - 1) << CORE_INFO_AMOUNT_OFFSET) | (*core_info_tmp & 0x0FF));
                    *core_info_tmp = (*core_info_tmp | (1 << i));
                    if ((*core_info_tmp >> CORE_INFO_AMOUNT_OFFSET) == 0)
                    {
                        ret = 1;
                        spin_lock_irqsave(&owner_lock, flags);
                        *core_info = (reserved_job_id << 16) | (*core_info_tmp & 0xFF);
                        dev[i].job_id[core_type] = reserved_job_id;
                        reserved_job_id++;
                        spin_unlock_irqrestore(&owner_lock, flags);
                        *core_info_tmp = 0;
                        required_num = 0;
                        break;
                    }
                }
            }
        }
    }
    else
        ret = 1;
    PDEBUG("*core_info = %x\n", *core_info);
    return ret;
}
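
/* Worked example of the encoding described in the comment above (a sketch;
 * the variable names are illustrative): reserving a single VC8000E core.
 */
#if 0
/* input: amount - 1 = 0 (one core), core_type in the low byte */
u32 core_info = (0 << CORE_INFO_AMOUNT_OFFSET) | CORE_VC8000E;
/* after GetWorkableCore() succeeds, *core_info holds
 * (reserved_job_id << 16) | subsys_mapping, where a set bit i in the low
 * byte means subsys i was reserved: */
u32 job_id     = core_info >> 16;
u32 subsys_map = core_info & 0xFF;
#endif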
static long ReserveEncoder(hantroenc_t *dev, u32 *core_info)
{
    u32 core_info_tmp = 0;
#ifdef MULTI_THR_TEST
    struct wait_list_node *wait_node;
    u32 start_id = 0;
#endif
    /* If HW resources are shared between cores, just make sure only one is using the HW. */
    if (dev[0].subsys_data.cfg.resouce_shared)
    {
        if (down_interruptible(&enc_core_sem))
            return -ERESTARTSYS;
    }
#ifdef MULTI_THR_TEST
    while (1)
    {
        start_id = request_wait_node(&wait_node, start_id);
        if (wait_node->sem_used == 1)
        {
            if (GetWorkableCore(dev, core_info, &core_info_tmp))
            {
                down_interruptible(&wait_node->wait_sem);
                wait_node->sem_used = 0;
                wait_node->used_flag = 0;
                break;
            }
            else
            {
                start_id++;
            }
        }
        else
        {
            wait_node->wait_cond = *core_info;
            list_add_tail(&wait_node->wait_list, &reserve_header);
            down_interruptible(&wait_node->wait_sem);
            *core_info = wait_node->wait_cond;
            list_del(&wait_node->wait_list);
            wait_node->sem_used = 0;
            wait_node->used_flag = 0;
            break;
        }
    }
#else
    /* lock a core that has the specified core id */
    if (wait_event_interruptible(hw_queue,
                                 GetWorkableCore(dev, core_info, &core_info_tmp) != 0))
        return -ERESTARTSYS;
#endif
    return 0;
}
static void ReleaseEncoder(hantroenc_t *dev, u32 *core_info)
{
    unsigned long flags;
    u8 core_type = 0, subsys_idx = 0, unCheckPid = 0;
#ifdef MULTI_THR_TEST
    u32 release_ok = 0;
    struct list_head *node;
    struct wait_list_node *wait_node;
    u32 core_info_tmp = 0;
#endif
    unCheckPid = (u8)((*core_info) >> 31);
    subsys_idx = (u8)((*core_info & 0xF0) >> 4);
    core_type = (u8)(*core_info & 0x0F);
    PDEBUG("ReleaseEncoder: subsys_idx=%d, core_type=%x\n", subsys_idx, core_type);
    /* release the specified subsys and core type */
    if (dev[subsys_idx].subsys_data.core_info.type_info & (1 << core_type))
    {
        core_type = (core_type == CORE_VC8000EJ ? CORE_VC8000E : core_type);
        spin_lock_irqsave(&owner_lock, flags);
        PDEBUG("subsys[%d].pid[%d]=%d, current->pid=%d\n", subsys_idx, core_type, dev[subsys_idx].pid[core_type], current->pid);
#ifdef MULTI_THR_TEST
        if (dev[subsys_idx].is_reserved[core_type])
#else
        if (dev[subsys_idx].is_reserved[core_type] && (dev[subsys_idx].pid[core_type] == current->pid || unCheckPid == 1))
#endif
        {
            dev[subsys_idx].pid[core_type] = -1;
            dev[subsys_idx].is_reserved[core_type] = 0;
            dev[subsys_idx].irq_received[core_type] = 0;
            dev[subsys_idx].irq_status[core_type] = 0;
            dev[subsys_idx].job_id[core_type] = 0;
            spin_unlock_irqrestore(&owner_lock, flags);
#ifdef MULTI_THR_TEST
            release_ok = 0;
            if (list_empty(&reserve_header))
            {
                request_wait_sema(&wait_node);
                up(&wait_node->wait_sem);
            }
            else
            {
                list_for_each(node, &reserve_header)
                {
                    wait_node = container_of(node, struct wait_list_node, wait_list);
                    if ((GetWorkableCore(dev, &wait_node->wait_cond, &core_info_tmp)) && (wait_node->sem_used == 0))
                    {
                        release_ok = 1;
                        wait_node->sem_used = 1;
                        up(&wait_node->wait_sem);
                        break;
                    }
                }
                if (release_ok == 0)
                {
                    request_wait_sema(&wait_node);
                    up(&wait_node->wait_sem);
                }
            }
#endif
        }
        else
        {
            if (dev[subsys_idx].pid[core_type] != current->pid && unCheckPid == 0)
                printk(KERN_ERR "WARNING: pid(%d) is trying to release a core reserved by pid(%d)\n", current->pid, dev[subsys_idx].pid[core_type]);
            spin_unlock_irqrestore(&owner_lock, flags);
        }
        //wake_up_interruptible_all(&hw_queue);
    }
#ifndef MULTI_THR_TEST
    wake_up_interruptible_all(&hw_queue);
#endif
    if (dev->subsys_data.cfg.resouce_shared)
        up(&enc_core_sem);
    return;
}
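
/* Sketch of the release word consumed by ReleaseEncoder() (illustrative
 * values): subsys index in bits 7:4, core type in bits 3:0, and bit 31 set
 * to skip the pid ownership check (the unCheckPid path above).
 */
#if 0
u32 release_info = (0 << 4) | CORE_VC8000E;     /* subsys 0, VC8000E, owner check on */
u32 force_release = release_info | (1u << 31);  /* bypass the pid check */
#endif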
#ifdef IRQ_SIMULATION
static void get_random_bytes(void *buf, int nbytes);

static void hantroenc_trigger_irq_0(unsigned long value)
{
    PDEBUG("trigger core 0 irq\n");
    del_timer(&timer0);
    hantroenc_isr(0, (void *)&hantroenc_data[0]);
}

static void hantroenc_trigger_irq_1(unsigned long value)
{
    PDEBUG("trigger core 1 irq\n");
    del_timer(&timer1);
    hantroenc_isr(0, (void *)&hantroenc_data[1]);
}
#endif
static long hantroenc_ioctl(struct file *filp,
                            unsigned int cmd, unsigned long arg)
{
    int err = 0;
    unsigned int tmp;
#ifdef HANTROMMU_SUPPORT
    u32 i = 0;
    volatile u8 *mmu_hwregs[MAX_SUBSYS_NUM][2];
#endif
    PDEBUG("ioctl cmd 0x%08x\n", cmd);
    /*
     * extract the type and number bitfields, and don't decode
     * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
     */
    if (_IOC_TYPE(cmd) != HANTRO_IOC_MAGIC
#ifdef HANTROMMU_SUPPORT
        && _IOC_TYPE(cmd) != HANTRO_IOC_MMU
#endif
        )
        return -ENOTTY;
    if ((_IOC_TYPE(cmd) == HANTRO_IOC_MAGIC &&
         _IOC_NR(cmd) > HANTRO_IOC_MAXNR)
#ifdef HANTROMMU_SUPPORT
        || (_IOC_TYPE(cmd) == HANTRO_IOC_MMU &&
            _IOC_NR(cmd) > HANTRO_IOC_MMU_MAXNR)
#endif
        )
        return -ENOTTY;
    /*
     * the direction is a bitmask, and VERIFY_WRITE catches R/W
     * transfers. `Type' is user-oriented, while
     * access_ok is kernel-oriented, so the concept of "read" and
     * "write" is reversed
     */
    if (_IOC_DIR(cmd) & _IOC_READ)
#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE
        err = !access_ok((void *)arg, _IOC_SIZE(cmd));
#else
        err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
#endif
    else if (_IOC_DIR(cmd) & _IOC_WRITE)
#if KERNEL_VERSION(5,0,0) <= LINUX_VERSION_CODE
        err = !access_ok((void *)arg, _IOC_SIZE(cmd));
#else
        err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
#endif
    if (err)
        return -EFAULT;
    switch (cmd)
    {
    case HANTRO_IOCH_GET_VCMD_ENABLE:
    {
        __put_user(0, (unsigned long *)arg);
        break;
    }
    case HANTRO_IOCG_HWOFFSET:
    {
        u32 id;
        __get_user(id, (u32 *)arg);
        if (id >= total_subsys_num)
        {
            return -EFAULT;
        }
        __put_user(hantroenc_data[id].subsys_data.cfg.base_addr, (unsigned long *)arg);
        break;
    }
    case HANTRO_IOCG_HWIOSIZE:
    {
        u32 id;
        u32 io_size;
        __get_user(id, (u32 *)arg);
        if (id >= total_subsys_num)
        {
            return -EFAULT;
        }
        io_size = hantroenc_data[id].subsys_data.cfg.iosize;
        __put_user(io_size, (u32 *)arg);
        return 0;
    }
    case HANTRO_IOCG_SRAMOFFSET:
        __put_user(sram_base, (unsigned long *)arg);
        break;
    case HANTRO_IOCG_SRAMEIOSIZE:
        __put_user(sram_size, (unsigned int *)arg);
        break;
    case HANTRO_IOCG_CORE_NUM:
        __put_user(total_subsys_num, (unsigned int *)arg);
        break;
    case HANTRO_IOCG_CORE_INFO:
    {
        u32 idx;
        SUBSYS_CORE_INFO in_data;
        if (copy_from_user(&in_data, (void *)arg, sizeof(SUBSYS_CORE_INFO)))
            return -EFAULT;
        idx = in_data.type_info;
        if (idx > total_subsys_num - 1)
            return -1;
        if (copy_to_user((void *)arg, &hantroenc_data[idx].subsys_data.core_info, sizeof(SUBSYS_CORE_INFO)))
            return -EFAULT;
        break;
    }
    case HANTRO_IOCH_ENC_RESERVE:
    {
        u32 core_info;
        int ret;
        PDEBUG("Reserve ENC Cores\n");
        __get_user(core_info, (u32 *)arg);
        ret = ReserveEncoder(hantroenc_data, &core_info);
        if (ret == 0)
            __put_user(core_info, (u32 *)arg);
        return ret;
    }
    case HANTRO_IOCH_ENC_RELEASE:
    {
        u32 core_info;
        __get_user(core_info, (u32 *)arg);
        PDEBUG("Release ENC Core\n");
        ReleaseEncoder(hantroenc_data, &core_info);
        break;
    }
    case HANTRO_IOCG_CORE_WAIT:
    {
        u32 core_info;
        u32 irq_status;
        __get_user(core_info, (u32 *)arg);
#ifdef IRQ_SIMULATION
        u32 random_num;
        get_random_bytes(&random_num, sizeof(u32));
        random_num = random_num % 10 + 80;
        PDEBUG("random_num=%d\n", random_num);
        /* init a timer to trigger the irq */
        if (core_info == 1)
        {
            init_timer(&timer0);
            timer0.function = &hantroenc_trigger_irq_0;
            timer0.expires = jiffies + random_num * HZ / 10; /* expires after random_num/10 seconds */
            add_timer(&timer0);
        }
        if (core_info == 2)
        {
            init_timer(&timer1);
            timer1.function = &hantroenc_trigger_irq_1;
            timer1.expires = jiffies + random_num * HZ / 10; /* expires after random_num/10 seconds */
            add_timer(&timer1);
        }
#endif
        tmp = WaitEncReady(hantroenc_data, &core_info, &irq_status);
        if (tmp == 0)
        {
            __put_user(irq_status, (unsigned int *)arg);
            return core_info; /* return core_id */
        }
        else
        {
            return -1;
        }
        break;
    }
    case HANTRO_IOCG_ANYCORE_WAIT:
    {
        CORE_WAIT_OUT out;
        memset(&out, 0, sizeof(CORE_WAIT_OUT));
#ifdef IRQ_SIMULATION
        u32 random_num;
        get_random_bytes(&random_num, sizeof(u32));
        random_num = random_num % 10 + 80;
        PDEBUG("random_num=%d\n", random_num);
        /* init a timer to trigger the irq */
        if (core_info == 1)
        {
            init_timer(&timer0);
            timer0.function = &hantroenc_trigger_irq_0;
            timer0.expires = jiffies + random_num * HZ / 10; /* expires after random_num/10 seconds */
            add_timer(&timer0);
        }
        if (core_info == 2)
        {
            init_timer(&timer1);
            timer1.function = &hantroenc_trigger_irq_1;
            timer1.expires = jiffies + random_num * HZ / 10; /* expires after random_num/10 seconds */
            add_timer(&timer1);
        }
#endif
        tmp = WaitEncAnyReady(hantroenc_data, &out);
        if (tmp == 0)
        {
            if (copy_to_user((void *)arg, &out, sizeof(CORE_WAIT_OUT)))
                return -EFAULT;
            return 0;
        }
        else
        {
            return -1;
        }
        break;
    }
    default:
    {
#ifdef HANTROMMU_SUPPORT
        if (_IOC_TYPE(cmd) == HANTRO_IOC_MMU)
        {
            memset(mmu_hwregs, 0, MAX_SUBSYS_NUM * 2 * sizeof(u8 *));
            for (i = 0; i < total_subsys_num; i++)
            {
                if (hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU))
                    mmu_hwregs[i][0] = hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU];
                if (hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU_1))
                    mmu_hwregs[i][1] = hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU_1];
            }
            return (MMUIoctl(cmd, filp, arg, mmu_hwregs));
        }
#endif
    }
    }
    return 0;
}
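
/* End-to-end user-space usage sketch for the ioctl interface above
 * (illustrative only: the device node name /dev/vc8000 is an assumption,
 * since register_chrdev() only claims the major and node creation is left
 * to the platform; the HANTRO_* macros come from vc8000_driver.h).
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>

int fd = open("/dev/vc8000", O_RDWR);
unsigned int core_info = (0 << CORE_INFO_AMOUNT_OFFSET) | CORE_VC8000E; /* one VC8000E core */
ioctl(fd, HANTRO_IOCH_ENC_RESERVE, &core_info);   /* returns job id / subsys map in core_info */
/* ... program registers and start the encoder ... */
unsigned int wait_info = (0 << 4) | CORE_VC8000E; /* subsys 0, VC8000E */
int core_id = ioctl(fd, HANTRO_IOCG_CORE_WAIT, &wait_info); /* wait_info now holds irq_status */
unsigned int release_info = (0 << 4) | CORE_VC8000E;
ioctl(fd, HANTRO_IOCH_ENC_RELEASE, &release_info);
close(fd);
#endif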
static int hantroenc_open(struct inode *inode, struct file *filp)
{
    int result = 0;
    hantroenc_t *dev = hantroenc_data;
    filp->private_data = (void *)dev;
    PDEBUG("dev opened\n");
    return result;
}

static int hantroenc_release(struct inode *inode, struct file *filp)
{
    hantroenc_t *dev = (hantroenc_t *)filp->private_data;
    u32 core_id = 0, i = 0;
    unsigned long flags;
#ifdef hantroenc_DEBUG
    dump_regs((unsigned long)dev); /* dump the regs */
#endif
    PDEBUG("dev closed\n");
    for (i = 0; i < total_subsys_num; i++)
    {
        for (core_id = 0; core_id < CORE_MAX; core_id++)
        {
            spin_lock_irqsave(&owner_lock, flags);
            if (dev[i].is_reserved[core_id] == 1 && dev[i].pid[core_id] == current->pid)
            {
                dev[i].pid[core_id] = -1;
                dev[i].is_reserved[core_id] = 0;
                dev[i].irq_received[core_id] = 0;
                dev[i].irq_status[core_id] = 0;
                PDEBUG("release reserved core\n");
            }
            spin_unlock_irqrestore(&owner_lock, flags);
        }
    }
#ifdef HANTROMMU_SUPPORT
    for (i = 0; i < total_subsys_num; i++)
    {
        if (!(hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU)))
            continue;
        MMURelease(filp, hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU]);
        break;
    }
#endif
    wake_up_interruptible_all(&hw_queue);
    if (dev->subsys_data.cfg.resouce_shared)
        up(&enc_core_sem);
    return 0;
}

/* VFS methods */
static struct file_operations hantroenc_fops = {
    .owner = THIS_MODULE,
    .open = hantroenc_open,
    .release = hantroenc_release,
    .unlocked_ioctl = hantroenc_ioctl,
    .fasync = NULL,
};
/*-----------------------------------------
    platform register
-----------------------------------------*/
static const struct of_device_id isp_of_match[] = {
    { .compatible = "thead,light-vc8000e", },
    { /* sentinel */ },
};
static int encoder_hantrodec_probe(struct platform_device *pdev)
{
    int result;
    int i, j;
    struct resource *mem;
    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (mem && mem->start)
        subsys_array[0].base_addr = mem->start;
    core_array[0].irq = platform_get_irq(pdev, 0);
    total_subsys_num = sizeof(subsys_array) / sizeof(SUBSYS_CONFIG);
    for (i = 0; i < total_subsys_num; i++)
    {
        printk(KERN_INFO "hantroenc: module init - subsys[%d] addr =%p\n", i,
               (void *)subsys_array[i].base_addr);
    }
    hantroenc_data = (hantroenc_t *)vmalloc(sizeof(hantroenc_t) * total_subsys_num);
    if (hantroenc_data == NULL)
    {
        result = -ENOMEM;
        goto err1;
    }
    memset(hantroenc_data, 0, sizeof(hantroenc_t) * total_subsys_num);
    for (i = 0; i < total_subsys_num; i++)
    {
        hantroenc_data[i].subsys_data.cfg = subsys_array[i];
        hantroenc_data[i].async_queue = NULL;
        hantroenc_data[i].hwregs = NULL;
        hantroenc_data[i].subsys_id = i;
        for (j = 0; j < CORE_MAX; j++)
            hantroenc_data[i].subsys_data.core_info.irq[j] = -1;
    }
    total_core_num = sizeof(core_array) / sizeof(CORE_CONFIG);
    for (i = 0; i < total_core_num; i++)
    {
        hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.type_info |= (1 << (core_array[i].core_type));
        hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.offset[core_array[i].core_type] = core_array[i].offset;
        hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.regSize[core_array[i].core_type] = core_array[i].reg_size;
        hantroenc_data[core_array[i].subsys_idx].subsys_data.core_info.irq[core_array[i].core_type] = core_array[i].irq;
    }
    result = register_chrdev(hantroenc_major, "vc8000", &hantroenc_fops);
    if (result < 0)
    {
        printk(KERN_INFO "hantroenc: unable to get major <%d>\n",
               hantroenc_major);
        goto err1;
    }
    else if (result != 0) /* this is for dynamic major */
    {
        hantroenc_major = result;
    }
    result = ReserveIO();
    if (result < 0)
    {
        goto err;
    }
    //ResetAsic(hantroenc_data); /* reset hardware */
    sema_init(&enc_core_sem, 1);
#ifdef HANTROMMU_SUPPORT
    /* the MMU is only initialized once, no matter how many MMUs we have */
    for (i = 0; i < total_subsys_num; i++)
    {
        if (!(hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU)))
            continue;
        result = MMUInit(hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU]);
        if (result == MMU_STATUS_NOT_FOUND)
            printk(KERN_INFO "MMU does not exist!\n");
        else if (result != MMU_STATUS_OK)
        {
            ReleaseIO();
            goto err;
        }
        break;
    }
#endif
    /* get the IRQ line */
    for (i = 0; i < total_subsys_num; i++)
    {
        if (hantroenc_data[i].is_valid == 0)
            continue;
        for (j = 0; j < CORE_MAX; j++)
        {
            if (hantroenc_data[i].subsys_data.core_info.irq[j] != -1)
            {
                result = request_irq(hantroenc_data[i].subsys_data.core_info.irq[j], hantroenc_isr,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
                                     SA_INTERRUPT | SA_SHIRQ,
#else
                                     IRQF_SHARED,
#endif
                                     "vc8000", (void *)&hantroenc_data[i]);
                if (result == -EINVAL)
                {
                    printk(KERN_ERR "hantroenc: Bad irq number or handler\n");
                    ReleaseIO();
                    goto err;
                }
                else if (result == -EBUSY)
                {
                    printk(KERN_ERR "hantroenc: IRQ <%d> busy, change your config\n",
                           hantroenc_data[i].subsys_data.core_info.irq[j]);
                    ReleaseIO();
                    goto err;
                }
            }
            else
            {
                printk(KERN_INFO "hantroenc: IRQ not in use!\n");
            }
        }
    }
#ifdef MULTI_THR_TEST
    init_reserve_wait(total_subsys_num);
#endif
    printk(KERN_INFO "hantroenc: module inserted. Major <%d>\n", hantroenc_major);
    return 0;
err:
    unregister_chrdev(hantroenc_major, "vc8000");
err1:
    if (hantroenc_data != NULL)
        vfree(hantroenc_data);
    printk(KERN_INFO "hantroenc: module not inserted\n");
    return result;
}
static int encoder_hantrodec_remove(struct platform_device *pdev)
{
    int i = 0, j = 0;
#ifdef HANTROMMU_SUPPORT
    volatile u8 *mmu_hwregs[MAX_SUBSYS_NUM][2];
#endif
    for (i = 0; i < total_subsys_num; i++)
    {
        if (hantroenc_data[i].is_valid == 0)
            continue;
        //writel(0, hantroenc_data[i].hwregs + 0x14); /* disable HW */
        //writel(0, hantroenc_data[i].hwregs + 0x04); /* clear enc IRQ */
        /* free the core IRQ */
        for (j = 0; j < total_core_num; j++)
        {
            if (hantroenc_data[i].subsys_data.core_info.irq[j] != -1)
            {
                free_irq(hantroenc_data[i].subsys_data.core_info.irq[j], (void *)&hantroenc_data[i]);
            }
        }
    }
#ifdef HANTROMMU_SUPPORT
    memset(mmu_hwregs, 0, MAX_SUBSYS_NUM * 2 * sizeof(u8 *));
    for (i = 0; i < total_subsys_num; i++) {
        if (hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU))
            mmu_hwregs[i][0] = hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU];
        if (hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_MMU_1))
            mmu_hwregs[i][1] = hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[CORE_MMU_1];
    }
    MMUCleanup(mmu_hwregs);
#endif
    ReleaseIO();
    vfree(hantroenc_data);
    unregister_chrdev(hantroenc_major, "vc8000");
    printk(KERN_INFO "hantroenc: module removed\n");
    return 0;
}
static struct platform_driver encoder_hantrodec_driver = {
    .probe = encoder_hantrodec_probe,
    .remove = encoder_hantrodec_remove,
    .driver = {
        .name = "encoder_hantrodec",
        .owner = THIS_MODULE,
        .of_match_table = of_match_ptr(isp_of_match),
    }
};

int __init hantroenc_normal_init(void)
{
    int ret = 0;
    printk("enter %s\n", __func__);
    ret = platform_driver_register(&encoder_hantrodec_driver);
    if (ret)
    {
        pr_err("register platform driver failed!\n");
    }
    return ret;
}

void __exit hantroenc_normal_cleanup(void)
{
    printk("enter %s\n", __func__);
    platform_driver_unregister(&encoder_hantrodec_driver);
    return;
}
static int ReserveIO(void)
{
    u32 hwid;
    int i;
    u32 found_hw = 0, hw_cfg;
    u32 VC8000E_core_idx;
    for (i = 0; i < total_subsys_num; i++)
    {
        if (!request_mem_region
            (hantroenc_data[i].subsys_data.cfg.base_addr, hantroenc_data[i].subsys_data.cfg.iosize, "vc8000"))
        {
            printk(KERN_INFO "hantroenc: failed to reserve HW regs\n");
            continue;
        }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
        hantroenc_data[i].hwregs =
            (volatile u8 *)ioremap_nocache(hantroenc_data[i].subsys_data.cfg.base_addr,
                                           hantroenc_data[i].subsys_data.cfg.iosize);
#else
        hantroenc_data[i].hwregs =
            (volatile u8 *)ioremap(hantroenc_data[i].subsys_data.cfg.base_addr,
                                   hantroenc_data[i].subsys_data.cfg.iosize);
#endif
        if (hantroenc_data[i].hwregs == NULL)
        {
            printk(KERN_INFO "hantroenc: failed to ioremap HW regs\n");
            ReleaseIO();
            continue;
        }
        /* read the hwid, check its validity and store it */
        VC8000E_core_idx = GET_ENCODER_IDX(hantroenc_data[i].subsys_data.core_info.type_info);
        if (!(hantroenc_data[i].subsys_data.core_info.type_info & (1 << CORE_VC8000E)))
            VC8000E_core_idx = CORE_CUTREE;
        hwid = (u32)ioread32((void *)hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[VC8000E_core_idx]);
        printk(KERN_INFO "hwid=0x%08x\n", hwid);
        /* check for encoder HW ID */
        if ((((hwid >> 16) & 0xFFFF) != ((ENC_HW_ID1 >> 16) & 0xFFFF)) &&
            (((hwid >> 16) & 0xFFFF) != ((ENC_HW_ID2 >> 16) & 0xFFFF)))
        {
            printk(KERN_INFO "hantroenc: HW not found at %p\n",
                   (void *)hantroenc_data[i].subsys_data.cfg.base_addr);
#ifdef hantroenc_DEBUG
            dump_regs((unsigned long)&hantroenc_data);
#endif
            hantroenc_data[i].is_valid = 0;
            ReleaseIO();
            continue;
        }
        hantroenc_data[i].hw_id = hwid;
        hantroenc_data[i].is_valid = 1;
        found_hw = 1;
        hw_cfg = (u32)ioread32((void *)hantroenc_data[i].hwregs + hantroenc_data[i].subsys_data.core_info.offset[VC8000E_core_idx] + 320);
        hantroenc_data[i].subsys_data.core_info.type_info &= 0xFFFFFFFC;
        if (hw_cfg & 0x88000000)
            hantroenc_data[i].subsys_data.core_info.type_info |= (1 << CORE_VC8000E);
        if (hw_cfg & 0x00008000)
            hantroenc_data[i].subsys_data.core_info.type_info |= (1 << CORE_VC8000EJ);
        printk(KERN_INFO
               "hantroenc: HW at base <%p> with ID <0x%08x>\n",
               (void *)hantroenc_data[i].subsys_data.cfg.base_addr, hwid);
    }
    if (found_hw == 0)
    {
        printk(KERN_ERR "hantroenc: no HW found at all!\n");
        return -1;
    }
    return 0;
}
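
/* Sketch of the probe-time decoding used above (a summary of the checks in
 * ReserveIO(); the variable names are illustrative): the product ID lives in
 * the top 16 bits of swreg0, and the config word at offset 320 flags the
 * supported encoder flavors.
 */
#if 0
u32 product_id   = hwid >> 16;                /* compared against ENC_HW_ID1/2 >> 16 */
int has_vc8000e  = !!(hw_cfg & 0x88000000);   /* -> CORE_VC8000E */
int has_vc8000ej = !!(hw_cfg & 0x00008000);   /* -> CORE_VC8000EJ */
#endif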
static void ReleaseIO(void)
{
    u32 i;
    for (i = 0; i < total_subsys_num; i++)
    {
        if (hantroenc_data[i].is_valid == 0)
            continue;
        if (hantroenc_data[i].hwregs)
            iounmap((void *)hantroenc_data[i].hwregs);
        release_mem_region(hantroenc_data[i].subsys_data.cfg.base_addr, hantroenc_data[i].subsys_data.cfg.iosize);
    }
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
irqreturn_t hantroenc_isr(int irq, void *dev_id, struct pt_regs *regs)
#else
irqreturn_t hantroenc_isr(int irq, void *dev_id)
#endif
{
    unsigned int handled = 0;
    hantroenc_t *dev = (hantroenc_t *)dev_id;
    u32 irq_status;
    unsigned long flags;
    u32 core_type = 0, i = 0;
    unsigned long reg_offset = 0;
    u32 hwId, majorId, wClr;
    /* get the core id by irq from the subsys config */
    for (i = 0; i < CORE_MAX; i++)
    {
        if (dev->subsys_data.core_info.irq[i] == irq)
        {
            core_type = i;
            reg_offset = dev->subsys_data.core_info.offset[i];
            break;
        }
    }
    /* If the core is not reserved by any user but an irq is received, just clear it. */
    spin_lock_irqsave(&owner_lock, flags);
    if (!dev->is_reserved[core_type])
    {
        printk(KERN_DEBUG "hantroenc_isr: received IRQ but core is not reserved!\n");
        irq_status = (u32)ioread32((void *)(dev->hwregs + reg_offset + 0x04));
        if (irq_status & 0x01)
        {
            /* Disable HW when buffer over-flow happens.
             * HW behavior changed for over-flow:
             * in the past, HW cleared HWIF_ENC_E automatically;
             * new version: SW is asked to clear HWIF_ENC_E on buffer over-flow.
             */
            if (irq_status & 0x20)
                iowrite32(0, (void *)(dev->hwregs + reg_offset + 0x14));
            /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writing 1 */
            hwId = ioread32((void *)dev->hwregs + reg_offset);
            majorId = (hwId & 0x0000FF00) >> 8;
            wClr = (majorId >= 0x61) ? irq_status : (irq_status & (~0x1FD));
            iowrite32(wClr, (void *)(dev->hwregs + reg_offset + 0x04));
        }
        spin_unlock_irqrestore(&owner_lock, flags);
        return IRQ_HANDLED;
    }
    spin_unlock_irqrestore(&owner_lock, flags);
    printk(KERN_DEBUG "hantroenc_isr: received IRQ!\n");
    irq_status = (u32)ioread32((void *)(dev->hwregs + reg_offset + 0x04));
    printk(KERN_DEBUG "irq_status of subsys %d core %d is:%x\n", dev->subsys_id, core_type, irq_status);
    if (irq_status & 0x01)
    {
        /* Disable HW when buffer over-flow happens.
         * HW behavior changed for over-flow:
         * in the past, HW cleared HWIF_ENC_E automatically;
         * new version: SW is asked to clear HWIF_ENC_E on buffer over-flow.
         */
        if (irq_status & 0x20)
            iowrite32(0, (void *)(dev->hwregs + reg_offset + 0x14));
        /* clear all IRQ bits. (hwId >= 0x80006100) means IRQ is cleared by writing 1 */
        hwId = ioread32((void *)dev->hwregs + reg_offset);
        majorId = (hwId & 0x0000FF00) >> 8;
        wClr = (majorId >= 0x61) ? irq_status : (irq_status & (~0x1FD));
        iowrite32(wClr, (void *)(dev->hwregs + reg_offset + 0x04));
        spin_lock_irqsave(&owner_lock, flags);
        dev->irq_received[core_type] = 1;
        dev->irq_status[core_type] = irq_status & (~0x01);
        spin_unlock_irqrestore(&owner_lock, flags);
        wake_up_interruptible_all(&enc_wait_queue);
        handled++;
    }
    if (!handled)
    {
        PDEBUG("IRQ received, but not hantro's!\n");
    }
    return IRQ_HANDLED;
}
#ifdef hantroenc_DEBUG
static void ResetAsic(hantroenc_t *dev)
{
    int i, n;
    for (n = 0; n < total_subsys_num; n++)
    {
        if (dev[n].is_valid == 0)
            continue;
        iowrite32(0, (void *)(dev[n].hwregs + 0x14));
        for (i = 4; i < dev[n].subsys_data.cfg.iosize; i += 4)
        {
            iowrite32(0, (void *)(dev[n].hwregs + i));
        }
    }
}

static void dump_regs(unsigned long data)
{
    hantroenc_t *dev = (hantroenc_t *)data;
    int i;
    PDEBUG("Reg Dump Start\n");
    for (i = 0; i < dev->subsys_data.cfg.iosize; i += 4)
    {
        PDEBUG("\toffset %02X = %08X\n", i, ioread32(dev->hwregs + i));
    }
    PDEBUG("Reg Dump End\n");
}
#endif