/****************************************************************************
*
* The MIT License (MIT)
*
* Copyright (c) 2014 - 2021 VERISILICON
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************
*
* The GPL License (GPL)
*
* Copyright (C) 2014 - 2021 VERISILICON
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*****************************************************************************
*
* Note: This software is released under dual MIT and GPL licenses. A
* recipient may use this file under the terms of either the MIT license or
* GPL License. If you wish to use only one license not the other, you can
* indicate your decision by deleting one of the above license notices in your
* version of this file.
*
*****************************************************************************/
#include "hantrodec.h"
#include "dwl_defs.h"
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mod_devicetable.h>
#include "subsys.h"
#include "hantroaxife.h"
#include <asm/irq.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>
#include "kernel_allocator.h"

#undef PDEBUG
#ifdef HANTRODEC_DEBUG
#  ifdef __KERNEL__
#    define PDEBUG(fmt, args...) printk(KERN_INFO "hantrodec: " fmt, ## args)
#  else
#    define PDEBUG(fmt, args...) fprintf(stderr, fmt, ## args)
#  endif
#else
#  define PDEBUG(fmt, args...)
#endif
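
/*
 * PDEBUG() is compiled out unless the driver is built with HANTRODEC_DEBUG
 * defined, so traces such as PDEBUG("ioctl cmd 0x%08x\n", cmd) cost nothing
 * in production builds.
 */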
#define PCI_VENDOR_ID_HANTRO 0x10ee /* 0x1ae0, 0x16c3 */
#define PCI_DEVICE_ID_HANTRO_PCI 0x8014 /* 0x001a, 0xabcd */

/* Base address DDR register */
#define PCI_DDR_BAR 0
/* Base address of control register */
#define PCI_CONTROL_BAR 4
/* PCIe hantro driver offset in control register */
#define HANTRO_REG_OFFSET0 0x600000
#define HANTRO_REG_OFFSET1 0x700000

/* TODO(mheikkinen) Implement multicore support. */
struct pci_dev *gDev = NULL; /* PCI device structure. */
unsigned long gBaseHdwr;     /* PCI base register address (Hardware address) */
unsigned long gBaseDDRHw;    /* PCI base register address (memalloc) */
u32 gBaseLen;                /* Base register address length */

/* hantro G1 regs config including dec and pp */
//#define HANTRO_DEC_ORG_REGS 60
//#define HANTRO_PP_ORG_REGS 41
#define HANTRO_DEC_EXT_REGS 27
#define HANTRO_PP_EXT_REGS 9
//#define HANTRO_G1_DEC_TOTAL_REGS (HANTRO_DEC_ORG_REGS + HANTRO_DEC_EXT_REGS)
#define HANTRO_PP_TOTAL_REGS (HANTRO_PP_ORG_REGS + HANTRO_PP_EXT_REGS)
#define HANTRO_G1_DEC_REGS 155 /* G1 total regs */
//#define HANTRO_DEC_ORG_FIRST_REG 0
//#define HANTRO_DEC_ORG_LAST_REG 59
//#define HANTRO_DEC_EXT_FIRST_REG 119
//#define HANTRO_DEC_EXT_LAST_REG 145
#define HANTRO_PP_ORG_FIRST_REG 60
#define HANTRO_PP_ORG_LAST_REG 100
#define HANTRO_PP_EXT_FIRST_REG 146
#define HANTRO_PP_EXT_LAST_REG 154

/* hantro G2 reg config */
#define HANTRO_G2_DEC_REGS 337 /* G2 total regs */
#define HANTRO_G2_DEC_FIRST_REG 0
#define HANTRO_G2_DEC_LAST_REG (HANTRO_G2_DEC_REGS - 1)

/* hantro VC8000D reg config */
#define HANTRO_VC8000D_REGS 503 /* VC8000D total regs */
#define HANTRO_VC8000D_FIRST_REG 0
#define HANTRO_VC8000D_LAST_REG (HANTRO_VC8000D_REGS - 1)
#define HANTRODEC_HWBUILD_ID_OFF (309 * 4)

/* Logic module IRQs */
#define HXDEC_NO_IRQ -1
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define DEC_IO_SIZE_MAX (MAX(MAX(HANTRO_G2_DEC_REGS, HANTRO_G1_DEC_REGS), HANTRO_VC8000D_REGS) * 4)

/* Users should modify this configuration when porting to their own platform. */
/* Make sure that base_addr, io_size and dec_irq belong to the same core. */
/* Define CLK_CFG to use the kernel clk framework for clock configuration. */
//#define CLK_CFG
#ifdef CLK_CFG
#define CLK_ID "hantrodec_clk" /* this id should conform with the platform definition */
#endif
/* Logic module base address */
#define SOCLE_LOGIC_0_BASE 0x38300000
#define SOCLE_LOGIC_1_BASE 0x38310000
#define VEXPRESS_LOGIC_0_BASE 0xFC010000
#define VEXPRESS_LOGIC_1_BASE 0xFC020000
#define DEC_IO_SIZE_0 DEC_IO_SIZE_MAX /* bytes */
#define DEC_IO_SIZE_1 DEC_IO_SIZE_MAX /* bytes */
#define DEC_IRQ_0 HXDEC_NO_IRQ
#define DEC_IRQ_1 HXDEC_NO_IRQ

#define IS_G1(hw_id)       (((hw_id) == 0x6731) ? 1 : 0)
#define IS_G2(hw_id)       (((hw_id) == 0x6732) ? 1 : 0)
#define IS_VC8000D(hw_id)  (((hw_id) == 0x8001) ? 1 : 0)
#define IS_BIGOCEAN(hw_id) (((hw_id) == 0xB16D) ? 1 : 0)

/* HW configuration parameters for the APB filter of some IPs. */
/* Because this information cannot currently be read from the APB filter
   configuration registers, fixed values have to be used. */
#define VC8000D_NUM_MASK_REG 336
#define VC8000D_NUM_MODE 4
#define VC8000D_MASK_REG_OFFSET 4096
#define VC8000D_MASK_BITS_PER_REG 1
#define VC8000DJ_NUM_MASK_REG 332
#define VC8000DJ_NUM_MODE 1
#define VC8000DJ_MASK_REG_OFFSET 4096
#define VC8000DJ_MASK_BITS_PER_REG 1
#define AV1_NUM_MASK_REG 303
#define AV1_NUM_MODE 1
#define AV1_MASK_REG_OFFSET 4096
#define AV1_MASK_BITS_PER_REG 1
#define AXIFE_NUM_MASK_REG 144
#define AXIFE_NUM_MODE 1
#define AXIFE_MASK_REG_OFFSET 4096
#define AXIFE_MASK_BITS_PER_REG 1

#define VC8000D_MAX_CONFIG_LEN 32
#define VC8000D_PM_TIMEOUT 100 /* ms */

/*************************************************************/
/*********************local variable declaration*****************/
static const int DecHwId[] = {
  0x6731, /* G1 */
  0x6732, /* G2 */
  0xB16D, /* BigOcean */
  0x8001  /* VC8000D */
};
unsigned long base_port = -1;
unsigned int pcie = 0;
volatile unsigned char *reg = NULL;
unsigned int reg_access_opt = 0;
unsigned int vcmd = 0;
unsigned long alloc_size = 512;
unsigned long alloc_base = 0x1c0000000;

unsigned long multicorebase[HXDEC_MAX_CORES] = {
  HANTRO_REG_OFFSET0,
  HANTRO_REG_OFFSET1,
  0,
  0
};

int irq[HXDEC_MAX_CORES] = {
  131,
  DEC_IRQ_1,
  -1,
  -1
};

unsigned int iosize[HXDEC_MAX_CORES] = {
  DEC_IO_SIZE_0,
  DEC_IO_SIZE_1,
  -1,
  -1
};

/* One core may contain multiple pipelines, so the multicore base addresses may be changed. */
unsigned long multicorebase_actual[HXDEC_MAX_CORES];
struct subsys_config vpu_subsys[MAX_SUBSYS_NUM];
struct apbfilter_cfg apbfilter_cfg[MAX_SUBSYS_NUM][HW_CORE_MAX];
struct axife_cfg axife_cfg[MAX_SUBSYS_NUM];
int elements = 2;

#ifdef CLK_CFG
struct clk *clk_cfg;
int is_clk_on;
struct timer_list timer;
#endif

/* module_param(name, type, perm) */
//module_param(base_port, ulong, 0);
module_param(pcie, uint, 0);
//module_param_array(irq, int, &elements, 0);
module_param_array(multicorebase, ulong, &elements, 0644);
module_param(reg_access_opt, uint, 0);
module_param(vcmd, uint, 0);
module_param(alloc_base, ulong, 0);
module_param(alloc_size, ulong, 0);
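
/*
 * Example module load (hypothetical values; the module binary is assumed to
 * be named hantrodec.ko):
 *
 *   insmod hantrodec.ko multicorebase=0x38300000,0x38310000 reg_access_opt=1
 *
 * multicorebase overrides the default core base addresses above, and
 * reg_access_opt enables the shadow-register optimization used by
 * DecFlushRegs()/DecRefreshRegs().
 */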
static int hantrodec_major = 0; /* dynamic allocation */
static int hantrodec_minor = 0; /* dynamic allocation */
static struct cdev hantrodec_cdev;
static dev_t hantrodec_devt;
static struct class *hantrodec_class;
static struct dentry *root_debugfs_dir = NULL;

/* here's all the must remember stuff */
typedef struct {
  char *buffer;
  volatile unsigned int iosize[HXDEC_MAX_CORES];
  /* mapped addresses of the different HW cores' regs */
  volatile u8 *hwregs[HXDEC_MAX_CORES][HW_CORE_MAX];
  /* mapped addresses of the different HW cores' APB filter regs */
  volatile u8 *apbfilter_hwregs[HXDEC_MAX_CORES][HW_CORE_MAX];
  volatile int irq[HXDEC_MAX_CORES];
  int hw_id[HXDEC_MAX_CORES][HW_CORE_MAX];
  /* Requested client type for a given core, used when a subsys has multiple
     decoders, e.g., VC8000D+VC8000DJ+BigOcean */
  int client_type[HXDEC_MAX_CORES];
  int cores;
  struct fasync_struct *async_queue_dec;
  struct fasync_struct *async_queue_pp;
  struct platform_device *pdev;
  struct clk *cclk;
  struct clk *aclk;
  struct clk *pclk;
  char config_buf[VC8000D_MAX_CONFIG_LEN];
  int has_power_domains;
} hantrodec_t;

typedef struct {
  u32 cfg[HXDEC_MAX_CORES];        /* indicates the supported formats */
  u32 cfg_backup[HXDEC_MAX_CORES]; /* backup of cfg */
  int its_main_core_id[HXDEC_MAX_CORES]; /* indicates if a main core exists */
  int its_aux_core_id[HXDEC_MAX_CORES];  /* indicates if an aux core exists */
} core_cfg;
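
/*
 * config.cfg[core] holds a bitmask of the DWL_CLIENT_TYPE_* formats the core
 * supports (filled in by ReadCoreConfig() below); cfg_backup keeps the probed
 * values so they can be restored when a main/aux core pairing is released.
 */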
static hantrodec_t hantrodec_data; /* dynamic allocation? */

static int ReserveIO(void);
static void ReleaseIO(void);
static void ResetAsic(hantrodec_t *dev);

#ifdef HANTRODEC_DEBUG
static void dump_regs(hantrodec_t *dev);
#endif

/* IRQ handler */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
static irqreturn_t hantrodec_isr(int irq, void *dev_id, struct pt_regs *regs);
#else
static irqreturn_t hantrodec_isr(int irq, void *dev_id);
#endif

static u32 dec_regs[HXDEC_MAX_CORES][DEC_IO_SIZE_MAX/4];
static u32 apbfilter_regs[HXDEC_MAX_CORES][DEC_IO_SIZE_MAX/4+1];
/* shadow_regs used to compare whether it's necessary to write to registers */
static u32 shadow_dec_regs[HXDEC_MAX_CORES][DEC_IO_SIZE_MAX/4];

struct semaphore dec_core_sem;
struct semaphore pp_core_sem;

static int dec_irq = 0;
static int pp_irq = 0;

atomic_t irq_rx = ATOMIC_INIT(0);
atomic_t irq_tx = ATOMIC_INIT(0);

static struct file *dec_owner[HXDEC_MAX_CORES];
static struct file *pp_owner[HXDEC_MAX_CORES];

static int CoreHasFormat(const u32 *cfg, int core, u32 format);

/* spinlock_t owner_lock = SPIN_LOCK_UNLOCKED; */
DEFINE_SPINLOCK(owner_lock);

DECLARE_WAIT_QUEUE_HEAD(dec_wait_queue);
DECLARE_WAIT_QUEUE_HEAD(pp_wait_queue);
DECLARE_WAIT_QUEUE_HEAD(hw_queue);

#ifdef CLK_CFG
DEFINE_SPINLOCK(clk_lock);
#endif

#define DWL_CLIENT_TYPE_H264_DEC 1U
#define DWL_CLIENT_TYPE_MPEG4_DEC 2U
#define DWL_CLIENT_TYPE_JPEG_DEC 3U
#define DWL_CLIENT_TYPE_PP 4U
#define DWL_CLIENT_TYPE_VC1_DEC 5U
#define DWL_CLIENT_TYPE_MPEG2_DEC 6U
#define DWL_CLIENT_TYPE_VP6_DEC 7U
#define DWL_CLIENT_TYPE_AVS_DEC 8U
#define DWL_CLIENT_TYPE_RV_DEC 9U
#define DWL_CLIENT_TYPE_VP8_DEC 10U
#define DWL_CLIENT_TYPE_VP9_DEC 11U
#define DWL_CLIENT_TYPE_HEVC_DEC 12U
#define DWL_CLIENT_TYPE_ST_PP 14U
#define DWL_CLIENT_TYPE_H264_MAIN10 15U
#define DWL_CLIENT_TYPE_AVS2_DEC 16U
#define DWL_CLIENT_TYPE_AV1_DEC 17U
#define DWL_CLIENT_TYPE_BO_AV1_DEC 31U

#define BIGOCEANDEC_CFG 1
#define BIGOCEANDEC_AV1_E 5

static core_cfg config;
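
/*
 * Probe the synthesis/configuration registers of every decoder core in each
 * subsystem and record the supported formats in config.cfg[] as a bitmask of
 * DWL_CLIENT_TYPE_* values; a copy of the probed result is kept in
 * config.cfg_backup[].
 */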
static void ReadCoreConfig(hantrodec_t *dev) {
  int c, j;
  u32 reg, tmp, mask;

  memset(config.cfg, 0, sizeof(config.cfg));

  for (c = 0; c < dev->cores; c++) {
    for (j = 0; j < HW_CORE_MAX; j++) {
      if (j != HW_VC8000D && j != HW_VC8000DJ && j != HW_BIGOCEAN)
        continue;
      if (!dev->hwregs[c][j]) /* core type not present */
        continue;

      /* Decoder configuration */
      if (IS_G1(dev->hw_id[c][j])) {
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG * 4));

        tmp = (reg >> DWL_H264_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has H264\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_H264_DEC : 0;

        tmp = (reg >> DWL_JPEG_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has JPEG\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_JPEG_DEC : 0;

        tmp = (reg >> DWL_HJPEG_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has HJPEG\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_JPEG_DEC : 0;

        tmp = (reg >> DWL_MPEG4_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has MPEG4\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_MPEG4_DEC : 0;

        tmp = (reg >> DWL_VC1_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VC1\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VC1_DEC : 0;

        tmp = (reg >> DWL_MPEG2_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has MPEG2\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_MPEG2_DEC : 0;

        tmp = (reg >> DWL_VP6_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VP6\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP6_DEC : 0;

        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG_2 * 4));

        /* VP7 and WebP are part of VP8 */
        mask = (1 << DWL_VP8_E) | (1 << DWL_VP7_E) | (1 << DWL_WEBP_E);
        tmp = (reg & mask);
        if (tmp & (1 << DWL_VP8_E))
          pr_info("hantrodec: subsys[%d] has VP8\n", c);
        if (tmp & (1 << DWL_VP7_E))
          pr_info("hantrodec: subsys[%d] has VP7\n", c);
        if (tmp & (1 << DWL_WEBP_E))
          pr_info("hantrodec: subsys[%d] has WebP\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP8_DEC : 0;

        tmp = (reg >> DWL_AVS_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has AVS\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_AVS_DEC : 0;

        tmp = (reg >> DWL_RV_E) & 0x03U;
        if (tmp) pr_info("hantrodec: subsys[%d] has RV\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_RV_DEC : 0;

        /* Post-processor configuration */
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTROPP_SYNTH_CFG * 4));

        tmp = (reg >> DWL_G1_PP_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has PP\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_PP : 0;
      } else if (IS_G2(dev->hw_id[c][j])) {
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_CFG_STAT * 4));

        tmp = (reg >> DWL_G2_HEVC_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has HEVC\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_HEVC_DEC : 0;

        tmp = (reg >> DWL_G2_VP9_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VP9\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP9_DEC : 0;

        /* Post-processor configuration */
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODECPP_SYNTH_CFG * 4));

        tmp = (reg >> DWL_G2_PP_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has PP\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_PP : 0;
      } else if ((IS_VC8000D(dev->hw_id[c][j])) && config.its_main_core_id[c] < 0) {
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG * 4));
        pr_info("hantrodec: subsys[%d] swreg[%d] = 0x%08x\n", c, HANTRODEC_SYNTH_CFG, reg);

        tmp = (reg >> DWL_H264_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has H264\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_H264_DEC : 0;

        tmp = (reg >> DWL_H264HIGH10_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has H264HIGH10\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_H264_DEC : 0;

        tmp = (reg >> DWL_AVS2_E) & 0x03U;
        if (tmp) pr_info("hantrodec: subsys[%d] has AVS2\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_AVS2_DEC : 0;

        tmp = (reg >> DWL_JPEG_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has JPEG\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_JPEG_DEC : 0;

        tmp = (reg >> DWL_HJPEG_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has HJPEG\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_JPEG_DEC : 0;

        tmp = (reg >> DWL_MPEG4_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has MPEG4\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_MPEG4_DEC : 0;

        tmp = (reg >> DWL_VC1_E) & 0x3U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VC1\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VC1_DEC : 0;

        tmp = (reg >> DWL_MPEG2_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has MPEG2\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_MPEG2_DEC : 0;

        tmp = (reg >> DWL_VP6_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VP6\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP6_DEC : 0;

        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG_2 * 4));
        pr_info("hantrodec: subsys[%d] swreg[%d] = 0x%08x\n", c, HANTRODEC_SYNTH_CFG_2, reg);

        /* VP7 and WebP are part of VP8 */
        mask = (1 << DWL_VP8_E) | (1 << DWL_VP7_E) | (1 << DWL_WEBP_E);
        tmp = (reg & mask);
        if (tmp & (1 << DWL_VP8_E))
          pr_info("hantrodec: subsys[%d] has VP8\n", c);
        if (tmp & (1 << DWL_VP7_E))
          pr_info("hantrodec: subsys[%d] has VP7\n", c);
        if (tmp & (1 << DWL_WEBP_E))
          pr_info("hantrodec: subsys[%d] has WebP\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP8_DEC : 0;

        tmp = (reg >> DWL_AVS_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has AVS\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_AVS_DEC : 0;

        tmp = (reg >> DWL_RV_E) & 0x03U;
        if (tmp) pr_info("hantrodec: subsys[%d] has RV\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_RV_DEC : 0;

        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG_3 * 4));
        pr_info("hantrodec: subsys[%d] swreg[%d] = 0x%08x\n", c, HANTRODEC_SYNTH_CFG_3, reg);

        tmp = (reg >> DWL_HEVC_E) & 0x07U;
        if (tmp) pr_info("hantrodec: subsys[%d] has HEVC\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_HEVC_DEC : 0;

        tmp = (reg >> DWL_VP9_E) & 0x07U;
        if (tmp) pr_info("hantrodec: subsys[%d] has VP9\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_VP9_DEC : 0;

        /* Post-processor configuration */
        reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODECPP_CFG_STAT * 4));

        tmp = (reg >> DWL_PP_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has PP\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_PP : 0;
        config.cfg[c] |= 1 << DWL_CLIENT_TYPE_ST_PP;

        if (config.its_aux_core_id[c] >= 0) {
          /* set main_core_id and aux_core_id */
          reg = ioread32((void*)(dev->hwregs[c][j] + HANTRODEC_SYNTH_CFG_2 * 4));

          tmp = (reg >> DWL_H264_PIPELINE_E) & 0x01U;
          if (tmp) pr_info("hantrodec: subsys[%d] has pipeline H264\n", c);
          config.cfg[config.its_aux_core_id[c]] |= tmp ? 1 << DWL_CLIENT_TYPE_H264_DEC : 0;

          tmp = (reg >> DWL_JPEG_PIPELINE_E) & 0x01U;
          if (tmp) pr_info("hantrodec: subsys[%d] has pipeline JPEG\n", c);
          config.cfg[config.its_aux_core_id[c]] |= tmp ? 1 << DWL_CLIENT_TYPE_JPEG_DEC : 0;
        }
      } else if (IS_BIGOCEAN(dev->hw_id[c][j])) {
        reg = ioread32((void*)(dev->hwregs[c][j] + BIGOCEANDEC_CFG * 4));

        tmp = (reg >> BIGOCEANDEC_AV1_E) & 0x01U;
        if (tmp) pr_info("hantrodec: subsys[%d] has AV1 (BigOcean)\n", c);
        config.cfg[c] |= tmp ? 1 << DWL_CLIENT_TYPE_BO_AV1_DEC : 0;
      }
    }
  }
  memcpy(config.cfg_backup, config.cfg, sizeof(config.cfg));
}
static int CoreHasFormat(const u32 *cfg, int core, u32 format) {
  return (cfg[core] & (1 << format)) ? 1 : 0;
}
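
/*
 * Try to reserve the given core for filp: succeeds only if the core supports
 * the requested format and currently has no owner. When a main/aux core pair
 * exists, the partner core's advertised formats are restricted so the pair
 * cannot be handed out inconsistently.
 */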
int GetDecCore(long core, hantrodec_t *dev, struct file *filp, unsigned long format) {
  int success = 0;
  unsigned long flags;

  spin_lock_irqsave(&owner_lock, flags);
  if (CoreHasFormat(config.cfg, core, format) && dec_owner[core] == NULL /*&& config.its_main_core_id[core] >= 0*/) {
    dec_owner[core] = filp;
    success = 1;

    /* If the main core takes a format that the aux core does not support,
       clear the aux core's cfg so it advertises no video format. */
    if (config.its_aux_core_id[core] >= 0 &&
        !CoreHasFormat(config.cfg, config.its_aux_core_id[core], format)) {
      config.cfg[config.its_aux_core_id[core]] = 0;
    }
    /* If the aux core takes a format, restrict the main core's cfg to the
       formats supported by the aux core. */
    else if (config.its_main_core_id[core] >= 0) {
      config.cfg[config.its_main_core_id[core]] = config.cfg[core];
    }
  }
  spin_unlock_irqrestore(&owner_lock, flags);
  return success;
}
int GetDecCoreAny(long *core, hantrodec_t *dev, struct file *filp,
                  unsigned long format) {
  int success = 0;
  long c;

  *core = -1;
  for (c = 0; c < dev->cores; c++) {
    /* a free core that has format */
    if (GetDecCore(c, dev, filp, format)) {
      success = 1;
      *core = c;
      break;
    }
  }
  return success;
}

int GetDecCoreID(hantrodec_t *dev, struct file *filp,
                 unsigned long format) {
  long c;
  unsigned long flags;
  int core_id = -1;

  for (c = 0; c < dev->cores; c++) {
    /* a core that has format */
    spin_lock_irqsave(&owner_lock, flags);
    if (CoreHasFormat(config.cfg, c, format)) {
      core_id = c;
      spin_unlock_irqrestore(&owner_lock, flags);
      break;
    }
    spin_unlock_irqrestore(&owner_lock, flags);
  }
  return core_id;
}
#if 0
static int hantrodec_choose_core(int is_g1) {
  volatile unsigned char *reg = NULL;
  unsigned int blk_base = 0x38320000;

  PDEBUG("hantrodec_choose_core\n");
  if (!request_mem_region(blk_base, 0x1000, "blk_ctl")) {
    pr_info("blk_ctl: failed to reserve HW regs\n");
    return -EBUSY;
  }

  reg = (volatile u8 *) ioremap_nocache(blk_base, 0x1000);
  if (reg == NULL) {
    pr_info("blk_ctl: failed to ioremap HW regs\n");
    if (reg)
      iounmap((void *)reg);
    release_mem_region(blk_base, 0x1000);
    return -EBUSY;
  }

  // G1 use, set to 1; G2 use, set to 0, choose the one you are using
  if (is_g1)
    iowrite32(0x1, (void*)(reg + 0x14)); // VPUMIX only use G1, user should modify the reg according to platform design
  else
    iowrite32(0x0, (void*)(reg + 0x14)); // VPUMIX only use G2, user should modify the reg according to platform design

  if (reg)
    iounmap((void *)reg);
  release_mem_region(blk_base, 0x1000);
  PDEBUG("hantrodec_choose_core OK!\n");
  return 0;
}
#endif
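
/*
 * Reserve any free core that supports the requested format for this file
 * handle, blocking (interruptibly) until one becomes available. The typical
 * sequence from user space is: reserve a core, flush the register set with
 * DecFlushRegs() to start decoding, wait with WaitDecReadyAndRefreshRegs(),
 * then release the core with ReleaseDecoder().
 */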
long ReserveDecoder(hantrodec_t *dev, struct file *filp, unsigned long format) {
  long core = -1;

  /* reserve a core */
  if (down_interruptible(&dec_core_sem))
    return -ERESTARTSYS;

  /* lock a core that has the specific format */
  if (wait_event_interruptible(hw_queue,
                               GetDecCoreAny(&core, dev, filp, format) != 0))
    return -ERESTARTSYS;

#if 0
  if (IS_G1(dev->hw_id[core])) {
    if (0 == hantrodec_choose_core(1))
      printk("G1 is reserved\n");
    else
      return -1;
  } else {
    if (0 == hantrodec_choose_core(0))
      printk("G2 is reserved\n");
    else
      return -1;
  }
#endif

  dev->client_type[core] = format;
  return core;
}
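
/*
 * Release a core reserved with ReserveDecoder(). If the hardware is still
 * running, the decoder is aborted and its interrupt disabled before
 * ownership is cleared and waiters on hw_queue are woken.
 */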
void ReleaseDecoder(hantrodec_t *dev, long core) {
  u32 status;
  unsigned long flags;

  PDEBUG("ReleaseDecoder %ld\n", core);

  if (dev->client_type[core] == DWL_CLIENT_TYPE_BO_AV1_DEC)
    status = ioread32((void*)(dev->hwregs[core][HW_BIGOCEAN] + BIGOCEAN_IRQ_STAT_DEC_OFF));
  else
    status = ioread32((void*)(dev->hwregs[core][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));

  /* make sure HW is disabled */
  if (status & HANTRODEC_DEC_E) {
    pr_info("hantrodec: DEC[%li] still enabled -> reset\n", core);

    /* abort decoder */
    status |= HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE;
    iowrite32(status, (void*)(dev->hwregs[core][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
  }

  spin_lock_irqsave(&owner_lock, flags);

  /* If the aux core is released, revert the main core's config */
  if (config.its_main_core_id[core] >= 0) {
    config.cfg[config.its_main_core_id[core]] = config.cfg_backup[config.its_main_core_id[core]];
  }
  /* If the main core is released, revert the aux core's config */
  if (config.its_aux_core_id[core] >= 0) {
    config.cfg[config.its_aux_core_id[core]] = config.cfg_backup[config.its_aux_core_id[core]];
  }

  dec_owner[core] = NULL;
  spin_unlock_irqrestore(&owner_lock, flags);
  up(&dec_core_sem);
  wake_up_interruptible_all(&hw_queue);
}
long ReservePostProcessor(hantrodec_t *dev, struct file *filp) {
  unsigned long flags;
  long core = 0;

  /* single core PP only */
  if (down_interruptible(&pp_core_sem))
    return -ERESTARTSYS;

  spin_lock_irqsave(&owner_lock, flags);
  pp_owner[core] = filp;
  spin_unlock_irqrestore(&owner_lock, flags);

  return core;
}

void ReleasePostProcessor(hantrodec_t *dev, long core) {
  unsigned long flags;
  u32 status = ioread32((void*)(dev->hwregs[core][HW_VC8000D] + HANTRO_IRQ_STAT_PP_OFF));

  /* make sure HW is disabled */
  if (status & HANTRO_PP_E) {
    pr_info("hantrodec: PP[%li] still enabled -> reset\n", core);

    /* disable IRQ */
    status |= HANTRO_PP_IRQ_DISABLE;

    /* disable postprocessor */
    status &= (~HANTRO_PP_E);
    iowrite32(0x10, (void*)(dev->hwregs[core][HW_VC8000D] + HANTRO_IRQ_STAT_PP_OFF));
  }

  spin_lock_irqsave(&owner_lock, flags);
  pp_owner[core] = NULL;
  spin_unlock_irqrestore(&owner_lock, flags);

  up(&pp_core_sem);
}
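
/*
 * Reserve core 0 for a combined DEC+PP pipeline: the core must support both
 * the requested decoder format and post-processing, and both the decoder
 * and post-processor semaphores are taken before ownership is recorded.
 */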
long ReserveDecPp(hantrodec_t *dev, struct file *filp, unsigned long format) {
  /* reserve core 0, DEC+PP for pipeline */
  unsigned long flags;
  long core = 0;

  /* check that the core has the requested dec format */
  if (!CoreHasFormat(config.cfg, core, format))
    return -EFAULT;

  /* check that the core has PP */
  if (!CoreHasFormat(config.cfg, core, DWL_CLIENT_TYPE_PP))
    return -EFAULT;

  /* reserve a core */
  if (down_interruptible(&dec_core_sem))
    return -ERESTARTSYS;

  /* wait until the core is available */
  if (wait_event_interruptible(hw_queue,
                               GetDecCore(core, dev, filp, format) != 0)) {
    up(&dec_core_sem);
    return -ERESTARTSYS;
  }

  if (down_interruptible(&pp_core_sem)) {
    ReleaseDecoder(dev, core);
    return -ERESTARTSYS;
  }

  spin_lock_irqsave(&owner_lock, flags);
  pp_owner[core] = filp;
  spin_unlock_irqrestore(&owner_lock, flags);

  return core;
}

#ifdef HANTRODEC_DEBUG
static u32 flush_count = 0; /* number of times DecFlushRegs has been called */
static u32 flush_regs = 0;  /* total number of registers flushed */
#endif
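
/*
 * Copy a full register set from user space and flush it to the core. For
 * VC8000D/VC8000DJ/BigOcean the write order matters: registers from swreg3
 * onward are written first (skipping values that match the shadow copy when
 * reg_access_opt is set), then swreg2 (the AV1 start bit), and finally
 * swreg1, the status register whose enable bit may start the decoder.
 */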
long DecFlushRegs(hantrodec_t *dev, struct core_desc *core) {
  long ret = 0, i;
#ifdef HANTRODEC_DEBUG
  int reg_wr = 2;
#endif
  u32 id = core->id;
  u32 type = core->type;

  PDEBUG("hantrodec: DecFlushRegs\n");
  PDEBUG("hantrodec: id = %d, type = %d, size = %d, reg_id = %d\n",
         core->id, core->type, core->size, core->reg_id);

  if (type == HW_VC8000D && !vpu_subsys[id].submodule_hwregs[type])
    type = HW_VC8000DJ;
  if (dev->client_type[id] == DWL_CLIENT_TYPE_BO_AV1_DEC)
    type = HW_BIGOCEAN;

  if (id >= MAX_SUBSYS_NUM ||
      !vpu_subsys[id].base_addr ||
      core->type >= HW_CORE_MAX ||
      !vpu_subsys[id].submodule_hwregs[type])
    return -EINVAL;

  PDEBUG("hantrodec: submodule_iosize = %d\n", vpu_subsys[id].submodule_iosize[type]);

  ret = copy_from_user(dec_regs[id], core->regs, vpu_subsys[id].submodule_iosize[type]);
  if (ret) {
    PDEBUG("copy_from_user failed, returned %li\n", ret);
    return -EFAULT;
  }

  if (type == HW_VC8000D || type == HW_BIGOCEAN || type == HW_VC8000DJ) {
    /* write all regs but the status reg[1] to hardware */
    if (reg_access_opt) {
      for (i = 3; i < vpu_subsys[id].submodule_iosize[type]/4; i++) {
        /* check whether the register value has changed */
        if (dec_regs[id][i] != shadow_dec_regs[id][i]) {
          iowrite32(dec_regs[id][i], (void*)(dev->hwregs[id][type] + i*4));
          shadow_dec_regs[id][i] = dec_regs[id][i];
#ifdef HANTRODEC_DEBUG
          reg_wr++;
#endif
        }
      }
    } else {
      for (i = 3; i < vpu_subsys[id].submodule_iosize[type]/4; i++) {
        iowrite32(dec_regs[id][i], (void*)(dev->hwregs[id][type] + i*4));
#ifdef VALIDATE_REGS_WRITE
        if (dec_regs[id][i] != ioread32((void*)(dev->hwregs[id][type] + i*4)))
          pr_info("hantrodec: swreg[%ld]: read %08x != write %08x *\n",
                  i, ioread32((void*)(dev->hwregs[id][type] + i*4)), dec_regs[id][i]);
#endif
      }
#ifdef HANTRODEC_DEBUG
      reg_wr = vpu_subsys[id].submodule_iosize[type]/4 - 1;
#endif
    }

    /* write swreg2 for AV1, in which bit0 is the start bit */
    iowrite32(dec_regs[id][2], (void*)(dev->hwregs[id][type] + 8));
    shadow_dec_regs[id][2] = dec_regs[id][2];

    /* write the status register, which may start the decoder */
    iowrite32(dec_regs[id][1], (void*)(dev->hwregs[id][type] + 4));
    shadow_dec_regs[id][1] = dec_regs[id][1];

#ifdef HANTRODEC_DEBUG
    flush_count++;
    flush_regs += reg_wr;
#endif

    PDEBUG("flushed registers on core %d\n", id);
    PDEBUG("%d DecFlushRegs: flushed %d/%d registers (dec_mode = %d, avg %d regs per flush)\n",
           flush_count, reg_wr, flush_regs, dec_regs[id][3]>>27, flush_regs/flush_count);
  } else {
    /* write all regs to hardware */
    for (i = 0; i < vpu_subsys[id].submodule_iosize[type]/4; i++) {
      iowrite32(dec_regs[id][i], (void*)(dev->hwregs[id][type] + i*4));
#ifdef VALIDATE_REGS_WRITE
      if (dec_regs[id][i] != ioread32((void*)(dev->hwregs[id][type] + i*4)))
        pr_info("hantrodec: swreg[%ld]: read %08x != write %08x *\n",
                i, ioread32((void*)(dev->hwregs[id][type] + i*4)), dec_regs[id][i]);
#endif
    }
  }
  return 0;
}
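
/*
 * Write a user-supplied sub-range of registers, core->reg_id up to
 * reg_id + size/4 - 1, directly to the hardware, keeping the VC8000D
 * shadow copy in sync.
 */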
long DecWriteRegs(hantrodec_t *dev, struct core_desc *core)
{
  long ret = 0;
  u32 i = core->reg_id;
  u32 id = core->id;
  u32 type = core->type;

  PDEBUG("hantrodec: DecWriteRegs\n");
  PDEBUG("hantrodec: id = %d, type = %d, size = %d, reg_id = %d\n",
         core->id, core->type, core->size, core->reg_id);

  if (type == HW_VC8000D && !vpu_subsys[id].submodule_hwregs[type])
    type = HW_VC8000DJ;
  if (dev->client_type[id] == DWL_CLIENT_TYPE_BO_AV1_DEC)
    type = HW_BIGOCEAN;

  if (id >= MAX_SUBSYS_NUM ||
      !vpu_subsys[id].base_addr ||
      type >= HW_CORE_MAX ||
      !vpu_subsys[id].submodule_hwregs[type] ||
      (core->size & 0x3) ||
      core->reg_id * 4 + core->size > vpu_subsys[id].submodule_iosize[type])
    return -EINVAL;

  ret = copy_from_user(dec_regs[id], core->regs, core->size);
  if (ret) {
    PDEBUG("copy_from_user failed, returned %li\n", ret);
    return -EFAULT;
  }

  for (i = core->reg_id; i < core->reg_id + core->size/4; i++) {
    PDEBUG("hantrodec: write %08x to reg[%d] core %d\n", dec_regs[id][i-core->reg_id], i, id);
    iowrite32(dec_regs[id][i-core->reg_id], (void*)(dev->hwregs[id][type] + i*4));
    if (type == HW_VC8000D)
      shadow_dec_regs[id][i] = dec_regs[id][i-core->reg_id];
  }
  return 0;
}
long DecWriteApbFilterRegs(hantrodec_t *dev, struct core_desc *core)
{
  long ret = 0;
  u32 i = core->reg_id;
  u32 id = core->id;

  PDEBUG("hantrodec: DecWriteApbFilterRegs\n");
  PDEBUG("hantrodec: id = %d, type = %d, size = %d, reg_id = %d\n",
         core->id, core->type, core->size, core->reg_id);

  if (id >= MAX_SUBSYS_NUM ||
      !vpu_subsys[id].base_addr ||
      core->type >= HW_CORE_MAX ||
      !vpu_subsys[id].submodule_hwregs[core->type] ||
      (core->size & 0x3) ||
      core->reg_id * 4 + core->size > vpu_subsys[id].submodule_iosize[core->type] + 4)
    return -EINVAL;

  ret = copy_from_user(apbfilter_regs[id], core->regs, core->size);
  if (ret) {
    PDEBUG("copy_from_user failed, returned %li\n", ret);
    return -EFAULT;
  }

  for (i = core->reg_id; i < core->reg_id + core->size/4; i++) {
    PDEBUG("hantrodec: write %08x to reg[%d] core %d\n", apbfilter_regs[id][i-core->reg_id], i, id);
    iowrite32(apbfilter_regs[id][i-core->reg_id], (void*)(dev->apbfilter_hwregs[id][core->type] + i*4));
  }
  return 0;
}
long DecReadRegs(hantrodec_t *dev, struct core_desc *core)
{
  long ret;
  u32 id = core->id;
  u32 i = core->reg_id;
  u32 type = core->type;

  PDEBUG("hantrodec: DecReadRegs\n");
  PDEBUG("hantrodec: id = %d, type = %d, size = %d, reg_id = %d\n",
         core->id, core->type, core->size, core->reg_id);

  if (type == HW_VC8000D && !vpu_subsys[id].submodule_hwregs[type])
    type = HW_VC8000DJ;
  if (dev->client_type[id] == DWL_CLIENT_TYPE_BO_AV1_DEC)
    type = HW_BIGOCEAN;

  if (id >= MAX_SUBSYS_NUM ||
      !vpu_subsys[id].base_addr ||
      type >= HW_CORE_MAX ||
      !vpu_subsys[id].submodule_hwregs[type] ||
      (core->size & 0x3) ||
      core->reg_id * 4 + core->size > vpu_subsys[id].submodule_iosize[type])
    return -EINVAL;

  /* read the requested registers from hardware */
  for (i = core->reg_id; i < core->reg_id + core->size/4; i++) {
    dec_regs[id][i-core->reg_id] = ioread32((void*)(dev->hwregs[id][type] + i*4));
    PDEBUG("hantrodec: read %08x from reg[%d] core %d\n", dec_regs[id][i-core->reg_id], i, id);
    if (type == HW_VC8000D)
      shadow_dec_regs[id][i] = dec_regs[id][i-core->reg_id];
  }

  /* copy the registers to user space */
  ret = copy_to_user(core->regs, dec_regs[id], core->size);
  if (ret) {
    PDEBUG("copy_to_user failed, returned %li\n", ret);
    return -EFAULT;
  }
  return 0;
}
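
/*
 * Read the core's registers back into dec_regs[] and copy them to user
 * space. With reg_access_opt set, only swreg0/1/62/63/168/169 are re-read
 * from the hardware; the remaining values are returned from the driver's
 * cached copy.
 */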
long DecRefreshRegs(hantrodec_t *dev, struct core_desc *core)
{
  long ret, i;
  u32 id = core->id;
  u32 type = core->type;

  PDEBUG("hantrodec: DecRefreshRegs\n");
  PDEBUG("hantrodec: id = %d, type = %d, size = %d, reg_id = %d\n",
         core->id, core->type, core->size, core->reg_id);

  if (type == HW_VC8000D && !vpu_subsys[id].submodule_hwregs[type])
    type = HW_VC8000DJ;
  if (dev->client_type[id] == DWL_CLIENT_TYPE_BO_AV1_DEC)
    type = HW_BIGOCEAN;

  if (id >= MAX_SUBSYS_NUM ||
      !vpu_subsys[id].base_addr ||
      type >= HW_CORE_MAX ||
      !vpu_subsys[id].submodule_hwregs[type])
    return -EINVAL;

  PDEBUG("hantrodec: submodule_iosize = %d\n", vpu_subsys[id].submodule_iosize[type]);

  if (!reg_access_opt) {
    for (i = 0; i < vpu_subsys[id].submodule_iosize[type]/4; i++) {
      dec_regs[id][i] = ioread32((void*)(dev->hwregs[id][type] + i*4));
    }
  } else {
    /* only need to read swreg0, 1, 62, 63, 168 and 169 */
#define REFRESH_REG(idx) i = (idx); shadow_dec_regs[id][i] = dec_regs[id][i] = ioread32((void*)(dev->hwregs[id][type] + i*4))
    REFRESH_REG(0);
    REFRESH_REG(1);
    REFRESH_REG(62);
    REFRESH_REG(63);
    REFRESH_REG(168);
    REFRESH_REG(169);
#undef REFRESH_REG
  }

  ret = copy_to_user(core->regs, dec_regs[id], vpu_subsys[id].submodule_iosize[type]);
  if (ret) {
    PDEBUG("copy_to_user failed, returned %li\n", ret);
    return -EFAULT;
  }
  return 0;
}
static int CheckDecIrq(hantrodec_t *dev, int id) {
  unsigned long flags;
  int rdy = 0;
  const u32 irq_mask = (1 << id);

  spin_lock_irqsave(&owner_lock, flags);
  if (dec_irq & irq_mask) {
    /* reset the wait condition(s) */
    dec_irq &= ~irq_mask;
    rdy = 1;
  }
  spin_unlock_irqrestore(&owner_lock, flags);
  return rdy;
}
long WaitDecReadyAndRefreshRegs(hantrodec_t *dev, struct core_desc *core) {
  u32 id = core->id;
  long ret;

  PDEBUG("wait_event_interruptible DEC[%d]\n", id);
#ifdef USE_SW_TIMEOUT
  u32 status;
  ret = wait_event_interruptible_timeout(dec_wait_queue, CheckDecIrq(dev, id), msecs_to_jiffies(2000));
  if (ret < 0) {
    PDEBUG("DEC[%d] wait_event_interruptible interrupted\n", id);
    return -ERESTARTSYS;
  } else if (ret == 0) {
    PDEBUG("DEC[%d] wait_event_interruptible timeout\n", id);
    status = ioread32((void*)(dev->hwregs[id][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
    /* check if HW is enabled */
    if (status & HANTRODEC_DEC_E) {
      pr_info("hantrodec: DEC[%d] reset because of timeout\n", id);

      /* abort decoder */
      status |= HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE;
      iowrite32(status, (void*)(dev->hwregs[id][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
    }
  }
#else
  ret = wait_event_interruptible(dec_wait_queue, CheckDecIrq(dev, id));
  if (ret) {
    PDEBUG("DEC[%d] wait_event_interruptible interrupted\n", id);
    return -ERESTARTSYS;
  }
#endif
  atomic_inc(&irq_tx);

  /* refresh registers */
  return DecRefreshRegs(dev, core);
}
#if 0
long PPFlushRegs(hantrodec_t *dev, struct core_desc *core) {
  long ret = 0;
  u32 id = core->id;
  u32 i;

  /* copy original dec regs to kernel space */
  ret = copy_from_user(dec_regs[id] + HANTRO_PP_ORG_FIRST_REG,
                       core->regs + HANTRO_PP_ORG_FIRST_REG,
                       HANTRO_PP_ORG_REGS*4);
  if (sizeof(void *) == 8) {
    /* copy extended dec regs to kernel space */
    ret = copy_from_user(dec_regs[id] + HANTRO_PP_EXT_FIRST_REG,
                         core->regs + HANTRO_PP_EXT_FIRST_REG,
                         HANTRO_PP_EXT_REGS*4);
  }
  if (ret) {
    PDEBUG("copy_from_user failed, returned %li\n", ret);
    return -EFAULT;
  }

  /* write all regs but the status reg[1] to hardware */
  /* both original and extended regs need to be written */
  for (i = HANTRO_PP_ORG_FIRST_REG + 1; i <= HANTRO_PP_ORG_LAST_REG; i++)
    iowrite32(dec_regs[id][i], (void*)(dev->hwregs[id] + i*4));
  if (sizeof(void *) == 8) {
    for (i = HANTRO_PP_EXT_FIRST_REG; i <= HANTRO_PP_EXT_LAST_REG; i++)
      iowrite32(dec_regs[id][i], (void*)(dev->hwregs[id] + i*4));
  }

  /* write the stat reg, which may start the PP */
  iowrite32(dec_regs[id][HANTRO_PP_ORG_FIRST_REG],
            (void*)(dev->hwregs[id] + HANTRO_PP_ORG_FIRST_REG * 4));
  return 0;
}

long PPRefreshRegs(hantrodec_t *dev, struct core_desc *core) {
  long i, ret;
  u32 id = core->id;

  if (sizeof(void *) == 8) {
    /* user has to know exactly what they are asking for */
    if (core->size != (HANTRO_PP_TOTAL_REGS * 4))
      return -EFAULT;
  } else {
    /* user has to know exactly what they are asking for */
    if (core->size != (HANTRO_PP_ORG_REGS * 4))
      return -EFAULT;
  }

  /* read all registers from hardware */
  /* both original and extended regs need to be read */
  for (i = HANTRO_PP_ORG_FIRST_REG; i <= HANTRO_PP_ORG_LAST_REG; i++)
    dec_regs[id][i] = ioread32((void*)(dev->hwregs[id] + i*4));
  if (sizeof(void *) == 8) {
    for (i = HANTRO_PP_EXT_FIRST_REG; i <= HANTRO_PP_EXT_LAST_REG; i++)
      dec_regs[id][i] = ioread32((void*)(dev->hwregs[id] + i*4));
  }

  /* copy the registers to user space */
  /* original registers */
  ret = copy_to_user(core->regs + HANTRO_PP_ORG_FIRST_REG,
                     dec_regs[id] + HANTRO_PP_ORG_FIRST_REG,
                     HANTRO_PP_ORG_REGS*4);
  if (sizeof(void *) == 8) {
    /* extended registers */
    ret = copy_to_user(core->regs + HANTRO_PP_EXT_FIRST_REG,
                       dec_regs[id] + HANTRO_PP_EXT_FIRST_REG,
                       HANTRO_PP_EXT_REGS * 4);
  }
  if (ret) {
    PDEBUG("copy_to_user failed, returned %li\n", ret);
    return -EFAULT;
  }
  return 0;
}

static int CheckPPIrq(hantrodec_t *dev, int id) {
  unsigned long flags;
  int rdy = 0;
  const u32 irq_mask = (1 << id);

  spin_lock_irqsave(&owner_lock, flags);
  if (pp_irq & irq_mask) {
    /* reset the wait condition(s) */
    pp_irq &= ~irq_mask;
    rdy = 1;
  }
  spin_unlock_irqrestore(&owner_lock, flags);
  return rdy;
}

long WaitPPReadyAndRefreshRegs(hantrodec_t *dev, struct core_desc *core) {
  u32 id = core->id;

  PDEBUG("wait_event_interruptible PP[%d]\n", id);
  if (wait_event_interruptible(pp_wait_queue, CheckPPIrq(dev, id))) {
    PDEBUG("PP[%d] wait_event_interruptible interrupted\n", id);
    return -ERESTARTSYS;
  }
  atomic_inc(&irq_tx);

  /* refresh registers */
  return PPRefreshRegs(dev, core);
}
#endif
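
/*
 * Scan the pending-IRQ bitmask for a core owned by this file handle. Returns
 * 1 and clears the bit when a matching IRQ is found; IRQs on cores with no
 * owner are reported and cleared as "zombie" interrupts.
 */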
static int CheckCoreIrq(hantrodec_t *dev, const struct file *filp, int *id) {
  unsigned long flags;
  int rdy = 0, n = 0;

  do {
    u32 irq_mask = (1 << n);

    spin_lock_irqsave(&owner_lock, flags);
    if (dec_irq & irq_mask) {
      if (dec_owner[n] == filp) {
        /* we have an IRQ for our client */

        /* reset the wait condition(s) */
        dec_irq &= ~irq_mask;

        /* signal ready core no. for our client */
        *id = n;
        rdy = 1;
        spin_unlock_irqrestore(&owner_lock, flags);
        break;
      } else if (dec_owner[n] == NULL) {
        /* zombie IRQ */
        pr_info("IRQ on core[%d], but no owner!!!\n", n);

        /* reset the wait condition(s) */
        dec_irq &= ~irq_mask;
      }
    }
    spin_unlock_irqrestore(&owner_lock, flags);
    n++; /* next core */
  } while (n < dev->cores);

  return rdy;
}
  1054. long WaitCoreReady(hantrodec_t *dev, const struct file *filp, int *id) {
  1055. long ret;
  1056. PDEBUG("wait_event_interruptible CORE\n");
  1057. #ifdef USE_SW_TIMEOUT
  1058. u32 i, status;
  1059. ret = wait_event_interruptible_timeout(dec_wait_queue, CheckCoreIrq(dev, filp, id), msecs_to_jiffies(2000));
  1060. if(ret < 0) {
  1061. PDEBUG("CORE wait_event_interruptible interrupted\n");
  1062. return -ERESTARTSYS;
  1063. } else if (ret == 0) {
  1064. PDEBUG("CORE wait_event_interruptible timeout\n");
  1065. for(i = 0; i < dev->cores; i++) {
  1066. status = ioread32((void*)(dev->hwregs[i][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
  1067. /* check if HW is enabled */
  1068. if((status & HANTRODEC_DEC_E) && dec_owner[i] == filp) {
  1069. pr_info("hantrodec: CORE[%d] reset becuase of timeout\n", i);
  1070. *id = i;
  1071. /* abort decoder */
  1072. status |= HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE;
  1073. iowrite32(status, (void*)(dev->hwregs[i][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
  1074. break;
  1075. }
  1076. }
  1077. }
  1078. #else
  1079. ret = wait_event_interruptible(dec_wait_queue, CheckCoreIrq(dev, filp, id));
  1080. if(ret) {
  1081. PDEBUG("CORE[%d] wait_event_interruptible interrupted with 0x%x\n", *id, ret);
  1082. return -ERESTARTSYS;
  1083. }
  1084. #endif
  1085. atomic_inc(&irq_tx);
  1086. return 0;
  1087. }
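/*
 * Descriptive note on the IRQ handshake (added for clarity, summarizing the
 * code above and hantrodec_isr() below): the ISR sets the per-core bit in
 * dec_irq under owner_lock and wakes dec_wait_queue; CheckCoreIrq() consumes
 * that bit only when dec_owner[core] matches the waiting file handle, so a
 * wake-up meant for another client leaves its bit in place. WaitCoreReady()
 * simply sleeps on that condition and reports the ready core id to the caller.
 */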
  1088. /*------------------------------------------------------------------------------
  1089. Function name : hantrodec_ioctl
  1090. Description : communication method to/from the user space
  1091. Return type : long
  1092. ------------------------------------------------------------------------------*/
  1093. static long hantrodec_ioctl(struct file *filp, unsigned int cmd,
  1094. unsigned long arg) {
  1095. int err = 0;
  1096. long tmp;
  1097. u32 i = 0;
  1098. #ifdef CLK_CFG
  1099. unsigned long flags;
  1100. #endif
  1101. #ifdef HW_PERFORMANCE
  1102. struct timeval *end_time_arg;
  1103. #endif
  1104. PDEBUG("ioctl cmd 0x%08x\n", cmd);
  1105. /*
  1106. * extract the type and number bitfields, and don't decode
  1107. * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
  1108. */
  1109. if (_IOC_TYPE(cmd) != HANTRODEC_IOC_MAGIC &&
  1110. _IOC_TYPE(cmd) != HANTRO_IOC_MMU &&
  1111. _IOC_TYPE(cmd) != MEMORY_IOC_MAGIC &&
  1112. _IOC_TYPE(cmd) != HANTRO_VCMD_IOC_MAGIC)
  1113. return -ENOTTY;
  1114. if ((_IOC_TYPE(cmd) == HANTRODEC_IOC_MAGIC &&
  1115. _IOC_NR(cmd) > HANTRODEC_IOC_MAXNR) ||
  1116. (_IOC_TYPE(cmd) == HANTRO_IOC_MMU &&
  1117. _IOC_NR(cmd) > HANTRO_IOC_MMU_MAXNR) ||
  1118. (_IOC_TYPE(cmd) == MEMORY_IOC_MAGIC &&
  1119. _IOC_NR(cmd) > MEMORY_IOC_MAXNR) ||
  1120. (_IOC_TYPE(cmd) == HANTRO_VCMD_IOC_MAGIC &&
  1121. _IOC_NR(cmd) > HANTRO_VCMD_IOC_MAXNR))
  1122. return -ENOTTY;
  1123. /*
  1124. * the direction is a bitmask, and VERIFY_WRITE catches R/W
  1125. * transfers. `Type' is user-oriented, while
  1126. * access_ok is kernel-oriented, so the concept of "read" and
  1127. * "write" is reversed
  1128. */
  1129. if (_IOC_DIR(cmd) & _IOC_READ)
  1130. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
  1131. err = !access_ok((void *) arg, _IOC_SIZE(cmd));
  1132. #else
  1133. err = !access_ok(VERIFY_WRITE, (void *) arg, _IOC_SIZE(cmd));
  1134. #endif
  1135. else if (_IOC_DIR(cmd) & _IOC_WRITE)
  1136. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)
  1137. err = !access_ok((void *) arg, _IOC_SIZE(cmd));
  1138. #else
  1139. err = !access_ok(VERIFY_READ, (void *) arg, _IOC_SIZE(cmd));
  1140. #endif
  1141. if (err)
  1142. return -EFAULT;
  1143. #ifdef CLK_CFG
  1144. spin_lock_irqsave(&clk_lock, flags);
  1145. if (clk_cfg!=NULL && !IS_ERR(clk_cfg)&&(is_clk_on==0)) {
  1146. printk("turn on clock by user\n");
  1147. if (clk_enable(clk_cfg)) {
  1148. spin_unlock_irqrestore(&clk_lock, flags);
  1149. return -EFAULT;
  1150. } else
  1151. is_clk_on=1;
  1152. }
  1153. spin_unlock_irqrestore(&clk_lock, flags);
  1154. mod_timer(&timer, jiffies + 10*HZ); /*the interval is 10s*/
  1155. #endif
  1156. switch (cmd) {
  1157. case HANTRODEC_IOC_CLI: {
  1158. __u32 id;
  1159. __get_user(id, (__u32*)arg);
  1160. if(id >= hantrodec_data.cores) {
  1161. return -EFAULT;
  1162. }
  1163. disable_irq(hantrodec_data.irq[id]);
  1164. break;
  1165. }
  1166. case HANTRODEC_IOC_STI: {
  1167. __u32 id;
  1168. __get_user(id, (__u32*)arg);
  1169. if(id >= hantrodec_data.cores) {
  1170. return -EFAULT;
  1171. }
  1172. enable_irq(hantrodec_data.irq[id]);
  1173. break;
  1174. }
  1175. case HANTRODEC_IOCGHWOFFSET: {
  1176. __u32 id;
  1177. __get_user(id, (__u32*)arg);
  1178. if(id >= hantrodec_data.cores) {
  1179. return -EFAULT;
  1180. }
  1181. __put_user(multicorebase_actual[id], (unsigned long *) arg);
  1182. break;
  1183. }
  1184. case HANTRODEC_IOCGHWIOSIZE: {
  1185. struct regsize_desc core;
  1186. pm_runtime_resume_and_get(&hantrodec_data.pdev->dev);
  1187. /* get registers from user space*/
  1188. tmp = copy_from_user(&core, (void*)arg, sizeof(struct regsize_desc));
  1189. if (tmp) {
  1190. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1191. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1192. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1193. return -EFAULT;
  1194. }
  1195. if(core.id >= MAX_SUBSYS_NUM /*hantrodec_data.cores*/) {
  1196. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1197. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1198. return -EFAULT;
  1199. }
  1200. if (core.type == HW_SHAPER) {
  1201. u32 asic_id;
  1202. /* Shaper is configured with l2cache. */
  1203. if (vpu_subsys[core.id].submodule_hwregs[HW_L2CACHE]) {
  1204. asic_id = ioread32((void*)vpu_subsys[core.id].submodule_hwregs[HW_L2CACHE]);
  1205. switch ((asic_id >> 16) & 0x3) {
  1206. case 1: /* cache only */
  1207. core.size = 0; break;
  1208. case 0: /* cache + shaper */
  1209. case 2: /* shaper only*/
  1210. core.size = vpu_subsys[core.id].submodule_iosize[HW_L2CACHE];
  1211. break;
  1212. default:
  1213. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1214. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1215. return -EFAULT;
  1216. }
  1217. } else
  1218. core.size = 0;
  1219. } else {
  1220. core.size = vpu_subsys[core.id].submodule_iosize[core.type];
  1221. if (core.type == HW_VC8000D && !core.size &&
  1222. vpu_subsys[core.id].submodule_hwregs[HW_VC8000DJ]) {
  1223. /* If VC8000D doesn't exist but VC8000DJ does, return VC8000DJ's size. */
  1224. core.size = vpu_subsys[core.id].submodule_iosize[HW_VC8000DJ];
  1225. }
  1226. }
  1227. copy_to_user((u32 *) arg, &core, sizeof(struct regsize_desc));
  1228. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1229. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1230. return 0;
  1231. }
  1232. case HANTRODEC_IOC_MC_OFFSETS: {
  1233. tmp = copy_to_user((unsigned long *) arg, multicorebase_actual, sizeof(multicorebase_actual));
  1234. if (tmp) {
  1235. PDEBUG("copy_to_user failed, returned %li\n", tmp);
  1236. return -EFAULT;
  1237. }
  1238. break;
  1239. }
  1240. case HANTRODEC_IOC_MC_CORES:
  1241. __put_user(hantrodec_data.cores, (unsigned int *) arg);
  1242. PDEBUG("hantrodec_data.cores=%d\n", hantrodec_data.cores);
  1243. break;
  1244. case HANTRODEC_IOCS_DEC_PUSH_REG: {
  1245. struct core_desc core;
  1246. /* get registers from user space*/
  1247. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1248. if (tmp) {
  1249. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1250. return -EFAULT;
  1251. }
  1252. return DecFlushRegs(&hantrodec_data, &core);
  1253. }
  1254. case HANTRODEC_IOCS_DEC_WRITE_REG: {
  1255. struct core_desc core;
  1256. /* get registers from user space*/
  1257. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1258. if (tmp) {
  1259. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1260. return -EFAULT;
  1261. }
  1262. return DecWriteRegs(&hantrodec_data, &core);
  1263. }
  1264. case HANTRODEC_IOCS_DEC_WRITE_APBFILTER_REG: {
  1265. struct core_desc core;
  1266. /* get registers from user space*/
  1267. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1268. if (tmp) {
  1269. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1270. return -EFAULT;
  1271. }
  1272. return DecWriteApbFilterRegs(&hantrodec_data, &core);
  1273. }
  1274. case HANTRODEC_IOCS_PP_PUSH_REG: {
  1275. #if 0
  1276. struct core_desc core;
  1277. /* get registers from user space*/
  1278. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1279. if (tmp) {
  1280. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1281. return -EFAULT;
  1282. }
  1283. PPFlushRegs(&hantrodec_data, &core);
  1284. #else
  1285. return -EINVAL;
  1286. #endif
  1287. }
  1288. case HANTRODEC_IOCS_DEC_PULL_REG: {
  1289. printk("%s:case HANTRODEC_IOCS_DEC_PULL_REG\n",__func__);
  1290. struct core_desc core;
  1291. /* get registers from user space*/
  1292. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1293. if (tmp) {
  1294. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1295. return -EFAULT;
  1296. }
  1297. printk("%s:return DecRefreshRegs!\n",__func__);
  1298. return DecRefreshRegs(&hantrodec_data, &core);
  1299. }
  1300. case HANTRODEC_IOCS_DEC_READ_REG: {
  1301. struct core_desc core;
  1302. /* get registers from user space*/
  1303. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1304. if (tmp) {
  1305. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1306. return -EFAULT;
  1307. }
  1308. return DecReadRegs(&hantrodec_data, &core);
  1309. }
  1310. case HANTRODEC_IOCS_PP_PULL_REG: {
  1311. #if 0
  1312. struct core_desc core;
  1313. /* get registers from user space*/
  1314. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1315. if (tmp) {
  1316. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1317. return -EFAULT;
  1318. }
  1319. return PPRefreshRegs(&hantrodec_data, &core);
  1320. #else
  1321. return -EINVAL;
  1322. #endif
  1323. }
  1324. case HANTRODEC_IOCH_DEC_RESERVE: {
  1325. u32 format = 0;
  1326. __get_user(format, (unsigned long *)arg);
  1327. PDEBUG("Reserve DEC core, format = %li\n", format);
  1328. return ReserveDecoder(&hantrodec_data, filp, format);
  1329. }
  1330. case HANTRODEC_IOCT_DEC_RELEASE: {
  1331. u32 core = 0;
  1332. __get_user(core, (unsigned long *)arg);
  1333. if(core >= hantrodec_data.cores || dec_owner[core] != filp) {
  1334. PDEBUG("bogus DEC release, core = %li\n", core);
  1335. return -EFAULT;
  1336. }
  1337. PDEBUG("Release DEC, core = %li\n", core);
  1338. ReleaseDecoder(&hantrodec_data, core);
  1339. break;
  1340. }
  1341. case HANTRODEC_IOCQ_PP_RESERVE:
  1342. #if 0
  1343. return ReservePostProcessor(&hantrodec_data, filp);
  1344. #else
  1345. return -EINVAL;
  1346. #endif
  1347. case HANTRODEC_IOCT_PP_RELEASE: {
  1348. #if 0
  1349. if(arg != 0 || pp_owner[arg] != filp) {
  1350. PDEBUG("bogus PP release %li\n", arg);
  1351. return -EFAULT;
  1352. }
  1353. ReleasePostProcessor(&hantrodec_data, arg);
  1354. break;
  1355. #else
  1356. return -EINVAL;
  1357. #endif
  1358. }
  1359. case HANTRODEC_IOCX_DEC_WAIT: {
  1360. struct core_desc core;
  1361. /* get registers from user space */
  1362. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1363. if (tmp) {
  1364. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1365. return -EFAULT;
  1366. }
  1367. return WaitDecReadyAndRefreshRegs(&hantrodec_data, &core);
  1368. }
  1369. case HANTRODEC_IOCX_PP_WAIT: {
  1370. #if 0
  1371. struct core_desc core;
  1372. /* get registers from user space */
  1373. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_desc));
  1374. if (tmp) {
  1375. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1376. return -EFAULT;
  1377. }
  1378. return WaitPPReadyAndRefreshRegs(&hantrodec_data, &core);
  1379. #else
  1380. return -EINVAL;
  1381. #endif
  1382. }
  1383. case HANTRODEC_IOCG_CORE_WAIT: {
  1384. int id;
  1385. tmp = WaitCoreReady(&hantrodec_data, filp, &id);
  1386. __put_user(id, (int *) arg);
  1387. return tmp;
  1388. }
  1389. case HANTRODEC_IOX_ASIC_ID: {
  1390. struct core_param core;
  1391. /* get registers from user space*/
  1392. tmp = copy_from_user(&core, (void*)arg, sizeof(struct core_param));
  1393. if (tmp) {
  1394. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1395. return -EFAULT;
  1396. }
  1397. if (core.id >= MAX_SUBSYS_NUM /*hantrodec_data.cores*/ ||
  1398. ((core.type == HW_VC8000D || core.type == HW_VC8000DJ) &&
  1399. !vpu_subsys[core.id].submodule_iosize[HW_VC8000D] &&
  1400. !vpu_subsys[core.id].submodule_iosize[HW_VC8000DJ]) ||
  1401. ((core.type != HW_VC8000D && core.type != HW_VC8000DJ) &&
  1402. !vpu_subsys[core.id].submodule_iosize[core.type])) {
  1403. return -EFAULT;
  1404. }
  1405. core.size = vpu_subsys[core.id].submodule_iosize[core.type];
  1406. if (vpu_subsys[core.id].submodule_hwregs[core.type])
  1407. core.asic_id = ioread32((void*)hantrodec_data.hwregs[core.id][core.type]);
  1408. else if (core.type == HW_VC8000D &&
  1409. hantrodec_data.hwregs[core.id][HW_VC8000DJ]) {
  1410. core.asic_id = ioread32((void*)hantrodec_data.hwregs[core.id][HW_VC8000DJ]);
  1411. } else
  1412. core.asic_id = 0;
  1413. copy_to_user((u32 *) arg, &core, sizeof(struct core_param));
  1414. return 0;
  1415. }
  1416. case HANTRODEC_IOCG_CORE_ID: {
  1417. u32 format = 0;
  1418. __get_user(format, (unsigned long *)arg);
  1419. PDEBUG("Get DEC Core_id, format = %li\n", format);
  1420. return GetDecCoreID(&hantrodec_data, filp, format);
  1421. }
  1422. case HANTRODEC_IOX_ASIC_BUILD_ID: {
  1423. u32 id, hw_id;
  1424. __get_user(id, (u32*)arg);
  1425. if(id >= hantrodec_data.cores) {
  1426. return -EFAULT;
  1427. }
  1428. if (hantrodec_data.hwregs[id][HW_VC8000D] ||
  1429. hantrodec_data.hwregs[id][HW_VC8000DJ]) {
  1430. volatile u8 *hwregs;
  1431. /* VC8000D first if it exists, otherwise VC8000DJ. */
  1432. if (hantrodec_data.hwregs[id][HW_VC8000D])
  1433. hwregs = hantrodec_data.hwregs[id][HW_VC8000D];
  1434. else
  1435. hwregs = hantrodec_data.hwregs[id][HW_VC8000DJ];
  1436. hw_id = ioread32((void*)hwregs);
  1437. if (IS_G1(hw_id >> 16) || IS_G2(hw_id >> 16) ||
  1438. (IS_VC8000D(hw_id >> 16) && ((hw_id & 0xFFFF) == 0x6010)))
  1439. __put_user(hw_id, (u32 *) arg);
  1440. else {
  1441. hw_id = ioread32((void*)(hwregs + HANTRODEC_HW_BUILD_ID_OFF));
  1442. __put_user(hw_id, (u32 *) arg);
  1443. }
  1444. } else if (hantrodec_data.hwregs[id][HW_BIGOCEAN]) {
  1445. hw_id = ioread32((void*)(hantrodec_data.hwregs[id][HW_BIGOCEAN]));
  1446. if (IS_BIGOCEAN(hw_id >> 16))
  1447. __put_user(hw_id, (u32 *) arg);
  1448. else
  1449. return -EFAULT;
  1450. }
  1451. return 0;
  1452. }
  1453. case HANTRODEC_DEBUG_STATUS: {
  1454. pr_info("hantrodec: dec_irq = 0x%08x \n", dec_irq);
  1455. pr_info("hantrodec: pp_irq = 0x%08x \n", pp_irq);
  1456. pr_info("hantrodec: IRQs received/sent2user = %d / %d \n",
  1457. atomic_read(&irq_rx), atomic_read(&irq_tx));
  1458. for (tmp = 0; tmp < hantrodec_data.cores; tmp++) {
  1459. pr_info("hantrodec: dec_core[%li] %s\n",
  1460. tmp, dec_owner[tmp] == NULL ? "FREE" : "RESERVED");
  1461. pr_info("hantrodec: pp_core[%li] %s\n",
  1462. tmp, pp_owner[tmp] == NULL ? "FREE" : "RESERVED");
  1463. }
  1464. return 0;
  1465. }
  1466. case HANTRODEC_IOX_SUBSYS: {
  1467. struct subsys_desc subsys = {0};
  1468. /* TODO(min): check all the subsys */
  1469. pm_runtime_resume_and_get(&hantrodec_data.pdev->dev);
  1470. if (vcmd) {
  1471. subsys.subsys_vcmd_num = 1;
  1472. subsys.subsys_num = subsys.subsys_vcmd_num;
  1473. } else {
  1474. subsys.subsys_num = hantrodec_data.cores;
  1475. subsys.subsys_vcmd_num = 0;
  1476. }
  1477. copy_to_user((u32 *) arg, &subsys, sizeof(struct subsys_desc));
  1478. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1479. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1480. return 0;
  1481. }
  1482. case HANTRODEC_IOCX_POLL: {
  1483. hantrodec_isr(0, &hantrodec_data);
  1484. return 0;
  1485. }
  1486. case HANTRODEC_IOC_APBFILTER_CONFIG: {
  1487. struct apbfilter_cfg tmp_apbfilter;
  1488. /* get registers from user space*/
  1489. tmp = copy_from_user(&tmp_apbfilter, (void*)arg, sizeof(struct apbfilter_cfg));
  1490. if (tmp) {
  1491. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1492. return -EFAULT;
  1493. }
  1494. if(tmp_apbfilter.id >= MAX_SUBSYS_NUM || tmp_apbfilter.type >= HW_CORE_MAX) {
  1495. return -EFAULT;
  1496. }
  1497. apbfilter_cfg[tmp_apbfilter.id][tmp_apbfilter.type].id = tmp_apbfilter.id;
  1498. apbfilter_cfg[tmp_apbfilter.id][tmp_apbfilter.type].type = tmp_apbfilter.type;
  1499. memcpy(&tmp_apbfilter, &(apbfilter_cfg[tmp_apbfilter.id][tmp_apbfilter.type]), sizeof(struct apbfilter_cfg));
  1500. copy_to_user((u32 *) arg, &tmp_apbfilter, sizeof(struct apbfilter_cfg));
  1501. return 0;
  1502. }
  1503. case HANTRODEC_IOC_AXIFE_CONFIG: {
  1504. struct axife_cfg tmp_axife;
  1505. /* get registers from user space*/
  1506. tmp = copy_from_user(&tmp_axife, (void*)arg, sizeof(struct axife_cfg));
  1507. if (tmp) {
  1508. PDEBUG("copy_from_user failed, returned %li\n", tmp);
  1509. return -EFAULT;
  1510. }
  1511. if(tmp_axife.id >= MAX_SUBSYS_NUM) {
  1512. return -EFAULT;
  1513. }
  1514. axife_cfg[tmp_axife.id].id = tmp_axife.id;
  1515. memcpy(&tmp_axife, &(axife_cfg[tmp_axife.id]), sizeof(struct axife_cfg));
  1516. copy_to_user((u32 *) arg, &tmp_axife, sizeof(struct axife_cfg));
  1517. return 0;
  1518. }
  1519. default: {
  1520. if(_IOC_TYPE(cmd) == HANTRO_IOC_MMU) {
  1521. volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2];
  1522. for (i = 0; i < MAX_SUBSYS_NUM; i++ ) {
  1523. mmu_hwregs[i][0] = hantrodec_data.hwregs[i][HW_MMU];
  1524. mmu_hwregs[i][1] = hantrodec_data.hwregs[i][HW_MMU_WR];
  1525. }
  1526. pm_runtime_resume_and_get(&hantrodec_data.pdev->dev);
  1527. long retval = MMUIoctl(cmd, filp, arg, mmu_hwregs);
  1528. pm_runtime_mark_last_busy(&hantrodec_data.pdev->dev);
  1529. pm_runtime_put_autosuspend(&hantrodec_data.pdev->dev);
  1530. return retval;
  1531. } else if (_IOC_TYPE(cmd) == HANTRO_VCMD_IOC_MAGIC) {
  1532. return (hantrovcmd_ioctl(filp, cmd, arg));
  1533. } else if (_IOC_TYPE(cmd) == MEMORY_IOC_MAGIC) {
  1534. return (allocator_ioctl(filp, cmd, arg));
  1535. }
  1536. return -ENOTTY;
  1537. }
  1538. }
  1539. return 0;
  1540. }
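/*
 * Illustrative user-space sequence for the ioctl interface above. This is a
 * minimal sketch only, not part of the driver: it assumes the UAPI header
 * that defines these ioctl numbers and struct core_desc, the /dev/hantrodec
 * node created in probe, and caller-provided "regs"/"reg_size" for the
 * register array. Error handling is omitted.
 *
 *   int fd = open("/dev/hantrodec", O_RDWR);
 *   unsigned long fmt = 0;                         // format id ReserveDecoder() expects
 *   long core = ioctl(fd, HANTRODEC_IOCH_DEC_RESERVE, &fmt);
 *   struct core_desc desc = { .id = core, .regs = regs, .size = reg_size };
 *   ioctl(fd, HANTRODEC_IOCS_DEC_PUSH_REG, &desc); // write regs and start the core
 *   ioctl(fd, HANTRODEC_IOCX_DEC_WAIT, &desc);     // sleep until the decode IRQ
 *   ioctl(fd, HANTRODEC_IOCT_DEC_RELEASE, &core);  // hand the core back
 *   close(fd);
 */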
  1541. /*------------------------------------------------------------------------------
  1542. Function name : hantrodec_open
  1543. Description : open method
  1544. Return type : int
  1545. ------------------------------------------------------------------------------*/
  1546. static int hantrodec_open(struct inode *inode, struct file *filp) {
  1547. PDEBUG("dev opened\n");
  1548. pr_debug("==========%s:open!============\n",__func__);
  1549. if (vcmd)
  1550. hantrovcmd_open(inode, filp);
  1551. allocator_open(inode, filp);
  1552. return 0;
  1553. }
  1554. /*------------------------------------------------------------------------------
  1555. Function name : hantrodec_release
  1556. Description : Release driver
  1557. Return type : int
  1558. ------------------------------------------------------------------------------*/
  1559. static int hantrodec_release(struct inode *inode, struct file *filp) {
  1560. int n;
  1561. hantrodec_t *dev = &hantrodec_data;
  1562. PDEBUG("closing ...\n");
  1563. if (vcmd) {
  1564. hantrovcmd_release(inode, filp);
  1565. allocator_release(inode, filp);
  1566. return 0;
  1567. }
  1568. for(n = 0; n < dev->cores; n++) {
  1569. if(dec_owner[n] == filp) {
  1570. PDEBUG("releasing dec core %i lock\n", n);
  1571. ReleaseDecoder(dev, n);
  1572. }
  1573. }
  1574. for(n = 0; n < 1; n++) {
  1575. if(pp_owner[n] == filp) {
  1576. PDEBUG("releasing pp core %i lock\n", n);
  1577. ReleasePostProcessor(dev, n);
  1578. }
  1579. }
  1580. MMURelease(filp, hantrodec_data.hwregs[0][HW_MMU]);
  1581. allocator_release(inode, filp);
  1582. PDEBUG("closed\n");
  1583. return 0;
  1584. }
  1585. #ifdef CLK_CFG
  1586. void hantrodec_disable_clk(unsigned long value) {
  1587. unsigned long flags;
  1588. /* entering this function means the decoder has been idle past the expiry time, so disable the clock */
  1589. if (clk_cfg!=NULL && !IS_ERR(clk_cfg)) {
  1590. spin_lock_irqsave(&clk_lock, flags);
  1591. if (is_clk_on==1) {
  1592. clk_disable(clk_cfg);
  1593. is_clk_on = 0;
  1594. pr_info("turned off hantrodec clk\n");
  1595. }
  1596. spin_unlock_irqrestore(&clk_lock, flags);
  1597. }
  1598. }
  1599. #endif
  1600. /* VFS methods */
  1601. static struct file_operations hantrodec_fops = {
  1602. .owner = THIS_MODULE,
  1603. .open = hantrodec_open,
  1604. .release = hantrodec_release,
  1605. .unlocked_ioctl = hantrodec_ioctl,
  1606. .mmap = allocator_mmap,
  1607. .fasync = NULL
  1608. };
  1609. static int PcieInit(void) {
  1610. int i;
  1611. gDev = pci_get_device(PCI_VENDOR_ID_HANTRO, PCI_DEVICE_ID_HANTRO_PCI, gDev);
  1612. if (NULL == gDev) {
  1613. pr_info("Init: Hardware not found.\n");
  1614. goto out;
  1615. }
  1616. if (0 > pci_enable_device(gDev)) {
  1617. pr_info("PcieInit: Device not enabled.\n");
  1618. goto out;
  1619. }
  1620. gBaseHdwr = pci_resource_start (gDev, PCI_CONTROL_BAR);
  1621. if (0 == gBaseHdwr) {
  1622. pr_info("PcieInit: Base Address not set.\n");
  1623. goto out_pci_disable_device;
  1624. }
  1625. pr_info("Base hw val 0x%X\n", (unsigned int)gBaseHdwr);
  1626. gBaseLen = pci_resource_len (gDev, PCI_CONTROL_BAR);
  1627. pr_info("Base hw len 0x%X\n", (unsigned int)gBaseLen);
  1628. for (i = 0; i < MAX_SUBSYS_NUM; i++) {
  1629. if (vpu_subsys[i].base_addr) {
  1630. vpu_subsys[i].base_addr += gBaseHdwr;
  1631. multicorebase[i] += gBaseHdwr;
  1632. }
  1633. }
  1634. gBaseDDRHw = pci_resource_start (gDev, PCI_DDR_BAR);
  1635. if (0 == gBaseDDRHw) {
  1636. pr_info("PcieInit: Base Address not set.\n");
  1637. goto out_pci_disable_device;
  1638. }
  1639. pr_info("Base memory val 0x%llx\n", (unsigned int)gBaseDDRHw);
  1640. gBaseLen = pci_resource_len (gDev, PCI_DDR_BAR);
  1641. pr_info("Base memory len 0x%x\n", (unsigned int)gBaseLen);
  1642. return 0;
  1643. out_pci_disable_device:
  1644. pci_disable_device(gDev);
  1645. out:
  1646. return -1;
  1647. }
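/*
 * Note: in PCIe mode the base addresses in vpu_subsys[] and multicorebase[]
 * are BAR-relative, so PcieInit() rebases them by adding the start of
 * PCI_CONTROL_BAR before ReserveIO() requests and maps the regions.
 */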
  1648. static void dump_vpu_subsys(struct subsys_config *cfg)
  1649. {
  1650. int i;
  1651. pr_info("lucz: dumping subsys_config[0]\n");
  1652. pr_info(" base_addr=0x%llx\n", cfg->base_addr);
  1653. if (cfg->base_addr == 0) {
  1654. pr_info(" base_addr=0, not dump any more\n");
  1655. return;
  1656. }
  1657. pr_info(" irq=%d\n", cfg->irq);
  1658. pr_info(" subsys_type=%u\n", cfg->subsys_type);
  1659. pr_info(" submodule_offset=");
  1660. for (i = 0; i < HW_CORE_MAX; i++) {
  1661. pr_info(" 0x%x,", cfg->submodule_offset[i]);
  1662. }
  1663. pr_info("\n");
  1664. pr_info(" submodule_iosize=");
  1665. for (i = 0; i < HW_CORE_MAX; i++) {
  1666. pr_info(" %d,", cfg->submodule_iosize[i]);
  1667. }
  1668. pr_info("\n");
  1669. pr_info(" submodule_hwregs=");
  1670. for (i = 0; i < HW_CORE_MAX; i++) {
  1671. pr_info(" %p,", cfg->submodule_hwregs[i]);
  1672. }
  1673. pr_info("\n");
  1674. pr_info(" has_apbfilter=");
  1675. for (i = 0; i < HW_CORE_MAX; i++) {
  1676. pr_info(" %d,", cfg->has_apbfilter[i]);
  1677. }
  1678. pr_info("\n");
  1679. }
  1680. static ssize_t decoder_config_write(struct file *filp,
  1681. const char __user *userbuf,
  1682. size_t count, loff_t *ppos)
  1683. {
  1684. hantrodec_t *dev = &hantrodec_data;
  1685. unsigned long value;
  1686. int ret;
  1687. if (count > VC8000D_MAX_CONFIG_LEN)
  1688. count = VC8000D_MAX_CONFIG_LEN;
  1689. else if (count <= 2)
  1690. return 0;
  1691. ret = copy_from_user(dev->config_buf, userbuf, count);
  1692. if (ret) {
  1693. ret = -EFAULT;
  1694. goto out;
  1695. }
  1696. //pr_info("hantrodec config: %s\n", dev->config_buf);
  1697. switch (dev->config_buf[0]) {
  1698. case 'd':
  1699. value = simple_strtoul(&(dev->config_buf[1]), NULL, 10);
  1700. pm_runtime_set_autosuspend_delay(&dev->pdev->dev, value);
  1701. pr_info("Set pm runtime auto suspend delay to %ldms\n", value);
  1702. break;
  1703. default:
  1704. printk(KERN_WARNING "Unsupported config!\n");
  1705. }
  1706. out:
  1707. return ret < 0 ? ret : count;
  1708. }
  1709. static ssize_t decoder_config_read(struct file *filp,
  1710. char __user *userbuf,
  1711. size_t count, loff_t *ppos)
  1712. {
  1713. hantrodec_t *dev = &hantrodec_data;
  1714. memset(dev->config_buf, 0, VC8000D_MAX_CONFIG_LEN);
  1715. return 0;
  1716. }
  1717. static const struct file_operations decoder_debug_ops = {
  1718. .write = decoder_config_write,
  1719. .read = decoder_config_read,
  1720. .open = simple_open,
  1721. .llseek = generic_file_llseek,
  1722. };
  1723. static int decoder_add_debugfs(struct platform_device *pdev)
  1724. {
  1725. root_debugfs_dir = debugfs_create_dir("vc8000d",NULL);
  1726. if (!root_debugfs_dir) {
  1727. dev_err(&pdev->dev, "Failed to create vc8000d debugfs\n");
  1728. return -EINVAL;
  1729. }
  1730. dev_info(&pdev->dev, "Create vc8000d debugfs.\n");
  1731. debugfs_create_file("config", 0600, root_debugfs_dir,
  1732. &hantrodec_data, &decoder_debug_ops);
  1733. return 0;
  1734. }
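/*
 * Debugfs usage example (illustrative; the conventional /sys/kernel/debug
 * mount point is assumed). Only the 'd' command is understood by
 * decoder_config_write() at the moment:
 *
 *   # set the runtime-PM autosuspend delay to 500 ms
 *   echo d500 > /sys/kernel/debug/vc8000d/config
 */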
  1735. /*------------------------------------------------------------
  1736. platform register
  1737. ------------------------------------------------------------*/
  1738. static const struct of_device_id isp_of_match[] = {
  1739. { .compatible = "thead,light-vc8000d", },
  1740. { /* sentinel */ },
  1741. };
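/*
 * Device-tree sketch matching this driver (illustrative only; the reg,
 * interrupt and clock/power-domain phandle values are placeholders, not
 * taken from a real platform). The clock names and the "vdec" node name
 * follow devm_clk_get() in probe and check_power_domain() below:
 *
 *   vdec: vdec@ffecc00000 {
 *       compatible = "thead,light-vc8000d";
 *       reg = <0xff 0xecc00000 0x0 0x8000>;
 *       interrupts = <...>;
 *       clocks = <&clk CCLK>, <&clk ACLK>, <&clk PCLK>;
 *       clock-names = "cclk", "aclk", "pclk";
 *       power-domains = <&pd ...>;    // optional, enables the MMU/VCMD restore path
 *   };
 */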
  1742. static int check_power_domain(void)
  1743. {
  1744. struct device_node *dn = NULL;
  1745. struct property *info = NULL;
  1746. dn = of_find_node_by_name(NULL, "vdec");
  1747. if (dn != NULL)
  1748. info = of_find_property(dn, "power-domains", NULL);
  1749. pr_debug("%s, %d: power gating is %s\n", __func__, __LINE__,
  1750. (info == NULL) ? "disabled" : "enabled");
  1751. return (info == NULL) ? 0 : 1;
  1752. }
  1753. static int decoder_runtime_suspend(struct device *dev)
  1754. {
  1755. hantrodec_t *decdev = &hantrodec_data;
  1756. pr_debug("%s, %d: Disable clock\n", __func__, __LINE__);
  1757. clk_disable_unprepare(decdev->cclk);
  1758. clk_disable_unprepare(decdev->aclk);
  1759. clk_disable_unprepare(decdev->pclk);
  1760. return 0;
  1761. }
  1762. static int decoder_runtime_resume(struct device *dev)
  1763. {
  1764. hantrodec_t *decdev = &hantrodec_data;
  1765. int ret;
  1766. ret = clk_prepare_enable(decdev->cclk);
  1767. if (ret < 0) {
  1768. dev_err(dev, "could not prepare or enable core clock\n");
  1769. return ret;
  1770. }
  1771. ret = clk_prepare_enable(decdev->aclk);
  1772. if (ret < 0) {
  1773. dev_err(dev, "could not prepare or enable axi clock\n");
  1774. clk_disable_unprepare(decdev->cclk);
  1775. return ret;
  1776. }
  1777. ret = clk_prepare_enable(decdev->pclk);
  1778. if (ret < 0) {
  1779. dev_err(dev, "could not prepare or enable apb clock\n");
  1780. clk_disable_unprepare(decdev->cclk);
  1781. clk_disable_unprepare(decdev->aclk);
  1782. return ret;
  1783. }
  1784. if (hantrodec_data.has_power_domains) {
  1785. if (hantrodec_data.hwregs[0][HW_MMU]) {
  1786. int i;
  1787. volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2];
  1788. for (i = 0; i < MAX_SUBSYS_NUM; i++ ) {
  1789. mmu_hwregs[i][0] = hantrodec_data.hwregs[i][HW_MMU];
  1790. mmu_hwregs[i][1] = hantrodec_data.hwregs[i][HW_MMU_WR];
  1791. }
  1792. MMURestore(mmu_hwregs);
  1793. }
  1794. hantrovcmd_reset();
  1795. }
  1796. pr_debug("%s, %d: Enabled clock\n", __func__, __LINE__);
  1797. return 0;
  1798. }
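/*
 * Runtime-PM pattern used throughout this file (descriptive note): paths that
 * touch the hardware call pm_runtime_resume_and_get() first, which ends up in
 * decoder_runtime_resume() above, and pair it with pm_runtime_mark_last_busy()
 * plus pm_runtime_put_autosuspend() when done, so decoder_runtime_suspend()
 * gates the clocks again once the autosuspend delay (set in probe, adjustable
 * through the debugfs "config" file) expires.
 */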
  1799. static int decoder_hantrodec_probe(struct platform_device *pdev)
  1800. {
  1801. printk("enter %s\n",__func__);
  1802. printk("pcie=%d\n",pcie);
  1803. int result, i;
  1804. struct resource *mem;
  1805. //struct decoder_driver_device *pdriver_dev;
  1806. enum MMUStatus status = 0;
  1807. enum MMUStatus mmu_status = MMU_STATUS_FALSE;
  1808. volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2];
  1809. //pdriver_dev = devm_kzalloc(&pdev->dev,sizeof(struct decoder_driver_device),GFP_KERNEL);
  1810. // if(pdriver_dev == NULL)
  1811. //{
  1812. // pr_err("%s:alloc struct deocder_driver_device error!\n",__func__);
  1813. // return -ENOMEM;
  1814. /// }
  1815. //pdriver_dev->hantrodec_class = class_create(THIS_MODULE,"hantrodec");
  1816. printk("%s:init variable is ok!\n",__func__);
  1817. mem = platform_get_resource(pdev,IORESOURCE_MEM,0);
  1818. printk("%s:get resource is ok!\n",__func__);
  1819. //devm_ioremap_resource(&pdev->dev,mem);
  1820. if(mem->start)
  1821. base_port = mem->start;
  1822. else
  1823. printk("%s:mem->start is not exist!\n",__func__);
  1824. printk("%s:start get irq!\n",__func__);
  1825. PDEBUG("module init\n");
  1826. CheckSubsysCoreArray(vpu_subsys, &vcmd);
  1827. irq[0] = platform_get_irq(pdev,0);
  1828. printk("%s:get irq!\n",__func__);
  1829. printk("%s:base_port=0x%llx,irq=%d\n",__func__,base_port,irq[0]);
  1830. printk("%s:pcie=%d\n",__func__,pcie);
  1831. if (pcie) {
  1832. result = PcieInit();
  1833. if(result)
  1834. goto err;
  1835. }
  1836. pr_info("hantrodec: dec/pp kernel module. \n");
  1837. /* If base_port is set when insmod, use that for single core legacy mode. */
  1838. if (base_port != -1) {
  1839. multicorebase[0] = base_port;
  1840. if (pcie)
  1841. multicorebase[0] += HANTRO_REG_OFFSET0;
  1842. elements = 1;
  1843. vpu_subsys[0].base_addr = base_port;
  1844. pr_info("hantrodec: Init single core at 0x%08lx IRQ=%i\n",
  1845. multicorebase[0], irq[0]);
  1846. } else {
  1847. pr_info("hantrodec: Init multi core[0] at 0x%16lx\n"
  1848. " core[1] at 0x%16lx\n"
  1849. " core[2] at 0x%16lx\n"
  1850. " core[3] at 0x%16lx\n"
  1851. " IRQ_0=%i\n"
  1852. " IRQ_1=%i\n",
  1853. multicorebase[0], multicorebase[1],
  1854. multicorebase[2], multicorebase[3],
  1855. irq[0],irq[1]);
  1856. }
  1857. hantrodec_data.pdev = pdev;
  1858. hantrodec_data.cores = 0;
  1859. hantrodec_data.iosize[0] = DEC_IO_SIZE_0;
  1860. hantrodec_data.irq[0] = irq[0];
  1861. hantrodec_data.iosize[1] = DEC_IO_SIZE_1;
  1862. hantrodec_data.irq[1] = irq[1];
  1863. //extern void dump_core_array(void);
  1864. //dump_vpu_subsys(&(vpu_subsys[0]));
  1865. //dump_core_array();
  1866. pr_info("hantrodec_data.irq=%d\n",
  1867. hantrodec_data.irq[0]);
  1868. for(i=0; i< HXDEC_MAX_CORES; i++) {
  1869. int j;
  1870. for (j = 0; j < HW_CORE_MAX; j++)
  1871. hantrodec_data.hwregs[i][j] = 0;
  1872. /* If the user gave fewer core bases than we have by default,
  1873.  * invalidate the default bases
  1874. */
  1875. if(elements && i>=elements) {
  1876. multicorebase[i] = 0;
  1877. }
  1878. }
  1879. hantrodec_data.async_queue_dec = NULL;
  1880. hantrodec_data.async_queue_pp = NULL;
  1881. hantrodec_data.has_power_domains = check_power_domain();
  1882. hantrodec_data.cclk = devm_clk_get(&pdev->dev, "cclk");
  1883. if (IS_ERR(hantrodec_data.cclk)) {
  1884. dev_err(&pdev->dev, "failed to get core clock\n");
  1885. goto err;
  1886. }
  1887. hantrodec_data.aclk = devm_clk_get(&pdev->dev, "aclk");
  1888. if (IS_ERR(hantrodec_data.aclk)) {
  1889. dev_err(&pdev->dev, "failed to get axi clock\n");
  1890. goto err;
  1891. }
  1892. hantrodec_data.pclk = devm_clk_get(&pdev->dev, "pclk");
  1893. if (IS_ERR(hantrodec_data.pclk)) {
  1894. dev_err(&pdev->dev, "failed to get apb clock\n");
  1895. goto err;
  1896. }
  1897. pm_runtime_set_autosuspend_delay(&pdev->dev, VC8000D_PM_TIMEOUT);
  1898. pm_runtime_use_autosuspend(&pdev->dev);
  1899. pm_runtime_enable(&pdev->dev);
  1900. if (!pm_runtime_enabled(&pdev->dev)) {
  1901. if (decoder_runtime_resume(&pdev->dev))
  1902. {
  1903. pm_runtime_disable(&pdev->dev);
  1904. pm_runtime_dont_use_autosuspend(&pdev->dev);
  1905. }
  1906. }
  1907. pm_runtime_resume_and_get(&pdev->dev);
  1908. if (hantrodec_major == 0)
  1909. {
  1910. result = alloc_chrdev_region(&hantrodec_devt, 0, 1, "hantrodec");
  1911. if (result != 0)
  1912. {
  1913. printk(KERN_ERR "%s: alloc_chrdev_region error\n", __func__);
  1914. goto err;
  1915. }
  1916. hantrodec_major = MAJOR(hantrodec_devt);
  1917. hantrodec_minor = MINOR(hantrodec_devt);
  1918. }
  1919. else
  1920. {
  1921. hantrodec_devt = MKDEV(hantrodec_major, hantrodec_minor);
  1922. result = register_chrdev_region(hantrodec_devt, 1, "hantrodec");
  1923. if (result)
  1924. {
  1925. printk(KERN_ERR "%s: register_chrdev_region error\n", __func__);
  1926. goto err;
  1927. }
  1928. }
  1929. hantrodec_class = class_create(THIS_MODULE, "hantrodec");
  1930. if (IS_ERR(hantrodec_class))
  1931. {
  1932. printk(KERN_ERR "%s, %d: class_create error!\n", __func__, __LINE__);
  1933. goto err;
  1934. }
  1935. hantrodec_devt = MKDEV(hantrodec_major, hantrodec_minor);
  1936. cdev_init(&hantrodec_cdev, &hantrodec_fops);
  1937. result = cdev_add(&hantrodec_cdev, hantrodec_devt, 1);
  1938. if ( result )
  1939. {
  1940. printk(KERN_ERR "%s, %d: cdev_add error!\n", __func__, __LINE__);
  1941. goto err;
  1942. }
  1943. device_create(hantrodec_class, NULL, hantrodec_devt,
  1944. NULL, "hantrodec");
  1945. #ifdef CLK_CFG
  1946. /* first get clk instance pointer */
  1947. clk_cfg = clk_get(NULL, CLK_ID);
  1948. if (!clk_cfg||IS_ERR(clk_cfg)) {
  1949. printk("get handrodec clk failed!\n");
  1950. goto err;
  1951. }
  1952. /* prepare and enable clk */
  1953. if(clk_prepare_enable(clk_cfg)) {
  1954. printk("try to enable handrodec clk failed!\n");
  1955. goto err;
  1956. }
  1957. is_clk_on = 1;
  1958. /*init a timer to disable clk*/
  1959. init_timer(&timer);
  1960. timer.function = &hantrodec_disable_clk;
  1961. timer.expires = jiffies + 100*HZ; //the expires time is 100s
  1962. add_timer(&timer);
  1963. #endif
  1964. result = ReserveIO();
  1965. if(result < 0) {
  1966. goto err;
  1967. }
  1968. for (i = 0; i < hantrodec_data.cores; i++) {
  1969. AXIFEEnable(hantrodec_data.hwregs[i][HW_AXIFE]);
  1970. }
  1971. /* The MMU is initialized only once, no matter how many MMUs we have */
  1972. if (hantrodec_data.hwregs[0][HW_MMU]) {
  1973. status = MMUInit(hantrodec_data.hwregs[0][HW_MMU]);
  1974. if(status == MMU_STATUS_NOT_FOUND)
  1975. pr_info("MMU does not exist!\n");
  1976. else if(status != MMU_STATUS_OK)
  1977. goto err;
  1978. else
  1979. pr_info("MMU detected!\n");
  1980. for (i = 0; i < MAX_SUBSYS_NUM; i++ ) {
  1981. mmu_hwregs[i][0] = hantrodec_data.hwregs[i][HW_MMU];
  1982. mmu_hwregs[i][1] = hantrodec_data.hwregs[i][HW_MMU_WR];
  1983. }
  1984. mmu_status = MMUEnable(mmu_hwregs);
  1985. }
  1986. allocator_init(&pdev->dev);
  1987. decoder_add_debugfs(pdev);
  1988. if (vcmd) {
  1989. /* unmap and release mem region for VCMD, since it will be mapped and
  1990. reserved again in hantro_vcmd.c*/
  1991. for (i = 0; i < hantrodec_data.cores; i++) {
  1992. if (hantrodec_data.hwregs[i][HW_VCMD]) {
  1993. iounmap((void *)hantrodec_data.hwregs[i][HW_VCMD]);
  1994. release_mem_region(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[HW_VCMD],
  1995. vpu_subsys[i].submodule_iosize[HW_VCMD]);
  1996. hantrodec_data.hwregs[i][HW_VCMD] = 0;
  1997. }
  1998. }
  1999. result = hantrovcmd_init(pdev);
  2000. pm_runtime_mark_last_busy(&pdev->dev);
  2001. pm_runtime_put_autosuspend(&pdev->dev);
  2002. if (result) return result;
  2003. pr_info("PM runtime was enable\n");
  2004. return 0;
  2005. }
  2006. memset(dec_owner, 0, sizeof(dec_owner));
  2007. memset(pp_owner, 0, sizeof(pp_owner));
  2008. sema_init(&dec_core_sem, hantrodec_data.cores);
  2009. sema_init(&pp_core_sem, 1);
  2010. /* read configuration of all cores */
  2011. ReadCoreConfig(&hantrodec_data);
  2012. /* reset hardware */
  2013. ResetAsic(&hantrodec_data);
  2014. /* register irq for each core */
  2015. if(irq[0] > 0) {
  2016. result = request_irq(irq[0], hantrodec_isr,
  2017. //#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
  2018. // SA_INTERRUPT | SA_SHIRQ,
  2019. //#else
  2020. // IRQF_SHARED,
  2021. //#endif
  2022. IRQF_TRIGGER_RISING,
  2023. "hantrodec", (void *) &hantrodec_data);
  2024. if(result != 0) {
  2025. if(result == -EINVAL) {
  2026. printk(KERN_ERR "hantrodec: Bad irq number or handler\n");
  2027. } else if(result == -EBUSY) {
  2028. printk(KERN_ERR "hantrodec: IRQ <%d> busy, change your config\n",
  2029. hantrodec_data.irq[0]);
  2030. }
  2031. ReleaseIO();
  2032. goto err;
  2033. }
  2034. } else {
  2035. pr_info("hantrodec: IRQ not in use!\n");
  2036. }
  2037. if(irq[1] > 0) {
  2038. result = request_irq(irq[1], hantrodec_isr,
  2039. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
  2040. SA_INTERRUPT | SA_SHIRQ,
  2041. #else
  2042. IRQF_SHARED,
  2043. #endif
  2044. "hantrodec", (void *) &hantrodec_data);
  2045. if(result != 0) {
  2046. if(result == -EINVAL) {
  2047. printk(KERN_ERR "hantrodec: Bad irq number or handler\n");
  2048. } else if(result == -EBUSY) {
  2049. printk(KERN_ERR "hantrodec: IRQ <%d> busy, change your config\n",
  2050. hantrodec_data.irq[1]);
  2051. }
  2052. ReleaseIO();
  2053. goto err;
  2054. }
  2055. } else {
  2056. pr_info("hantrodec: IRQ not in use!\n");
  2057. }
  2058. for (i = 0; i < hantrodec_data.cores; i++) {
  2059. volatile u8 *hwregs = hantrodec_data.hwregs[i][HW_VC8000D];
  2060. if (hwregs) {
  2061. pr_info("hantrodec: VC8000D [%d] has build id 0x%08x\n",
  2062. i, ioread32((void*)(hwregs + HANTRODEC_HW_BUILD_ID_OFF)));
  2063. }
  2064. }
  2065. pm_runtime_mark_last_busy(&pdev->dev);
  2066. pm_runtime_put_autosuspend(&pdev->dev);
  2067. pr_info("hantrodec: module inserted. Major = %d\n", hantrodec_major);
  2068. /* Please call the TEE functions to set the VC8000D DRM-related registers here */
  2069. return 0;
  2070. err:
  2071. if (root_debugfs_dir) {
  2072. debugfs_remove_recursive(root_debugfs_dir);
  2073. root_debugfs_dir = NULL;
  2074. }
  2075. ReleaseIO();
  2076. pr_info("hantrodec: module not inserted\n");
  2077. pm_runtime_mark_last_busy(&pdev->dev);
  2078. pm_runtime_put_autosuspend(&pdev->dev);
  2079. unregister_chrdev_region(hantrodec_devt, 1);
  2080. return result;
  2081. }
  2082. static int decoder_hantrodec_remove(struct platform_device *pdev)
  2083. {
  2084. hantrodec_t *dev = &hantrodec_data;
  2085. int i, n = 0;
  2086. volatile u8* mmu_hwregs[MAX_SUBSYS_NUM][2];
  2087. if (root_debugfs_dir) {
  2088. debugfs_remove_recursive(root_debugfs_dir);
  2089. root_debugfs_dir = NULL;
  2090. }
  2091. pm_runtime_resume_and_get(&pdev->dev);
  2092. if(irq[0] > 0)
  2093. {
  2094. free_irq(irq[0],(void *) &hantrodec_data);
  2095. }
  2096. if(irq[1] > 0)
  2097. {
  2098. free_irq(irq[1],(void *) &hantrodec_data);
  2099. }
  2100. for (i = 0; i < MAX_SUBSYS_NUM; i++ ) {
  2101. mmu_hwregs[i][0] = dev->hwregs[i][HW_MMU];
  2102. mmu_hwregs[i][1] = dev->hwregs[i][HW_MMU_WR];
  2103. }
  2104. if (dev->hwregs[0][HW_MMU] || dev->hwregs[1][HW_MMU] ||
  2105. dev->hwregs[2][HW_MMU] || dev->hwregs[3][HW_MMU])
  2106. MMUCleanup(mmu_hwregs);
  2107. if (vcmd) {
  2108. hantrovcmd_cleanup(pdev);
  2109. } else {
  2110. /* reset hardware */
  2111. ResetAsic(dev);
  2112. /* free the IRQ */
  2113. for (n = 0; n < dev->cores; n++) {
  2114. if(dev->irq[n] != -1) {
  2115. free_irq(dev->irq[n], (void *) dev);
  2116. }
  2117. }
  2118. }
  2119. ReleaseIO();
  2120. #ifdef CLK_CFG
  2121. if (clk_cfg!=NULL && !IS_ERR(clk_cfg)) {
  2122. clk_disable_unprepare(clk_cfg);
  2123. is_clk_on = 0;
  2124. printk("turned off hantrodec clk\n");
  2125. }
  2126. /*delete timer*/
  2127. del_timer(&timer);
  2128. #endif
  2129. pm_runtime_mark_last_busy(&pdev->dev);
  2130. pm_runtime_put_autosuspend(&pdev->dev);
  2131. pm_runtime_disable(&pdev->dev);
  2132. if (!pm_runtime_status_suspended(&pdev->dev))
  2133. decoder_runtime_suspend(&pdev->dev);
  2134. cdev_del(&hantrodec_cdev);
  2135. device_destroy(hantrodec_class, hantrodec_devt);
  2136. unregister_chrdev_region(hantrodec_devt, 1);
  2137. class_destroy(hantrodec_class);
  2138. pr_info("hantrodec: module removed\n");
  2139. return 0;
  2140. }
  2141. static const struct dev_pm_ops decoder_runtime_pm_ops = {
  2142. SET_RUNTIME_PM_OPS(decoder_runtime_suspend, decoder_runtime_resume, NULL)
  2143. };
  2144. static struct platform_driver decoder_hantrodec_driver = {
  2145. .probe = decoder_hantrodec_probe,
  2146. .remove = decoder_hantrodec_remove,
  2147. .driver = {
  2148. .name = "decoder_hantrodec",
  2149. .owner = THIS_MODULE,
  2150. .of_match_table = of_match_ptr(isp_of_match),
  2151. .pm = &decoder_runtime_pm_ops,
  2152. }
  2153. };
  2154. /*------------------------------------------------------------------------------
  2155. Function name : hantrodec_init
  2156. Description : Initialize the driver
  2157. Return type : int
  2158. ------------------------------------------------------------------------------*/
  2159. int __init hantrodec_init(void) {
  2160. int ret = 0;
  2161. printk("enter %s\n",__func__);
  2162. ret = platform_driver_register(&decoder_hantrodec_driver);
  2163. if(ret)
  2164. {
  2165. pr_err("register platform driver failed!\n");
  2166. }
  2167. return ret;
  2168. }
  2169. /*------------------------------------------------------------------------------
  2170. Function name : hantrodec_cleanup
  2171. Description : clean up
  2172. Return type : int
  2173. ------------------------------------------------------------------------------*/
  2174. void __exit hantrodec_cleanup(void) {
  2175. printk("enter %s\n",__func__);
  2176. platform_driver_unregister(&decoder_hantrodec_driver);
  2177. return;
  2178. }
  2179. /*------------------------------------------------------------------------------
  2180. Function name : CheckHwId
  2181. Return type : int
  2182. ------------------------------------------------------------------------------*/
  2183. static int CheckHwId(hantrodec_t * dev) {
  2184. int hwid;
  2185. int i, j;
  2186. size_t num_hw = sizeof(DecHwId) / sizeof(*DecHwId);
  2187. int found = 0;
  2188. for (i = 0; i < dev->cores; i++) {
  2189. for (j = 0; j < HW_CORE_MAX; j++) {
  2190. if ((j == HW_VC8000D || j == HW_BIGOCEAN || j == HW_VC8000DJ) &&
  2191. dev->hwregs[i][j] != NULL) {
  2192. hwid = readl(dev->hwregs[i][j]);
  2193. pr_info("hantrodec: core %d HW ID=0x%08x\n", i, hwid);
  2194. hwid = (hwid >> 16) & 0xFFFF; /* product version only */
  2195. while (num_hw--) {
  2196. if (hwid == DecHwId[num_hw]) {
  2197. pr_info("hantrodec: Supported HW found at 0x%16lx\n",
  2198. vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j]);
  2199. found++;
  2200. dev->hw_id[i][j] = hwid;
  2201. break;
  2202. }
  2203. }
  2204. if (!found) {
  2205. pr_info("hantrodec: Unknown HW found at 0x%16lx\n",
  2206. multicorebase_actual[i]);
  2207. return 0;
  2208. }
  2209. found = 0;
  2210. num_hw = sizeof(DecHwId) / sizeof(*DecHwId);
  2211. }
  2212. }
  2213. }
  2214. return 1;
  2215. }
  2216. /*------------------------------------------------------------------------------
  2217. Function name : ReserveIO
  2218. Description : IO reserve
  2219. Return type : int
  2220. ------------------------------------------------------------------------------*/
  2221. static int ReserveIO(void) {
  2222. int i, j;
  2223. long int hwid;
  2224. u32 axife_config;
  2225. memcpy(multicorebase_actual, multicorebase, HXDEC_MAX_CORES * sizeof(unsigned long));
  2226. memcpy((unsigned int*)(hantrodec_data.iosize), iosize, HXDEC_MAX_CORES * sizeof(unsigned int));
  2227. memcpy((unsigned int*)(hantrodec_data.irq), irq, HXDEC_MAX_CORES * sizeof(int));
  2228. for (i = 0; i < MAX_SUBSYS_NUM; i++) {
  2229. if (!vpu_subsys[i].base_addr) continue;
  2230. for (j = 0; j < HW_CORE_MAX; j++) {
  2231. if (vpu_subsys[i].submodule_iosize[j]) {
  2232. pr_info("hantrodec: base=0x%16lx, iosize=%d\n",
  2233. vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2234. vpu_subsys[i].submodule_iosize[j]);
  2235. if (!request_mem_region(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2236. vpu_subsys[i].submodule_iosize[j],
  2237. "hantrodec0")) {
  2238. pr_info("hantrodec: failed to reserve HW %d regs\n", j);
  2239. return -EBUSY;
  2240. }
  2241. #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
  2242. vpu_subsys[i].submodule_hwregs[j] =
  2243. hantrodec_data.hwregs[i][j] =
  2244. (volatile u8 *) ioremap_nocache(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2245. vpu_subsys[i].submodule_iosize[j]);
  2246. #else
  2247. vpu_subsys[i].submodule_hwregs[j] =
  2248. hantrodec_data.hwregs[i][j] =
  2249. (volatile u8 *) ioremap(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2250. vpu_subsys[i].submodule_iosize[j]);
  2251. #endif
  2252. if (hantrodec_data.hwregs[i][j] == NULL) {
  2253. pr_info("hantrodec: failed to ioremap HW %d regs\n", j);
  2254. release_mem_region(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2255. vpu_subsys[i].submodule_iosize[j]);
  2256. return -EBUSY;
  2257. } else {
  2258. if (vpu_subsys[i].has_apbfilter[j]) {
  2259. apbfilter_cfg[i][j].has_apbfilter = 1;
  2260. hwid = ioread32((void*)(hantrodec_data.hwregs[i][HW_VC8000D]));
  2261. if (IS_BIGOCEAN((hwid >> 16) & 0xFFFF)) {
  2262. if (j == HW_BIGOCEAN) {
  2263. apbfilter_cfg[i][j].nbr_mask_regs = AV1_NUM_MASK_REG;
  2264. apbfilter_cfg[i][j].num_mode = AV1_NUM_MODE;
  2265. apbfilter_cfg[i][j].mask_reg_offset = AV1_MASK_REG_OFFSET;
  2266. apbfilter_cfg[i][j].mask_bits_per_reg = AV1_MASK_BITS_PER_REG;
  2267. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2268. }
  2269. if (j == HW_AXIFE) {
  2270. apbfilter_cfg[i][j].nbr_mask_regs = AXIFE_NUM_MASK_REG;
  2271. apbfilter_cfg[i][j].num_mode = AXIFE_NUM_MODE;
  2272. apbfilter_cfg[i][j].mask_reg_offset = AXIFE_MASK_REG_OFFSET;
  2273. apbfilter_cfg[i][j].mask_bits_per_reg = AXIFE_MASK_BITS_PER_REG;
  2274. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2275. }
  2276. } else {
  2277. hwid = ioread32((void*)(hantrodec_data.hwregs[i][HW_VC8000D] + HANTRODEC_HW_BUILD_ID_OFF));
  2278. if (hwid == 0x1F58) {
  2279. if (j == HW_VC8000D) {
  2280. apbfilter_cfg[i][j].nbr_mask_regs = VC8000D_NUM_MASK_REG;
  2281. apbfilter_cfg[i][j].num_mode = VC8000D_NUM_MODE;
  2282. apbfilter_cfg[i][j].mask_reg_offset = VC8000D_MASK_REG_OFFSET;
  2283. apbfilter_cfg[i][j].mask_bits_per_reg = VC8000D_MASK_BITS_PER_REG;
  2284. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2285. }
  2286. if (j == HW_AXIFE) {
  2287. apbfilter_cfg[i][j].nbr_mask_regs = AXIFE_NUM_MASK_REG;
  2288. apbfilter_cfg[i][j].num_mode = AXIFE_NUM_MODE;
  2289. apbfilter_cfg[i][j].mask_reg_offset = AXIFE_MASK_REG_OFFSET;
  2290. apbfilter_cfg[i][j].mask_bits_per_reg = AXIFE_MASK_BITS_PER_REG;
  2291. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2292. }
  2293. } else if (hwid == 0x1F59) {
  2294. if (j == HW_VC8000DJ) {
  2295. apbfilter_cfg[i][j].nbr_mask_regs = VC8000DJ_NUM_MASK_REG;
  2296. apbfilter_cfg[i][j].num_mode = VC8000DJ_NUM_MODE;
  2297. apbfilter_cfg[i][j].mask_reg_offset = VC8000DJ_MASK_REG_OFFSET;
  2298. apbfilter_cfg[i][j].mask_bits_per_reg = VC8000DJ_MASK_BITS_PER_REG;
  2299. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2300. }
  2301. if (j == HW_AXIFE) {
  2302. apbfilter_cfg[i][j].nbr_mask_regs = AXIFE_NUM_MASK_REG;
  2303. apbfilter_cfg[i][j].num_mode = AXIFE_NUM_MODE;
  2304. apbfilter_cfg[i][j].mask_reg_offset = AXIFE_MASK_REG_OFFSET;
  2305. apbfilter_cfg[i][j].mask_bits_per_reg = AXIFE_MASK_BITS_PER_REG;
  2306. apbfilter_cfg[i][j].page_sel_addr = apbfilter_cfg[i][j].mask_reg_offset + apbfilter_cfg[i][j].nbr_mask_regs * 4;
  2307. }
  2308. } else {
  2309. pr_info("hantrodec: furture APBFILTER can read those configure parameters from REG\n");
  2310. }
  2311. }
  2312. hantrodec_data.apbfilter_hwregs[i][j] = hantrodec_data.hwregs[i][j] + apbfilter_cfg[i][j].mask_reg_offset;
  2313. } else {
  2314. apbfilter_cfg[i][j].has_apbfilter = 0;
  2315. }
  2316. if (j == HW_AXIFE) {
  2317. hwid = ioread32((void*)(hantrodec_data.hwregs[i][j] + HANTRODEC_HW_BUILD_ID_OFF));
  2318. axife_config = ioread32((void*)(hantrodec_data.hwregs[i][j]));
  2319. axife_cfg[i].axi_rd_chn_num = axife_config & 0x7F;
  2320. axife_cfg[i].axi_wr_chn_num = (axife_config >> 7) & 0x7F;
  2321. axife_cfg[i].axi_rd_burst_length = (axife_config >> 14) & 0x1F;
  2322. axife_cfg[i].axi_wr_burst_length = (axife_config >> 22) & 0x1F;
  2323. axife_cfg[i].fe_mode = 0; /* need to read from reg in the future */
  2324. if (hwid == 0x1F66) {
  2325. axife_cfg[i].fe_mode = 1;
  2326. }
  2327. }
  2328. }
  2329. config.its_main_core_id[i] = -1;
  2330. config.its_aux_core_id[i] = -1;
  2331. pr_info("hantrodec: HW %d reg[0]=0x%08x\n", j, readl(hantrodec_data.hwregs[i][j]));
  2332. #ifdef SUPPORT_2ND_PIPELINES
  2333. if (j != HW_VC8000D) continue;
  2334. hwid = ((readl(hantrodec_data.hwregs[i][HW_VC8000D])) >> 16) & 0xFFFF; /* product version only */
  2335. if (IS_VC8000D(hwid)) {
  2336. u32 reg;
  2337. /*TODO(min): DO NOT support 2nd pipeline. */
  2338. reg = readl(hantrodec_data.hwregs[i][HW_VC8000D] + HANTRODEC_SYNTH_CFG_2_OFF);
  2339. if (((reg >> DWL_H264_PIPELINE_E) & 0x01U) || ((reg >> DWL_JPEG_PIPELINE_E) & 0x01U)) {
  2340. i++;
  2341. config.its_aux_core_id[i-1] = i;
  2342. config.its_main_core_id[i] = i-1;
  2343. config.its_aux_core_id[i] = -1;
  2344. multicorebase_actual[i] = multicorebase_actual[i-1] + 0x800;
  2345. hantrodec_data.iosize[i] = hantrodec_data.iosize[i-1];
  2346. memcpy(multicorebase_actual+i+1, multicorebase+i,
  2347. (HXDEC_MAX_CORES - i - 1) * sizeof(unsigned long));
  2348. memcpy((unsigned int*)hantrodec_data.iosize+i+1, iosize+i,
  2349. (HXDEC_MAX_CORES - i - 1) * sizeof(unsigned int));
  2350. if (!request_mem_region(multicorebase_actual[i], hantrodec_data.iosize[i],
  2351. "hantrodec0")) {
  2352. pr_info("hantrodec: failed to reserve HW regs\n");
  2353. return -EBUSY;
  2354. }
  2355. #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,17,0))
  2356. hantrodec_data.hwregs[i][HW_VC8000D] = (volatile u8 *) ioremap_nocache(multicorebase_actual[i],
  2357. hantrodec_data.iosize[i]);
  2358. #else
  2359. hantrodec_data.hwregs[i][HW_VC8000D] = (volatile u8 *) ioremap(multicorebase_actual[i],
  2360. hantrodec_data.iosize[i]);
  2361. #endif
  2362. if (hantrodec_data.hwregs[i][HW_VC8000D] == NULL ) {
  2363. pr_info("hantrodec: failed to ioremap HW regs\n");
  2364. ReleaseIO();
  2365. return -EBUSY;
  2366. }
  2367. hantrodec_data.cores++;
  2368. }
  2369. }
  2370. #endif
  2371. } else {
  2372. hantrodec_data.hwregs[i][j] = NULL;
  2373. }
  2374. }
  2375. hantrodec_data.cores++;
  2376. }
  2377. /* check for correct HW */
  2378. if (!CheckHwId(&hantrodec_data)) {
  2379. ReleaseIO();
  2380. return -EBUSY;
  2381. }
  2382. return 0;
  2383. }
  2384. /*------------------------------------------------------------------------------
  2385. Function name : ReleaseIO
  2386. Description : release
  2387. Return type : void
  2388. ------------------------------------------------------------------------------*/
  2389. static void ReleaseIO(void) {
  2390. int i, j;
  2391. for (i = 0; i < hantrodec_data.cores; i++) {
  2392. for (j = 0; j < HW_CORE_MAX; j++) {
  2393. if (hantrodec_data.hwregs[i][j]) {
  2394. iounmap((void *) hantrodec_data.hwregs[i][j]);
  2395. release_mem_region(vpu_subsys[i].base_addr + vpu_subsys[i].submodule_offset[j],
  2396. vpu_subsys[i].submodule_iosize[j]);
  2397. hantrodec_data.hwregs[i][j] = 0;
  2398. }
  2399. }
  2400. }
  2401. }
  2402. /*------------------------------------------------------------------------------
  2403. Function name : hantrodec_isr
  2404. Description : interrupt handler
  2405. Return type : irqreturn_t
  2406. ------------------------------------------------------------------------------*/
  2407. #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
  2408. irqreturn_t hantrodec_isr(int irq, void *dev_id, struct pt_regs *regs)
  2409. #else
  2410. irqreturn_t hantrodec_isr(int irq, void *dev_id)
  2411. #endif
  2412. {
  2413. printk("%s:start!\n",__func__);
  2414. unsigned long flags;
  2415. unsigned int handled = 0;
  2416. int i;
  2417. volatile u8 *hwregs;
  2418. hantrodec_t *dev = (hantrodec_t *) dev_id;
  2419. u32 irq_status_dec;
  2420. spin_lock_irqsave(&owner_lock, flags);
  2421. for(i=0; i<dev->cores; i++) {
  2422. volatile u8 *hwregs = dev->hwregs[i][HW_VC8000D];
  2423. /* interrupt status register read */
  2424. irq_status_dec = ioread32((void*)(hwregs + HANTRODEC_IRQ_STAT_DEC_OFF));
  2425. if(irq_status_dec & HANTRODEC_DEC_IRQ) {
  2426. /* clear dec IRQ */
  2427. irq_status_dec &= (~HANTRODEC_DEC_IRQ);
  2428. iowrite32(irq_status_dec, (void*)(hwregs + HANTRODEC_IRQ_STAT_DEC_OFF));
  2429. PDEBUG("decoder IRQ received! core %d\n", i);
  2430. atomic_inc(&irq_rx);
  2431. dec_irq |= (1 << i);
  2432. wake_up_interruptible_all(&dec_wait_queue);
  2433. handled++;
  2434. }
  2435. }
  2436. spin_unlock_irqrestore(&owner_lock, flags);
  2437. if(!handled) {
  2438. PDEBUG("IRQ received, but not hantrodec's!\n");
  2439. }
  2440. (void)hwregs;
  2441. printk("%s:end!\n",__func__);
  2442. return IRQ_RETVAL(handled);
  2443. }
  2444. /*------------------------------------------------------------------------------
  2445. Function name : ResetAsic
  2446. Description : reset asic (only VC8000D supports reset)
  2447. Return type :
  2448. ------------------------------------------------------------------------------*/
  2449. void ResetAsic(hantrodec_t * dev) {
  2450. int i, j;
  2451. u32 status;
  2452. for (j = 0; j < dev->cores; j++) {
  2453. if (!dev->hwregs[j][HW_VC8000D]) continue;
  2454. status = ioread32((void*)(dev->hwregs[j][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
  2455. if( status & HANTRODEC_DEC_E) {
  2456. /* abort with IRQ disabled */
  2457. status = HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE;
  2458. iowrite32(status, (void*)(dev->hwregs[j][HW_VC8000D] + HANTRODEC_IRQ_STAT_DEC_OFF));
  2459. }
  2460. if (IS_G1(dev->hw_id[j][HW_VC8000D]))
  2461. /* reset PP */
  2462. iowrite32(0, (void*)(dev->hwregs[j][HW_VC8000D] + HANTRO_IRQ_STAT_PP_OFF));
  2463. for (i = 4; i < dev->iosize[j]; i += 4) {
  2464. iowrite32(0, (void*)(dev->hwregs[j][HW_VC8000D] + i));
  2465. }
  2466. }
  2467. }
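/*
 * Note: ResetAsic() aborts a running core with interrupts masked
 * (HANTRODEC_DEC_ABORT | HANTRODEC_DEC_IRQ_DISABLE) and then clears every
 * register from offset 4 upwards, leaving swreg0 (read elsewhere as the
 * hardware ID register) untouched.
 */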
  2468. /*------------------------------------------------------------------------------
  2469. Function name : dump_regs
  2470. Description : Dump registers
  2471. Return type :
  2472. ------------------------------------------------------------------------------*/
  2473. #ifdef HANTRODEC_DEBUG
  2474. void dump_regs(hantrodec_t *dev) {
  2475. int i,c;
  2476. PDEBUG("Reg Dump Start\n");
  2477. for(c = 0; c < dev->cores; c++) {
  2478. for(i = 0; i < dev->iosize[c]; i += 4*4) {
  2479. PDEBUG("\toffset %04X: %08X %08X %08X %08X\n", i,
  2480. ioread32((void*)(dev->hwregs[c][HW_VC8000D] + i)),
  2481. ioread32((void*)(dev->hwregs[c][HW_VC8000D] + i + 4)),
  2482. ioread32((void*)(dev->hwregs[c][HW_VC8000D] + i + 8)),
  2483. ioread32((void*)(dev->hwregs[c][HW_VC8000D] + i + 12)));
  2484. }
  2485. }
  2486. PDEBUG("Reg Dump End\n");
  2487. }
  2488. #endif
  2489. module_init( hantrodec_init);
  2490. module_exit( hantrodec_cleanup);
  2491. /* module description */
  2492. MODULE_LICENSE("GPL");
  2493. MODULE_AUTHOR("VeriSilicon Microelectronics ");
  2494. MODULE_DESCRIPTION("driver module for Hantro video decoder VC8000D");