xvp_main.c

  1. /*
  2. * XRP: Linux device driver for Xtensa Remote Processing
  3. *
  4. * Copyright (c) 2015 - 2017 Cadence Design Systems, Inc.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining
  7. * a copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sublicense, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included
  15. * in all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  21. * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  22. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  23. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. *
  25. * Alternatively you can use and distribute this file under the terms of
  26. * the GNU General Public License version 2 or later.
  27. */
  28. #include <linux/version.h>
  29. #include <linux/atomic.h>
  30. #include <linux/acpi.h>
  31. #include <linux/completion.h>
  32. #include <linux/delay.h>
  33. #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
  34. #include <linux/dma-mapping.h>
  35. #else
  36. #include <linux/dma-direct.h>
  37. #endif
  38. #include <linux/firmware.h>
  39. #include <linux/fs.h>
  40. #include <linux/hashtable.h>
  41. #include <linux/highmem.h>
  42. #include <linux/idr.h>
  43. #include <linux/interrupt.h>
  44. #include <linux/io.h>
  45. #include <linux/kernel.h>
  46. #include <linux/module.h>
  47. #include <linux/of.h>
  48. #include <linux/of_address.h>
  49. #include <linux/of_device.h>
  50. #include <linux/of_reserved_mem.h>
  51. #include <linux/platform_device.h>
  52. #include <linux/pm_runtime.h>
  53. #include <linux/property.h>
  54. #include <linux/sched.h>
  55. #include <linux/slab.h>
  56. #include <linux/sort.h>
  57. #include <linux/timer.h>
  58. #include <linux/time.h>
  59. #include <linux/timex.h>
  60. #include <linux/dma-mapping.h>
  61. #include <linux/dma-buf.h>
  62. #include <asm/mman.h>
  63. #include <linux/mman.h>
  64. #include <asm/uaccess.h>
  65. #include "xrp_cma_alloc.h"
  66. #include "xrp_firmware.h"
  67. #include "xrp_hw.h"
  68. #include "xrp_internal.h"
  69. #include "xrp_kernel_defs.h"
  70. #include "xrp_kernel_dsp_interface.h"
  71. #include "xrp_private_alloc.h"
  72. #include "xrp_debug.h"
  73. #define DRIVER_NAME "xrp"
  74. #define XRP_DEFAULT_TIMEOUT 60
  75. #ifndef __io_virt
  76. #define __io_virt(a) ((void __force *)(a))
  77. #endif
  78. struct xrp_alien_mapping {
  79. unsigned long vaddr;
  80. unsigned long size;
  81. phys_addr_t paddr;
  82. void *allocation;
  83. enum {
  84. ALIEN_GUP,
  85. ALIEN_PFN_MAP,
  86. ALIEN_COPY,
  87. } type;
  88. };
  89. struct xrp_mapping {
  90. enum {
  91. XRP_MAPPING_NONE,
  92. XRP_MAPPING_NATIVE,
  93. XRP_MAPPING_ALIEN,
  94. XRP_MAPPING_KERNEL = 0x4,
  95. } type;
  96. union {
  97. struct {
  98. struct xrp_allocation *xrp_allocation;
  99. unsigned long vaddr;
  100. } native;
  101. struct xrp_alien_mapping alien_mapping;
  102. };
  103. };
  104. struct xvp_file {
  105. struct xvp *xvp;
  106. spinlock_t busy_list_lock;
  107. struct xrp_allocation *busy_list;
  108. };
  109. struct xrp_known_file {
  110. void *filp;
  111. struct hlist_node node;
  112. };
  113. struct xrp_dma_buf_item{
  114. struct list_head link;
  115. struct dma_buf *dmabuf;
  116. struct sg_table *sgt;
  117. struct dma_buf_attachment * attachment;
  118. int ref;
  119. };
  120. static int firmware_command_timeout = XRP_DEFAULT_TIMEOUT;
  121. module_param(firmware_command_timeout, int, 0644);
  122. MODULE_PARM_DESC(firmware_command_timeout, "Firmware command timeout in seconds.");
  123. static int firmware_reboot = 1;
  124. module_param(firmware_reboot, int, 0644);
  125. MODULE_PARM_DESC(firmware_reboot, "Reboot firmware on command timeout.");
  126. enum {
  127. LOOPBACK_NORMAL, /* normal work mode */
  128. LOOPBACK_NOIO, /* don't communicate with FW, but still load it and control DSP */
  129. LOOPBACK_NOMMIO, /* don't communicate with FW or use DSP MMIO, but still load the FW */
  130. LOOPBACK_NOFIRMWARE, /* communicate with FW and use DSP MMIO, but don't load the FW */
  131. LOOPBACK_NOFIRMWARE_NOMMIO, /* don't communicate with FW or use DSP MMIO, don't load the FW */
  132. };
  133. static int loopback = 0;
  134. module_param(loopback, int, 0644);
  135. MODULE_PARM_DESC(loopback, "Don't use actual DSP, perform everything locally.");
  136. static int load_mode = 0;
  137. module_param(load_mode, int, 0644);
  138. MODULE_PARM_DESC(load_mode, "Firmware load mode. 0: load by driver. 1: load by Xplorer for debugging.");
  139. enum {
  140. LOAD_MODE_AUTO, /* load firmware automatically by driver */
  141. LOAD_MODE_MANUAL, /* load firmware manually for debug */
  142. };
  143. static int heartbeat_period = 0;
  144. module_param(heartbeat_period, int, 0644);
  145. MODULE_PARM_DESC(heartbeat_period, "Heartbeat period in seconds. 0 disables the heartbeat.");
  146. static int dsp_fw_log_mode = 1;
  147. module_param(dsp_fw_log_mode, int, 0644);
  148. MODULE_PARM_DESC(dsp_fw_log_mode, "Firmware log mode. 0:disable,1:ERROR(default),2:WARNING,3:INFO,4:DEBUG,5:TRACE");
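/*
 * Usage note (illustrative, not part of the original source): module_param()
 * with mode 0644 exposes each knob both at load time and through sysfs.
 * Assuming the module is built as xrp.ko, a debug session might use:
 *
 *   insmod xrp.ko firmware_command_timeout=120 loopback=1 dsp_fw_log_mode=4
 *   echo 2 > /sys/module/xrp/parameters/dsp_fw_log_mode
 */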
  149. static DEFINE_HASHTABLE(xrp_known_files, 10);
  150. static DEFINE_SPINLOCK(xrp_known_files_lock);
  151. static DEFINE_SPINLOCK(xrp_dma_buf_lock);
  152. static DEFINE_IDA(xvp_nodeid);
  153. static int xrp_boot_firmware(struct xvp *xvp);
  154. static long xrp_copy_user_from_phys(struct xvp *xvp,
  155. unsigned long vaddr, unsigned long size,
  156. phys_addr_t paddr, unsigned long flags);
  157. static bool xrp_cacheable(struct xvp *xvp, unsigned long pfn,
  158. unsigned long n_pages)
  159. {
  160. if (xvp->hw_ops->cacheable) {
  161. return xvp->hw_ops->cacheable(xvp->hw_arg, pfn, n_pages);
  162. } else {
  163. unsigned long i;
  164. for (i = 0; i < n_pages; ++i)
  165. if (!pfn_valid(pfn + i))
  166. return false;
  167. return true;
  168. }
  169. }
  170. static int xrp_dma_direction(unsigned flags)
  171. {
  172. static const enum dma_data_direction xrp_dma_direction[] = {
  173. [0] = DMA_NONE,
  174. [XRP_FLAG_READ] = DMA_TO_DEVICE,
  175. [XRP_FLAG_WRITE] = DMA_FROM_DEVICE,
  176. [XRP_FLAG_READ_WRITE] = DMA_BIDIRECTIONAL,
  177. };
  178. return xrp_dma_direction[flags & XRP_FLAG_READ_WRITE];
  179. }
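/*
 * Note (added for clarity): the table above maps XRP buffer access flags to
 * DMA directions: a buffer flagged XRP_FLAG_READ is synced DMA_TO_DEVICE
 * (data flows to the DSP), XRP_FLAG_WRITE is DMA_FROM_DEVICE, and
 * XRP_FLAG_READ_WRITE is bidirectional.
 */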
  180. static void xrp_default_dma_sync_for_device(struct xvp *xvp,
  181. phys_addr_t phys,
  182. unsigned long size,
  183. unsigned long flags)
  184. {
  185. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, phys), size,
  186. xrp_dma_direction(flags));
  187. }
  188. static void xrp_dma_sync_for_device(struct xvp *xvp,
  189. unsigned long virt,
  190. phys_addr_t phys,
  191. unsigned long size,
  192. unsigned long flags)
  193. {
  194. if (xvp->hw_ops->dma_sync_for_device)
  195. xvp->hw_ops->dma_sync_for_device(xvp->hw_arg,
  196. (void *)virt, phys, size,
  197. flags);
  198. else
  199. xrp_default_dma_sync_for_device(xvp, phys, size, flags);
  200. }
  201. static void xrp_default_dma_sync_for_cpu(struct xvp *xvp,
  202. phys_addr_t phys,
  203. unsigned long size,
  204. unsigned long flags)
  205. {
  206. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, phys), size,
  207. xrp_dma_direction(flags));
  208. }
  209. static void xrp_dma_sync_for_cpu(struct xvp *xvp,
  210. unsigned long virt,
  211. phys_addr_t phys,
  212. unsigned long size,
  213. unsigned long flags)
  214. {
  215. if (xvp->hw_ops->dma_sync_for_cpu)
  216. xvp->hw_ops->dma_sync_for_cpu(xvp->hw_arg,
  217. (void *)virt, phys, size,
  218. flags);
  219. else
  220. xrp_default_dma_sync_for_cpu(xvp, phys, size, flags);
  221. }
  222. static inline void xrp_comm_write32(volatile void __iomem *addr, u32 v)
  223. {
  224. //__raw_writel(v, addr);
  225. writel(v, addr);
  226. }
  227. static inline u32 xrp_comm_read32(volatile void __iomem *addr)
  228. {
  229. //return __raw_readl(addr);
  230. return readl(addr);
  231. }
  232. static inline void __iomem *xrp_comm_put_tlv(void __iomem **addr,
  233. uint32_t type,
  234. uint32_t length)
  235. {
  236. struct xrp_dsp_tlv __iomem *tlv = *addr;
  237. xrp_comm_write32(&tlv->type, type);
  238. xrp_comm_write32(&tlv->length, length);
  239. *addr = tlv->value + ((length + 3) / 4);
  240. return tlv->value;
  241. }
  242. static inline void __iomem *xrp_comm_get_tlv(void __iomem **addr,
  243. uint32_t *type,
  244. uint32_t *length)
  245. {
  246. struct xrp_dsp_tlv __iomem *tlv = *addr;
  247. *type = xrp_comm_read32(&tlv->type);
  248. *length = xrp_comm_read32(&tlv->length);
  249. *addr = tlv->value + ((*length + 3) / 4);
  250. return tlv->value;
  251. }
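/*
 * Layout note (derived from the two helpers above): each record in the shared
 * sync area is a 32-bit type, a 32-bit length in bytes, and a value padded up
 * to the next 32-bit word. put/get return a pointer to the value and advance
 * *addr past the padding, so records can be written and parsed back to back:
 *
 *   | type | length | value ... (length bytes, padded to 4) | next TLV ...
 */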
  252. static inline void xrp_comm_write(volatile void __iomem *addr, const void *p,
  253. size_t sz)
  254. {
  255. size_t sz32 = sz & ~3;
  256. u32 v;
  257. while (sz32) {
  258. memcpy(&v, p, sizeof(v));
  259. __raw_writel(v, addr);
  260. p += 4;
  261. addr += 4;
  262. sz32 -= 4;
  263. }
  264. sz &= 3;
  265. if (sz) {
  266. v = 0;
  267. memcpy(&v, p, sz);
  268. __raw_writel(v, addr);
  269. }
  270. }
  271. static inline void xrp_comm_read(volatile void __iomem *addr, void *p,
  272. size_t sz)
  273. {
  274. size_t sz32 = sz & ~3;
  275. u32 v;
  276. while (sz32) {
  277. v = __raw_readl(addr);
  278. memcpy(p, &v, sizeof(v));
  279. p += 4;
  280. addr += 4;
  281. sz32 -= 4;
  282. }
  283. sz &= 3;
  284. if (sz) {
  285. v = __raw_readl(addr);
  286. memcpy(p, &v, sz);
  287. }
  288. }
  289. static inline void xrp_send_device_irq(struct xvp *xvp)
  290. {
  291. if (xvp->hw_ops->send_irq)
  292. xvp->hw_ops->send_irq(xvp->hw_arg);
  293. }
  294. static inline bool xrp_panic_check(struct xvp *xvp)
  295. {
  296. if (xvp->hw_ops->panic_check)
  297. return xvp->hw_ops->panic_check(xvp->hw_arg);
  298. else
  299. return panic_check(xvp->panic_log);
  300. }
  301. static void xrp_add_known_file(struct file *filp)
  302. {
  303. struct xrp_known_file *p = kmalloc(sizeof(*p), GFP_KERNEL);
  304. if (!p)
  305. return;
  306. p->filp = filp;
  307. spin_lock(&xrp_known_files_lock);
  308. hash_add(xrp_known_files, &p->node, (unsigned long)filp);
  309. spin_unlock(&xrp_known_files_lock);
  310. }
  311. static void xrp_remove_known_file(struct file *filp)
  312. {
  313. struct xrp_known_file *p;
  314. struct xrp_known_file *pf = NULL;
  315. spin_lock(&xrp_known_files_lock);
  316. hash_for_each_possible(xrp_known_files, p, node, (unsigned long)filp) {
  317. if (p->filp == filp) {
  318. hash_del(&p->node);
  319. pf = p;
  320. break;
  321. }
  322. }
  323. spin_unlock(&xrp_known_files_lock);
  324. if (pf)
  325. kfree(pf);
  326. }
  327. static bool xrp_is_known_file(struct file *filp)
  328. {
  329. bool ret = false;
  330. struct xrp_known_file *p;
  331. spin_lock(&xrp_known_files_lock);
  332. hash_for_each_possible(xrp_known_files, p, node, (unsigned long)filp) {
  333. if (p->filp == filp) {
  334. ret = true;
  335. break;
  336. }
  337. }
  338. spin_unlock(&xrp_known_files_lock);
  339. return ret;
  340. }
  341. static void xrp_sync_v2(struct xvp *xvp,
  342. void *hw_sync_data, size_t sz)
  343. {
  344. struct xrp_dsp_sync_v2 __iomem *shared_sync = xvp->comm;
  345. void __iomem *addr = shared_sync->hw_sync_data;
  346. xrp_comm_write(xrp_comm_put_tlv(&addr,
  347. XRP_DSP_SYNC_TYPE_HW_SPEC_DATA, sz),
  348. hw_sync_data, sz);
  349. if (xvp->n_queues > 1) {
  350. struct xrp_dsp_sync_v2 __iomem *queue_sync;
  351. unsigned i;
  352. xrp_comm_write(xrp_comm_put_tlv(&addr,
  353. XRP_DSP_SYNC_TYPE_HW_QUEUES,
  354. xvp->n_queues * sizeof(u32)),
  355. xvp->queue_priority,
  356. xvp->n_queues * sizeof(u32));
  357. for (i = 1; i < xvp->n_queues; ++i) {
  358. queue_sync = xvp->queue[i].comm;
  359. xrp_comm_write32(&queue_sync->sync,
  360. XRP_DSP_SYNC_IDLE);
  361. }
  362. }
  363. struct xrp_dsp_debug_info debug_info ={
  364. .panic_addr = xvp->panic_phy,
  365. .log_level = dsp_fw_log_mode,
  366. };
  367. xrp_comm_write(xrp_comm_put_tlv(&addr,
  368. XRP_DSP_SYNC_TYPE_HW_DEBUG_INFO, sizeof(struct xrp_dsp_debug_info)),
  369. &debug_info, sizeof(struct xrp_dsp_debug_info));
  370. xrp_comm_put_tlv(&addr, XRP_DSP_SYNC_TYPE_LAST, 0);
  371. }
  372. static int xrp_sync_complete_v2(struct xvp *xvp, size_t sz)
  373. {
  374. struct xrp_dsp_sync_v2 __iomem *shared_sync = xvp->comm;
  375. void __iomem *addr = shared_sync->hw_sync_data;
  376. u32 type, len;
  377. xrp_comm_get_tlv(&addr, &type, &len);
  378. if (len != sz) {
  379. dev_err(xvp->dev,
  380. "HW spec data size modified by the DSP\n");
  381. return -EINVAL;
  382. }
  383. if (!(type & XRP_DSP_SYNC_TYPE_ACCEPT))
  384. dev_info(xvp->dev,
  385. "HW spec data not recognized by the DSP\n");
  386. if (xvp->n_queues > 1) {
  387. void __iomem *p = xrp_comm_get_tlv(&addr, &type, &len);
  388. if (len != xvp->n_queues * sizeof(u32)) {
  389. dev_err(xvp->dev,
  390. "Queue priority size modified by the DSP\n");
  391. return -EINVAL;
  392. }
  393. if (type & XRP_DSP_SYNC_TYPE_ACCEPT) {
  394. xrp_comm_read(p, xvp->queue_priority,
  395. xvp->n_queues * sizeof(u32));
  396. } else {
  397. dev_info(xvp->dev,
  398. "Queue priority data not recognized by the DSP\n");
  399. xvp->n_queues = 1;
  400. }
  401. }
  402. return 0;
  403. }
  404. static int xrp_synchronize(struct xvp *xvp)
  405. {
  406. size_t sz;
  407. void *hw_sync_data;
  408. unsigned long deadline = jiffies + firmware_command_timeout * HZ;
  409. struct xrp_dsp_sync_v1 __iomem *shared_sync = xvp->comm;
  410. int ret;
  411. u32 v, v1;
  412. hw_sync_data = xvp->hw_ops->get_hw_sync_data(xvp->hw_arg, &sz);
  413. if (!hw_sync_data) {
  414. ret = -ENOMEM;
  415. goto err;
  416. }
  417. ret = -ENODEV;
  418. dev_dbg(xvp->dev,"%s:comm sync:%p\n",__func__,&shared_sync->sync);
  419. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_START);
  420. mb();
  421. do {
  422. v = xrp_comm_read32(&shared_sync->sync);
  423. if (v != XRP_DSP_SYNC_START)
  424. break;
  425. if (xrp_panic_check(xvp))
  426. goto err;
  427. schedule();
  428. } while (time_before(jiffies, deadline));
  429. dev_dbg(xvp->dev,"%s:comm sync data :%x\n",__func__,v);
  430. switch (v) {
  431. case XRP_DSP_SYNC_DSP_READY_V1:
  432. if (xvp->n_queues > 1) {
  433. dev_info(xvp->dev,
  434. "Queue priority data not recognized by the DSP\n");
  435. xvp->n_queues = 1;
  436. }
  437. xrp_comm_write(&shared_sync->hw_sync_data, hw_sync_data, sz);
  438. break;
  439. case XRP_DSP_SYNC_DSP_READY_V2:
  440. xrp_sync_v2(xvp, hw_sync_data, sz);
  441. break;
  442. case XRP_DSP_SYNC_START:
  443. dev_err(xvp->dev, "DSP is not ready for synchronization\n");
  444. goto err;
  445. default:
  446. dev_err(xvp->dev,
  447. "DSP response to XRP_DSP_SYNC_START is not recognized\n");
  448. goto err;
  449. }
  450. mb();
  451. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_HOST_TO_DSP);
  452. do {
  453. mb();
  454. v1 = xrp_comm_read32(&shared_sync->sync);
  455. if (v1 == XRP_DSP_SYNC_DSP_TO_HOST)
  456. break;
  457. if (xrp_panic_check(xvp))
  458. goto err;
  459. schedule();
  460. } while (time_before(jiffies, deadline));
  461. if (v1 != XRP_DSP_SYNC_DSP_TO_HOST) {
  462. dev_err(xvp->dev,
  463. "DSP haven't confirmed initialization data reception\n");
  464. goto err;
  465. }
  466. if (v == XRP_DSP_SYNC_DSP_READY_V2) {
  467. ret = xrp_sync_complete_v2(xvp, sz);
  468. if (ret < 0)
  469. goto err;
  470. }
  471. xrp_send_device_irq(xvp);
  472. // if (xvp->host_irq_mode) {
  473. // int res = wait_for_completion_timeout(&xvp->queue[0].completion,
  474. // firmware_command_timeout * HZ);
  475. // ret = -ENODEV;
  476. // if (xrp_panic_check(xvp))
  477. // goto err;
  478. // if (res == 0) {
  479. // dev_err(xvp->dev,
  480. // "host IRQ mode is requested, but DSP couldn't deliver IRQ during synchronization\n");
  481. // goto err;
  482. // }
  483. // }
  484. ret = 0;
  485. err:
  486. kfree(hw_sync_data);
  487. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_IDLE);
  488. return ret;
  489. }
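/*
 * Synchronization handshake, as implemented above (summary added for
 * clarity): the host writes XRP_DSP_SYNC_START and polls until the DSP
 * replies with DSP_READY_V1 or DSP_READY_V2; it then publishes the HW sync
 * data (raw for v1, TLV records for v2), writes XRP_DSP_SYNC_HOST_TO_DSP and
 * waits for XRP_DSP_SYNC_DSP_TO_HOST; for v2 it re-reads the TLVs to see what
 * the DSP accepted, kicks the device IRQ, and finally parks the sync word
 * back at XRP_DSP_SYNC_IDLE.
 */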
  490. static bool xrp_cmd_complete(struct xrp_comm *xvp)
  491. {
  492. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  493. u32 flags = xrp_comm_read32(&cmd->flags);
  494. pr_debug(" xrp_cmd_complete %x\n", flags);
  495. rmb();
  496. return (flags & (XRP_DSP_CMD_FLAG_REQUEST_VALID |
  497. XRP_DSP_CMD_FLAG_RESPONSE_VALID)) ==
  498. (XRP_DSP_CMD_FLAG_REQUEST_VALID |
  499. XRP_DSP_CMD_FLAG_RESPONSE_VALID);
  500. }
  501. static inline int xrp_report_comlete(struct xvp *xvp)
  502. {
  503. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  504. if(!xvp->reporter)
  505. return -1;
  506. u32 flags = xrp_comm_read32(&cmd->report_id);
  507. if(flags& XRP_DSP_REPORT_TO_HOST_FLAG )
  508. {
  509. // dev_err(xvp->dev, "%s,report_flag %x\n", __func__,flags);
  510. flags &= (~XRP_DSP_REPORT_TO_HOST_FLAG);
  511. xrp_comm_write32(&cmd->report_id,flags);
  512. tasklet_schedule(&xvp->reporter->report_task);
  513. return 0;
  514. }
  515. return -1;
  516. }
  517. static inline int xrp_device_cmd_comlete(struct xvp *xvp)
  518. {
  519. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  520. u32 flags = xrp_comm_read32(&cmd->cmd_flag);
  521. if(flags& XRP_DSP_REPORT_TO_HOST_FLAG )
  522. {
  523. xrp_comm_write32(&cmd->cmd_flag,0);
  524. return 0;
  525. }
  526. return -1;
  527. }
  528. irqreturn_t xrp_irq_handler(int irq, struct xvp *xvp)
  529. {
  530. unsigned i, n = 0;
  531. // dev_dbg(xvp->dev, "%s\n", __func__);
  532. if (!xvp->comm)
  533. return IRQ_NONE;
  534. if(!xrp_report_comlete(xvp))
  535. {
  536. dev_dbg(xvp->dev, "completing report\n");
  537. // return IRQ_HANDLED;
  538. }
  539. if(xrp_device_cmd_comlete(xvp))
  540. {
  541. dev_dbg(xvp->dev, "no cmd msg report\n");
  542. return IRQ_HANDLED;
  543. }
  544. for (i = 0; i < xvp->n_queues; ++i) {
  545. if (xrp_cmd_complete(xvp->queue + i)) {
  546. dev_dbg(xvp->dev, "completing queue %d\n", i);
  547. complete(&xvp->queue[i].completion);
  548. ++n;
  549. }
  550. }
  551. return n ? IRQ_HANDLED : IRQ_NONE;
  552. }
  553. EXPORT_SYMBOL(xrp_irq_handler);
  554. static inline void xvp_file_lock(struct xvp_file *xvp_file)
  555. {
  556. spin_lock(&xvp_file->busy_list_lock);
  557. }
  558. static inline void xvp_file_unlock(struct xvp_file *xvp_file)
  559. {
  560. spin_unlock(&xvp_file->busy_list_lock);
  561. }
  562. static void xrp_allocation_queue(struct xvp_file *xvp_file,
  563. struct xrp_allocation *xrp_allocation)
  564. {
  565. xvp_file_lock(xvp_file);
  566. xrp_allocation->next = xvp_file->busy_list;
  567. xvp_file->busy_list = xrp_allocation;
  568. xvp_file_unlock(xvp_file);
  569. }
  570. static struct xrp_allocation *xrp_allocation_dequeue(struct xvp_file *xvp_file,
  571. phys_addr_t paddr, u32 size)
  572. {
  573. struct xrp_allocation **pcur;
  574. struct xrp_allocation *cur;
  575. xvp_file_lock(xvp_file);
  576. for (pcur = &xvp_file->busy_list; (cur = *pcur); pcur = &((*pcur)->next)) {
  577. pr_debug("%s: %pap / %pap x %d\n", __func__, &paddr, &cur->start, cur->size);
  578. if (paddr >= cur->start && paddr + size - cur->start <= cur->size) {
  579. *pcur = cur->next;
  580. break;
  581. }
  582. }
  583. xvp_file_unlock(xvp_file);
  584. return cur;
  585. }
  586. static long xrp_ioctl_alloc(struct file *filp,
  587. struct xrp_ioctl_alloc __user *p)
  588. {
  589. struct xvp_file *xvp_file = filp->private_data;
  590. struct xrp_allocation *xrp_allocation;
  591. unsigned long vaddr;
  592. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  593. long err;
  594. // pr_debug("%s: %p\n", __func__, p);
  595. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  596. return -EFAULT;
  597. // pr_debug("%s: size = %d, align = %x\n", __func__,
  598. // xrp_ioctl_alloc.size, xrp_ioctl_alloc.align);
  599. err = xrp_allocate(xvp_file->xvp->pool,
  600. xrp_ioctl_alloc.size,
  601. xrp_ioctl_alloc.align,
  602. &xrp_allocation);
  603. if (err)
  604. return err;
  605. xrp_allocation_queue(xvp_file, xrp_allocation);
  606. vaddr = vm_mmap(filp, 0, xrp_allocation->size,
  607. PROT_READ | PROT_WRITE, MAP_SHARED,
  608. xrp_allocation_offset(xrp_allocation));
  609. xrp_ioctl_alloc.addr = vaddr;
  610. xrp_ioctl_alloc.paddr = xrp_allocation->start;
  611. pr_debug("%s: vaddr = %llx, paddr = %llx\n", __func__,
  612. xrp_ioctl_alloc.addr, xrp_ioctl_alloc.paddr);
  613. if (copy_to_user(p, &xrp_ioctl_alloc, sizeof(*p))) {
  614. vm_munmap(vaddr, xrp_ioctl_alloc.size);
  615. return -EFAULT;
  616. }
  617. return 0;
  618. }
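/*
 * Illustrative user-space flow for the allocation ioctl (a sketch; the
 * request code name XRP_IOCTL_ALLOC comes from xrp_kernel_defs.h and is
 * assumed here):
 *
 *   struct xrp_ioctl_alloc a = { .size = 4096, .align = 64 };
 *   if (ioctl(fd, XRP_IOCTL_ALLOC, &a) == 0) {
 *       // a.addr is the vm_mmap()ed user virtual address,
 *       // a.paddr the physical start of the backing allocation
 *   }
 */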
  619. static int report_cnt=0;
  620. static void xrp_report_tasklet(unsigned long arg)
  621. {
  622. struct xvp *xvp=(struct xvp *)arg;
  623. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  624. struct xrp_report_buffer *p_buf = xvp->reporter->buffer_virt;
  625. struct xrp_report_ring_buffer *ring_buffer = xvp->reporter->buffer_list;
  626. __u32 report_id;
  627. if(!xvp->reporter->fasync)
  628. {
  629. pr_debug("%s:fasync is not register in user space\n",__func__);
  630. return;
  631. }
  632. if((ring_buffer->WR+1)%ring_buffer->max_item == ring_buffer->RD)
  633. {
  634. /* If a pop arrives before this flag is set and all the signal handlers finish, is no one left to clear it? */
  635. // xvp->reporter->buffer_list->is_full =true;
  636. pr_err("%s,report queue is full,block the report,WR:%d,RD:%d\n",__func__,ring_buffer->WR,ring_buffer->RD);
  637. return ;
  638. }
  639. report_id = xrp_comm_read32(&cmd->report_id)&0xffff;
  640. // int fd;
  641. // fd = xrp_comm_read32(&p_buf->data[8]);
  642. xrp_comm_write32(&p_buf->report_id,report_id);
  643. xrp_comm_read(p_buf,&ring_buffer->data[ring_buffer->WR*xvp->reporter->buffer_size],xvp->reporter->buffer_size);
  644. ring_buffer->WR = (ring_buffer->WR+1)%ring_buffer->max_item;
  645. kill_fasync(&(xvp->reporter->fasync), SIG_REPORT, POLL_IN);
  646. /*******************if report queue is full block new report************************************************/
  647. // if(ring_buffer->WR == ring_buffer->RD)
  648. // {
  649. // /*****if before this flag set ,pop comming, and finish the all sig handler lead no one clear ?? **/
  650. // xvp->reporter->buffer_list->is_full =true;
  651. // pr_err("%s,report queue is full,block the reprot\n",__func__);
  652. // }else
  653. {
  654. /*****clear report*********************/
  655. xrp_comm_write32(&cmd->report_id,0x0);
  656. }
  657. pr_debug("%s,report_id:%d,report_cnt:%d,WR:%d,RD:%d\n",__func__,p_buf->report_id,++report_cnt,ring_buffer->WR,ring_buffer->RD);
  658. }
  659. static long xrp_pop_report(struct file *filp,
  660. struct xrp_report_buffer __user *p)
  661. {
  662. struct xvp_file *xvp_file = filp->private_data;
  663. struct xvp *xvp = xvp_file->xvp;
  664. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  665. struct xrp_report_ring_buffer *ring_buffer = xvp->reporter->buffer_list;
  666. void* report_buf;
  667. /*******************if report queue is empty ,return************************************************/
  668. if((ring_buffer->WR == ring_buffer->RD))
  669. return -EFAULT;
  670. report_buf = &ring_buffer->data[ring_buffer->RD*xvp->reporter->buffer_size];
  671. if(copy_to_user(p, report_buf, xvp->reporter->buffer_size))
  672. {
  673. pr_debug("%s: copy to user fail\n", __func__);
  674. return -EFAULT;
  675. }
  676. /*******************if report queue is full ,unblock************************************************/
  677. if((ring_buffer->WR+1)%ring_buffer->max_item==ring_buffer->RD)
  678. {
  679. ring_buffer->RD=(ring_buffer->RD+1)%ring_buffer->max_item;
  680. ring_buffer->is_full = false;
  681. xrp_comm_write32(&cmd->report_id,0x0);
  682. pr_debug("%s: unblock the report,RD:%d\n", __func__,ring_buffer->RD);
  683. }
  684. else
  685. {
  686. ring_buffer->RD=(ring_buffer->RD+1)%ring_buffer->max_item;
  687. }
  688. return 0;
  689. }
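/*
 * Ring buffer note (added for clarity): reports are produced by
 * xrp_report_tasklet() at index WR and consumed here at index RD, both modulo
 * max_item. One slot is intentionally left empty, so (WR + 1) % max_item == RD
 * means "full" and WR == RD means "empty". While full, the tasklet drops new
 * reports and leaves report_id set; popping an entry from a previously full
 * queue clears report_id again to unblock the DSP.
 */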
  690. static long xrp_map_phy_to_virt(phys_addr_t paddr,unsigned long size,__u64 *vaddr)
  691. {
  692. // if (pfn_valid(__phys_to_pfn(paddr))) {
  693. // struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  694. // size_t page_offs = paddr & ~PAGE_MASK;
  695. // size_t offs;
  696. // // for (offs = 0; offs < size; ++page) {
  697. // // void *p = kmap(page);
  698. // // size_t sz = PAGE_SIZE - page_offs;
  699. // // size_t copy_sz = sz;
  700. // // unsigned long rc;
  701. // // }
  702. // if(page_offs+size>PAGE_SIZE)
  703. // {
  704. // pr_debug("%s,phys addr map to virt exceed one page",__func__);
  705. // return -EINVAL;
  706. // }
  707. // void *p = kmap(page);
  708. // if(!p)
  709. // {
  710. // pr_debug("%s couldn't kmap %pap x 0x%08x\n",__func__,&paddr, (u32)size);
  711. // return -EINVAL;
  712. // }
  713. // *vaddr =p + page_offs;
  714. // pr_debug("%s map to mem",__func__);
  715. // return 0;
  716. // }
  717. // else
  718. {
  719. void __iomem *p = ioremap(paddr, size);
  720. if (!p) {
  721. pr_debug("%s,couldn't ioremap %pap x 0x%08x\n",__func__,&paddr, (u32)size);
  722. return -EINVAL;
  723. }
  724. *vaddr = (__u64)(unsigned long)p;
  725. pr_debug("%s map to io mem\n",__func__);
  726. return 0;
  727. }
  728. // iounmap(p);
  729. // if (rc)
  730. // return -EFAULT;
  731. // }
  732. }
  733. static long xrp_unmap_phy_to_virt(unsigned long *vaddr,phys_addr_t paddr,unsigned long size)
  734. {
  735. if (pfn_valid(__phys_to_pfn(paddr))) {
  736. struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  737. kunmap(page);
  738. }
  739. else{
  740. iounmap((void __iomem *)*vaddr);
  741. }
  742. *vaddr = 0;
  743. return 0;
  744. }
  745. static long xrp_ioctl_alloc_report(struct file *filp,
  746. struct xrp_ioctl_alloc __user *p)
  747. {
  748. struct xvp_file *xvp_file = filp->private_data;
  749. struct xrp_allocation *xrp_allocation;
  750. struct xvp *xvp = xvp_file->xvp;
  751. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  752. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  754. int size;
  755. long err;
  756. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  757. return -EFAULT;
  758. pr_debug("%s: virtAddr = %lx.size = %d, align = %x\n", __func__,
  759. xrp_ioctl_alloc.addr,xrp_ioctl_alloc.size,
  760. xrp_ioctl_alloc.align);
  761. xvp->reporter= kmalloc(sizeof(*(xvp->reporter)), GFP_KERNEL);
  762. if (!xvp->reporter)
  763. return -EFAULT;
  764. xvp->reporter->fasync=NULL;
  765. if (xrp_allocate(xvp_file->xvp->pool,xrp_ioctl_alloc.size,
  766. xrp_ioctl_alloc.align,&xrp_allocation))
  767. {
  768. goto One_Err;
  769. }
  770. xrp_allocation_queue(xvp_file, xrp_allocation);
  771. xvp->reporter->buffer_phys = xrp_allocation->start;
  772. xvp->reporter->buffer_size = xrp_ioctl_alloc.size;
  773. if(xrp_map_phy_to_virt(xvp->reporter->buffer_phys,xrp_ioctl_alloc.size,&xvp->reporter->buffer_virt))
  774. {
  775. pr_err("%s: map to kernel virt fail\n", __func__);
  776. goto Two_Err;
  777. }
  778. size = sizeof(struct xrp_report_ring_buffer)+ xrp_ioctl_alloc.size*REPORT_QUEUE_NUM;
  779. xvp->reporter->buffer_list = kmalloc(size, GFP_KERNEL);
  780. if (xvp->reporter->buffer_list == NULL)
  781. goto Two_Err;
  782. xvp->reporter->buffer_list->WR=0;
  783. xvp->reporter->buffer_list->RD=0;
  784. xvp->reporter->buffer_list->is_full = false;
  785. xvp->reporter->buffer_list->max_item = REPORT_QUEUE_NUM;
  786. report_cnt =0;
  787. xrp_comm_write32(&cmd->report_addr,
  788. xrp_translate_to_dsp(&xvp->address_map,xvp->reporter->buffer_phys+sizeof(__u32)));
  789. unsigned int dsp_addr = xrp_comm_read32(&cmd->report_addr);
  790. pr_debug("%s: alloc_report buffer user virt:%llx,kernel virt:%lx, phys:%llx,dsp_addr:%x,size:%d\n", __func__,
  791. vaddr,xvp->reporter->buffer_virt,xvp->reporter->buffer_phys,dsp_addr,xrp_allocation->size);
  792. xrp_comm_write32(&cmd->report_buffer_size,xvp->reporter->buffer_size);
  793. xrp_comm_write32(&cmd->report_paylad_size,xvp->reporter->buffer_size);
  794. xrp_comm_write32(&cmd->report_status,XRP_DSP_REPORT_WORKING);
  795. xrp_comm_write32(&cmd->report_id,0);
  796. tasklet_init(&xvp->reporter->report_task,xrp_report_tasklet,(unsigned long)xvp);
  797. if (copy_to_user(p, &xrp_ioctl_alloc, sizeof(*p))) {
  798. pr_debug("%s: copy to user fail\n", __func__);
  799. goto Thr_Err;
  800. }
  801. return 0;
  802. Thr_Err:
  803. kfree(xvp->reporter->buffer_list);
  804. xvp->reporter->buffer_list = NULL;
  805. Two_Err:
  806. xrp_allocation_put(xrp_allocation);
  807. One_Err:
  808. kfree(xvp->reporter);
  809. xvp->reporter = NULL;
  810. return -EFAULT;
  811. }
  812. static int xrp_report_fasync(int fd, struct file *filp, int on){
  813. struct xvp_file *xvp_file = (struct xvp_file *)filp->private_data;
  814. pr_debug("%s: start,mode: %d\n", __func__,on);
  815. if(xvp_file->xvp->reporter == NULL)
  816. {
  817. pr_debug("%s: reporter is NULL\n", __func__,on);
  818. return 0;
  819. }
  820. if( fasync_helper(fd,filp,on,&(xvp_file->xvp->reporter->fasync)) < 0){
  821. pr_debug("%s: xrp_report_fasync fail\n", __func__);
  822. return -EIO;
  823. }
  824. pr_debug("%s: end\n", __func__);
  825. return 0;
  826. }
  827. static int xrp_report_fasync_release(struct file *filp){
  828. struct xvp_file *xvp_file = (struct xvp_file *)filp->private_data;
  829. if(xvp_file->xvp->reporter)
  830. return xrp_report_fasync(-1,filp,0);
  831. return 0;
  832. }
  833. static long xrp_ioctl_release_report(struct file *filp,
  834. struct xrp_ioctl_alloc __user *p)
  835. {
  836. struct xvp_file *xvp_file = filp->private_data;
  837. struct xvp *xvp = xvp_file->xvp;
  838. struct mm_struct *mm = current->mm;
  839. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  840. struct vm_area_struct *vma;
  841. unsigned long start;
  842. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  843. struct xrp_allocation *xrp_allocation;
  844. if(xvp->reporter==NULL)
  845. return 0;
  846. tasklet_kill(&xvp->reporter->report_task);
  847. xrp_comm_write32(&cmd->report_status,XRP_DSP_REPORT_INVALID);
  848. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  849. return -EFAULT;
  850. xrp_allocation = xrp_allocation_dequeue(xvp_file,xvp->reporter->buffer_phys,xvp->reporter->buffer_size);
  851. xrp_allocation_put(xrp_allocation);
  852. if(xvp->reporter->buffer_list)
  853. kfree(xvp->reporter->buffer_list);
  854. xrp_report_fasync_release(filp);
  855. kfree(xvp->reporter);
  856. xvp->reporter =NULL;
  857. return 0;
  858. }
  859. // static struct struct_list timer;
  860. // static void xrp_device_heartbeat_check(unsigned long arg)
  861. // {
  862. // struct xvp *xvp = struct xvp *(arg);
  863. // if(xvp->reporter != NULL)
  864. // {
  865. // xrp_comm_write32(&cmd->flags, 0);
  866. // }
  867. // mod_timer(&timer,jiffies + heartbeat_period * HZ);
  868. // }
  869. // static int xrp_device_heartbeat_init(void * arg)
  870. // {
  871. // if(heartbeat_period > 0)
  872. // {
  873. // init_timer(&timer);
  874. // timer.function = xrp_device_heartbeat_check;
  875. // timer.expires = jiffies + heartbeat_period * HZ;
  876. // timer.data = arg;
  877. // add_timer(&timer);
  878. // pr_debug("%s enable heartbeat timer\n", __func__);
  879. // }
  880. // }
  881. static void xrp_put_pages(phys_addr_t phys, unsigned long n_pages)
  882. {
  883. struct page *page;
  884. unsigned long i;
  885. page = pfn_to_page(__phys_to_pfn(phys));
  886. for (i = 0; i < n_pages; ++i)
  887. put_page(page + i);
  888. }
  889. static void xrp_alien_mapping_destroy(struct xrp_alien_mapping *alien_mapping)
  890. {
  891. switch (alien_mapping->type) {
  892. case ALIEN_GUP:
  893. xrp_put_pages(alien_mapping->paddr,
  894. PFN_UP(alien_mapping->vaddr +
  895. alien_mapping->size) -
  896. PFN_DOWN(alien_mapping->vaddr));
  897. break;
  898. case ALIEN_COPY:
  899. xrp_allocation_put(alien_mapping->allocation);
  900. break;
  901. default:
  902. break;
  903. }
  904. }
  905. static long xvp_pfn_virt_to_phys(struct xvp_file *xvp_file,
  906. struct vm_area_struct *vma,
  907. unsigned long vaddr, unsigned long size,
  908. phys_addr_t *paddr,
  909. struct xrp_alien_mapping *mapping)
  910. {
  911. int ret;
  912. unsigned long i;
  913. unsigned long nr_pages = PFN_UP(vaddr + size) - PFN_DOWN(vaddr);
  914. unsigned long pfn;
  915. const struct xrp_address_map_entry *address_map;
  916. ret = follow_pfn(vma, vaddr, &pfn);
  917. if (ret)
  918. return ret;
  919. *paddr = __pfn_to_phys(pfn) + (vaddr & ~PAGE_MASK);
  920. address_map = xrp_get_address_mapping(&xvp_file->xvp->address_map,
  921. *paddr);
  922. if (!address_map) {
  923. pr_debug("%s: untranslatable addr: %pap\n", __func__, paddr);
  924. return -EINVAL;
  925. }
  926. for (i = 1; i < nr_pages; ++i) {
  927. unsigned long next_pfn;
  928. phys_addr_t next_phys;
  929. ret = follow_pfn(vma, vaddr + (i << PAGE_SHIFT), &next_pfn);
  930. if (ret)
  931. return ret;
  932. if (next_pfn != pfn + 1) {
  933. pr_debug("%s: non-contiguous physical memory\n",
  934. __func__);
  935. return -EINVAL;
  936. }
  937. next_phys = __pfn_to_phys(next_pfn);
  938. if (xrp_compare_address(next_phys, address_map)) {
  939. pr_debug("%s: untranslatable addr: %pap\n",
  940. __func__, &next_phys);
  941. return -EINVAL;
  942. }
  943. pfn = next_pfn;
  944. }
  945. *mapping = (struct xrp_alien_mapping){
  946. .vaddr = vaddr,
  947. .size = size,
  948. .paddr = *paddr,
  949. .type = ALIEN_PFN_MAP,
  950. };
  951. pr_debug("%s: success, paddr: %pap\n", __func__, paddr);
  952. return 0;
  953. }
  954. static long xvp_gup_virt_to_phys(struct xvp_file *xvp_file,
  955. unsigned long vaddr, unsigned long size,
  956. phys_addr_t *paddr,
  957. struct xrp_alien_mapping *mapping)
  958. {
  959. int ret;
  960. int i;
  961. int nr_pages;
  962. struct page **page;
  963. const struct xrp_address_map_entry *address_map;
  964. if (PFN_UP(vaddr + size) - PFN_DOWN(vaddr) > INT_MAX)
  965. return -EINVAL;
  966. nr_pages = PFN_UP(vaddr + size) - PFN_DOWN(vaddr);
  967. page = kmalloc(nr_pages * sizeof(void *), GFP_KERNEL);
  968. if (!page)
  969. return -ENOMEM;
  970. ret = get_user_pages_fast(vaddr, nr_pages, 1, page);
  971. if (ret < 0)
  972. goto out;
  973. if (ret < nr_pages) {
  974. pr_debug("%s: asked for %d pages, but got only %d\n",
  975. __func__, nr_pages, ret);
  976. nr_pages = ret;
  977. ret = -EINVAL;
  978. goto out_put;
  979. }
  980. address_map = xrp_get_address_mapping(&xvp_file->xvp->address_map,
  981. page_to_phys(page[0]));
  982. if (!address_map) {
  983. phys_addr_t addr = page_to_phys(page[0]);
  984. pr_debug("%s: untranslatable addr: %pap\n",
  985. __func__, &addr);
  986. ret = -EINVAL;
  987. goto out_put;
  988. }
  989. for (i = 1; i < nr_pages; ++i) {
  990. phys_addr_t addr;
  991. if (page[i] != page[i - 1] + 1) {
  992. pr_debug("%s: non-contiguous physical memory\n",
  993. __func__);
  994. ret = -EINVAL;
  995. goto out_put;
  996. }
  997. addr = page_to_phys(page[i]);
  998. if (xrp_compare_address(addr, address_map)) {
  999. pr_debug("%s: untranslatable addr: %pap\n",
  1000. __func__, &addr);
  1001. ret = -EINVAL;
  1002. goto out_put;
  1003. }
  1004. }
  1005. *paddr = __pfn_to_phys(page_to_pfn(page[0])) + (vaddr & ~PAGE_MASK);
  1006. *mapping = (struct xrp_alien_mapping){
  1007. .vaddr = vaddr,
  1008. .size = size,
  1009. .paddr = *paddr,
  1010. .type = ALIEN_GUP,
  1011. };
  1012. ret = 0;
  1013. pr_debug("%s: success, paddr: %pap\n", __func__, paddr);
  1014. out_put:
  1015. if (ret < 0)
  1016. for (i = 0; i < nr_pages; ++i)
  1017. put_page(page[i]);
  1018. out:
  1019. kfree(page);
  1020. return ret;
  1021. }
  1022. static long _xrp_copy_user_phys(struct xvp *xvp,
  1023. unsigned long vaddr, unsigned long size,
  1024. phys_addr_t paddr, unsigned long flags,
  1025. bool to_phys)
  1026. {
  1027. // if (pfn_valid(__phys_to_pfn(paddr))) {
  1028. // struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  1029. // size_t page_offs = paddr & ~PAGE_MASK;
  1030. // size_t offs;
  1031. // if (!to_phys)
  1032. // xrp_default_dma_sync_for_cpu(xvp, paddr, size, flags);
  1033. // for (offs = 0; offs < size; ++page) {
  1034. // void *p = kmap(page);
  1035. // size_t sz = PAGE_SIZE - page_offs;
  1036. // size_t copy_sz = sz;
  1037. // unsigned long rc;
  1038. // if (!p)
  1039. // return -ENOMEM;
  1040. // if (size - offs < copy_sz)
  1041. // copy_sz = size - offs;
  1042. // if (to_phys)
  1043. // rc = copy_from_user(p + page_offs,
  1044. // (void __user *)(vaddr + offs),
  1045. // copy_sz);
  1046. // else
  1047. // rc = copy_to_user((void __user *)(vaddr + offs),
  1048. // p + page_offs, copy_sz);
  1049. // pr_debug("%s rc:%d,user addr :(%llx,%d) kernel:addr(%llx,%d) size:%d\n", __func__,rc,vaddr,offs,p,page_offs,copy_sz);
  1050. // page_offs = 0;
  1051. // offs += copy_sz;
  1052. // kunmap(page);
  1053. // if (rc)
  1054. // return -EFAULT;
  1055. // }
  1056. // if (to_phys)
  1057. // xrp_default_dma_sync_for_device(xvp, paddr, size, flags);
  1058. // } else
  1059. {
  1060. void __iomem *p = ioremap(paddr, size);
  1061. unsigned long rc;
  1062. pr_debug("%s ioremap:to_phys %d-(%llx,%llx)\n", __func__,to_phys,paddr,p);
  1063. if (!p) {
  1064. dev_err(xvp->dev,
  1065. "couldn't ioremap %pap x 0x%08x\n",
  1066. &paddr, (u32)size);
  1067. return -EINVAL;
  1068. }
  1069. if (to_phys)
  1070. {
  1071. rc = copy_from_user(__io_virt(p),
  1072. (void __user *)vaddr, size);
  1073. /* Fix for 5.10+ kernels: vaddr may be a kernel address here, so fall back to a direct copy into the ioremapped area. */
  1074. if(rc)
  1075. {
  1076. xrp_comm_write(p,(void *)vaddr,size);
  1077. pr_debug("%s write replaced by direct copy to phys\n", __func__);
  1078. rc = 0;
  1079. }
  1080. }
  1081. else
  1082. rc = copy_to_user((void __user *)vaddr,
  1083. __io_virt(p), size);
  1084. pr_debug("%s rc:%d,user addr :(%llx) kernel:addr(%llx) size:%d\n", __func__,rc,vaddr,p,size);
  1085. iounmap(p);
  1086. if (rc)
  1087. return -EFAULT;
  1088. }
  1089. return 0;
  1090. }
  1091. static long xrp_copy_user_to_phys(struct xvp *xvp,
  1092. unsigned long vaddr, unsigned long size,
  1093. phys_addr_t paddr, unsigned long flags)
  1094. {
  1095. return _xrp_copy_user_phys(xvp, vaddr, size, paddr, flags, true);
  1096. }
  1097. static long xrp_copy_user_from_phys(struct xvp *xvp,
  1098. unsigned long vaddr, unsigned long size,
  1099. phys_addr_t paddr, unsigned long flags)
  1100. {
  1101. return _xrp_copy_user_phys(xvp, vaddr, size, paddr, flags, false);
  1102. }
  1103. static long xvp_copy_virt_to_phys(struct xvp_file *xvp_file,
  1104. unsigned long flags,
  1105. unsigned long vaddr, unsigned long size,
  1106. phys_addr_t *paddr,
  1107. struct xrp_alien_mapping *mapping)
  1108. {
  1109. phys_addr_t phys;
  1110. unsigned long align = clamp(vaddr & -vaddr, 16ul, PAGE_SIZE);
  1111. unsigned long offset = vaddr & (align - 1);
  1112. struct xrp_allocation *allocation;
  1113. long rc;
  1114. rc = xrp_allocate(xvp_file->xvp->pool,
  1115. size + align, align, &allocation);
  1116. if (rc < 0)
  1117. return rc;
  1118. phys = (allocation->start & -align) | offset;
  1119. if (phys < allocation->start)
  1120. phys += align;
  1121. if (flags & XRP_FLAG_READ) {
  1122. if (xrp_copy_user_to_phys(xvp_file->xvp,
  1123. vaddr, size, phys, flags)) {
  1124. xrp_allocation_put(allocation);
  1125. return -EFAULT;
  1126. }
  1127. }
  1128. *paddr = phys;
  1129. *mapping = (struct xrp_alien_mapping){
  1130. .vaddr = vaddr,
  1131. .size = size,
  1132. .paddr = *paddr,
  1133. .allocation = allocation,
  1134. .type = ALIEN_COPY,
  1135. };
  1136. pr_debug("%s: copying to pa: %pap\n", __func__, paddr);
  1137. return 0;
  1138. }
  1139. static unsigned xvp_get_region_vma_count(unsigned long virt,
  1140. unsigned long size,
  1141. struct vm_area_struct *vma)
  1142. {
  1143. unsigned i;
  1144. struct mm_struct *mm = current->mm;
  1145. if (virt + size < virt)
  1146. return 0;
  1147. if (vma->vm_start > virt)
  1148. return 0;
  1149. if (vma->vm_start <= virt &&
  1150. virt + size <= vma->vm_end)
  1151. return 1;
  1152. for (i = 2; ; ++i) {
  1153. struct vm_area_struct *next_vma = find_vma(mm, vma->vm_end);
  1154. if (!next_vma)
  1155. return 0;
  1156. if (next_vma->vm_start != vma->vm_end)
  1157. return 0;
  1158. vma = next_vma;
  1159. if (virt + size <= vma->vm_end)
  1160. return i;
  1161. }
  1162. return 0;
  1163. }
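/*
 * Return value note (added for clarity): 0 means the [virt, virt + size)
 * region is not fully covered by contiguous VMAs starting at vma, 1 means it
 * fits inside the single VMA passed in, and N > 1 is the number of adjacent
 * VMAs that together cover it.
 */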
  1164. static long xrp_share_kernel(struct file *filp,
  1165. unsigned long virt, unsigned long size,
  1166. unsigned long flags, phys_addr_t *paddr,
  1167. struct xrp_mapping *mapping)
  1168. {
  1169. struct xvp_file *xvp_file = filp->private_data;
  1170. struct xvp *xvp = xvp_file->xvp;
  1171. phys_addr_t phys = __pa(virt);
  1172. long err = 0;
  1173. pr_debug("%s: sharing kernel-only buffer: %pap\n", __func__, &phys);
  1174. if (xrp_translate_to_dsp(&xvp->address_map, phys) ==
  1175. XRP_NO_TRANSLATION) {
  1176. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1177. mm_segment_t oldfs = get_fs();
  1178. set_fs(KERNEL_DS);
  1179. #else
  1180. mm_segment_t oldfs = force_uaccess_begin();
  1181. #endif
  1182. pr_debug("%s: untranslatable addr, making shadow copy\n",
  1183. __func__);
  1184. err = xvp_copy_virt_to_phys(xvp_file, flags,
  1185. virt, size, paddr,
  1186. &mapping->alien_mapping);
  1187. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1188. set_fs(oldfs);
  1189. #else
  1190. force_uaccess_end(oldfs);
  1191. #endif
  1192. mapping->type = XRP_MAPPING_ALIEN | XRP_MAPPING_KERNEL;
  1193. } else {
  1194. mapping->type = XRP_MAPPING_KERNEL;
  1195. *paddr = phys;
  1196. xrp_default_dma_sync_for_device(xvp, phys, size, flags);
  1197. }
  1198. pr_debug("%s: mapping = %p, mapping->type = %d\n",
  1199. __func__, mapping, mapping->type);
  1200. return err;
  1201. }
  1202. static bool vma_needs_cache_ops(struct vm_area_struct *vma)
  1203. {
  1204. pgprot_t prot = vma->vm_page_prot;
  1205. return pgprot_val(prot) != pgprot_val(pgprot_noncached(prot)) &&
  1206. pgprot_val(prot) != pgprot_val(pgprot_writecombine(prot));
  1207. }
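/*
 * Note (added for clarity): a VMA needs explicit cache maintenance unless its
 * pages are mapped noncached or writecombine; in those cases the CPU caches
 * are bypassed and the driver can skip its DMA sync operations.
 */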
  1208. /* Share blocks of memory, from host to IVP or back.
  1209. *
  1210. * When sharing to IVP return physical addresses in paddr.
  1211. * Areas allocated from the driver can always be shared in both directions.
  1212. * Contiguous 3rd party allocations need to be shared to IVP before they can
  1213. * be shared back.
  1214. */
  1215. static long __xrp_share_block(struct file *filp,
  1216. unsigned long virt, unsigned long size,
  1217. unsigned long flags, phys_addr_t *paddr,
  1218. struct xrp_mapping *mapping)
  1219. {
  1220. phys_addr_t phys = ~0ul;
  1221. struct xvp_file *xvp_file = filp->private_data;
  1222. struct xvp *xvp = xvp_file->xvp;
  1223. struct mm_struct *mm = current->mm;
  1224. struct vm_area_struct *vma = find_vma(mm, virt);
  1225. bool do_cache = true;
  1226. long rc = -EINVAL;
  1227. if (!vma) {
  1228. pr_debug("%s: no vma for vaddr/size = 0x%08lx/0x%08lx\n",
  1229. __func__, virt, size);
  1230. return -EINVAL;
  1231. }
  1232. /*
  1233. * Region requested for sharing should be within single VMA.
  1234. * That's true for the majority of cases, but sometimes (e.g.
  1235. * sharing buffer in the beginning of .bss which shares a
  1236. * file-mapped page with .data, followed by anonymous page)
  1237. * region will cross multiple VMAs. Support it in the simplest
  1238. * way possible: start with get_user_pages and use shadow copy
  1239. * if that fails.
  1240. */
  1241. switch (xvp_get_region_vma_count(virt, size, vma)) {
  1242. case 0:
  1243. pr_debug("%s: bad vma for vaddr/size = 0x%08lx/0x%08lx\n",
  1244. __func__, virt, size);
  1245. pr_debug("%s: vma->vm_start = 0x%08lx, vma->vm_end = 0x%08lx\n",
  1246. __func__, vma->vm_start, vma->vm_end);
  1247. return -EINVAL;
  1248. case 1:
  1249. break;
  1250. default:
  1251. pr_debug("%s: multiple vmas cover vaddr/size = 0x%08lx/0x%08lx\n",
  1252. __func__, virt, size);
  1253. vma = NULL;
  1254. break;
  1255. }
  1256. /*
  1257. * And it need to be allocated from the same file descriptor, or
  1258. * at least from a file descriptor managed by the XRP.
  1259. */
  1260. if (vma &&
  1261. (vma->vm_file == filp || xrp_is_known_file(vma->vm_file))) {
  1262. struct xvp_file *vm_file = vma->vm_file->private_data;
  1263. struct xrp_allocation *xrp_allocation = vma->vm_private_data;
  1264. phys = (vma->vm_pgoff << PAGE_SHIFT) +
  1265. virt - vma->vm_start;
  1266. pr_debug("%s: XRP allocation at 0x%08lx, paddr: %pap\n",
  1267. __func__, virt, &phys);
  1268. /*
  1269. * If it was allocated from a different XRP file it may belong
  1270. * to a different device and not be directly accessible.
  1271. * Check if it is.
  1272. */
  1273. if (vma->vm_file != filp) {
  1274. const struct xrp_address_map_entry *address_map =
  1275. xrp_get_address_mapping(&xvp->address_map,
  1276. phys);
  1277. if (!address_map ||
  1278. xrp_compare_address(phys + size - 1, address_map))
  1279. pr_debug("%s: untranslatable addr: %pap\n",
  1280. __func__, &phys);
  1281. else
  1282. rc = 0;
  1283. } else {
  1284. rc = 0;
  1285. }
  1286. if (rc == 0) {
  1287. mapping->type = XRP_MAPPING_NATIVE;
  1288. mapping->native.xrp_allocation = xrp_allocation;
  1289. mapping->native.vaddr = virt;
  1290. xrp_allocation_get(xrp_allocation);
  1291. do_cache = vma_needs_cache_ops(vma);
  1292. }
  1293. }
  1294. if (rc < 0) {
  1295. struct xrp_alien_mapping *alien_mapping =
  1296. &mapping->alien_mapping;
  1297. unsigned long n_pages = PFN_UP(virt + size) - PFN_DOWN(virt);
  1298. /* Otherwise this is alien allocation. */
  1299. pr_debug("%s: non-XVP allocation at 0x%08lx\n",
  1300. __func__, virt);
  1301. /*
  1302. * A range can only be mapped directly if it is either
  1303. * uncached or HW-specific cache operations can handle it.
  1304. */
  1305. if (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)) {
  1306. rc = xvp_pfn_virt_to_phys(xvp_file, vma,
  1307. virt, size,
  1308. &phys,
  1309. alien_mapping);
  1310. if (rc == 0 && vma_needs_cache_ops(vma) &&
  1311. !xrp_cacheable(xvp, PFN_DOWN(phys), n_pages)) {
  1312. pr_debug("%s: needs unsupported cache mgmt\n",
  1313. __func__);
  1314. rc = -EINVAL;
  1315. }
  1316. } else {
  1317. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1318. up_read(&mm->mmap_sem);
  1319. #else
  1320. up_read(&mm->mmap_lock);
  1321. #endif
  1322. rc = xvp_gup_virt_to_phys(xvp_file, virt,
  1323. size, &phys,
  1324. alien_mapping);
  1325. if (rc == 0 &&
  1326. (!vma || vma_needs_cache_ops(vma)) &&
  1327. !xrp_cacheable(xvp, PFN_DOWN(phys), n_pages)) {
  1328. pr_debug("%s: needs unsupported cache mgmt\n",
  1329. __func__);
  1330. xrp_put_pages(phys, n_pages);
  1331. rc = -EINVAL;
  1332. }
  1333. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1334. down_read(&mm->mmap_sem);
  1335. #else
  1336. down_read(&mm->mmap_lock);
  1337. #endif
  1338. }
  1339. if (rc == 0 && vma && !vma_needs_cache_ops(vma))
  1340. do_cache = false;
  1341. /*
  1342. * If we couldn't share try to make a shadow copy.
  1343. */
  1344. if (rc < 0) {
  1345. rc = xvp_copy_virt_to_phys(xvp_file, flags,
  1346. virt, size, &phys,
  1347. alien_mapping);
  1348. do_cache = false;
  1349. }
  1350. /* We couldn't share it. Fail the request. */
  1351. if (rc < 0) {
  1352. pr_debug("%s: couldn't map virt to phys\n",
  1353. __func__);
  1354. return -EINVAL;
  1355. }
  1356. phys = alien_mapping->paddr +
  1357. virt - alien_mapping->vaddr;
  1358. mapping->type = XRP_MAPPING_ALIEN;
  1359. }
  1360. *paddr = phys;
  1361. pr_debug("%s: mapping = %p, mapping->type = %d,do_cache = %d\n",
  1362. __func__, mapping, mapping->type,do_cache);
  1363. if (do_cache)
  1364. xrp_dma_sync_for_device(xvp,
  1365. virt, phys, size,
  1366. flags);
  1367. return 0;
  1368. }
  1369. static long xrp_writeback_alien_mapping(struct xvp_file *xvp_file,
  1370. struct xrp_alien_mapping *alien_mapping,
  1371. unsigned long flags)
  1372. {
  1373. struct page *page;
  1374. size_t nr_pages;
  1375. size_t i;
  1376. long ret = 0;
  1377. switch (alien_mapping->type) {
  1378. case ALIEN_GUP:
  1379. xrp_dma_sync_for_cpu(xvp_file->xvp,
  1380. alien_mapping->vaddr,
  1381. alien_mapping->paddr,
  1382. alien_mapping->size,
  1383. flags);
  1384. pr_debug("%s: dirtying alien GUP @va = %p, pa = %pap\n",
  1385. __func__, (void __user *)alien_mapping->vaddr,
  1386. &alien_mapping->paddr);
  1387. page = pfn_to_page(__phys_to_pfn(alien_mapping->paddr));
  1388. nr_pages = PFN_UP(alien_mapping->vaddr + alien_mapping->size) -
  1389. PFN_DOWN(alien_mapping->vaddr);
  1390. for (i = 0; i < nr_pages; ++i)
  1391. SetPageDirty(page + i);
  1392. break;
  1393. case ALIEN_COPY:
  1394. pr_debug("%s: synchronizing alien copy @pa = %pap back to %p\n",
  1395. __func__, &alien_mapping->paddr,
  1396. (void __user *)alien_mapping->vaddr);
  1397. if (xrp_copy_user_from_phys(xvp_file->xvp,
  1398. alien_mapping->vaddr,
  1399. alien_mapping->size,
  1400. alien_mapping->paddr,
  1401. flags))
  1402. ret = -EINVAL;
  1403. break;
  1404. default:
  1405. break;
  1406. }
  1407. return ret;
  1408. }
1409. /*
1410. * Undo a mapping set up by __xrp_share_block() or xrp_share_kernel():
 * write data back for writable mappings and drop the references taken
 * while sharing.
1411. */
  1412. static long __xrp_unshare_block(struct file *filp, struct xrp_mapping *mapping,
  1413. unsigned long flags)
  1414. {
  1415. long ret = 0;
1416. mm_segment_t oldfs;
1417. if (mapping->type & XRP_MAPPING_KERNEL) {
1418. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
1419. oldfs = get_fs();
1420. set_fs(KERNEL_DS);
1421. #else
1422. oldfs = force_uaccess_begin();
1423. #endif
}
  1424. switch (mapping->type & ~XRP_MAPPING_KERNEL) {
  1425. case XRP_MAPPING_NATIVE:
  1426. if (flags & XRP_FLAG_WRITE) {
  1427. struct xvp_file *xvp_file = filp->private_data;
  1428. xrp_dma_sync_for_cpu(xvp_file->xvp,
  1429. mapping->native.vaddr,
  1430. mapping->native.xrp_allocation->start,
  1431. mapping->native.xrp_allocation->size,
  1432. flags);
  1433. }
  1434. xrp_allocation_put(mapping->native.xrp_allocation);
  1435. break;
  1436. case XRP_MAPPING_ALIEN:
  1437. if (flags & XRP_FLAG_WRITE)
  1438. ret = xrp_writeback_alien_mapping(filp->private_data,
  1439. &mapping->alien_mapping,
  1440. flags);
  1441. xrp_alien_mapping_destroy(&mapping->alien_mapping);
  1442. break;
  1443. case XRP_MAPPING_KERNEL:
  1444. break;
  1445. default:
  1446. break;
  1447. }
1448. if (mapping->type & XRP_MAPPING_KERNEL) {
1449. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
1450. set_fs(oldfs);
1451. #else
1452. force_uaccess_end(oldfs);
1453. #endif
}
  1454. mapping->type = XRP_MAPPING_NONE;
  1455. return ret;
  1456. }
  1457. static long xrp_ioctl_free(struct file *filp,
  1458. struct xrp_ioctl_alloc __user *p)
  1459. {
  1460. struct mm_struct *mm = current->mm;
  1461. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  1462. struct vm_area_struct *vma;
  1463. unsigned long start;
  1464. // pr_debug("%s: %p\n", __func__, p);
  1465. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  1466. return -EFAULT;
  1467. start = xrp_ioctl_alloc.addr;
  1468. // pr_debug("%s: virt_addr = 0x%08lx\n", __func__, start);
  1469. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1470. down_read(&mm->mmap_sem);
  1471. #else
  1472. down_read(&mm->mmap_lock);
  1473. #endif
  1474. vma = find_vma(mm, start);
  1475. if (vma && vma->vm_file == filp &&
  1476. vma->vm_start <= start && start < vma->vm_end) {
  1477. size_t size;
  1478. start = vma->vm_start;
  1479. size = vma->vm_end - vma->vm_start;
  1480. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1481. up_read(&mm->mmap_sem);
  1482. #else
  1483. up_read(&mm->mmap_lock);
  1484. #endif
  1485. pr_debug("%s: 0x%lx x %zu\n", __func__, start, size);
  1486. return vm_munmap(start, size);
  1487. }
  1488. // pr_debug("%s: no vma/bad vma for vaddr = 0x%08lx\n", __func__, start);
  1489. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1490. up_read(&mm->mmap_sem);
  1491. #else
  1492. up_read(&mm->mmap_lock);
  1493. #endif
  1494. return -EINVAL;
  1495. }
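/*
 * Command-completion helpers: the IRQ variant sleeps on the queue's
 * completion (with a firmware_command_timeout deadline), the poll variant
 * busy-checks the command flags until the same deadline, calling schedule()
 * between checks. Both return -EBUSY on timeout or when a DSP panic is
 * detected; the IRQ variant returns the negative error if the wait is
 * interrupted.
 */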
  1496. static long xvp_complete_cmd_irq(struct xvp *xvp, struct xrp_comm *comm,
  1497. bool (*cmd_complete)(struct xrp_comm *p))
  1498. {
  1499. long timeout = firmware_command_timeout * HZ;
  1500. if (cmd_complete(comm))
  1501. return 0;
  1502. if (xrp_panic_check(xvp))
  1503. return -EBUSY;
  1504. do {
  1505. timeout = wait_for_completion_interruptible_timeout(&comm->completion,
  1506. timeout);
  1507. if (cmd_complete(comm))
  1508. return 0;
  1509. if (xrp_panic_check(xvp))
  1510. return -EBUSY;
  1511. } while (timeout > 0);
  1512. if (timeout == 0)
  1513. return -EBUSY;
  1514. return timeout;
  1515. }
  1516. static long xvp_complete_cmd_poll(struct xvp *xvp, struct xrp_comm *comm,
  1517. bool (*cmd_complete)(struct xrp_comm *p))
  1518. {
  1519. unsigned long deadline = jiffies + firmware_command_timeout * HZ;
  1520. do {
  1521. if (cmd_complete(comm))
  1522. return 0;
  1523. if (xrp_panic_check(xvp))
  1524. return -EBUSY;
  1525. schedule();
  1526. } while (time_before(jiffies, deadline));
  1527. return -EBUSY;
  1528. }
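/*
 * Per-request bookkeeping for one queued command. Each union holds either
 * the inline copy of the data (payloads up to XRP_DSP_CMD_INLINE_DATA_SIZE,
 * buffer lists up to XRP_DSP_CMD_INLINE_BUFFER_COUNT entries) or the
 * xrp_mapping describing the out-of-line shared memory used instead.
 */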
  1529. struct xrp_request {
  1530. struct xrp_ioctl_queue ioctl_queue;
  1531. size_t n_buffers;
  1532. struct xrp_mapping *buffer_mapping;
  1533. struct xrp_dsp_buffer *dsp_buffer;
  1534. phys_addr_t in_data_phys;
  1535. phys_addr_t out_data_phys;
  1536. phys_addr_t dsp_buffer_phys;
  1537. union {
  1538. struct xrp_mapping in_data_mapping;
  1539. u8 in_data[XRP_DSP_CMD_INLINE_DATA_SIZE];
  1540. };
  1541. union {
  1542. struct xrp_mapping out_data_mapping;
  1543. u8 out_data[XRP_DSP_CMD_INLINE_DATA_SIZE];
  1544. };
  1545. union {
  1546. struct xrp_mapping dsp_buffer_mapping;
  1547. struct xrp_dsp_buffer buffer_data[XRP_DSP_CMD_INLINE_BUFFER_COUNT];
  1548. };
  1549. u8 nsid[XRP_DSP_CMD_NAMESPACE_ID_SIZE];
  1550. };
  1551. static void xrp_unmap_request_nowb(struct file *filp, struct xrp_request *rq)
  1552. {
  1553. size_t n_buffers = rq->n_buffers;
  1554. size_t i;
  1555. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1556. __xrp_unshare_block(filp, &rq->in_data_mapping, 0);
  1557. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1558. __xrp_unshare_block(filp, &rq->out_data_mapping, 0);
  1559. for (i = 0; i < n_buffers; ++i)
  1560. __xrp_unshare_block(filp, rq->buffer_mapping + i, 0);
  1561. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1562. __xrp_unshare_block(filp, &rq->dsp_buffer_mapping, 0);
  1563. if (n_buffers) {
  1564. kfree(rq->buffer_mapping);
  1565. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1566. kfree(rq->dsp_buffer);
  1567. }
  1568. }
  1569. }
  1570. static long xrp_unmap_request(struct file *filp, struct xrp_request *rq)
  1571. {
  1572. size_t n_buffers = rq->n_buffers;
  1573. size_t i;
  1574. long ret = 0;
  1575. long rc;
  1576. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1577. __xrp_unshare_block(filp, &rq->in_data_mapping, XRP_FLAG_READ);
  1578. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1579. rc = __xrp_unshare_block(filp, &rq->out_data_mapping,
  1580. XRP_FLAG_WRITE);
  1581. if (rc < 0) {
  1582. pr_debug("%s: out_data could not be unshared\n",
  1583. __func__);
  1584. ret = rc;
  1585. }
  1586. } else {
  1587. pr_debug("%s: out_data <%s> to copied\n",
  1588. __func__,rq->out_data);
  1589. if (copy_to_user((void __user *)(unsigned long)rq->ioctl_queue.out_data_addr,
  1590. rq->out_data,
  1591. rq->ioctl_queue.out_data_size)) {
  1592. pr_debug("%s: out_data could not be copied\n",
  1593. __func__);
  1594. ret = -EFAULT;
  1595. }
  1596. }
  1597. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1598. __xrp_unshare_block(filp, &rq->dsp_buffer_mapping,
  1599. XRP_FLAG_READ_WRITE);
  1600. for (i = 0; i < n_buffers; ++i) {
  1601. rc = __xrp_unshare_block(filp, rq->buffer_mapping + i,
  1602. rq->dsp_buffer[i].flags);
  1603. if (rc < 0) {
  1604. pr_debug("%s: buffer %zd could not be unshared\n",
  1605. __func__, i);
  1606. ret = rc;
  1607. }
  1608. }
  1609. if (n_buffers) {
  1610. kfree(rq->buffer_mapping);
  1611. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1612. kfree(rq->dsp_buffer);
  1613. }
  1614. rq->n_buffers = 0;
  1615. }
  1616. return ret;
  1617. }
  1618. static long xrp_map_request(struct file *filp, struct xrp_request *rq,
  1619. struct mm_struct *mm)
  1620. {
  1621. struct xvp_file *xvp_file = filp->private_data;
  1622. struct xvp *xvp = xvp_file->xvp;
  1623. struct xrp_ioctl_buffer __user *buffer;
  1624. size_t n_buffers = rq->ioctl_queue.buffer_size /
  1625. sizeof(struct xrp_ioctl_buffer);
  1626. size_t i;
  1627. long ret = 0;
  1628. if ((rq->ioctl_queue.flags & XRP_QUEUE_FLAG_NSID) &&
  1629. copy_from_user(rq->nsid,
  1630. (void __user *)(unsigned long)rq->ioctl_queue.nsid_addr,
  1631. sizeof(rq->nsid))) {
  1632. pr_debug("%s: nsid could not be copied\n ", __func__);
  1633. return -EINVAL;
  1634. }
  1635. rq->n_buffers = n_buffers;
  1636. if (n_buffers) {
  1637. rq->buffer_mapping =
  1638. kzalloc(n_buffers * sizeof(*rq->buffer_mapping),
  1639. GFP_KERNEL);
  1640. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1641. rq->dsp_buffer =
  1642. kmalloc(n_buffers * sizeof(*rq->dsp_buffer),
  1643. GFP_KERNEL);
  1644. if (!rq->dsp_buffer) {
  1645. kfree(rq->buffer_mapping);
  1646. return -ENOMEM;
  1647. }
  1648. } else {
  1649. rq->dsp_buffer = rq->buffer_data;
  1650. }
  1651. }
  1652. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1653. down_read(&mm->mmap_sem);
  1654. #else
  1655. down_read(&mm->mmap_lock);
  1656. #endif
  1657. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1658. ret = __xrp_share_block(filp, rq->ioctl_queue.in_data_addr,
  1659. rq->ioctl_queue.in_data_size,
  1660. XRP_FLAG_READ, &rq->in_data_phys,
  1661. &rq->in_data_mapping);
  1662. if(ret < 0) {
  1663. pr_debug("%s: in_data could not be shared\n",
  1664. __func__);
  1665. goto share_err;
  1666. }
  1667. } else {
  1668. if (copy_from_user(rq->in_data,
  1669. (void __user *)(unsigned long)rq->ioctl_queue.in_data_addr,
  1670. rq->ioctl_queue.in_data_size)) {
  1671. pr_debug("%s: in_data could not be copied\n",
  1672. __func__);
  1673. ret = -EFAULT;
  1674. goto share_err;
  1675. }
  1676. }
  1677. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1678. ret = __xrp_share_block(filp, rq->ioctl_queue.out_data_addr,
  1679. rq->ioctl_queue.out_data_size,
  1680. XRP_FLAG_WRITE, &rq->out_data_phys,
  1681. &rq->out_data_mapping);
  1682. if (ret < 0) {
  1683. pr_debug("%s: out_data could not be shared\n",
  1684. __func__);
  1685. goto share_err;
  1686. }
  1687. }
  1688. buffer = (void __user *)(unsigned long)rq->ioctl_queue.buffer_addr;
  1689. for (i = 0; i < n_buffers; ++i) {
  1690. struct xrp_ioctl_buffer ioctl_buffer;
  1691. phys_addr_t buffer_phys = ~0ul;
  1692. if (copy_from_user(&ioctl_buffer, buffer + i,
  1693. sizeof(ioctl_buffer))) {
  1694. ret = -EFAULT;
  1695. goto share_err;
  1696. }
  1697. if (ioctl_buffer.flags & XRP_FLAG_READ_WRITE) {
  1698. ret = __xrp_share_block(filp, ioctl_buffer.addr,
  1699. ioctl_buffer.size,
  1700. ioctl_buffer.flags,
  1701. &buffer_phys,
  1702. rq->buffer_mapping + i);
  1703. if (ret < 0) {
  1704. pr_debug("%s: buffer %zd could not be shared\n",
  1705. __func__, i);
  1706. goto share_err;
  1707. }
  1708. }
  1709. rq->dsp_buffer[i] = (struct xrp_dsp_buffer){
  1710. .flags = ioctl_buffer.flags,
  1711. .size = ioctl_buffer.size,
  1712. .addr = xrp_translate_to_dsp(&xvp->address_map,
  1713. buffer_phys),
  1714. };
  1715. }
  1716. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1717. ret = xrp_share_kernel(filp, (unsigned long)rq->dsp_buffer,
  1718. n_buffers * sizeof(*rq->dsp_buffer),
  1719. XRP_FLAG_READ_WRITE, &rq->dsp_buffer_phys,
  1720. &rq->dsp_buffer_mapping);
  1721. if(ret < 0) {
  1722. pr_debug("%s: buffer descriptors could not be shared\n",
  1723. __func__);
  1724. goto share_err;
  1725. }
  1726. }
  1727. share_err:
  1728. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1729. up_read(&mm->mmap_sem);
  1730. #else
  1731. up_read(&mm->mmap_lock);
  1732. #endif
  1733. if (ret < 0)
  1734. xrp_unmap_request_nowb(filp, rq);
  1735. return ret;
  1736. }
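/*
 * Copy the prepared request into the shared command area: sizes first, then
 * either the inline payloads or the DSP-translated addresses of the
 * out-of-line buffers, the optional namespace id, and finally (after wmb())
 * the flags word with XRP_DSP_CMD_FLAG_REQUEST_VALID, so the DSP never
 * observes a partially written descriptor.
 */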
  1737. static void xrp_fill_hw_request(struct xrp_dsp_cmd __iomem *cmd,
  1738. struct xrp_request *rq,
  1739. const struct xrp_address_map *map)
  1740. {
  1741. xrp_comm_write32(&cmd->in_data_size, rq->ioctl_queue.in_data_size);
  1742. xrp_comm_write32(&cmd->out_data_size, rq->ioctl_queue.out_data_size);
  1743. xrp_comm_write32(&cmd->buffer_size,
  1744. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1745. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1746. xrp_comm_write32(&cmd->in_data_addr,
  1747. xrp_translate_to_dsp(map, rq->in_data_phys));
  1748. else
  1749. xrp_comm_write(&cmd->in_data, rq->in_data,
  1750. rq->ioctl_queue.in_data_size);
  1751. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1752. xrp_comm_write32(&cmd->out_data_addr,
  1753. xrp_translate_to_dsp(map, rq->out_data_phys));
  1754. if (rq->n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1755. xrp_comm_write32(&cmd->buffer_addr,
  1756. xrp_translate_to_dsp(map, rq->dsp_buffer_phys));
  1757. else
  1758. xrp_comm_write(&cmd->buffer_data, rq->dsp_buffer,
  1759. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1760. if (rq->ioctl_queue.flags & XRP_QUEUE_FLAG_NSID)
  1761. xrp_comm_write(&cmd->nsid, rq->nsid, sizeof(rq->nsid));
  1762. #ifdef DEBUG
  1763. {
  1764. struct xrp_dsp_cmd dsp_cmd;
  1765. xrp_comm_read(cmd, &dsp_cmd, sizeof(dsp_cmd));
  1766. pr_debug("%s: cmd for DSP: %p: %*ph\n",
  1767. __func__, cmd,
  1768. (int)sizeof(dsp_cmd), &dsp_cmd);
  1769. }
  1770. #endif
  1771. wmb();
  1772. /* update flags */
  1773. xrp_comm_write32(&cmd->flags,
  1774. (rq->ioctl_queue.flags & ~XRP_DSP_CMD_FLAG_RESPONSE_VALID) |
  1775. XRP_DSP_CMD_FLAG_REQUEST_VALID);
  1776. }
  1777. static long xrp_complete_hw_request(struct xrp_dsp_cmd __iomem *cmd,
  1778. struct xrp_request *rq)
  1779. {
  1780. u32 flags = xrp_comm_read32(&cmd->flags);
  1781. if (rq->ioctl_queue.out_data_size <= XRP_DSP_CMD_INLINE_DATA_SIZE)
  1782. xrp_comm_read(&cmd->out_data, rq->out_data,
  1783. rq->ioctl_queue.out_data_size);
  1784. if (rq->n_buffers <= XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1785. xrp_comm_read(&cmd->buffer_data, rq->dsp_buffer,
  1786. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1787. xrp_comm_write32(&cmd->flags, 0);
  1788. return (flags & XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL) ? -ENXIO : 0;
  1789. }
  1790. static long xrp_ioctl_submit_sync(struct file *filp,
  1791. struct xrp_ioctl_queue __user *p)
  1792. {
  1793. struct xvp_file *xvp_file = filp->private_data;
  1794. struct xvp *xvp = xvp_file->xvp;
  1795. struct xrp_comm *queue = xvp->queue;
  1796. struct xrp_request xrp_rq, *rq = &xrp_rq;
  1797. long ret = 0;
  1798. bool went_off = false;
  1799. if (copy_from_user(&rq->ioctl_queue, p, sizeof(*p)))
  1800. return -EFAULT;
  1801. if (rq->ioctl_queue.flags & ~XRP_QUEUE_VALID_FLAGS) {
  1802. dev_dbg(xvp->dev, "%s: invalid flags 0x%08x\n",
  1803. __func__, rq->ioctl_queue.flags);
  1804. return -EINVAL;
  1805. }
  1806. if (xvp->n_queues > 1) {
  1807. unsigned n = (rq->ioctl_queue.flags & XRP_QUEUE_FLAG_PRIO) >>
  1808. XRP_QUEUE_FLAG_PRIO_SHIFT;
  1809. if (n >= xvp->n_queues)
  1810. n = xvp->n_queues - 1;
  1811. queue = xvp->queue_ordered[n];
  1812. dev_dbg(xvp->dev, "%s: priority: %d -> %d\n",
  1813. __func__, n, queue->priority);
  1814. }
  1815. ret = xrp_map_request(filp, rq, current->mm);
  1816. if (ret < 0)
  1817. return ret;
  1818. if (loopback < LOOPBACK_NOIO) {
  1819. int reboot_cycle;
  1820. retry:
  1821. mutex_lock(&queue->lock);
  1822. reboot_cycle = atomic_read(&xvp->reboot_cycle);
  1823. if (reboot_cycle != atomic_read(&xvp->reboot_cycle_complete)) {
  1824. mutex_unlock(&queue->lock);
  1825. goto retry;
  1826. }
  1827. if (xvp->off) {
  1828. ret = -ENODEV;
  1829. } else {
  1830. xrp_fill_hw_request(queue->comm, rq, &xvp->address_map);
  1831. xrp_send_device_irq(xvp);
  1832. if (xvp->host_irq_mode) {
  1833. ret = xvp_complete_cmd_irq(xvp, queue,
  1834. xrp_cmd_complete);
  1835. } else {
  1836. ret = xvp_complete_cmd_poll(xvp, queue,
  1837. xrp_cmd_complete);
  1838. }
  1839. xrp_panic_check(xvp);
  1840. /* copy back inline data */
  1841. if (ret == 0) {
  1842. ret = xrp_complete_hw_request(queue->comm, rq);
  1843. } else if (ret == -EBUSY && firmware_reboot &&
  1844. atomic_inc_return(&xvp->reboot_cycle) ==
  1845. reboot_cycle + 1) {
  1846. int rc;
  1847. unsigned i;
  1848. dev_dbg(xvp->dev,
  1849. "%s: restarting firmware...\n",
  1850. __func__);
  1851. for (i = 0; i < xvp->n_queues; ++i)
  1852. if (xvp->queue + i != queue)
  1853. mutex_lock(&xvp->queue[i].lock);
  1854. rc = xrp_boot_firmware(xvp);
  1855. atomic_set(&xvp->reboot_cycle_complete,
  1856. atomic_read(&xvp->reboot_cycle));
  1857. for (i = 0; i < xvp->n_queues; ++i)
  1858. if (xvp->queue + i != queue)
  1859. mutex_unlock(&xvp->queue[i].lock);
  1860. if (rc < 0) {
  1861. ret = rc;
  1862. went_off = xvp->off;
  1863. }
  1864. }
  1865. }
  1866. mutex_unlock(&queue->lock);
  1867. }
  1868. if (ret == 0)
  1869. ret = xrp_unmap_request(filp, rq);
  1870. else if (!went_off)
  1871. xrp_unmap_request_nowb(filp, rq);
  1872. /*
  1873. * Otherwise (if the DSP went off) all mapped buffers are leaked here.
  1874. * There seems to be no way to recover them as we don't know what's
  1875. * going on with the DSP; the DSP may still be reading and writing
  1876. * this memory.
  1877. */
  1878. return ret;
  1879. }
  1880. // static void xrp_dam_buf_free(struct xrp_allocation *xrp_allocation)
  1881. // {
  1882. // dev_dbg(xvp->dev,"%s: release dma_buf allocation n",
  1883. // __func__);
  1884. // kfree(xrp_allocation->pool);
  1885. // kfree(xrp_allocation);
  1886. // return
  1887. // }
  1888. // static void xrp_dam_buf_offset(struct xrp_allocation *xrp_allocation)
  1889. // {
  1890. // return 0;
  1891. // }
  1892. // static const struct xrp_allocation_ops xrp_dma_buf_pool_ops = {
  1893. // .alloc = NULL,
  1894. // .free = xrp_dam_buf_free,
  1895. // .free_pool = NULL,
  1896. // .offset = xrp_dam_buf_offset,
  1897. // };
  1898. // static inline struct xrp_dma_buf_item * xrp_get_dma_buf_tail(struct xrp_dma_buf_item **list)
  1899. // {
  1900. // struct xrp_dma_buf_item ** item;
  1901. // if(*list == NULLL)
  1902. // return NULL;
  1903. // for(item = list;(*item)->next != NULL;item= &(*item)->next)
  1904. // {
  1905. // ;
  1906. // }
  1907. // return *item;
  1908. // }
  1909. // static inline void xrp_dam_buf_add_item(struct xrp_dma_buf_item **list,struct xrp_dma_buf_item *entry)
  1910. // {
  1911. // struct xrp_dma_buf_item * item = xrp_get_dma_buf_tail(list);
  1912. // if(item == NULL)
  1913. // {
  1914. // *list=entry;
  1915. // }
  1916. // else{
  1917. // item->next = entry;
  1918. // }
  1919. // }
  1920. // static inline int xrp_get_dma_buf_remove(struct xrp_dma_buf_item **list,struct xrp_dma_buf_item *entry)
  1921. // {
  1922. // {
  1923. // struct xrp_dma_buf_item ** item;
  1924. // for(item = list;(*item)->next != NULL;item= &(*item)->next)
  1925. // {
  1926. // struct xrp_dma_buf_item *cur = *item;
  1927. // if();
  1928. // }
  1929. // }
  1930. static void xrp_release_dma_buf_item(struct xrp_dma_buf_item * item)
  1931. {
  1932. spin_lock(&xrp_dma_buf_lock);
  1933. if(--item->ref==0)
  1934. {
  1935. list_del(&item->link);
  1936. kfree(item);
  1937. }
  1938. spin_unlock(&xrp_dma_buf_lock);
  1939. }
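/*
 * Import a dma-buf by fd: attach and map it once per device and cache the
 * attachment in xvp->dma_buf_list with a reference count so repeated imports
 * of the same dma-buf reuse it. Only single-entry sg tables (contiguous
 * buffers) are accepted; the buffer is then mmapped into the caller through
 * the exporter's file and its physical address, user address and size are
 * returned.
 */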
  1940. static long xrp_ioctl_dma_buf_import(struct file *filp,
  1941. struct xrp_dma_buf __user *p)
  1942. {
  1943. long ret;
  1944. struct xvp_file *xvp_file = filp->private_data;
  1945. struct xvp *xvp = xvp_file->xvp;
  1946. struct xrp_dma_buf xrp_dma_buf;
  1947. struct dma_buf *dmabuf = NULL;
  1948. struct sg_table *sgt = NULL;
  1949. struct xrp_dma_buf_item *dma_buf_item=NULL;
  1950. struct xrp_dma_buf_item *temp=NULL;
  1951. struct dma_buf_attachment *attachment = NULL;
  1952. // struct xrp_allocation *xrp_allocation;
  1953. // struct xrp_private_pool *pool;
  1954. int npages = 0;
  1955. int i;
  1956. struct scatterlist *s;
  1957. unsigned int size = 0;
  1958. dev_dbg(xvp->dev,"%s: entry\n", __func__);
  1959. if (copy_from_user(&xrp_dma_buf, p, sizeof(*p)))
  1960. {
  1961. return -EFAULT;
  1962. }
  1963. dmabuf = dma_buf_get(xrp_dma_buf.fd);
  1964. if(!dmabuf)
  1965. {
  1966. return -EFAULT;
  1967. }
  1968. spin_lock(&xrp_dma_buf_lock);
  1969. list_for_each_entry(temp,&xvp->dma_buf_list, link)
  1970. {
  1971. if(temp->dmabuf == dmabuf)
  1972. {
  1973. dma_buf_item = temp;
  1974. dma_buf_item->ref++;
  1975. break;
  1976. }
  1977. }
  1978. spin_unlock(&xrp_dma_buf_lock);
  1979. if(dma_buf_item == NULL)
  1980. {
1981. dev_dbg(xvp->dev,
1982. "%s: no existing dma-buf entry, creating a new attachment\n", __func__);
  1983. attachment = dma_buf_attach(dmabuf, xvp->dev);
  1984. if (!attachment)
  1985. {
  1986. goto One_Err;
  1987. }
  1988. sgt = dma_buf_map_attachment(attachment, xrp_dma_direction(xrp_dma_buf.flags));
  1989. if (!sgt)
  1990. {
  1991. goto One_Err;
  1992. }
  1993. dma_buf_item = kzalloc(sizeof(*dma_buf_item),GFP_KERNEL);
  1994. if(dma_buf_item == NULL)
  1995. {
  1996. goto One_Err;
  1997. }
  1998. dma_buf_item->attachment = attachment;
  1999. dma_buf_item->dmabuf = dmabuf;
  2000. dma_buf_item->sgt = sgt;
  2001. dma_buf_item->ref = 1;
  2002. spin_lock(&xrp_dma_buf_lock);
  2003. list_add_tail(&dma_buf_item->link, &xvp->dma_buf_list);
  2004. spin_unlock(&xrp_dma_buf_lock);
  2005. }
2006. else
2007. {
2008. dev_dbg(xvp->dev,
2009. "%s: found existing dma-buf, reusing attachment\n", __func__);
2010. attachment = dma_buf_item->attachment;
2011. sgt = dma_buf_item->sgt;
/*
 * The reference was already taken under xrp_dma_buf_lock in the lookup
 * loop above; incrementing it again here would leak the entry.
 */
2015. }
  2016. if(sgt->nents != 1)
  2017. {
2018. dev_dbg(xvp->dev,
2019. "%s: sg table has %d entries, only 1 is supported\n",
2020. __func__, sgt->nents);
  2021. goto Two_Err;
  2022. }
  2023. /* Prepare page array. */
  2024. /* Get number of pages. */
  2025. for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
  2026. {
  2027. npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE;
  2028. size += sg_dma_len(s);
  2029. }
  2030. xrp_dma_buf.size = size;
  2031. #ifdef VIDMEM_DMA_MAP
2032. xrp_dma_buf.paddr = sg_dma_address(sgt->sgl); /* single-entry sg table, checked above */
  2033. #else
  2034. // xrp_dma_buf.paddr = page_to_phys(nth_page(sg_page(s), 0));
  2035. xrp_dma_buf.paddr = sg_phys(sgt->sgl);
  2036. #endif
  2037. // dev_dbg(xvp->dev,
  2038. // "%s: import dma-buf phy addr:0x%lx,size:%d\n",
  2039. // __func__,xrp_dma_buf.paddr,xrp_dma_buf.size);
  2040. // xrp_allocation = kzalloc(sizeof(*xrp_allocation), GFP_KERNEL | __GFP_NORETRY);
  2041. // if(!xrp_allocation)
  2042. // {
  2043. // return -ENOMEM;
  2044. // }
  2045. // pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  2046. // if(!pool)
  2047. // {
  2048. // kfree(xrp_allocation);
  2049. // return -ENOMEM;
  2050. // }
  2051. // *pool = (struct xrp_private_pool){
  2052. // .pool = {
  2053. // .ops = &xrp_dma_buf_pool_ops,
  2054. // },
  2055. // .start = xrp_dma_buf.paddr ,
  2056. // .size = xrp_dma_buf.size,
  2057. // .free_list = NULL,
  2058. // };
  2059. // xrp_allocation->pool = pool;
  2060. // xrp_allocation->start = xrp_dma_buf.paddr;
  2061. // xrp_allocation->size = xrp_dma_buf.size;
  2062. // xrp_allocation_queue(xvp_file, xrp_allocation);
  2063. // xrp_dma_buf.addr = vm_mmap(filp, 0, xrp_allocation->size,
  2064. // PROT_READ | PROT_WRITE, MAP_SHARED,
  2065. // xrp_dam_buf_offset(xrp_allocation));
  2066. struct file *export_filp = fget(xrp_dma_buf.fd);
  2067. xrp_dma_buf.addr = vm_mmap(export_filp, 0, xrp_dma_buf.size,
  2068. PROT_READ | PROT_WRITE, MAP_SHARED,0);
  2069. fput(export_filp);
  2070. dev_dbg(xvp->dev,
  2071. "%s: import dma-buf phy addr:0x%lx,user addr:0x%lx,size:%d\n",
  2072. __func__,xrp_dma_buf.paddr,xrp_dma_buf.addr,xrp_dma_buf.size);
  2073. if (copy_to_user(p, &xrp_dma_buf, sizeof(*p))) {
  2074. dma_buf_put(dmabuf);
  2075. vm_munmap(xrp_dma_buf.addr , xrp_dma_buf.size);
  2076. goto Two_Err;
  2077. }
  2078. return 0;
  2079. Two_Err:
  2080. xrp_release_dma_buf_item(dma_buf_item);
  2081. One_Err:
  2082. dma_buf_put(dmabuf);
  2083. return -EINVAL;
  2084. }
  2085. static struct xrp_dma_buf_item * xrp_search_dma_buf( struct list_head *list,int fd)
  2086. {
  2087. struct xrp_dma_buf_item *loop;
  2088. struct xrp_dma_buf_item *dma_buf_item=NULL;
  2089. struct dma_buf *dmabuf = NULL;
  2090. // pr_debug("%s: fd %d,entry\n", __func__,fd);
  2091. dmabuf = dma_buf_get(fd);
  2092. spin_lock(&xrp_dma_buf_lock);
  2093. list_for_each_entry(loop,list, link)
  2094. {
  2095. if(loop->dmabuf == dmabuf)
  2096. {
  2097. dma_buf_item = loop;
  2098. break;
  2099. }
  2100. }
  2101. spin_unlock(&xrp_dma_buf_lock);
  2102. dma_buf_put(dmabuf);
  2103. pr_debug("%s: %p exit\n", __func__,fd,dma_buf_item);
  2104. return dma_buf_item;
  2105. }
  2106. static long xrp_ioctl_dma_buf_release(struct file *filp,
  2107. struct xrp_dma_buf __user *p)
  2108. {
  2109. int fd;
  2110. struct xvp_file *xvp_file = filp->private_data;
  2111. struct xvp *xvp = xvp_file->xvp;
  2112. struct dma_buf *dmabuf = NULL;
  2113. struct xrp_dma_buf user_param;
  2114. struct xrp_dma_buf_item *dma_buf_item=NULL;
  2115. struct xrp_dma_buf_item *loop,*temp;
  2116. if (copy_from_user(&user_param, p, sizeof(*p)))
  2117. {
  2118. return -EFAULT;
  2119. }
  2120. fd = user_param.fd;
  2121. // dmabuf = dma_buf_get(fd);
  2122. // spin_lock(&xrp_dma_buf_lock);
  2123. // list_for_each_entry_safe(loop, temp, &xvp->dma_buf_list, link)
  2124. // {
  2125. // if(loop->dmabuf == dmabuf)
  2126. // {
  2127. // dma_buf_item = loop;
  2128. // if((--dma_buf_item->ref)==0)
  2129. // list_del(&dma_buf_item);
  2130. // break;
  2131. // }
  2132. // }
  2133. // spin_unlock(&xrp_dma_buf_lock);
  2134. // dma_buf_put(dmabuf);
  2135. dma_buf_item = xrp_search_dma_buf(&xvp->dma_buf_list,fd);
  2136. if(dma_buf_item == NULL)
  2137. {
  2138. return -EFAULT;
  2139. }
  2140. vm_munmap(user_param.addr , user_param.size);
  2141. dma_buf_unmap_attachment(dma_buf_item->attachment, dma_buf_item->sgt, DMA_BIDIRECTIONAL);
  2142. dma_buf_detach(dma_buf_item->dmabuf, dma_buf_item->attachment);
  2143. dma_buf_put(dma_buf_item->dmabuf);
  2144. xrp_release_dma_buf_item(dma_buf_item);
  2145. return 0;
  2146. }
  2147. static long xrp_ioctl_dma_buf_sync(struct file *filp,
  2148. struct xrp_dma_buf __user *p)
  2149. {
  2150. struct xvp_file *xvp_file = filp->private_data;
  2151. struct xvp *xvp = xvp_file->xvp;
  2152. struct xrp_dma_buf xrp_dma_buf;
  2153. struct xrp_dma_buf_item *dma_buf_item=NULL;
  2154. if (copy_from_user(&xrp_dma_buf, p, sizeof(*p)))
  2155. {
  2156. return -EFAULT;
  2157. }
  2158. dma_buf_item = xrp_search_dma_buf(&xvp->dma_buf_list,xrp_dma_buf.fd);
  2159. if(dma_buf_item == NULL)
  2160. {
  2161. return -EFAULT;
  2162. }
  2163. switch(xrp_dma_buf.flags)
  2164. {
  2165. case XRP_FLAG_READ:
  2166. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2167. xrp_dma_direction(xrp_dma_buf.flags));
  2168. break;
  2169. case XRP_FLAG_WRITE:
  2170. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2171. xrp_dma_direction(xrp_dma_buf.flags));
  2172. break;
  2173. case XRP_FLAG_READ_WRITE:
  2174. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2175. xrp_dma_direction(xrp_dma_buf.flags));
  2176. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr),xrp_dma_buf.size,
  2177. xrp_dma_direction(xrp_dma_buf.flags));
  2178. break;
  2179. default:
2180. dev_dbg(xvp->dev, "%s: invalid sync flags 0x%x\n", __func__, xrp_dma_buf.flags);
  2181. return -EFAULT;
  2182. }
  2183. return 0;
  2184. }
  2185. static long xvp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  2186. {
  2187. long retval;
  2188. pr_debug("%s: %x\n", __func__, cmd);
  2189. switch(cmd){
  2190. case XRP_IOCTL_ALLOC:
  2191. retval = xrp_ioctl_alloc(filp,
  2192. (struct xrp_ioctl_alloc __user *)arg);
  2193. break;
  2194. case XRP_IOCTL_FREE:
  2195. retval = xrp_ioctl_free(filp,
  2196. (struct xrp_ioctl_alloc __user *)arg);
  2197. break;
  2198. case XRP_IOCTL_QUEUE:
  2199. case XRP_IOCTL_QUEUE_NS:
  2200. retval = xrp_ioctl_submit_sync(filp,
  2201. (struct xrp_ioctl_queue __user *)arg);
  2202. break;
  2203. case XRP_IOCTL_REPORT_CREATE:
  2204. retval = xrp_ioctl_alloc_report(filp,
  2205. (struct xrp_ioctl_alloc __user *)arg);
  2206. break;
  2207. case XRP_IOCTL_REPORT_RELEASE:
  2208. retval = xrp_ioctl_release_report(filp,
  2209. (struct xrp_ioctl_alloc __user *)arg);
  2210. break;
  2211. case XRP_IOCTL_DMABUF_IMPORT:
  2212. retval = xrp_ioctl_dma_buf_import(filp,
  2213. (struct xrp_dma_buf __user *)arg);
  2214. break;
  2215. case XRP_IOCTL_DMABUF_RELEASE:
  2216. retval = xrp_ioctl_dma_buf_release(filp,
  2217. ( struct xrp_dma_buf __user *)arg);
  2218. break;
  2219. case XRP_IOCTL_DMABUF_SYNC:
  2220. retval = xrp_ioctl_dma_buf_sync(filp,
  2221. (struct xrp_dma_buf __user *)arg);
  2222. break;
  2223. case XRP_IOCTL_POP_NEW_REPORT:
  2224. retval = xrp_pop_report(filp,
  2225. (struct xrp_report_buffer __user *)arg);
  2226. break;
  2227. default:
  2228. retval = -EINVAL;
  2229. break;
  2230. }
  2231. return retval;
  2232. }
  2233. static void xvp_vm_open(struct vm_area_struct *vma)
  2234. {
  2235. // pr_debug("%s\n", __func__);
  2236. xrp_allocation_get(vma->vm_private_data);
  2237. }
  2238. static void xvp_vm_close(struct vm_area_struct *vma)
  2239. {
  2240. // pr_debug("%s\n", __func__);
  2241. xrp_allocation_put(vma->vm_private_data);
  2242. }
  2243. static const struct vm_operations_struct xvp_vm_ops = {
  2244. .open = xvp_vm_open,
  2245. .close = xvp_vm_close,
  2246. };
  2247. static int xvp_mmap(struct file *filp, struct vm_area_struct *vma)
  2248. {
  2249. int err;
  2250. struct xvp_file *xvp_file = filp->private_data;
  2251. unsigned long pfn = vma->vm_pgoff;// + PFN_DOWN(xvp_file->xvp->pmem);
  2252. struct xrp_allocation *xrp_allocation;
  2253. xrp_allocation = xrp_allocation_dequeue(filp->private_data,
  2254. pfn << PAGE_SHIFT,
  2255. vma->vm_end - vma->vm_start);
  2256. if (xrp_allocation) {
  2257. struct xvp *xvp = xvp_file->xvp;
  2258. pgprot_t prot = vma->vm_page_prot;
  2259. if (!xrp_cacheable(xvp, pfn,
  2260. PFN_DOWN(vma->vm_end - vma->vm_start))) {
  2261. prot = pgprot_writecombine(prot);
  2262. // prot = pgprot_noncached(prot);
  2263. vma->vm_page_prot = prot;
2264. dev_dbg(xvp->dev, "%s: write-combine cache attribute set\n", __func__);
  2265. }
  2266. err = remap_pfn_range(vma, vma->vm_start, pfn,
  2267. vma->vm_end - vma->vm_start,
  2268. prot);
  2269. vma->vm_private_data = xrp_allocation;
  2270. vma->vm_ops = &xvp_vm_ops;
  2271. } else {
  2272. pr_err("%s no valid xrp allocate for %lx:\n", __func__,pfn);
  2273. err = -EINVAL;
  2274. }
  2275. return err;
  2276. }
  2277. static int xvp_open(struct inode *inode, struct file *filp)
  2278. {
  2279. struct xvp *xvp = container_of(filp->private_data,
  2280. struct xvp, miscdev);
  2281. struct xvp_file *xvp_file;
  2282. int rc;
  2283. dev_dbg(xvp->dev,"%s\n", __func__);
  2284. rc = pm_runtime_get_sync(xvp->dev);
  2285. if (rc < 0)
  2286. {
  2287. dev_err(xvp->dev,"%s:pm_runtime_get_sync fail:%d\n", __func__,rc);
  2288. return rc;
  2289. }
  2290. xvp_file = devm_kzalloc(xvp->dev, sizeof(*xvp_file), GFP_KERNEL);
  2291. if (!xvp_file) {
  2292. dev_err(xvp->dev,"%s:malloc fail\n", __func__);
  2293. pm_runtime_put_sync(xvp->dev);
  2294. return -ENOMEM;
  2295. }
  2296. xvp_file->xvp = xvp;
  2297. spin_lock_init(&xvp_file->busy_list_lock);
  2298. filp->private_data = xvp_file;
  2299. xrp_add_known_file(filp);
  2300. return 0;
  2301. }
  2302. static int xvp_close(struct inode *inode, struct file *filp)
  2303. {
  2304. struct xvp_file *xvp_file = filp->private_data;
  2305. pr_debug("%s\n", __func__);
  2306. xrp_report_fasync_release(filp);
  2307. xrp_remove_known_file(filp);
  2308. pm_runtime_put_sync(xvp_file->xvp->dev);
  2309. devm_kfree(xvp_file->xvp->dev, xvp_file);
  2310. return 0;
  2311. }
  2312. static inline int xvp_enable_dsp(struct xvp *xvp)
  2313. {
  2314. if (loopback < LOOPBACK_NOMMIO &&
  2315. xvp->hw_ops->enable)
  2316. return xvp->hw_ops->enable(xvp->hw_arg);
  2317. else
  2318. return 0;
  2319. }
  2320. static inline void xvp_disable_dsp(struct xvp *xvp)
  2321. {
  2322. if (loopback < LOOPBACK_NOMMIO &&
  2323. xvp->hw_ops->disable)
  2324. xvp->hw_ops->disable(xvp->hw_arg);
  2325. }
  2326. static inline void xvp_remove_proc(struct xvp *xvp)
  2327. {
  2328. if( xvp->proc_dir)
  2329. {
  2330. if(xvp->panic_log)
  2331. {
  2332. xrp_remove_panic_log_proc(xvp->panic_log);
  2333. xvp->panic_log =NULL;
  2334. }
  2335. // remove_proc_entry(xvp->proc_dir,NULL);
  2336. proc_remove(xvp->proc_dir);
  2337. }
  2338. }
  2339. static inline void xrp_set_resetVec(struct xvp *xvp,u32 addr)
  2340. {
  2341. if (loopback < LOOPBACK_NOMMIO &&
  2342. xvp->hw_ops->set_reset_vector)
  2343. xvp->hw_ops->set_reset_vector(xvp->hw_arg,addr);
  2344. }
  2345. static inline void xrp_reset_dsp(struct xvp *xvp)
  2346. {
  2347. if (loopback < LOOPBACK_NOMMIO &&
  2348. xvp->hw_ops->reset)
  2349. xvp->hw_ops->reset(xvp->hw_arg);
  2350. }
  2351. static inline void xrp_halt_dsp(struct xvp *xvp)
  2352. {
  2353. if (loopback < LOOPBACK_NOMMIO &&
  2354. xvp->hw_ops->halt)
  2355. xvp->hw_ops->halt(xvp->hw_arg);
  2356. }
  2357. static inline void xrp_release_dsp(struct xvp *xvp)
  2358. {
  2359. if (loopback < LOOPBACK_NOMMIO &&
  2360. xvp->hw_ops->release)
  2361. xvp->hw_ops->release(xvp->hw_arg);
  2362. }
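/*
 * Boot sequence: in LOAD_MODE_AUTO halt the core, load the firmware, program
 * its entry point as the reset vector, then reset and release the core. In
 * either mode, finish with the synchronization handshake over the shared
 * communication area (unless loopback disables I/O); on failure the device
 * is marked off.
 */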
  2363. static int xrp_boot_firmware(struct xvp *xvp)
  2364. {
  2365. int ret;
  2366. u32 fm_entry_point=0;
  2367. struct xrp_dsp_sync_v1 __iomem *shared_sync = xvp->comm;
  2368. // dev_dbg(xvp->dev,"%s",__func__);
  2369. //#if 1 //LOAD_MODE_MANUAL load release dsp by xplorer
  2370. if(load_mode == LOAD_MODE_AUTO)
  2371. {
  2372. xrp_halt_dsp(xvp);
  2373. //xrp_reset_dsp(xvp);
  2374. if (xvp->firmware_name) {
  2375. if (loopback < LOOPBACK_NOFIRMWARE) {
  2376. ret = xrp_request_firmware(xvp,&fm_entry_point);
  2377. if (ret < 0)
  2378. return ret;
  2379. }
  2380. if (loopback < LOOPBACK_NOIO) {
  2381. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_IDLE);
  2382. mb();
  2383. }
  2384. // fm_entry_point = xrp_get_firmware_entry_addr(xvp);
  2385. dev_dbg(xvp->dev,"%s,firmware entry point :%x\n",__func__,fm_entry_point);
  2386. if(fm_entry_point)
  2387. {
  2388. xrp_set_resetVec(xvp,fm_entry_point);
  2389. }
  2390. }
  2391. xrp_reset_dsp(xvp);
  2392. xrp_release_dsp(xvp);
  2393. }
  2394. //#endif
  2395. if (loopback < LOOPBACK_NOIO) {
  2396. ret = xrp_synchronize(xvp);
  2397. if (ret < 0) {
  2398. xrp_halt_dsp(xvp);
  2399. dev_err(xvp->dev,
  2400. "%s: couldn't synchronize with the DSP core\n",
  2401. __func__);
  2402. dev_err(xvp->dev,
  2403. "XRP device will not use the DSP until the driver is rebound to this device\n");
  2404. xvp->off = true;
  2405. return ret;
  2406. }
  2407. }
  2408. return 0;
  2409. }
  2410. static const struct file_operations xvp_fops = {
  2411. .owner = THIS_MODULE,
  2412. .llseek = no_llseek,
  2413. .unlocked_ioctl = xvp_ioctl,
  2414. #ifdef CONFIG_COMPAT
  2415. .compat_ioctl = xvp_ioctl,
  2416. #endif
  2417. .mmap = xvp_mmap,
  2418. .open = xvp_open,
  2419. .fasync = xrp_report_fasync,
  2420. .release = xvp_close,
  2421. };
  2422. int xrp_runtime_suspend(struct device *dev)
  2423. {
  2424. struct xvp *xvp = dev_get_drvdata(dev);
  2425. xrp_halt_dsp(xvp);
  2426. /*****WR to make sure DSP is in idle*****/
  2427. udelay(1000);
  2428. xrp_reset_dsp(xvp);
  2429. xvp_disable_dsp(xvp);
  2430. // release_firmware(xvp->firmware);
  2431. return 0;
  2432. }
  2433. EXPORT_SYMBOL(xrp_runtime_suspend);
  2434. int xrp_runtime_resume(struct device *dev)
  2435. {
  2436. struct xvp *xvp = dev_get_drvdata(dev);
  2437. unsigned i;
  2438. int ret = 0;
  2439. for (i = 0; i < xvp->n_queues; ++i)
  2440. mutex_lock(&xvp->queue[i].lock);
  2441. if (xvp->off)
  2442. goto out;
  2443. ret = xvp_enable_dsp(xvp);
  2444. if (ret < 0) {
  2445. dev_err(xvp->dev, "couldn't enable DSP\n");
  2446. goto out;
  2447. }
  2448. ret = xrp_boot_firmware(xvp);
  2449. if (ret < 0)
  2450. xvp_disable_dsp(xvp);
  2451. out:
  2452. for (i = 0; i < xvp->n_queues; ++i)
  2453. mutex_unlock(&xvp->queue[i].lock);
  2454. return ret;
  2455. }
  2456. EXPORT_SYMBOL(xrp_runtime_resume);
  2457. static int xrp_init_regs_v0(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2458. {
  2459. struct resource res;
  2460. struct device_node *np;
  2461. int ret = 0;
  2462. np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
  2463. if (!np) {
  2464. dev_err(&pdev->dev, "No memory-region specified\n");
  2465. return -EINVAL;
  2466. }
  2467. ret = of_address_to_resource(np, 0, &res);
2468. dev_dbg(xvp->dev, "%s: dsp running addr 0x%llx, size: 0x%x\n", __func__,
2469. res.start, resource_size(&res));
  2470. ret = of_address_to_resource(np, 1, &res);
  2471. if (ret)
  2472. {
  2473. dev_dbg(xvp->dev,"%s:get comm region fail\n", __func__);
  2474. return -ENODEV;
  2475. }
  2476. xvp->comm_phys = res.start;
  2477. xvp->comm = devm_ioremap_resource(&pdev->dev, &res);
  2478. dev_dbg(xvp->dev,"%s:xvp->comm =0x%p, phy_addr base=0x%llx\n", __func__,
  2479. xvp->comm, xvp->comm_phys);
  2480. // mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx);
  2481. ret = of_address_to_resource(np, 2, &res);
  2482. if(ret)
  2483. {
2484. dev_dbg(xvp->dev, "%s: get panic region fail: %d\n", __func__, ret);
  2485. }else
  2486. {
  2487. xvp->panic_phy = res.start;
  2488. xvp->panic = devm_ioremap_resource(&pdev->dev, &res);
  2489. xvp->panic_size = resource_size(&res);
2490. if (!IS_ERR(xvp->panic))
2491. {
2492. dev_dbg(xvp->dev, "%s: panic=0x%p, panic phy base=0x%llx, size:%d\n", __func__,
2493. xvp->panic, xvp->panic_phy, xvp->panic_size);
2494. }else
2495. {
/* devm_ioremap_resource() returns an ERR_PTR(), not NULL, on failure */
2496. dev_warn(xvp->dev, "%s: panic region ioremap fail\n", __func__);
xvp->panic = NULL;
2497. }
  2498. }
  2499. ret = of_address_to_resource(np, 3, &res);
  2500. if (ret)
  2501. {
  2502. dev_dbg(xvp->dev,"%s:get memory pool region fail\n", __func__);
  2503. return -ENODEV;
  2504. }
  2505. xvp->pmem = res.start;
  2506. xvp->shared_size = resource_size(&res);
  2507. dev_dbg(xvp->dev,"%s,memory pool phy_addr base=0x%llx,size:0x%x\n", __func__,
  2508. xvp->pmem, xvp->shared_size);
  2509. return xrp_init_private_pool(&xvp->pool, xvp->pmem,
  2510. xvp->shared_size);
  2511. }
  2512. static int xrp_init_regs_v1(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2513. {
  2514. struct resource *mem;
  2515. struct resource r;
  2516. mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx);
  2517. if (!mem)
  2518. return -ENODEV;
  2519. if (resource_size(mem) < 2 * PAGE_SIZE) {
  2520. dev_err(xvp->dev,
  2521. "%s: shared memory size is too small\n",
  2522. __func__);
  2523. return -ENOMEM;
  2524. }
  2525. xvp->comm_phys = mem->start;
  2526. xvp->pmem = mem->start + PAGE_SIZE;
  2527. xvp->shared_size = resource_size(mem) - PAGE_SIZE;
  2528. r = *mem;
  2529. r.end = r.start + PAGE_SIZE;
  2530. xvp->comm = devm_ioremap_resource(&pdev->dev, &r);
  2531. return xrp_init_private_pool(&xvp->pool, xvp->pmem,
  2532. xvp->shared_size);
  2533. }
  2534. static bool xrp_translate_base_mimo_to_dsp(struct xvp *xvp)
  2535. {
  2536. if(!xvp->hw_ops->get_base_mimo || !xvp->hw_ops->get_hw_sync_data )
  2537. {
  2538. return true;
  2539. }
  2540. phys_addr_t mimo_addr = xvp->hw_ops->get_base_mimo(xvp->hw_arg);
  2541. u32 device_mimo_addr = xrp_translate_to_dsp(&xvp->address_map, mimo_addr);
  2542. if(device_mimo_addr==XRP_NO_TRANSLATION)
  2543. {
2544. dev_err(xvp->dev,
2545. "%s: %pap translate to dsp address fail\n",
2546. __func__, &mimo_addr);
  2547. return false;
  2548. }
  2549. xvp->hw_ops->update_device_base(xvp->hw_arg,device_mimo_addr);
2550. dev_dbg(xvp->dev,
2551. "%s: base mimo translated to dsp address\n", __func__);
  2552. return true;
  2553. }
  2554. static int xrp_init_regs_cma(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2555. {
  2556. dma_addr_t comm_phys;
  2557. if (of_reserved_mem_device_init(xvp->dev) < 0)
  2558. return -ENODEV;
  2559. xvp->comm = dma_alloc_attrs(xvp->dev, PAGE_SIZE, &comm_phys,
  2560. GFP_KERNEL, 0);
  2561. if (!xvp->comm)
  2562. return -ENOMEM;
  2563. xvp->comm_phys = dma_to_phys(xvp->dev, comm_phys);
  2564. return xrp_init_cma_pool(&xvp->pool, xvp->dev);
  2565. }
  2566. static int compare_queue_priority(const void *a, const void *b)
  2567. {
  2568. const void * const *ppa = a;
  2569. const void * const *ppb = b;
  2570. const struct xrp_comm *pa = *ppa, *pb = *ppb;
  2571. if (pa->priority == pb->priority)
  2572. return 0;
  2573. else
  2574. return pa->priority < pb->priority ? -1 : 1;
  2575. }
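/*
 * Common probe-time initialization: map registers and shared memory through
 * the caller-supplied xrp_init_regs callback, build the DSP address map, set
 * up the priority-ordered command queues, create the procfs panic-log entry,
 * boot the firmware via runtime PM and register the misc device node (xvpN).
 */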
  2576. static long xrp_init_common(struct platform_device *pdev,
  2577. enum xrp_init_flags init_flags,
  2578. const struct xrp_hw_ops *hw_ops, void *hw_arg,
  2579. int mem_idx,
  2580. int (*xrp_init_regs)(struct platform_device *pdev,
  2581. struct xvp *xvp,int mem_idx))
  2582. {
  2583. long ret;
  2584. char nodename[sizeof("xvp") + 3 * sizeof(int)];
  2585. struct xvp *xvp;
  2586. int nodeid;
  2587. unsigned i;
  2588. u32 value;
  2589. char dir_name[32];
  2590. xvp = devm_kzalloc(&pdev->dev, sizeof(*xvp), GFP_KERNEL);
  2591. if (!xvp) {
  2592. ret = -ENOMEM;
  2593. goto err;
  2594. }
  2595. xvp->reporter = NULL;
  2596. xvp->dev = &pdev->dev;
  2597. xvp->hw_ops = hw_ops;
  2598. xvp->hw_arg = hw_arg;
  2599. if (init_flags & XRP_INIT_USE_HOST_IRQ)
  2600. xvp->host_irq_mode = true;
  2601. platform_set_drvdata(pdev, xvp);
  2602. ret = xrp_init_regs(pdev, xvp,mem_idx);
  2603. if (ret < 0)
  2604. goto err;
  2605. dev_dbg(xvp->dev,"%s: comm = %pap/%p\n", __func__, &xvp->comm_phys, xvp->comm);
  2606. dev_dbg(xvp->dev,"%s: xvp->pmem = %pap\n", __func__, &xvp->pmem);
  2607. // writel(0xdeadbeef,xvp->comm+0x4);
  2608. // value = readl(xvp->comm+0x4);
  2609. // pr_debug("offset=04, value is:0x%08x\n",value);
  2610. ret = xrp_init_address_map(xvp->dev, &xvp->address_map);
  2611. if (ret < 0)
  2612. goto err_free_pool;
2613. if (!xrp_translate_base_mimo_to_dsp(xvp))
2614. {
ret = -EINVAL;
2615. goto err_free_map;
2616. }
  2617. ret = device_property_read_u32_array(xvp->dev, "queue-priority",
  2618. NULL, 0);
  2619. if (ret > 0) {
  2620. xvp->n_queues = ret;
  2621. xvp->queue_priority = devm_kmalloc(&pdev->dev,
  2622. ret * sizeof(u32),
  2623. GFP_KERNEL);
2624. if (xvp->queue_priority == NULL) {
ret = -ENOMEM;
2625. goto err_free_pool;
}
  2626. ret = device_property_read_u32_array(xvp->dev,
  2627. "queue-priority",
  2628. xvp->queue_priority,
  2629. xvp->n_queues);
  2630. if (ret < 0)
  2631. goto err_free_pool;
  2632. dev_dbg(xvp->dev,
  2633. "multiqueue (%d) configuration, queue priorities:\n",
  2634. xvp->n_queues);
  2635. for (i = 0; i < xvp->n_queues; ++i)
  2636. dev_dbg(xvp->dev, " %d\n", xvp->queue_priority[i]);
  2637. } else {
  2638. xvp->n_queues = 1;
  2639. }
  2640. xvp->queue = devm_kmalloc(&pdev->dev,
  2641. xvp->n_queues * sizeof(*xvp->queue),
  2642. GFP_KERNEL);
  2643. xvp->queue_ordered = devm_kmalloc(&pdev->dev,
  2644. xvp->n_queues * sizeof(*xvp->queue_ordered),
  2645. GFP_KERNEL);
2646. if (xvp->queue == NULL ||
2647. xvp->queue_ordered == NULL) {
ret = -ENOMEM;
2648. goto err_free_pool;
}
  2649. for (i = 0; i < xvp->n_queues; ++i) {
  2650. mutex_init(&xvp->queue[i].lock);
  2651. xvp->queue[i].comm = xvp->comm + XRP_DSP_CMD_STRIDE * i;
  2652. init_completion(&xvp->queue[i].completion);
  2653. if (xvp->queue_priority)
  2654. xvp->queue[i].priority = xvp->queue_priority[i];
  2655. xvp->queue_ordered[i] = xvp->queue + i;
  2656. }
  2657. sort(xvp->queue_ordered, xvp->n_queues, sizeof(*xvp->queue_ordered),
  2658. compare_queue_priority, NULL);
  2659. if (xvp->n_queues > 1) {
  2660. dev_dbg(xvp->dev, "SW -> HW queue priority mapping:\n");
  2661. for (i = 0; i < xvp->n_queues; ++i) {
  2662. dev_dbg(xvp->dev, " %d -> %d\n",
  2663. i, xvp->queue_ordered[i]->priority);
  2664. }
  2665. }
  2666. ret = device_property_read_string(xvp->dev, "firmware-name",
  2667. &xvp->firmware_name);
  2668. if (ret == -EINVAL || ret == -ENODATA) {
  2669. dev_dbg(xvp->dev,
  2670. "no firmware-name property, not loading firmware\n");
  2671. } else if (ret < 0) {
  2672. dev_err(xvp->dev, "invalid firmware name (%ld)\n", ret);
  2673. goto err_free_map;
  2674. }
  2675. nodeid = ida_simple_get(&xvp_nodeid, 0, 0, GFP_KERNEL);
  2676. if (nodeid < 0) {
  2677. ret = nodeid;
  2678. goto err_free_map;
  2679. }
  2680. sprintf(dir_name,"dsp%d_proc",nodeid);
  2681. xvp->proc_dir = proc_mkdir(dir_name, NULL);
  2682. if (NULL != xvp->proc_dir)
  2683. {
  2684. xvp->panic_log = xrp_create_panic_log_proc(xvp->proc_dir,xvp->panic,xvp->panic_size);
  2685. }
  2686. else
  2687. {
2688. dev_err(xvp->dev, "create %s fail\n", dir_name);
ret = -ENOMEM;
2689. goto err_free_id;
  2690. }
  2691. pm_runtime_enable(xvp->dev);
  2692. if (!pm_runtime_enabled(xvp->dev)) {
  2693. ret = xrp_runtime_resume(xvp->dev);
  2694. if (ret)
  2695. goto err_pm_disable;
  2696. }else
  2697. {
  2698. ret = xrp_runtime_resume(xvp->dev);
  2699. if (ret)
  2700. goto err_proc_remove;
  2701. // xvp_enable_dsp(xvp);
  2702. xrp_runtime_suspend(xvp->dev);
  2703. }
  2704. xvp->nodeid = nodeid;
  2705. sprintf(nodename, "xvp%u", nodeid);
  2706. xvp->miscdev = (struct miscdevice){
  2707. .minor = MISC_DYNAMIC_MINOR,
  2708. .name = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  2709. .nodename = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  2710. .fops = &xvp_fops,
  2711. };
  2712. ret = misc_register(&xvp->miscdev);
  2713. if (ret < 0)
  2714. goto err_pm_disable;
  2715. // xrp_device_heartbeat_init(xvp);
  2716. INIT_LIST_HEAD(&xvp->dma_buf_list);
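/*
 * On success the xvp pointer itself is returned cast to long; the ACPI path
 * in xrp_probe() recovers it with ERR_PTR() to patch the address map.
 */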
  2717. return PTR_ERR(xvp);
  2718. err_pm_disable:
  2719. pm_runtime_disable(xvp->dev);
  2720. err_proc_remove:
  2721. xvp_remove_proc(xvp);
  2722. err_free_id:
  2723. ida_simple_remove(&xvp_nodeid, nodeid);
  2724. err_free_map:
  2725. xrp_free_address_map(&xvp->address_map);
  2726. err_free_pool:
  2727. xrp_free_pool(xvp->pool);
  2728. if (xvp->comm_phys && !xvp->pmem) {
  2729. dma_free_attrs(xvp->dev, PAGE_SIZE, xvp->comm,
  2730. phys_to_dma(xvp->dev, xvp->comm_phys), 0);
  2731. }
  2732. err:
  2733. dev_err(&pdev->dev, "%s: ret = %ld\n", __func__, ret);
  2734. return ret;
  2735. }
  2736. typedef long xrp_init_function(struct platform_device *pdev,
  2737. enum xrp_init_flags flags,
  2738. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx);
  2739. xrp_init_function xrp_init;
  2740. long xrp_init(struct platform_device *pdev, enum xrp_init_flags flags,
  2741. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2742. {
  2743. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_v0);
  2744. }
  2745. EXPORT_SYMBOL(xrp_init);
  2746. xrp_init_function xrp_init_v1;
  2747. long xrp_init_v1(struct platform_device *pdev, enum xrp_init_flags flags,
  2748. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2749. {
  2750. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_v1);
  2751. }
  2752. EXPORT_SYMBOL(xrp_init_v1);
  2753. xrp_init_function xrp_init_cma;
  2754. long xrp_init_cma(struct platform_device *pdev, enum xrp_init_flags flags,
  2755. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2756. {
  2757. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_cma);
  2758. }
  2759. EXPORT_SYMBOL(xrp_init_cma);
  2760. int xrp_deinit(struct platform_device *pdev)
  2761. {
  2762. struct xvp *xvp = platform_get_drvdata(pdev);
  2763. pm_runtime_disable(xvp->dev);
  2764. if (!pm_runtime_status_suspended(xvp->dev))
  2765. xrp_runtime_suspend(xvp->dev);
  2766. // xvp_clear_dsp(xvp);
  2767. xvp_remove_proc(xvp);
  2768. dev_dbg(xvp->dev,"%s:phase 1\n",__func__);
  2769. misc_deregister(&xvp->miscdev);
  2770. dev_dbg(xvp->dev,"%s:phase 2\n",__func__);
  2771. // release_firmware(xvp->firmware);
  2772. // dev_dbg(xvp->dev,"%s:phase 3\n",__func__);
  2773. xrp_free_pool(xvp->pool);
  2774. if (xvp->comm_phys && !xvp->pmem) {
  2775. dma_free_attrs(xvp->dev, PAGE_SIZE, xvp->comm,
  2776. phys_to_dma(xvp->dev, xvp->comm_phys), 0);
  2777. }
  2778. dev_dbg(xvp->dev,"%s:phase 3\n",__func__);
  2779. xrp_free_address_map(&xvp->address_map);
  2780. dev_dbg(xvp->dev,"%s:phase 4\n",__func__);
  2781. if(!ida_is_empty(&xvp_nodeid))
  2782. {
  2783. ida_simple_remove(&xvp_nodeid, xvp->nodeid);
  2784. dev_dbg(xvp->dev,"%s:phase 5\n",__func__);
  2785. }
  2786. return 0;
  2787. }
  2788. EXPORT_SYMBOL(xrp_deinit);
  2789. int xrp_deinit_hw(struct platform_device *pdev, void **hw_arg)
  2790. {
  2791. if (hw_arg) {
  2792. struct xvp *xvp = platform_get_drvdata(pdev);
  2793. *hw_arg = xvp->hw_arg;
  2794. }
  2795. return xrp_deinit(pdev);
  2796. }
  2797. EXPORT_SYMBOL(xrp_deinit_hw);
  2798. static void *get_hw_sync_data(void *hw_arg, size_t *sz)
  2799. {
2800. void *p = kzalloc(64, GFP_KERNEL);
2801. *sz = p ? 64 : 0;
2802. return p;
  2803. }
  2804. static const struct xrp_hw_ops hw_ops = {
  2805. .get_hw_sync_data = get_hw_sync_data,
  2806. };
  2807. #ifdef CONFIG_OF
  2808. static const struct of_device_id xrp_of_match[] = {
  2809. {
  2810. .compatible = "cdns,xrp",
  2811. .data = xrp_init,
  2812. }, {
  2813. .compatible = "cdns,xrp,v1",
  2814. .data = xrp_init_v1,
  2815. }, {
  2816. .compatible = "cdns,xrp,cma",
  2817. .data = xrp_init_cma,
  2818. }, {},
  2819. };
  2820. MODULE_DEVICE_TABLE(of, xrp_of_match);
  2821. #endif
  2822. #ifdef CONFIG_ACPI
  2823. static const struct acpi_device_id xrp_acpi_match[] = {
  2824. { "CXRP0001", 0 },
  2825. { },
  2826. };
  2827. MODULE_DEVICE_TABLE(acpi, xrp_acpi_match);
  2828. #endif
  2829. static int xrp_probe(struct platform_device *pdev)
  2830. {
  2831. long ret = -EINVAL;
  2832. #ifdef CONFIG_OF
  2833. const struct of_device_id *match;
  2834. match = of_match_device(xrp_of_match, &pdev->dev);
  2835. if (match) {
  2836. xrp_init_function *init = match->data;
  2837. ret = init(pdev, 0, &hw_ops, NULL,0);
  2838. return IS_ERR_VALUE(ret) ? ret : 0;
  2839. } else {
  2840. pr_debug("%s: no OF device match found\n", __func__);
  2841. }
  2842. #endif
  2843. #ifdef CONFIG_ACPI
  2844. ret = xrp_init_v1(pdev, 0, &hw_ops, NULL,2);
  2845. if (!IS_ERR_VALUE(ret)) {
  2846. struct xrp_address_map_entry *entry;
  2847. struct xvp *xvp = ERR_PTR(ret);
  2848. ret = 0;
  2849. /*
  2850. * On ACPI system DSP can currently only access
  2851. * its own shared memory.
  2852. */
  2853. entry = xrp_get_address_mapping(&xvp->address_map,
  2854. xvp->comm_phys);
  2855. if (entry) {
  2856. entry->src_addr = xvp->comm_phys;
  2857. entry->dst_addr = (u32)xvp->comm_phys;
  2858. entry->size = (u32)xvp->shared_size + PAGE_SIZE;
  2859. } else {
  2860. dev_err(xvp->dev,
  2861. "%s: couldn't find mapping for shared memory\n",
  2862. __func__);
  2863. ret = -EINVAL;
  2864. }
  2865. }
  2866. #endif
  2867. return ret;
  2868. }
  2869. static int xrp_remove(struct platform_device *pdev)
  2870. {
  2871. return xrp_deinit(pdev);
  2872. }
  2873. static const struct dev_pm_ops xrp_pm_ops = {
  2874. SET_RUNTIME_PM_OPS(xrp_runtime_suspend,
  2875. xrp_runtime_resume, NULL)
  2876. };
  2877. static struct platform_driver xrp_driver = {
  2878. .probe = xrp_probe,
  2879. .remove = xrp_remove,
  2880. .driver = {
  2881. .name = DRIVER_NAME,
  2882. .of_match_table = of_match_ptr(xrp_of_match),
  2883. .acpi_match_table = ACPI_PTR(xrp_acpi_match),
  2884. .pm = &xrp_pm_ops,
  2885. },
  2886. };
  2887. module_platform_driver(xrp_driver);
  2888. MODULE_AUTHOR("T-HEAD");
  2889. MODULE_DESCRIPTION("XRP: Linux device driver for Xtensa Remote Processing");
  2890. MODULE_LICENSE("Dual MIT/GPL");