xvp_main.c

  1. /*
  2. * XRP: Linux device driver for Xtensa Remote Processing
  3. *
  4. * Copyright (c) 2015 - 2017 Cadence Design Systems, Inc.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining
  7. * a copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sublicense, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included
  15. * in all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  21. * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  22. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  23. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24. *
  25. * Alternatively you can use and distribute this file under the terms of
  26. * the GNU General Public License version 2 or later.
  27. */
  28. #include <linux/version.h>
  29. #include <linux/atomic.h>
  30. #include <linux/acpi.h>
  31. #include <linux/completion.h>
  32. #include <linux/delay.h>
  33. #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
  34. #include <linux/dma-mapping.h>
  35. #else
  36. #include <linux/dma-direct.h>
  37. #endif
  38. #include <linux/firmware.h>
  39. #include <linux/fs.h>
  40. #include <linux/hashtable.h>
  41. #include <linux/highmem.h>
  42. #include <linux/idr.h>
  43. #include <linux/interrupt.h>
  44. #include <linux/io.h>
  45. #include <linux/kernel.h>
  46. #include <linux/module.h>
  47. #include <linux/of.h>
  48. #include <linux/of_address.h>
  49. #include <linux/of_device.h>
  50. #include <linux/of_reserved_mem.h>
  51. #include <linux/platform_device.h>
  52. #include <linux/pm_runtime.h>
  53. #include <linux/property.h>
  54. #include <linux/sched.h>
  55. #include <linux/slab.h>
  56. #include <linux/sort.h>
  57. #include <linux/timer.h>
  58. #include <linux/dma-mapping.h>
  59. #include <linux/dma-buf.h>
  60. #include <asm/mman.h>
  61. #include <linux/mman.h>
  62. #include <asm/uaccess.h>
  63. #include "xrp_cma_alloc.h"
  64. #include "xrp_firmware.h"
  65. #include "xrp_hw.h"
  66. #include "xrp_internal.h"
  67. #include "xrp_kernel_defs.h"
  68. #include "xrp_kernel_dsp_interface.h"
  69. #include "xrp_private_alloc.h"
  70. #include "xrp_debug.h"
  71. #define DRIVER_NAME "xrp"
  72. #define XRP_DEFAULT_TIMEOUT 60
  73. #ifndef __io_virt
  74. #define __io_virt(a) ((void __force *)(a))
  75. #endif
  76. struct xrp_alien_mapping {
  77. unsigned long vaddr;
  78. unsigned long size;
  79. phys_addr_t paddr;
  80. void *allocation;
  81. enum {
  82. ALIEN_GUP,
  83. ALIEN_PFN_MAP,
  84. ALIEN_COPY,
  85. } type;
  86. };
  87. struct xrp_mapping {
  88. enum {
  89. XRP_MAPPING_NONE,
  90. XRP_MAPPING_NATIVE,
  91. XRP_MAPPING_ALIEN,
  92. XRP_MAPPING_KERNEL = 0x4,
  93. } type;
  94. union {
  95. struct {
  96. struct xrp_allocation *xrp_allocation;
  97. unsigned long vaddr;
  98. } native;
  99. struct xrp_alien_mapping alien_mapping;
  100. };
  101. };
  102. struct xvp_file {
  103. struct xvp *xvp;
  104. spinlock_t busy_list_lock;
  105. struct xrp_allocation *busy_list;
  106. };
  107. struct xrp_known_file {
  108. void *filp;
  109. struct hlist_node node;
  110. };
  111. struct xrp_dma_buf_item{
  112. struct list_head link;
  113. struct dma_buf *dmabuf;
  114. struct sg_table *sgt;
  115. struct dma_buf_attachment * attachment;
  116. int ref;
  117. };
  118. static int firmware_command_timeout = XRP_DEFAULT_TIMEOUT;
  119. module_param(firmware_command_timeout, int, 0644);
  120. MODULE_PARM_DESC(firmware_command_timeout, "Firmware command timeout in seconds.");
  121. static int firmware_reboot = 1;
  122. module_param(firmware_reboot, int, 0644);
  123. MODULE_PARM_DESC(firmware_reboot, "Reboot firmware on command timeout.");
  124. enum {
  125. LOOPBACK_NORMAL, /* normal work mode */
  126. LOOPBACK_NOIO, /* don't communicate with FW, but still load it and control DSP */
  127. LOOPBACK_NOMMIO, /* don't communicate with FW or use DSP MMIO, but still load the FW */
  128. LOOPBACK_NOFIRMWARE, /* communicate with FW or use DSP MMIO, don't load the FW */
  129. LOOPBACK_NOFIRMWARE_NOMMIO, /* don't communicate with FW or use DSP MMIO, don't load the FW */
  130. };
  131. static int loopback = 0;
  132. module_param(loopback, int, 0644);
  133. MODULE_PARM_DESC(loopback, "Don't use actual DSP, perform everything locally.");
  134. static int load_mode = 0;
  135. module_param(load_mode, int, 0644);
  136. MODULE_PARM_DESC(load_mode, "Firmware load mode. 0: load by driver. 1: load by Xplorer for debugging.");
  137. enum {
  138. LOAD_MODE_AUTO, /* load firmware automatically by driver */
  139. LOAD_MODE_MANUAL, /* load firmware manually for debugging */
  140. };
  141. static int heartbeat_period = 0;
  142. module_param(heartbeat_period, int, 0644);
  143. MODULE_PARM_DESC(heartbeat_period, "Heartbeat check period in seconds (0 disables it).");
  144. static int dsp_fw_log_mode = 1;
  145. module_param(dsp_fw_log_mode, int, 0644);
  146. MODULE_PARM_DESC(dsp_fw_log_mode, "Firmware log mode. 0: disable, 1: ERROR (default), 2: WARNING, 3: INFO, 4: DEBUG, 5: TRACE");
  147. static DEFINE_HASHTABLE(xrp_known_files, 10);
  148. static DEFINE_SPINLOCK(xrp_known_files_lock);
  149. static DEFINE_SPINLOCK(xrp_dma_buf_lock);
  150. static DEFINE_IDA(xvp_nodeid);
  151. static int xrp_boot_firmware(struct xvp *xvp);
  152. static long xrp_copy_user_from_phys(struct xvp *xvp,
  153. unsigned long vaddr, unsigned long size,
  154. phys_addr_t paddr, unsigned long flags);
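/*
 * Check whether cache maintenance can be used for [pfn, pfn + n_pages):
 * defer to the hw_ops->cacheable hook when present, otherwise require
 * every pfn in the range to be valid system RAM.
 */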
  155. static bool xrp_cacheable(struct xvp *xvp, unsigned long pfn,
  156. unsigned long n_pages)
  157. {
  158. if (xvp->hw_ops->cacheable) {
  159. return xvp->hw_ops->cacheable(xvp->hw_arg, pfn, n_pages);
  160. } else {
  161. unsigned long i;
  162. for (i = 0; i < n_pages; ++i)
  163. if (!pfn_valid(pfn + i))
  164. return false;
  165. return true;
  166. }
  167. }
  168. static int xrp_dma_direction(unsigned flags)
  169. {
  170. static const enum dma_data_direction xrp_dma_direction[] = {
  171. [0] = DMA_NONE,
  172. [XRP_FLAG_READ] = DMA_TO_DEVICE,
  173. [XRP_FLAG_WRITE] = DMA_FROM_DEVICE,
  174. [XRP_FLAG_READ_WRITE] = DMA_BIDIRECTIONAL,
  175. };
  176. return xrp_dma_direction[flags & XRP_FLAG_READ_WRITE];
  177. }
  178. static void xrp_default_dma_sync_for_device(struct xvp *xvp,
  179. phys_addr_t phys,
  180. unsigned long size,
  181. unsigned long flags)
  182. {
  183. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, phys), size,
  184. xrp_dma_direction(flags));
  185. }
  186. static void xrp_dma_sync_for_device(struct xvp *xvp,
  187. unsigned long virt,
  188. phys_addr_t phys,
  189. unsigned long size,
  190. unsigned long flags)
  191. {
  192. if (xvp->hw_ops->dma_sync_for_device)
  193. xvp->hw_ops->dma_sync_for_device(xvp->hw_arg,
  194. (void *)virt, phys, size,
  195. flags);
  196. else
  197. xrp_default_dma_sync_for_device(xvp, phys, size, flags);
  198. }
  199. static void xrp_default_dma_sync_for_cpu(struct xvp *xvp,
  200. phys_addr_t phys,
  201. unsigned long size,
  202. unsigned long flags)
  203. {
  204. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, phys), size,
  205. xrp_dma_direction(flags));
  206. }
  207. static void xrp_dma_sync_for_cpu(struct xvp *xvp,
  208. unsigned long virt,
  209. phys_addr_t phys,
  210. unsigned long size,
  211. unsigned long flags)
  212. {
  213. if (xvp->hw_ops->dma_sync_for_cpu)
  214. xvp->hw_ops->dma_sync_for_cpu(xvp->hw_arg,
  215. (void *)virt, phys, size,
  216. flags);
  217. else
  218. xrp_default_dma_sync_for_cpu(xvp, phys, size, flags);
  219. }
  220. static inline void xrp_comm_write32(volatile void __iomem *addr, u32 v)
  221. {
  222. //__raw_writel(v, addr);
  223. writel(v, addr);
  224. }
  225. static inline u32 xrp_comm_read32(volatile void __iomem *addr)
  226. {
  227. //return __raw_readl(addr);
  228. return readl(addr);
  229. }
  230. static inline void __iomem *xrp_comm_put_tlv(void __iomem **addr,
  231. uint32_t type,
  232. uint32_t length)
  233. {
  234. struct xrp_dsp_tlv __iomem *tlv = *addr;
  235. xrp_comm_write32(&tlv->type, type);
  236. xrp_comm_write32(&tlv->length, length);
  237. *addr = tlv->value + ((length + 3) / 4);
  238. return tlv->value;
  239. }
  240. static inline void __iomem *xrp_comm_get_tlv(void __iomem **addr,
  241. uint32_t *type,
  242. uint32_t *length)
  243. {
  244. struct xrp_dsp_tlv __iomem *tlv = *addr;
  245. *type = xrp_comm_read32(&tlv->type);
  246. *length = xrp_comm_read32(&tlv->length);
  247. *addr = tlv->value + ((*length + 3) / 4);
  248. return tlv->value;
  249. }
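/*
 * Copy to/from the shared communication area in 32-bit accesses,
 * zero-padding the trailing partial word on write.
 */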
  250. static inline void xrp_comm_write(volatile void __iomem *addr, const void *p,
  251. size_t sz)
  252. {
  253. size_t sz32 = sz & ~3;
  254. u32 v;
  255. while (sz32) {
  256. memcpy(&v, p, sizeof(v));
  257. __raw_writel(v, addr);
  258. p += 4;
  259. addr += 4;
  260. sz32 -= 4;
  261. }
  262. sz &= 3;
  263. if (sz) {
  264. v = 0;
  265. memcpy(&v, p, sz);
  266. __raw_writel(v, addr);
  267. }
  268. }
  269. static inline void xrp_comm_read(volatile void __iomem *addr, void *p,
  270. size_t sz)
  271. {
  272. size_t sz32 = sz & ~3;
  273. u32 v;
  274. while (sz32) {
  275. v = __raw_readl(addr);
  276. memcpy(p, &v, sizeof(v));
  277. p += 4;
  278. addr += 4;
  279. sz32 -= 4;
  280. }
  281. sz &= 3;
  282. if (sz) {
  283. v = __raw_readl(addr);
  284. memcpy(p, &v, sz);
  285. }
  286. }
  287. static inline void xrp_send_device_irq(struct xvp *xvp)
  288. {
  289. if (xvp->hw_ops->send_irq)
  290. xvp->hw_ops->send_irq(xvp->hw_arg);
  291. }
  292. static inline bool xrp_panic_check(struct xvp *xvp)
  293. {
  294. if (xvp->hw_ops->panic_check)
  295. return xvp->hw_ops->panic_check(xvp->hw_arg);
  296. else
  297. return panic_check(xvp->panic_log);
  298. }
  299. static void xrp_add_known_file(struct file *filp)
  300. {
  301. struct xrp_known_file *p = kmalloc(sizeof(*p), GFP_KERNEL);
  302. if (!p)
  303. return;
  304. p->filp = filp;
  305. spin_lock(&xrp_known_files_lock);
  306. hash_add(xrp_known_files, &p->node, (unsigned long)filp);
  307. spin_unlock(&xrp_known_files_lock);
  308. }
  309. static void xrp_remove_known_file(struct file *filp)
  310. {
  311. struct xrp_known_file *p;
  312. struct xrp_known_file *pf = NULL;
  313. spin_lock(&xrp_known_files_lock);
  314. hash_for_each_possible(xrp_known_files, p, node, (unsigned long)filp) {
  315. if (p->filp == filp) {
  316. hash_del(&p->node);
  317. pf = p;
  318. break;
  319. }
  320. }
  321. spin_unlock(&xrp_known_files_lock);
  322. if (pf)
  323. kfree(pf);
  324. }
  325. static bool xrp_is_known_file(struct file *filp)
  326. {
  327. bool ret = false;
  328. struct xrp_known_file *p;
  329. spin_lock(&xrp_known_files_lock);
  330. hash_for_each_possible(xrp_known_files, p, node, (unsigned long)filp) {
  331. if (p->filp == filp) {
  332. ret = true;
  333. break;
  334. }
  335. }
  336. spin_unlock(&xrp_known_files_lock);
  337. return ret;
  338. }
  339. static void xrp_sync_v2(struct xvp *xvp,
  340. void *hw_sync_data, size_t sz)
  341. {
  342. struct xrp_dsp_sync_v2 __iomem *shared_sync = xvp->comm;
  343. void __iomem *addr = shared_sync->hw_sync_data;
  344. xrp_comm_write(xrp_comm_put_tlv(&addr,
  345. XRP_DSP_SYNC_TYPE_HW_SPEC_DATA, sz),
  346. hw_sync_data, sz);
  347. if (xvp->n_queues > 1) {
  348. struct xrp_dsp_sync_v2 __iomem *queue_sync;
  349. unsigned i;
  350. xrp_comm_write(xrp_comm_put_tlv(&addr,
  351. XRP_DSP_SYNC_TYPE_HW_QUEUES,
  352. xvp->n_queues * sizeof(u32)),
  353. xvp->queue_priority,
  354. xvp->n_queues * sizeof(u32));
  355. for (i = 1; i < xvp->n_queues; ++i) {
  356. queue_sync = xvp->queue[i].comm;
  357. xrp_comm_write32(&queue_sync->sync,
  358. XRP_DSP_SYNC_IDLE);
  359. }
  360. }
  361. struct xrp_dsp_debug_info debug_info ={
  362. .panic_addr = xvp->panic_phy,
  363. .log_level = dsp_fw_log_mode,
  364. };
  365. xrp_comm_write(xrp_comm_put_tlv(&addr,
  366. XRP_DSP_SYNC_TYPE_HW_DEBUG_INFO, sizeof(struct xrp_dsp_debug_info)),
  367. &debug_info, sizeof(struct xrp_dsp_debug_info));
  368. xrp_comm_put_tlv(&addr, XRP_DSP_SYNC_TYPE_LAST, 0);
  369. }
  370. static int xrp_sync_complete_v2(struct xvp *xvp, size_t sz)
  371. {
  372. struct xrp_dsp_sync_v2 __iomem *shared_sync = xvp->comm;
  373. void __iomem *addr = shared_sync->hw_sync_data;
  374. u32 type, len;
  375. xrp_comm_get_tlv(&addr, &type, &len);
  376. if (len != sz) {
  377. dev_err(xvp->dev,
  378. "HW spec data size modified by the DSP\n");
  379. return -EINVAL;
  380. }
  381. if (!(type & XRP_DSP_SYNC_TYPE_ACCEPT))
  382. dev_info(xvp->dev,
  383. "HW spec data not recognized by the DSP\n");
  384. if (xvp->n_queues > 1) {
  385. void __iomem *p = xrp_comm_get_tlv(&addr, &type, &len);
  386. if (len != xvp->n_queues * sizeof(u32)) {
  387. dev_err(xvp->dev,
  388. "Queue priority size modified by the DSP\n");
  389. return -EINVAL;
  390. }
  391. if (type & XRP_DSP_SYNC_TYPE_ACCEPT) {
  392. xrp_comm_read(p, xvp->queue_priority,
  393. xvp->n_queues * sizeof(u32));
  394. } else {
  395. dev_info(xvp->dev,
  396. "Queue priority data not recognized by the DSP\n");
  397. xvp->n_queues = 1;
  398. }
  399. }
  400. return 0;
  401. }
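/*
 * Host/DSP synchronization handshake: write XRP_DSP_SYNC_START, wait for the
 * DSP to report v1/v2 readiness, hand over the HW sync data (plus queue and
 * debug TLVs for v2), signal HOST_TO_DSP, wait for DSP_TO_HOST, raise the
 * device IRQ and finally return the comm area to XRP_DSP_SYNC_IDLE.
 */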
  402. static int xrp_synchronize(struct xvp *xvp)
  403. {
  404. size_t sz;
  405. void *hw_sync_data;
  406. unsigned long deadline = jiffies + firmware_command_timeout * HZ;
  407. struct xrp_dsp_sync_v1 __iomem *shared_sync = xvp->comm;
  408. int ret;
  409. u32 v, v1;
  410. hw_sync_data = xvp->hw_ops->get_hw_sync_data(xvp->hw_arg, &sz);
  411. if (!hw_sync_data) {
  412. ret = -ENOMEM;
  413. goto err;
  414. }
  415. ret = -ENODEV;
  416. dev_dbg(xvp->dev,"%s:comm sync:%p\n",__func__,&shared_sync->sync);
  417. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_START);
  418. mb();
  419. do {
  420. v = xrp_comm_read32(&shared_sync->sync);
  421. if (v != XRP_DSP_SYNC_START)
  422. break;
  423. if (xrp_panic_check(xvp))
  424. goto err;
  425. schedule();
  426. } while (time_before(jiffies, deadline));
  427. dev_dbg(xvp->dev,"%s:comm sync data :%x\n",__func__,v);
  428. switch (v) {
  429. case XRP_DSP_SYNC_DSP_READY_V1:
  430. if (xvp->n_queues > 1) {
  431. dev_info(xvp->dev,
  432. "Queue priority data not recognized by the DSP\n");
  433. xvp->n_queues = 1;
  434. }
  435. xrp_comm_write(&shared_sync->hw_sync_data, hw_sync_data, sz);
  436. break;
  437. case XRP_DSP_SYNC_DSP_READY_V2:
  438. xrp_sync_v2(xvp, hw_sync_data, sz);
  439. break;
  440. case XRP_DSP_SYNC_START:
  441. dev_err(xvp->dev, "DSP is not ready for synchronization\n");
  442. goto err;
  443. default:
  444. dev_err(xvp->dev,
  445. "DSP response to XRP_DSP_SYNC_START is not recognized\n");
  446. goto err;
  447. }
  448. mb();
  449. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_HOST_TO_DSP);
  450. do {
  451. mb();
  452. v1 = xrp_comm_read32(&shared_sync->sync);
  453. if (v1 == XRP_DSP_SYNC_DSP_TO_HOST)
  454. break;
  455. if (xrp_panic_check(xvp))
  456. goto err;
  457. schedule();
  458. } while (time_before(jiffies, deadline));
  459. if (v1 != XRP_DSP_SYNC_DSP_TO_HOST) {
  460. dev_err(xvp->dev,
  461. "DSP hasn't confirmed initialization data reception\n");
  462. goto err;
  463. }
  464. if (v == XRP_DSP_SYNC_DSP_READY_V2) {
  465. ret = xrp_sync_complete_v2(xvp, sz);
  466. if (ret < 0)
  467. goto err;
  468. }
  469. xrp_send_device_irq(xvp);
  470. // if (xvp->host_irq_mode) {
  471. // int res = wait_for_completion_timeout(&xvp->queue[0].completion,
  472. // firmware_command_timeout * HZ);
  473. // ret = -ENODEV;
  474. // if (xrp_panic_check(xvp))
  475. // goto err;
  476. // if (res == 0) {
  477. // dev_err(xvp->dev,
  478. // "host IRQ mode is requested, but DSP couldn't deliver IRQ during synchronization\n");
  479. // goto err;
  480. // }
  481. // }
  482. ret = 0;
  483. err:
  484. kfree(hw_sync_data);
  485. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_IDLE);
  486. return ret;
  487. }
  488. static bool xrp_cmd_complete(struct xrp_comm *xvp)
  489. {
  490. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  491. u32 flags = xrp_comm_read32(&cmd->flags);
  492. pr_debug(" xrp_cmd_complete %x\n", flags);
  493. rmb();
  494. return (flags & (XRP_DSP_CMD_FLAG_REQUEST_VALID |
  495. XRP_DSP_CMD_FLAG_RESPONSE_VALID)) ==
  496. (XRP_DSP_CMD_FLAG_REQUEST_VALID |
  497. XRP_DSP_CMD_FLAG_RESPONSE_VALID);
  498. }
  499. static inline int xrp_report_comlete(struct xvp *xvp)
  500. {
  501. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  502. if(!xvp->reporter)
  503. return -1;
  504. u32 flags = xrp_comm_read32(&cmd->report_id);
  505. if(flags& XRP_DSP_REPORT_TO_HOST_FLAG )
  506. {
  507. // dev_err(xvp->dev, "%s,report_flag %x\n", __func__,flags);
  508. flags &= (~XRP_DSP_REPORT_TO_HOST_FLAG);
  509. xrp_comm_write32(&cmd->report_id,flags);
  510. tasklet_schedule(&xvp->reporter->report_task);
  511. return 0;
  512. }
  513. return -1;
  514. }
  515. static inline int xrp_device_cmd_comlete(struct xvp *xvp)
  516. {
  517. struct xrp_dsp_cmd __iomem *cmd = xvp->comm;
  518. u32 flags = xrp_comm_read32(&cmd->cmd_flag);
  519. if(flags& XRP_DSP_REPORT_TO_HOST_FLAG )
  520. {
  521. xrp_comm_write32(&cmd->cmd_flag,0);
  522. return 0;
  523. }
  524. return -1;
  525. }
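/*
 * IRQ dispatch: a pending DSP report schedules the report tasklet; if the
 * command flag is not set the interrupt is consumed here, otherwise every
 * queue whose request/response flags are both set gets its completion.
 */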
  526. irqreturn_t xrp_irq_handler(int irq, struct xvp *xvp)
  527. {
  528. unsigned i, n = 0;
  529. // dev_dbg(xvp->dev, "%s\n", __func__);
  530. if (!xvp->comm)
  531. return IRQ_NONE;
  532. if(!xrp_report_comlete(xvp))
  533. {
  534. dev_dbg(xvp->dev, "completing report\n");
  535. // return IRQ_HANDLED;
  536. }
  537. if(xrp_device_cmd_comlete(xvp))
  538. {
  539. dev_dbg(xvp->dev, "no cmd msg report\n");
  540. return IRQ_HANDLED;
  541. }
  542. for (i = 0; i < xvp->n_queues; ++i) {
  543. if (xrp_cmd_complete(xvp->queue + i)) {
  544. dev_dbg(xvp->dev, "completing queue %d\n", i);
  545. complete(&xvp->queue[i].completion);
  546. ++n;
  547. }
  548. }
  549. return n ? IRQ_HANDLED : IRQ_NONE;
  550. }
  551. EXPORT_SYMBOL(xrp_irq_handler);
  552. static inline void xvp_file_lock(struct xvp_file *xvp_file)
  553. {
  554. spin_lock(&xvp_file->busy_list_lock);
  555. }
  556. static inline void xvp_file_unlock(struct xvp_file *xvp_file)
  557. {
  558. spin_unlock(&xvp_file->busy_list_lock);
  559. }
  560. static void xrp_allocation_queue(struct xvp_file *xvp_file,
  561. struct xrp_allocation *xrp_allocation)
  562. {
  563. xvp_file_lock(xvp_file);
  564. xrp_allocation->next = xvp_file->busy_list;
  565. xvp_file->busy_list = xrp_allocation;
  566. xvp_file_unlock(xvp_file);
  567. }
  568. static struct xrp_allocation *xrp_allocation_dequeue(struct xvp_file *xvp_file,
  569. phys_addr_t paddr, u32 size)
  570. {
  571. struct xrp_allocation **pcur;
  572. struct xrp_allocation *cur;
  573. xvp_file_lock(xvp_file);
  574. for (pcur = &xvp_file->busy_list; (cur = *pcur); pcur = &((*pcur)->next)) {
  575. pr_debug("%s: %pap / %pap x %d\n", __func__, &paddr, &cur->start, cur->size);
  576. if (paddr >= cur->start && paddr + size - cur->start <= cur->size) {
  577. *pcur = cur->next;
  578. break;
  579. }
  580. }
  581. xvp_file_unlock(xvp_file);
  582. return cur;
  583. }
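/*
 * XRP_IOCTL_ALLOC: allocate a buffer from the device pool, map it into the
 * calling process with vm_mmap() and return both the user vaddr and the
 * physical address to user space.
 */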
  584. static long xrp_ioctl_alloc(struct file *filp,
  585. struct xrp_ioctl_alloc __user *p)
  586. {
  587. struct xvp_file *xvp_file = filp->private_data;
  588. struct xrp_allocation *xrp_allocation;
  589. unsigned long vaddr;
  590. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  591. long err;
  592. // pr_debug("%s: %p\n", __func__, p);
  593. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  594. return -EFAULT;
  595. // pr_debug("%s: size = %d, align = %x\n", __func__,
  596. // xrp_ioctl_alloc.size, xrp_ioctl_alloc.align);
  597. err = xrp_allocate(xvp_file->xvp->pool,
  598. xrp_ioctl_alloc.size,
  599. xrp_ioctl_alloc.align,
  600. &xrp_allocation);
  601. if (err)
  602. return err;
  603. xrp_allocation_queue(xvp_file, xrp_allocation);
  604. vaddr = vm_mmap(filp, 0, xrp_allocation->size,
  605. PROT_READ | PROT_WRITE, MAP_SHARED,
  606. xrp_allocation_offset(xrp_allocation));
  607. xrp_ioctl_alloc.addr = vaddr;
  608. xrp_ioctl_alloc.paddr = xrp_allocation->start;
  609. pr_debug("%s: vaddr = %llx, paddr = %llx\n", __func__,
  610. xrp_ioctl_alloc.addr, xrp_ioctl_alloc.paddr);
  611. if (copy_to_user(p, &xrp_ioctl_alloc, sizeof(*p))) {
  612. vm_munmap(vaddr, xrp_ioctl_alloc.size);
  613. return -EFAULT;
  614. }
  615. return 0;
  616. }
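/*
 * Report tasklet: publish the report id from the comm area into the shared
 * report buffer, notify user space through SIGIO and clear the id so the
 * DSP can post the next report.
 */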
  617. static void xrp_report_tasklet(unsigned long arg)
  618. {
  619. struct xvp *xvp=(struct xvp *)arg;
  620. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  621. struct xrp_report_buffer *p_buf = xvp->reporter->buffer_virt;
  622. // pr_debug("%s,addr:%lx\n",__func__,arg);
  623. if(!xvp->reporter->fasync)
  624. {
  625. pr_debug("%s: fasync is not registered in user space\n", __func__);
  626. return;
  627. }
  628. // pr_debug("%s,%d\n",__func__,xvp->reporter->fasync->magic);
  629. // if(!xvp->reporter->user_buffer_virt &&
  630. // !xvp->reporter->buffer_size)
  631. // {
  632. // pr_debug("%s:user_buffer_virt and buffer size is invalid\n",__func__);
  633. // return;
  634. // }
  635. // size_t s= xrp_comm_read32(&cmd->report_paylad_size);
  636. // unsigned int id = xrp_comm_read32(&cmd->report_id);
  637. // if(copy_to_user(&p_buf_user->report_id,&id,sizeof(p_buf_user->report_id)));
  638. // {
  639. // pr_debug("%s:copy report id to user fail\n",__func__);
  640. // return;
  641. // }
  642. // if(xvp->reporter->buffer_size>XRP_DSP_CMD_INLINE_DATA_SIZE)
  643. // {
  644. // if(xrp_copy_user_from_phys(xvp,&p_buf_user->data[0],s,xvp->reporter->buffer_phys,XRP_FLAG_READ_WRITE))
  645. // return;
  646. // }
  647. // else
  648. // {
  649. // char temp_buf[XRP_DSP_CMD_INLINE_DATA_SIZE];
  650. // xrp_comm_read(&cmd->report_data,temp_buf,s);
  651. // if(copy_to_user(&p_buf_user->data[0],temp_buf,s))
  652. // {
  653. // pr_debug("%s:copy report data to user fail\n",__func__);
  654. // return;
  655. // }
  656. // }
  657. /*****clear report*********************/
  658. p_buf->report_id = xrp_comm_read32(&cmd->report_id)&0xffff;
  659. //xrp_dma_sync_for_cpu(xvp,xvp->reporter->buffer_virt,xvp->reporter->buffer_phys,xvp->reporter->buffer_size,XRP_FLAG_WRITE);
  660. kill_fasync(&(xvp->reporter->fasync), SIGIO, POLL_IN);
  661. xrp_comm_write32(&cmd->report_id,0x0);
  662. // pr_debug("%s,report_id:%d,report_data:%x\n",__func__,p_buf->report_id,p_buf->data[0]);
  663. }
  664. static long xrp_map_phy_to_virt(phys_addr_t paddr,unsigned long size,__u64 *vaddr)
  665. {
  666. // if (pfn_valid(__phys_to_pfn(paddr))) {
  667. // struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  668. // size_t page_offs = paddr & ~PAGE_MASK;
  669. // size_t offs;
  670. // // for (offs = 0; offs < size; ++page) {
  671. // // void *p = kmap(page);
  672. // // size_t sz = PAGE_SIZE - page_offs;
  673. // // size_t copy_sz = sz;
  674. // // unsigned long rc;
  675. // // }
  676. // if(page_offs+size>PAGE_SIZE)
  677. // {
  678. // pr_debug("%s,phys addr map to virt exceed one page",__func__);
  679. // return -EINVAL;
  680. // }
  681. // void *p = kmap(page);
  682. // if(!p)
  683. // {
  684. // pr_debug("%s couldn't kmap %pap x 0x%08x\n",__func__,&paddr, (u32)size);
  685. // return -EINVAL;
  686. // }
  687. // *vaddr =p + page_offs;
  688. // pr_debug("%s map to mem",__func__);
  689. // return 0;
  690. // }
  691. // else
  692. {
  693. void __iomem *p = ioremap(paddr, size);
  694. unsigned long rc;
  695. if (!p) {
  696. pr_debug("%s,couldn't ioremap %pap x 0x%08x\n",__func__,&paddr, (u32)size);
  697. return -EINVAL;
  698. }
  699. *vaddr = (__u64)(uintptr_t)p;
  700. pr_debug("%s map to io mem\n", __func__);
  701. return 0;
  702. }
  703. // iounmap(p);
  704. // if (rc)
  705. // return -EFAULT;
  706. // }
  707. }
  708. static long xrp_unmap_phy_to_virt(unsigned long *vaddr,phys_addr_t paddr,unsigned long size)
  709. {
  710. if (pfn_valid(__phys_to_pfn(paddr))) {
  711. struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  712. kunmap(page);
  713. }
  714. else{
  715. iounmap((void __iomem *)*vaddr);
  716. }
  717. *vaddr = 0;
  718. return 0;
  719. }
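/*
 * XRP_IOCTL_ALLOC_REPORT: set up the DSP report path. Allocates the report
 * buffer from the pool, maps it to the caller and (its head) into the kernel,
 * programs the DSP-visible address, size and status into the comm area and
 * initializes the report tasklet.
 */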
  720. static long xrp_ioctl_alloc_report(struct file *filp,
  721. struct xrp_ioctl_alloc __user *p)
  722. {
  723. struct xvp_file *xvp_file = filp->private_data;
  724. struct xrp_allocation *xrp_allocation;
  725. struct xvp *xvp = xvp_file->xvp;
  726. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  727. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  728. unsigned long vaddr;
  729. long err;
  730. pr_debug("%s: %p\n", __func__, p);
  731. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  732. return -EFAULT;
  733. pr_debug("%s: virtAddr = %llx, size = %d, align = %x\n", __func__,
  734. xrp_ioctl_alloc.addr,xrp_ioctl_alloc.size,
  735. xrp_ioctl_alloc.align);
  736. // if(NULL == xrp_ioctl_alloc.addr)
  737. // {
  738. // return -EFAULT;
  739. // }
  740. xvp->reporter= kmalloc(sizeof(*(xvp->reporter)), GFP_KERNEL);
  741. if (!xvp->reporter)
  742. return -ENOMEM;
  743. xvp->reporter->fasync=NULL;
  744. err = xrp_allocate(xvp_file->xvp->pool,
  745. xrp_ioctl_alloc.size,
  746. xrp_ioctl_alloc.align,
  747. &xrp_allocation);
  748. if (err) {
  749. kfree(xvp->reporter); xvp->reporter = NULL; return err; }
  750. xrp_allocation_queue(xvp_file, xrp_allocation);
  751. vaddr = vm_mmap(filp, 0, xrp_allocation->size,
  752. PROT_READ | PROT_WRITE, MAP_SHARED,
  753. xrp_allocation_offset(xrp_allocation));
  754. xrp_ioctl_alloc.addr=vaddr;
  755. xvp->reporter->buffer_phys = xrp_allocation->start;
  756. if(xrp_map_phy_to_virt(xvp->reporter->buffer_phys,sizeof(__u32),&xvp->reporter->buffer_virt))
  757. {
  758. pr_debug("%s: mapping to kernel virt failed\n", __func__);
  759. kfree(xvp->reporter);
  760. return -EFAULT;
  761. }
  762. xrp_comm_write32(&cmd->report_addr,
  763. xrp_translate_to_dsp(&xvp->address_map,xvp->reporter->buffer_phys+sizeof(__u32)));
  764. unsigned int dsp_addr = xrp_comm_read32(&cmd->report_addr);
  765. pr_debug("%s: alloc_report buffer user virt:%llx,kernel virt:%lx, phys:%llx,dsp_addr:%x,size:%d\n", __func__,
  766. vaddr,xvp->reporter->buffer_virt,xvp->reporter->buffer_phys,dsp_addr,xrp_allocation->size);
  767. /*alloc report memory for DSP , alloc kernel memory for user get*/
  768. // if(xrp_ioctl_alloc.size>XRP_DSP_CMD_INLINE_DATA_SIZE)
  769. // {
  770. // err = xrp_allocate(xvp_file->xvp->pool,
  771. // xrp_ioctl_alloc.size,
  772. // xrp_ioctl_alloc.align,
  773. // &xrp_allocation);
  774. // if (err)
  775. // return err;
  776. // // xrp_allocation_queue(xvp_file, xrp_allocation);
  777. // xvp->reporter->buffer_phys = xrp_allocation->start;
  778. // xrp_comm_write32(&cmd->report_addr,
  779. // xrp_translate_to_dsp(&xvp->address_map,xvp->reporter->buffer_phys));
  780. // // vaddr = vm_mmap(filp, 0, xrp_allocation->size,
  781. // // PROT_READ | PROT_WRITE, MAP_SHARED,
  782. // // xrp_allocation_offset(xrp_allocation));
  783. // // xrp_ioctl_alloc.addr=vaddr;
  784. // pr_debug("%s: kernel bufdfer:%lx\n", __func__, xvp->reporter->buffer_phys);
  785. // }
  786. // else{
  787. // xvp->reporter->buffer_phys = NULL;
  788. // }
  789. /*save the user addr ,which kernel copy the report to */
  790. // xvp->reporter->user_buffer_virt = xrp_ioctl_alloc.addr;
  791. xvp->reporter->buffer_size = xrp_ioctl_alloc.size;
  792. xrp_comm_write32(&cmd->report_buffer_size,xvp->reporter->buffer_size);
  793. xrp_comm_write32(&cmd->report_status,XRP_DSP_REPORT_WORKING);
  794. xrp_comm_write32(&cmd->report_id,0);
  795. tasklet_init(&xvp->reporter->report_task,xrp_report_tasklet,(unsigned long)xvp);
  796. if (copy_to_user(p, &xrp_ioctl_alloc, sizeof(*p))) {
  797. vm_munmap(vaddr, xrp_ioctl_alloc.size);
  798. kfree(xvp->reporter);
  799. pr_debug("%s: copy to user fail\n", __func__);
  800. return -EFAULT;
  801. }
  802. pr_debug("%s: alloc_report %p end\n", __func__, xvp);
  803. return 0;
  804. }
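/* fasync hook: (un)register the caller for SIGIO report notifications. */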
  805. static int xrp_report_fasync(int fd, struct file *filp, int on){
  806. struct xvp_file *xvp_file = (struct xvp_file *)filp->private_data;
  807. pr_debug("%s: start,mode: %d\n", __func__,on);
  808. if(xvp_file->xvp->reporter == NULL)
  809. {
  810. pr_debug("%s: reporter is NULL\n", __func__);
  811. return 0;
  812. }
  813. if( fasync_helper(fd,filp,on,&(xvp_file->xvp->reporter->fasync)) < 0){
  814. pr_debug("%s: xrp_report_fasync fail\n", __func__);
  815. return -EIO;
  816. }
  817. pr_debug("%s: end\n", __func__);
  818. return 0;
  819. }
  820. static int xrp_report_fasync_release(struct file *filp){
  821. struct xvp_file *xvp_file = (struct xvp_file *)filp->private_data;
  822. if(xvp_file->xvp->reporter)
  823. return xrp_report_fasync(-1,filp,0);
  824. return 0;
  825. }
  826. static long xrp_ioctl_release_report(struct file *filp,
  827. struct xrp_ioctl_alloc __user *p)
  828. {
  829. struct xvp_file *xvp_file = filp->private_data;
  830. struct xvp *xvp = xvp_file->xvp;
  831. struct mm_struct *mm = current->mm;
  832. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  833. struct vm_area_struct *vma;
  834. unsigned long start;
  835. struct xrp_dsp_cmd __iomem *cmd=xvp->comm;
  836. if (!xvp->reporter) return -EINVAL; tasklet_kill(&xvp->reporter->report_task);
  837. xrp_comm_write32(&cmd->report_status,XRP_DSP_REPORT_INVALID);
  838. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  839. return -EFAULT;
  840. start = xrp_ioctl_alloc.addr;
  841. pr_debug("%s: virt_addr = 0x%08lx\n", __func__, start);
  842. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  843. down_read(&mm->mmap_sem);
  844. #else
  845. down_read(&mm->mmap_lock);
  846. #endif
  847. vma = find_vma(mm, start);
  848. if (vma && vma->vm_file == filp &&
  849. vma->vm_start <= start && start < vma->vm_end) {
  850. size_t size;
  851. start = vma->vm_start;
  852. size = vma->vm_end - vma->vm_start;
  853. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  854. up_read(&mm->mmap_sem);
  855. #else
  856. up_read(&mm->mmap_lock);
  857. #endif
  858. pr_debug("%s: 0x%lx x %zu\n", __func__, start, size);
  859. vm_munmap(start, size);
  860. }
  861. else{
  862. pr_debug("%s: no vma/bad vma for vaddr = 0x%08lx\n", __func__, start);
  863. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  864. up_read(&mm->mmap_sem);
  865. #else
  866. up_read(&mm->mmap_lock);
  867. #endif
  868. return -EINVAL;
  869. }
  870. xrp_report_fasync_release(filp);
  871. kfree(xvp->reporter);
  872. xvp->reporter =NULL;
  873. return 0;
  874. }
  875. // static struct struct_list timer;
  876. // static void xrp_device_heartbeat_check(unsigned long arg)
  877. // {
  878. // struct xvp *xvp = struct xvp *(arg);
  879. // if(xvp->reporter != NULL)
  880. // {
  881. // xrp_comm_write32(&cmd->flags, 0);
  882. // }
  883. // mod_timer(&timer,jiffies + heartbeat_period * HZ);
  884. // }
  885. // static int xrp_device_heartbeat_init(void * arg)
  886. // {
  887. // if(heartbeat_period > 0)
  888. // {
  889. // init_timer(&timer);
  890. // timer.function = xrp_device_heartbeat_check;
  891. // timer.expires = jiffies + heartbeat_period * HZ;
  892. // timer.data = arg;
  893. // add_timer(&timer);
  894. // pr_debug("%s enable heartbeat timer\n", __func__);
  895. // }
  896. // }
  897. static void xrp_put_pages(phys_addr_t phys, unsigned long n_pages)
  898. {
  899. struct page *page;
  900. unsigned long i;
  901. page = pfn_to_page(__phys_to_pfn(phys));
  902. for (i = 0; i < n_pages; ++i)
  903. put_page(page + i);
  904. }
  905. static void xrp_alien_mapping_destroy(struct xrp_alien_mapping *alien_mapping)
  906. {
  907. switch (alien_mapping->type) {
  908. case ALIEN_GUP:
  909. xrp_put_pages(alien_mapping->paddr,
  910. PFN_UP(alien_mapping->vaddr +
  911. alien_mapping->size) -
  912. PFN_DOWN(alien_mapping->vaddr));
  913. break;
  914. case ALIEN_COPY:
  915. xrp_allocation_put(alien_mapping->allocation);
  916. break;
  917. default:
  918. break;
  919. }
  920. }
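/*
 * Resolve a VM_IO/VM_PFNMAP user range to a physical address: every page must
 * be physically contiguous and covered by a single DSP address map entry.
 */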
  921. static long xvp_pfn_virt_to_phys(struct xvp_file *xvp_file,
  922. struct vm_area_struct *vma,
  923. unsigned long vaddr, unsigned long size,
  924. phys_addr_t *paddr,
  925. struct xrp_alien_mapping *mapping)
  926. {
  927. int ret;
  928. unsigned long i;
  929. unsigned long nr_pages = PFN_UP(vaddr + size) - PFN_DOWN(vaddr);
  930. unsigned long pfn;
  931. const struct xrp_address_map_entry *address_map;
  932. ret = follow_pfn(vma, vaddr, &pfn);
  933. if (ret)
  934. return ret;
  935. *paddr = __pfn_to_phys(pfn) + (vaddr & ~PAGE_MASK);
  936. address_map = xrp_get_address_mapping(&xvp_file->xvp->address_map,
  937. *paddr);
  938. if (!address_map) {
  939. pr_debug("%s: untranslatable addr: %pap\n", __func__, paddr);
  940. return -EINVAL;
  941. }
  942. for (i = 1; i < nr_pages; ++i) {
  943. unsigned long next_pfn;
  944. phys_addr_t next_phys;
  945. ret = follow_pfn(vma, vaddr + (i << PAGE_SHIFT), &next_pfn);
  946. if (ret)
  947. return ret;
  948. if (next_pfn != pfn + 1) {
  949. pr_debug("%s: non-contiguous physical memory\n",
  950. __func__);
  951. return -EINVAL;
  952. }
  953. next_phys = __pfn_to_phys(next_pfn);
  954. if (xrp_compare_address(next_phys, address_map)) {
  955. pr_debug("%s: untranslatable addr: %pap\n",
  956. __func__, &next_phys);
  957. return -EINVAL;
  958. }
  959. pfn = next_pfn;
  960. }
  961. *mapping = (struct xrp_alien_mapping){
  962. .vaddr = vaddr,
  963. .size = size,
  964. .paddr = *paddr,
  965. .type = ALIEN_PFN_MAP,
  966. };
  967. pr_debug("%s: success, paddr: %pap\n", __func__, paddr);
  968. return 0;
  969. }
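/*
 * Pin user pages with get_user_pages_fast() and verify that they form one
 * physically contiguous, DSP-translatable range; the pages are released
 * again on any failure.
 */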
  970. static long xvp_gup_virt_to_phys(struct xvp_file *xvp_file,
  971. unsigned long vaddr, unsigned long size,
  972. phys_addr_t *paddr,
  973. struct xrp_alien_mapping *mapping)
  974. {
  975. int ret;
  976. int i;
  977. int nr_pages;
  978. struct page **page;
  979. const struct xrp_address_map_entry *address_map;
  980. if (PFN_UP(vaddr + size) - PFN_DOWN(vaddr) > INT_MAX)
  981. return -EINVAL;
  982. nr_pages = PFN_UP(vaddr + size) - PFN_DOWN(vaddr);
  983. page = kmalloc(nr_pages * sizeof(void *), GFP_KERNEL);
  984. if (!page)
  985. return -ENOMEM;
  986. ret = get_user_pages_fast(vaddr, nr_pages, 1, page);
  987. if (ret < 0)
  988. goto out;
  989. if (ret < nr_pages) {
  990. pr_debug("%s: asked for %d pages, but got only %d\n",
  991. __func__, nr_pages, ret);
  992. nr_pages = ret;
  993. ret = -EINVAL;
  994. goto out_put;
  995. }
  996. address_map = xrp_get_address_mapping(&xvp_file->xvp->address_map,
  997. page_to_phys(page[0]));
  998. if (!address_map) {
  999. phys_addr_t addr = page_to_phys(page[0]);
  1000. pr_debug("%s: untranslatable addr: %pap\n",
  1001. __func__, &addr);
  1002. ret = -EINVAL;
  1003. goto out_put;
  1004. }
  1005. for (i = 1; i < nr_pages; ++i) {
  1006. phys_addr_t addr;
  1007. if (page[i] != page[i - 1] + 1) {
  1008. pr_debug("%s: non-contiguous physical memory\n",
  1009. __func__);
  1010. ret = -EINVAL;
  1011. goto out_put;
  1012. }
  1013. addr = page_to_phys(page[i]);
  1014. if (xrp_compare_address(addr, address_map)) {
  1015. pr_debug("%s: untranslatable addr: %pap\n",
  1016. __func__, &addr);
  1017. ret = -EINVAL;
  1018. goto out_put;
  1019. }
  1020. }
  1021. *paddr = __pfn_to_phys(page_to_pfn(page[0])) + (vaddr & ~PAGE_MASK);
  1022. *mapping = (struct xrp_alien_mapping){
  1023. .vaddr = vaddr,
  1024. .size = size,
  1025. .paddr = *paddr,
  1026. .type = ALIEN_GUP,
  1027. };
  1028. ret = 0;
  1029. pr_debug("%s: success, paddr: %pap\n", __func__, paddr);
  1030. out_put:
  1031. if (ret < 0)
  1032. for (i = 0; i < nr_pages; ++i)
  1033. put_page(page[i]);
  1034. out:
  1035. kfree(page);
  1036. return ret;
  1037. }
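/*
 * Bounce-copy between a user (or, via the kernel-share path, kernel) virtual
 * range and a physical buffer by ioremapping the physical range.
 */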
  1038. static long _xrp_copy_user_phys(struct xvp *xvp,
  1039. unsigned long vaddr, unsigned long size,
  1040. phys_addr_t paddr, unsigned long flags,
  1041. bool to_phys)
  1042. {
  1043. // if (pfn_valid(__phys_to_pfn(paddr))) {
  1044. // struct page *page = pfn_to_page(__phys_to_pfn(paddr));
  1045. // size_t page_offs = paddr & ~PAGE_MASK;
  1046. // size_t offs;
  1047. // if (!to_phys)
  1048. // xrp_default_dma_sync_for_cpu(xvp, paddr, size, flags);
  1049. // for (offs = 0; offs < size; ++page) {
  1050. // void *p = kmap(page);
  1051. // size_t sz = PAGE_SIZE - page_offs;
  1052. // size_t copy_sz = sz;
  1053. // unsigned long rc;
  1054. // if (!p)
  1055. // return -ENOMEM;
  1056. // if (size - offs < copy_sz)
  1057. // copy_sz = size - offs;
  1058. // if (to_phys)
  1059. // rc = copy_from_user(p + page_offs,
  1060. // (void __user *)(vaddr + offs),
  1061. // copy_sz);
  1062. // else
  1063. // rc = copy_to_user((void __user *)(vaddr + offs),
  1064. // p + page_offs, copy_sz);
  1065. // pr_debug("%s rc:%d,user addr :(%llx,%d) kernel:addr(%llx,%d) size:%d\n", __func__,rc,vaddr,offs,p,page_offs,copy_sz);
  1066. // page_offs = 0;
  1067. // offs += copy_sz;
  1068. // kunmap(page);
  1069. // if (rc)
  1070. // return -EFAULT;
  1071. // }
  1072. // if (to_phys)
  1073. // xrp_default_dma_sync_for_device(xvp, paddr, size, flags);
  1074. // } else
  1075. {
  1076. void __iomem *p = ioremap(paddr, size);
  1077. unsigned long rc;
  1078. pr_debug("%s ioremap: to_phys %d (%pap, %p)\n", __func__, to_phys, &paddr, p);
  1079. if (!p) {
  1080. dev_err(xvp->dev,
  1081. "couldn't ioremap %pap x 0x%08x\n",
  1082. &paddr, (u32)size);
  1083. return -EINVAL;
  1084. }
  1085. if (to_phys)
  1086. {
  1087. rc = copy_from_user(__io_virt(p),
  1088. (void __user *)vaddr, size);
  1089. /* fallback for newer kernels: vaddr may be a kernel address here, copy it directly */
  1090. if(rc)
  1091. {
  1092. xrp_comm_write(p,(void *)vaddr,size);
  1093. pr_debug("%s write fell back to direct copy to phys\n", __func__);
  1094. rc = 0;
  1095. }
  1096. }
  1097. else
  1098. rc = copy_to_user((void __user *)vaddr,
  1099. __io_virt(p), size);
  1100. pr_debug("%s rc:%lu, user addr:(0x%lx) kernel addr:(%p) size:%lu\n", __func__, rc, vaddr, p, size);
  1101. iounmap(p);
  1102. if (rc)
  1103. return -EFAULT;
  1104. }
  1105. return 0;
  1106. }
  1107. static long xrp_copy_user_to_phys(struct xvp *xvp,
  1108. unsigned long vaddr, unsigned long size,
  1109. phys_addr_t paddr, unsigned long flags)
  1110. {
  1111. return _xrp_copy_user_phys(xvp, vaddr, size, paddr, flags, true);
  1112. }
  1113. static long xrp_copy_user_from_phys(struct xvp *xvp,
  1114. unsigned long vaddr, unsigned long size,
  1115. phys_addr_t paddr, unsigned long flags)
  1116. {
  1117. return _xrp_copy_user_phys(xvp, vaddr, size, paddr, flags, false);
  1118. }
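/*
 * Shadow-copy path: allocate a bounce buffer from the pool with the same
 * sub-page alignment as vaddr and copy the user data in when the DSP is
 * going to read it (XRP_FLAG_READ).
 */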
  1119. static long xvp_copy_virt_to_phys(struct xvp_file *xvp_file,
  1120. unsigned long flags,
  1121. unsigned long vaddr, unsigned long size,
  1122. phys_addr_t *paddr,
  1123. struct xrp_alien_mapping *mapping)
  1124. {
  1125. phys_addr_t phys;
  1126. unsigned long align = clamp(vaddr & -vaddr, 16ul, PAGE_SIZE);
  1127. unsigned long offset = vaddr & (align - 1);
  1128. struct xrp_allocation *allocation;
  1129. long rc;
  1130. rc = xrp_allocate(xvp_file->xvp->pool,
  1131. size + align, align, &allocation);
  1132. if (rc < 0)
  1133. return rc;
  1134. phys = (allocation->start & -align) | offset;
  1135. if (phys < allocation->start)
  1136. phys += align;
  1137. if (flags & XRP_FLAG_READ) {
  1138. if (xrp_copy_user_to_phys(xvp_file->xvp,
  1139. vaddr, size, phys, flags)) {
  1140. xrp_allocation_put(allocation);
  1141. return -EFAULT;
  1142. }
  1143. }
  1144. *paddr = phys;
  1145. *mapping = (struct xrp_alien_mapping){
  1146. .vaddr = vaddr,
  1147. .size = size,
  1148. .paddr = *paddr,
  1149. .allocation = allocation,
  1150. .type = ALIEN_COPY,
  1151. };
  1152. pr_debug("%s: copying to pa: %pap\n", __func__, paddr);
  1153. return 0;
  1154. }
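/*
 * Count how many consecutive VMAs are needed to cover [virt, virt + size);
 * returns 0 when the range is not fully covered by adjacent VMAs.
 */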
  1155. static unsigned xvp_get_region_vma_count(unsigned long virt,
  1156. unsigned long size,
  1157. struct vm_area_struct *vma)
  1158. {
  1159. unsigned i;
  1160. struct mm_struct *mm = current->mm;
  1161. if (virt + size < virt)
  1162. return 0;
  1163. if (vma->vm_start > virt)
  1164. return 0;
  1165. if (vma->vm_start <= virt &&
  1166. virt + size <= vma->vm_end)
  1167. return 1;
  1168. for (i = 2; ; ++i) {
  1169. struct vm_area_struct *next_vma = find_vma(mm, vma->vm_end);
  1170. if (!next_vma)
  1171. return 0;
  1172. if (next_vma->vm_start != vma->vm_end)
  1173. return 0;
  1174. vma = next_vma;
  1175. if (virt + size <= vma->vm_end)
  1176. return i;
  1177. }
  1178. return 0;
  1179. }
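/*
 * Share a kernel buffer with the DSP: use its physical address directly when
 * the DSP can translate it, otherwise fall back to a shadow copy made under
 * a temporary kernel uaccess override.
 */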
  1180. static long xrp_share_kernel(struct file *filp,
  1181. unsigned long virt, unsigned long size,
  1182. unsigned long flags, phys_addr_t *paddr,
  1183. struct xrp_mapping *mapping)
  1184. {
  1185. struct xvp_file *xvp_file = filp->private_data;
  1186. struct xvp *xvp = xvp_file->xvp;
  1187. phys_addr_t phys = __pa(virt);
  1188. long err = 0;
  1189. pr_debug("%s: sharing kernel-only buffer: %pap\n", __func__, &phys);
  1190. if (xrp_translate_to_dsp(&xvp->address_map, phys) ==
  1191. XRP_NO_TRANSLATION) {
  1192. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1193. mm_segment_t oldfs = get_fs();
  1194. set_fs(KERNEL_DS);
  1195. #else
  1196. mm_segment_t oldfs =force_uaccess_begin();
  1197. #endif
  1198. pr_debug("%s: untranslatable addr, making shadow copy\n",
  1199. __func__);
  1200. err = xvp_copy_virt_to_phys(xvp_file, flags,
  1201. virt, size, paddr,
  1202. &mapping->alien_mapping);
  1203. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1204. set_fs(oldfs);
  1205. #else
  1206. force_uaccess_end(oldfs);
  1207. #endif
  1208. mapping->type = XRP_MAPPING_ALIEN | XRP_MAPPING_KERNEL;
  1209. } else {
  1210. mapping->type = XRP_MAPPING_KERNEL;
  1211. *paddr = phys;
  1212. xrp_default_dma_sync_for_device(xvp, phys, size, flags);
  1213. }
  1214. pr_debug("%s: mapping = %p, mapping->type = %d\n",
  1215. __func__, mapping, mapping->type);
  1216. return err;
  1217. }
  1218. static bool vma_needs_cache_ops(struct vm_area_struct *vma)
  1219. {
  1220. pgprot_t prot = vma->vm_page_prot;
  1221. return pgprot_val(prot) != pgprot_val(pgprot_noncached(prot)) &&
  1222. pgprot_val(prot) != pgprot_val(pgprot_writecombine(prot));
  1223. }
  1224. /* Share blocks of memory, from host to IVP or back.
  1225. *
  1226. * When sharing to IVP return physical addresses in paddr.
  1227. * Areas allocated from the driver can always be shared in both directions.
  1228. * Contiguous 3rd party allocations need to be shared to IVP before they can
  1229. * be shared back.
  1230. */
  1231. static long __xrp_share_block(struct file *filp,
  1232. unsigned long virt, unsigned long size,
  1233. unsigned long flags, phys_addr_t *paddr,
  1234. struct xrp_mapping *mapping)
  1235. {
  1236. phys_addr_t phys = ~0ul;
  1237. struct xvp_file *xvp_file = filp->private_data;
  1238. struct xvp *xvp = xvp_file->xvp;
  1239. struct mm_struct *mm = current->mm;
  1240. struct vm_area_struct *vma = find_vma(mm, virt);
  1241. bool do_cache = true;
  1242. long rc = -EINVAL;
  1243. if (!vma) {
  1244. pr_debug("%s: no vma for vaddr/size = 0x%08lx/0x%08lx\n",
  1245. __func__, virt, size);
  1246. return -EINVAL;
  1247. }
  1248. /*
  1249. * Region requested for sharing should be within single VMA.
  1250. * That's true for the majority of cases, but sometimes (e.g.
  1251. * sharing buffer in the beginning of .bss which shares a
  1252. * file-mapped page with .data, followed by anonymous page)
  1253. * region will cross multiple VMAs. Support it in the simplest
  1254. * way possible: start with get_user_pages and use shadow copy
  1255. * if that fails.
  1256. */
  1257. switch (xvp_get_region_vma_count(virt, size, vma)) {
  1258. case 0:
  1259. pr_debug("%s: bad vma for vaddr/size = 0x%08lx/0x%08lx\n",
  1260. __func__, virt, size);
  1261. pr_debug("%s: vma->vm_start = 0x%08lx, vma->vm_end = 0x%08lx\n",
  1262. __func__, vma->vm_start, vma->vm_end);
  1263. return -EINVAL;
  1264. case 1:
  1265. break;
  1266. default:
  1267. pr_debug("%s: multiple vmas cover vaddr/size = 0x%08lx/0x%08lx\n",
  1268. __func__, virt, size);
  1269. vma = NULL;
  1270. break;
  1271. }
  1272. /*
* It also needs to be allocated from the same file descriptor, or
* at least from a file descriptor managed by XRP.
  1275. */
  1276. if (vma &&
  1277. (vma->vm_file == filp || xrp_is_known_file(vma->vm_file))) {
  1278. struct xvp_file *vm_file = vma->vm_file->private_data;
  1279. struct xrp_allocation *xrp_allocation = vma->vm_private_data;
  1280. phys = (vma->vm_pgoff << PAGE_SHIFT) +
  1281. virt - vma->vm_start;
  1282. pr_debug("%s: XRP allocation at 0x%08lx, paddr: %pap\n",
  1283. __func__, virt, &phys);
  1284. /*
  1285. * If it was allocated from a different XRP file it may belong
  1286. * to a different device and not be directly accessible.
  1287. * Check if it is.
  1288. */
  1289. if (vma->vm_file != filp) {
  1290. const struct xrp_address_map_entry *address_map =
  1291. xrp_get_address_mapping(&xvp->address_map,
  1292. phys);
  1293. if (!address_map ||
  1294. xrp_compare_address(phys + size - 1, address_map))
  1295. pr_debug("%s: untranslatable addr: %pap\n",
  1296. __func__, &phys);
  1297. else
  1298. rc = 0;
  1299. } else {
  1300. rc = 0;
  1301. }
  1302. if (rc == 0) {
  1303. mapping->type = XRP_MAPPING_NATIVE;
  1304. mapping->native.xrp_allocation = xrp_allocation;
  1305. mapping->native.vaddr = virt;
  1306. xrp_allocation_get(xrp_allocation);
  1307. do_cache = vma_needs_cache_ops(vma);
  1308. }
  1309. }
  1310. if (rc < 0) {
  1311. struct xrp_alien_mapping *alien_mapping =
  1312. &mapping->alien_mapping;
  1313. unsigned long n_pages = PFN_UP(virt + size) - PFN_DOWN(virt);
  1314. /* Otherwise this is alien allocation. */
  1315. pr_debug("%s: non-XVP allocation at 0x%08lx\n",
  1316. __func__, virt);
  1317. /*
  1318. * A range can only be mapped directly if it is either
  1319. * uncached or HW-specific cache operations can handle it.
  1320. */
  1321. if (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)) {
  1322. rc = xvp_pfn_virt_to_phys(xvp_file, vma,
  1323. virt, size,
  1324. &phys,
  1325. alien_mapping);
  1326. if (rc == 0 && vma_needs_cache_ops(vma) &&
  1327. !xrp_cacheable(xvp, PFN_DOWN(phys), n_pages)) {
  1328. pr_debug("%s: needs unsupported cache mgmt\n",
  1329. __func__);
  1330. rc = -EINVAL;
  1331. }
  1332. } else {
  1333. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1334. up_read(&mm->mmap_sem);
  1335. #else
  1336. up_read(&mm->mmap_lock);
  1337. #endif
  1338. rc = xvp_gup_virt_to_phys(xvp_file, virt,
  1339. size, &phys,
  1340. alien_mapping);
  1341. if (rc == 0 &&
  1342. (!vma || vma_needs_cache_ops(vma)) &&
  1343. !xrp_cacheable(xvp, PFN_DOWN(phys), n_pages)) {
  1344. pr_debug("%s: needs unsupported cache mgmt\n",
  1345. __func__);
  1346. xrp_put_pages(phys, n_pages);
  1347. rc = -EINVAL;
  1348. }
  1349. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1350. down_read(&mm->mmap_sem);
  1351. #else
  1352. down_read(&mm->mmap_lock);
  1353. #endif
  1354. }
  1355. if (rc == 0 && vma && !vma_needs_cache_ops(vma))
  1356. do_cache = false;
  1357. /*
  1358. * If we couldn't share try to make a shadow copy.
  1359. */
  1360. if (rc < 0) {
  1361. rc = xvp_copy_virt_to_phys(xvp_file, flags,
  1362. virt, size, &phys,
  1363. alien_mapping);
  1364. do_cache = false;
  1365. }
  1366. /* We couldn't share it. Fail the request. */
  1367. if (rc < 0) {
  1368. pr_debug("%s: couldn't map virt to phys\n",
  1369. __func__);
  1370. return -EINVAL;
  1371. }
  1372. phys = alien_mapping->paddr +
  1373. virt - alien_mapping->vaddr;
  1374. mapping->type = XRP_MAPPING_ALIEN;
  1375. }
  1376. *paddr = phys;
  1377. pr_debug("%s: mapping = %p, mapping->type = %d,do_cache = %d\n",
  1378. __func__, mapping, mapping->type,do_cache);
  1379. if (do_cache)
  1380. xrp_dma_sync_for_device(xvp,
  1381. virt, phys, size,
  1382. flags);
  1383. return 0;
  1384. }
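/*
* Propagate data written by the DSP back to the original user pages:
* mark GUP'd pages dirty after a cache sync, or copy the shadow buffer
* back to user space for ALIEN_COPY mappings.
*/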
  1385. static long xrp_writeback_alien_mapping(struct xvp_file *xvp_file,
  1386. struct xrp_alien_mapping *alien_mapping,
  1387. unsigned long flags)
  1388. {
  1389. struct page *page;
  1390. size_t nr_pages;
  1391. size_t i;
  1392. long ret = 0;
  1393. switch (alien_mapping->type) {
  1394. case ALIEN_GUP:
  1395. xrp_dma_sync_for_cpu(xvp_file->xvp,
  1396. alien_mapping->vaddr,
  1397. alien_mapping->paddr,
  1398. alien_mapping->size,
  1399. flags);
  1400. pr_debug("%s: dirtying alien GUP @va = %p, pa = %pap\n",
  1401. __func__, (void __user *)alien_mapping->vaddr,
  1402. &alien_mapping->paddr);
  1403. page = pfn_to_page(__phys_to_pfn(alien_mapping->paddr));
  1404. nr_pages = PFN_UP(alien_mapping->vaddr + alien_mapping->size) -
  1405. PFN_DOWN(alien_mapping->vaddr);
  1406. for (i = 0; i < nr_pages; ++i)
  1407. SetPageDirty(page + i);
  1408. break;
  1409. case ALIEN_COPY:
  1410. pr_debug("%s: synchronizing alien copy @pa = %pap back to %p\n",
  1411. __func__, &alien_mapping->paddr,
  1412. (void __user *)alien_mapping->vaddr);
  1413. if (xrp_copy_user_from_phys(xvp_file->xvp,
  1414. alien_mapping->vaddr,
  1415. alien_mapping->size,
  1416. alien_mapping->paddr,
  1417. flags))
  1418. ret = -EINVAL;
  1419. break;
  1420. default:
  1421. break;
  1422. }
  1423. return ret;
  1424. }
/*
* Undo __xrp_share_block/xrp_share_kernel: write data back to the user
* (or mark GUP pages dirty) when requested and release the mapping.
*/
  1428. static long __xrp_unshare_block(struct file *filp, struct xrp_mapping *mapping,
  1429. unsigned long flags)
  1430. {
  1431. long ret = 0;
mm_segment_t oldfs;
/* Braces are required here: on pre-5.10 kernels the branch has two
* statements and set_fs() must not run for non-kernel mappings. */
if (mapping->type & XRP_MAPPING_KERNEL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
oldfs = get_fs();
set_fs(KERNEL_DS);
#else
oldfs = force_uaccess_begin();
#endif
}
  1440. switch (mapping->type & ~XRP_MAPPING_KERNEL) {
  1441. case XRP_MAPPING_NATIVE:
  1442. if (flags & XRP_FLAG_WRITE) {
  1443. struct xvp_file *xvp_file = filp->private_data;
  1444. xrp_dma_sync_for_cpu(xvp_file->xvp,
  1445. mapping->native.vaddr,
  1446. mapping->native.xrp_allocation->start,
  1447. mapping->native.xrp_allocation->size,
  1448. flags);
  1449. }
  1450. xrp_allocation_put(mapping->native.xrp_allocation);
  1451. break;
  1452. case XRP_MAPPING_ALIEN:
  1453. if (flags & XRP_FLAG_WRITE)
  1454. ret = xrp_writeback_alien_mapping(filp->private_data,
  1455. &mapping->alien_mapping,
  1456. flags);
  1457. xrp_alien_mapping_destroy(&mapping->alien_mapping);
  1458. break;
  1459. case XRP_MAPPING_KERNEL:
  1460. break;
  1461. default:
  1462. break;
  1463. }
if (mapping->type & XRP_MAPPING_KERNEL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
set_fs(oldfs);
#else
force_uaccess_end(oldfs);
#endif
}
  1470. mapping->type = XRP_MAPPING_NONE;
  1471. return ret;
  1472. }
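/*
* XRP_IOCTL_FREE: release a buffer previously returned by XRP_IOCTL_ALLOC
* by unmapping the whole VMA that backs the given user address.
*/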
  1473. static long xrp_ioctl_free(struct file *filp,
  1474. struct xrp_ioctl_alloc __user *p)
  1475. {
  1476. struct mm_struct *mm = current->mm;
  1477. struct xrp_ioctl_alloc xrp_ioctl_alloc;
  1478. struct vm_area_struct *vma;
  1479. unsigned long start;
  1480. // pr_debug("%s: %p\n", __func__, p);
  1481. if (copy_from_user(&xrp_ioctl_alloc, p, sizeof(*p)))
  1482. return -EFAULT;
  1483. start = xrp_ioctl_alloc.addr;
  1484. // pr_debug("%s: virt_addr = 0x%08lx\n", __func__, start);
  1485. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1486. down_read(&mm->mmap_sem);
  1487. #else
  1488. down_read(&mm->mmap_lock);
  1489. #endif
  1490. vma = find_vma(mm, start);
  1491. if (vma && vma->vm_file == filp &&
  1492. vma->vm_start <= start && start < vma->vm_end) {
  1493. size_t size;
  1494. start = vma->vm_start;
  1495. size = vma->vm_end - vma->vm_start;
  1496. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1497. up_read(&mm->mmap_sem);
  1498. #else
  1499. up_read(&mm->mmap_lock);
  1500. #endif
  1501. pr_debug("%s: 0x%lx x %zu\n", __func__, start, size);
  1502. return vm_munmap(start, size);
  1503. }
  1504. // pr_debug("%s: no vma/bad vma for vaddr = 0x%08lx\n", __func__, start);
  1505. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1506. up_read(&mm->mmap_sem);
  1507. #else
  1508. up_read(&mm->mmap_lock);
  1509. #endif
  1510. return -EINVAL;
  1511. }
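/*
* Wait for command completion in IRQ mode: sleep on the queue completion
* until cmd_complete() reports the DSP response or the timeout expires.
*/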
  1512. static long xvp_complete_cmd_irq(struct xvp *xvp, struct xrp_comm *comm,
  1513. bool (*cmd_complete)(struct xrp_comm *p))
  1514. {
  1515. long timeout = firmware_command_timeout * HZ;
  1516. if (cmd_complete(comm))
  1517. return 0;
  1518. if (xrp_panic_check(xvp))
  1519. return -EBUSY;
  1520. do {
  1521. timeout = wait_for_completion_interruptible_timeout(&comm->completion,
  1522. timeout);
  1523. if (cmd_complete(comm))
  1524. return 0;
  1525. if (xrp_panic_check(xvp))
  1526. return -EBUSY;
  1527. } while (timeout > 0);
  1528. if (timeout == 0)
  1529. return -EBUSY;
  1530. return timeout;
  1531. }
  1532. static long xvp_complete_cmd_poll(struct xvp *xvp, struct xrp_comm *comm,
  1533. bool (*cmd_complete)(struct xrp_comm *p))
  1534. {
  1535. unsigned long deadline = jiffies + firmware_command_timeout * HZ;
  1536. do {
  1537. if (cmd_complete(comm))
  1538. return 0;
  1539. if (xrp_panic_check(xvp))
  1540. return -EBUSY;
  1541. schedule();
  1542. } while (time_before(jiffies, deadline));
  1543. return -EBUSY;
  1544. }
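/*
* Per-command bookkeeping: the user's queue descriptor plus the mappings
* and DSP-visible addresses of in/out data and the buffer descriptor table.
* Small payloads are kept inline in the command slot instead of being mapped.
*/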
  1545. struct xrp_request {
  1546. struct xrp_ioctl_queue ioctl_queue;
  1547. size_t n_buffers;
  1548. struct xrp_mapping *buffer_mapping;
  1549. struct xrp_dsp_buffer *dsp_buffer;
  1550. phys_addr_t in_data_phys;
  1551. phys_addr_t out_data_phys;
  1552. phys_addr_t dsp_buffer_phys;
  1553. union {
  1554. struct xrp_mapping in_data_mapping;
  1555. u8 in_data[XRP_DSP_CMD_INLINE_DATA_SIZE];
  1556. };
  1557. union {
  1558. struct xrp_mapping out_data_mapping;
  1559. u8 out_data[XRP_DSP_CMD_INLINE_DATA_SIZE];
  1560. };
  1561. union {
  1562. struct xrp_mapping dsp_buffer_mapping;
  1563. struct xrp_dsp_buffer buffer_data[XRP_DSP_CMD_INLINE_BUFFER_COUNT];
  1564. };
  1565. u8 nsid[XRP_DSP_CMD_NAMESPACE_ID_SIZE];
  1566. };
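/* Tear down all request mappings without writing anything back (error path). */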
  1567. static void xrp_unmap_request_nowb(struct file *filp, struct xrp_request *rq)
  1568. {
  1569. size_t n_buffers = rq->n_buffers;
  1570. size_t i;
  1571. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1572. __xrp_unshare_block(filp, &rq->in_data_mapping, 0);
  1573. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1574. __xrp_unshare_block(filp, &rq->out_data_mapping, 0);
  1575. for (i = 0; i < n_buffers; ++i)
  1576. __xrp_unshare_block(filp, rq->buffer_mapping + i, 0);
  1577. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1578. __xrp_unshare_block(filp, &rq->dsp_buffer_mapping, 0);
  1579. if (n_buffers) {
  1580. kfree(rq->buffer_mapping);
  1581. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1582. kfree(rq->dsp_buffer);
  1583. }
  1584. }
  1585. }
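/*
* Unshare all request mappings, copying out_data and writable buffers back
* to user space. Returns the first error encountered, if any.
*/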
  1586. static long xrp_unmap_request(struct file *filp, struct xrp_request *rq)
  1587. {
  1588. size_t n_buffers = rq->n_buffers;
  1589. size_t i;
  1590. long ret = 0;
  1591. long rc;
  1592. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1593. __xrp_unshare_block(filp, &rq->in_data_mapping, XRP_FLAG_READ);
  1594. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1595. rc = __xrp_unshare_block(filp, &rq->out_data_mapping,
  1596. XRP_FLAG_WRITE);
  1597. if (rc < 0) {
  1598. pr_debug("%s: out_data could not be unshared\n",
  1599. __func__);
  1600. ret = rc;
  1601. }
  1602. } else {
  1603. pr_debug("%s: out_data <%s> to copied\n",
  1604. __func__,rq->out_data);
  1605. if (copy_to_user((void __user *)(unsigned long)rq->ioctl_queue.out_data_addr,
  1606. rq->out_data,
  1607. rq->ioctl_queue.out_data_size)) {
  1608. pr_debug("%s: out_data could not be copied\n",
  1609. __func__);
  1610. ret = -EFAULT;
  1611. }
  1612. }
  1613. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1614. __xrp_unshare_block(filp, &rq->dsp_buffer_mapping,
  1615. XRP_FLAG_READ_WRITE);
  1616. for (i = 0; i < n_buffers; ++i) {
  1617. rc = __xrp_unshare_block(filp, rq->buffer_mapping + i,
  1618. rq->dsp_buffer[i].flags);
  1619. if (rc < 0) {
  1620. pr_debug("%s: buffer %zd could not be unshared\n",
  1621. __func__, i);
  1622. ret = rc;
  1623. }
  1624. }
  1625. if (n_buffers) {
  1626. kfree(rq->buffer_mapping);
  1627. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1628. kfree(rq->dsp_buffer);
  1629. }
  1630. rq->n_buffers = 0;
  1631. }
  1632. return ret;
  1633. }
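/*
* Prepare a request for submission: copy or share in/out data and every
* user buffer, and build the xrp_dsp_buffer descriptor table with
* DSP-translated buffer addresses.
*/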
  1634. static long xrp_map_request(struct file *filp, struct xrp_request *rq,
  1635. struct mm_struct *mm)
  1636. {
  1637. struct xvp_file *xvp_file = filp->private_data;
  1638. struct xvp *xvp = xvp_file->xvp;
  1639. struct xrp_ioctl_buffer __user *buffer;
  1640. size_t n_buffers = rq->ioctl_queue.buffer_size /
  1641. sizeof(struct xrp_ioctl_buffer);
  1642. size_t i;
  1643. long ret = 0;
  1644. if ((rq->ioctl_queue.flags & XRP_QUEUE_FLAG_NSID) &&
  1645. copy_from_user(rq->nsid,
  1646. (void __user *)(unsigned long)rq->ioctl_queue.nsid_addr,
  1647. sizeof(rq->nsid))) {
  1648. pr_debug("%s: nsid could not be copied\n ", __func__);
  1649. return -EINVAL;
  1650. }
  1651. rq->n_buffers = n_buffers;
  1652. if (n_buffers) {
  1653. rq->buffer_mapping =
  1654. kzalloc(n_buffers * sizeof(*rq->buffer_mapping),
  1655. GFP_KERNEL);
  1656. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1657. rq->dsp_buffer =
  1658. kmalloc(n_buffers * sizeof(*rq->dsp_buffer),
  1659. GFP_KERNEL);
  1660. if (!rq->dsp_buffer) {
  1661. kfree(rq->buffer_mapping);
  1662. return -ENOMEM;
  1663. }
  1664. } else {
  1665. rq->dsp_buffer = rq->buffer_data;
  1666. }
  1667. }
  1668. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1669. down_read(&mm->mmap_sem);
  1670. #else
  1671. down_read(&mm->mmap_lock);
  1672. #endif
  1673. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1674. ret = __xrp_share_block(filp, rq->ioctl_queue.in_data_addr,
  1675. rq->ioctl_queue.in_data_size,
  1676. XRP_FLAG_READ, &rq->in_data_phys,
  1677. &rq->in_data_mapping);
  1678. if(ret < 0) {
  1679. pr_debug("%s: in_data could not be shared\n",
  1680. __func__);
  1681. goto share_err;
  1682. }
  1683. } else {
  1684. if (copy_from_user(rq->in_data,
  1685. (void __user *)(unsigned long)rq->ioctl_queue.in_data_addr,
  1686. rq->ioctl_queue.in_data_size)) {
  1687. pr_debug("%s: in_data could not be copied\n",
  1688. __func__);
  1689. ret = -EFAULT;
  1690. goto share_err;
  1691. }
  1692. }
  1693. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE) {
  1694. ret = __xrp_share_block(filp, rq->ioctl_queue.out_data_addr,
  1695. rq->ioctl_queue.out_data_size,
  1696. XRP_FLAG_WRITE, &rq->out_data_phys,
  1697. &rq->out_data_mapping);
  1698. if (ret < 0) {
  1699. pr_debug("%s: out_data could not be shared\n",
  1700. __func__);
  1701. goto share_err;
  1702. }
  1703. }
  1704. buffer = (void __user *)(unsigned long)rq->ioctl_queue.buffer_addr;
  1705. for (i = 0; i < n_buffers; ++i) {
  1706. struct xrp_ioctl_buffer ioctl_buffer;
  1707. phys_addr_t buffer_phys = ~0ul;
  1708. if (copy_from_user(&ioctl_buffer, buffer + i,
  1709. sizeof(ioctl_buffer))) {
  1710. ret = -EFAULT;
  1711. goto share_err;
  1712. }
  1713. if (ioctl_buffer.flags & XRP_FLAG_READ_WRITE) {
  1714. ret = __xrp_share_block(filp, ioctl_buffer.addr,
  1715. ioctl_buffer.size,
  1716. ioctl_buffer.flags,
  1717. &buffer_phys,
  1718. rq->buffer_mapping + i);
  1719. if (ret < 0) {
  1720. pr_debug("%s: buffer %zd could not be shared\n",
  1721. __func__, i);
  1722. goto share_err;
  1723. }
  1724. }
  1725. rq->dsp_buffer[i] = (struct xrp_dsp_buffer){
  1726. .flags = ioctl_buffer.flags,
  1727. .size = ioctl_buffer.size,
  1728. .addr = xrp_translate_to_dsp(&xvp->address_map,
  1729. buffer_phys),
  1730. };
  1731. }
  1732. if (n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT) {
  1733. ret = xrp_share_kernel(filp, (unsigned long)rq->dsp_buffer,
  1734. n_buffers * sizeof(*rq->dsp_buffer),
  1735. XRP_FLAG_READ_WRITE, &rq->dsp_buffer_phys,
  1736. &rq->dsp_buffer_mapping);
  1737. if(ret < 0) {
  1738. pr_debug("%s: buffer descriptors could not be shared\n",
  1739. __func__);
  1740. goto share_err;
  1741. }
  1742. }
  1743. share_err:
  1744. #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
  1745. up_read(&mm->mmap_sem);
  1746. #else
  1747. up_read(&mm->mmap_lock);
  1748. #endif
  1749. if (ret < 0)
  1750. xrp_unmap_request_nowb(filp, rq);
  1751. return ret;
  1752. }
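/*
* Write the prepared request into the shared command slot: sizes, inline or
* translated addresses, optional namespace id, then set REQUEST_VALID last.
*/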
  1753. static void xrp_fill_hw_request(struct xrp_dsp_cmd __iomem *cmd,
  1754. struct xrp_request *rq,
  1755. const struct xrp_address_map *map)
  1756. {
  1757. xrp_comm_write32(&cmd->in_data_size, rq->ioctl_queue.in_data_size);
  1758. xrp_comm_write32(&cmd->out_data_size, rq->ioctl_queue.out_data_size);
  1759. xrp_comm_write32(&cmd->buffer_size,
  1760. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1761. if (rq->ioctl_queue.in_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1762. xrp_comm_write32(&cmd->in_data_addr,
  1763. xrp_translate_to_dsp(map, rq->in_data_phys));
  1764. else
  1765. xrp_comm_write(&cmd->in_data, rq->in_data,
  1766. rq->ioctl_queue.in_data_size);
  1767. if (rq->ioctl_queue.out_data_size > XRP_DSP_CMD_INLINE_DATA_SIZE)
  1768. xrp_comm_write32(&cmd->out_data_addr,
  1769. xrp_translate_to_dsp(map, rq->out_data_phys));
  1770. if (rq->n_buffers > XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1771. xrp_comm_write32(&cmd->buffer_addr,
  1772. xrp_translate_to_dsp(map, rq->dsp_buffer_phys));
  1773. else
  1774. xrp_comm_write(&cmd->buffer_data, rq->dsp_buffer,
  1775. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1776. if (rq->ioctl_queue.flags & XRP_QUEUE_FLAG_NSID)
  1777. xrp_comm_write(&cmd->nsid, rq->nsid, sizeof(rq->nsid));
  1778. #ifdef DEBUG
  1779. {
  1780. struct xrp_dsp_cmd dsp_cmd;
  1781. xrp_comm_read(cmd, &dsp_cmd, sizeof(dsp_cmd));
  1782. pr_debug("%s: cmd for DSP: %p: %*ph\n",
  1783. __func__, cmd,
  1784. (int)sizeof(dsp_cmd), &dsp_cmd);
  1785. }
  1786. #endif
  1787. wmb();
  1788. /* update flags */
  1789. xrp_comm_write32(&cmd->flags,
  1790. (rq->ioctl_queue.flags & ~XRP_DSP_CMD_FLAG_RESPONSE_VALID) |
  1791. XRP_DSP_CMD_FLAG_REQUEST_VALID);
  1792. }
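/* Read back inline out_data/buffer descriptors and clear the command slot. */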
  1793. static long xrp_complete_hw_request(struct xrp_dsp_cmd __iomem *cmd,
  1794. struct xrp_request *rq)
  1795. {
  1796. u32 flags = xrp_comm_read32(&cmd->flags);
  1797. if (rq->ioctl_queue.out_data_size <= XRP_DSP_CMD_INLINE_DATA_SIZE)
  1798. xrp_comm_read(&cmd->out_data, rq->out_data,
  1799. rq->ioctl_queue.out_data_size);
  1800. if (rq->n_buffers <= XRP_DSP_CMD_INLINE_BUFFER_COUNT)
  1801. xrp_comm_read(&cmd->buffer_data, rq->dsp_buffer,
  1802. rq->n_buffers * sizeof(struct xrp_dsp_buffer));
  1803. xrp_comm_write32(&cmd->flags, 0);
  1804. return (flags & XRP_DSP_CMD_FLAG_RESPONSE_DELIVERY_FAIL) ? -ENXIO : 0;
  1805. }
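/*
* XRP_IOCTL_QUEUE[_NS]: synchronously run one command on the DSP.
* Maps the request, selects a queue by priority, rings the device and waits
* (IRQ or polling mode), then writes results back; on -EBUSY it may reboot
* the firmware once before giving up.
*
* A minimal user-space sketch (illustrative only; the device node name is an
* assumption, the ioctl structures come from the XRP uapi headers):
*
*   int fd = open("/dev/xvp0", O_RDWR);
*   struct xrp_ioctl_queue q = {
*           .in_data_addr = (uintptr_t)in, .in_data_size = sizeof(in),
*           .out_data_addr = (uintptr_t)out, .out_data_size = sizeof(out),
*   };
*   ioctl(fd, XRP_IOCTL_QUEUE, &q);
*/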
  1806. static long xrp_ioctl_submit_sync(struct file *filp,
  1807. struct xrp_ioctl_queue __user *p)
  1808. {
  1809. struct xvp_file *xvp_file = filp->private_data;
  1810. struct xvp *xvp = xvp_file->xvp;
  1811. struct xrp_comm *queue = xvp->queue;
  1812. struct xrp_request xrp_rq, *rq = &xrp_rq;
  1813. long ret = 0;
  1814. bool went_off = false;
  1815. if (copy_from_user(&rq->ioctl_queue, p, sizeof(*p)))
  1816. return -EFAULT;
  1817. if (rq->ioctl_queue.flags & ~XRP_QUEUE_VALID_FLAGS) {
  1818. dev_dbg(xvp->dev, "%s: invalid flags 0x%08x\n",
  1819. __func__, rq->ioctl_queue.flags);
  1820. return -EINVAL;
  1821. }
  1822. if (xvp->n_queues > 1) {
  1823. unsigned n = (rq->ioctl_queue.flags & XRP_QUEUE_FLAG_PRIO) >>
  1824. XRP_QUEUE_FLAG_PRIO_SHIFT;
  1825. if (n >= xvp->n_queues)
  1826. n = xvp->n_queues - 1;
  1827. queue = xvp->queue_ordered[n];
  1828. dev_dbg(xvp->dev, "%s: priority: %d -> %d\n",
  1829. __func__, n, queue->priority);
  1830. }
  1831. ret = xrp_map_request(filp, rq, current->mm);
  1832. if (ret < 0)
  1833. return ret;
  1834. if (loopback < LOOPBACK_NOIO) {
  1835. int reboot_cycle;
  1836. retry:
  1837. mutex_lock(&queue->lock);
  1838. reboot_cycle = atomic_read(&xvp->reboot_cycle);
  1839. if (reboot_cycle != atomic_read(&xvp->reboot_cycle_complete)) {
  1840. mutex_unlock(&queue->lock);
  1841. goto retry;
  1842. }
  1843. if (xvp->off) {
  1844. ret = -ENODEV;
  1845. } else {
  1846. xrp_fill_hw_request(queue->comm, rq, &xvp->address_map);
  1847. xrp_send_device_irq(xvp);
  1848. if (xvp->host_irq_mode) {
  1849. ret = xvp_complete_cmd_irq(xvp, queue,
  1850. xrp_cmd_complete);
  1851. } else {
  1852. ret = xvp_complete_cmd_poll(xvp, queue,
  1853. xrp_cmd_complete);
  1854. }
  1855. xrp_panic_check(xvp);
  1856. /* copy back inline data */
  1857. if (ret == 0) {
  1858. ret = xrp_complete_hw_request(queue->comm, rq);
  1859. } else if (ret == -EBUSY && firmware_reboot &&
  1860. atomic_inc_return(&xvp->reboot_cycle) ==
  1861. reboot_cycle + 1) {
  1862. int rc;
  1863. unsigned i;
  1864. dev_dbg(xvp->dev,
  1865. "%s: restarting firmware...\n",
  1866. __func__);
  1867. for (i = 0; i < xvp->n_queues; ++i)
  1868. if (xvp->queue + i != queue)
  1869. mutex_lock(&xvp->queue[i].lock);
  1870. rc = xrp_boot_firmware(xvp);
  1871. atomic_set(&xvp->reboot_cycle_complete,
  1872. atomic_read(&xvp->reboot_cycle));
  1873. for (i = 0; i < xvp->n_queues; ++i)
  1874. if (xvp->queue + i != queue)
  1875. mutex_unlock(&xvp->queue[i].lock);
  1876. if (rc < 0) {
  1877. ret = rc;
  1878. went_off = xvp->off;
  1879. }
  1880. }
  1881. }
  1882. mutex_unlock(&queue->lock);
  1883. }
  1884. if (ret == 0)
  1885. ret = xrp_unmap_request(filp, rq);
  1886. else if (!went_off)
  1887. xrp_unmap_request_nowb(filp, rq);
  1888. /*
  1889. * Otherwise (if the DSP went off) all mapped buffers are leaked here.
  1890. * There seems to be no way to recover them as we don't know what's
  1891. * going on with the DSP; the DSP may still be reading and writing
  1892. * this memory.
  1893. */
  1894. return ret;
  1895. }
  1896. // static void xrp_dam_buf_free(struct xrp_allocation *xrp_allocation)
  1897. // {
  1898. // dev_dbg(xvp->dev,"%s: release dma_buf allocation n",
  1899. // __func__);
  1900. // kfree(xrp_allocation->pool);
  1901. // kfree(xrp_allocation);
  1902. // return
  1903. // }
  1904. // static void xrp_dam_buf_offset(struct xrp_allocation *xrp_allocation)
  1905. // {
  1906. // return 0;
  1907. // }
  1908. // static const struct xrp_allocation_ops xrp_dma_buf_pool_ops = {
  1909. // .alloc = NULL,
  1910. // .free = xrp_dam_buf_free,
  1911. // .free_pool = NULL,
  1912. // .offset = xrp_dam_buf_offset,
  1913. // };
  1914. // static inline struct xrp_dma_buf_item * xrp_get_dma_buf_tail(struct xrp_dma_buf_item **list)
  1915. // {
  1916. // struct xrp_dma_buf_item ** item;
  1917. // if(*list == NULLL)
  1918. // return NULL;
  1919. // for(item = list;(*item)->next != NULL;item= &(*item)->next)
  1920. // {
  1921. // ;
  1922. // }
  1923. // return *item;
  1924. // }
  1925. // static inline void xrp_dam_buf_add_item(struct xrp_dma_buf_item **list,struct xrp_dma_buf_item *entry)
  1926. // {
  1927. // struct xrp_dma_buf_item * item = xrp_get_dma_buf_tail(list);
  1928. // if(item == NULL)
  1929. // {
  1930. // *list=entry;
  1931. // }
  1932. // else{
  1933. // item->next = entry;
  1934. // }
  1935. // }
  1936. // static inline int xrp_get_dma_buf_remove(struct xrp_dma_buf_item **list,struct xrp_dma_buf_item *entry)
  1937. // {
  1938. // {
  1939. // struct xrp_dma_buf_item ** item;
  1940. // for(item = list;(*item)->next != NULL;item= &(*item)->next)
  1941. // {
  1942. // struct xrp_dma_buf_item *cur = *item;
  1943. // if();
  1944. // }
  1945. // }
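/* Drop one reference to an imported dma-buf entry; free it when unused. */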
  1946. static void xrp_release_dma_buf_item(struct xrp_dma_buf_item * item)
  1947. {
  1948. spin_lock(&xrp_dma_buf_lock);
  1949. if(--item->ref==0)
  1950. {
  1951. list_del(&item->link);
  1952. kfree(item);
  1953. }
  1954. spin_unlock(&xrp_dma_buf_lock);
  1955. }
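/*
* XRP_IOCTL_DMABUF_IMPORT: attach and map an external dma-buf (reusing an
* existing attachment for the same buffer), require a single contiguous
* sg entry, then mmap it into the caller and return its physical address,
* user address and size.
*/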
  1956. static long xrp_ioctl_dma_buf_import(struct file *filp,
  1957. struct xrp_dma_buf __user *p)
  1958. {
  1959. long ret;
  1960. struct xvp_file *xvp_file = filp->private_data;
  1961. struct xvp *xvp = xvp_file->xvp;
  1962. struct xrp_dma_buf xrp_dma_buf;
  1963. struct dma_buf *dmabuf = NULL;
  1964. struct sg_table *sgt = NULL;
  1965. struct xrp_dma_buf_item *dma_buf_item=NULL;
  1966. struct xrp_dma_buf_item *temp=NULL;
  1967. struct dma_buf_attachment *attachment = NULL;
  1968. // struct xrp_allocation *xrp_allocation;
  1969. // struct xrp_private_pool *pool;
  1970. int npages = 0;
  1971. int i;
  1972. struct scatterlist *s;
  1973. unsigned int size = 0;
  1974. dev_dbg(xvp->dev,"%s: entry\n", __func__);
  1975. if (copy_from_user(&xrp_dma_buf, p, sizeof(*p)))
  1976. {
  1977. return -EFAULT;
  1978. }
  1979. dmabuf = dma_buf_get(xrp_dma_buf.fd);
if (IS_ERR(dmabuf))
{
return -EFAULT;
}
  1984. spin_lock(&xrp_dma_buf_lock);
  1985. list_for_each_entry(temp,&xvp->dma_buf_list, link)
  1986. {
  1987. if(temp->dmabuf == dmabuf)
  1988. {
  1989. dma_buf_item = temp;
  1990. dma_buf_item->ref++;
  1991. break;
  1992. }
  1993. }
  1994. spin_unlock(&xrp_dma_buf_lock);
  1995. if(dma_buf_item == NULL)
  1996. {
dev_dbg(xvp->dev,
"%s: no existing entry for this dma-buf, importing it\n", __func__);
  1999. attachment = dma_buf_attach(dmabuf, xvp->dev);
if (IS_ERR(attachment))
{
goto One_Err;
}
  2004. sgt = dma_buf_map_attachment(attachment, xrp_dma_direction(xrp_dma_buf.flags));
if (IS_ERR_OR_NULL(sgt))
{
goto One_Err;
}
  2009. dma_buf_item = kzalloc(sizeof(*dma_buf_item),GFP_KERNEL);
  2010. if(dma_buf_item == NULL)
  2011. {
  2012. goto One_Err;
  2013. }
  2014. dma_buf_item->attachment = attachment;
  2015. dma_buf_item->dmabuf = dmabuf;
  2016. dma_buf_item->sgt = sgt;
  2017. dma_buf_item->ref = 1;
  2018. spin_lock(&xrp_dma_buf_lock);
  2019. list_add_tail(&dma_buf_item->link, &xvp->dma_buf_list);
  2020. spin_unlock(&xrp_dma_buf_lock);
  2021. }
else
{
dev_dbg(xvp->dev,
"%s: found existing entry for this dma-buf, reusing it\n", __func__);
/* The reference was already taken in the lookup loop above;
* taking it again here would leak the entry on release. */
attachment = dma_buf_item->attachment;
sgt = dma_buf_item->sgt;
}
if (sgt->nents != 1)
{
dev_dbg(xvp->dev,
"%s: sg table has %d entries, only 1 is supported\n",
__func__, sgt->nents);
goto Two_Err;
}
  2039. /* Prepare page array. */
  2040. /* Get number of pages. */
  2041. for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
  2042. {
  2043. npages += (sg_dma_len(s) + PAGE_SIZE - 1) / PAGE_SIZE;
  2044. size += sg_dma_len(s);
  2045. }
  2046. xrp_dma_buf.size = size;
#ifdef VIDMEM_DMA_MAP
xrp_dma_buf.paddr = sg_dma_address(sgt->sgl);
#else
// xrp_dma_buf.paddr = page_to_phys(nth_page(sg_page(s), 0));
xrp_dma_buf.paddr = sg_phys(sgt->sgl);
#endif
  2053. // dev_dbg(xvp->dev,
  2054. // "%s: import dma-buf phy addr:0x%lx,size:%d\n",
  2055. // __func__,xrp_dma_buf.paddr,xrp_dma_buf.size);
  2056. // xrp_allocation = kzalloc(sizeof(*xrp_allocation), GFP_KERNEL | __GFP_NORETRY);
  2057. // if(!xrp_allocation)
  2058. // {
  2059. // return -ENOMEM;
  2060. // }
  2061. // pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  2062. // if(!pool)
  2063. // {
  2064. // kfree(xrp_allocation);
  2065. // return -ENOMEM;
  2066. // }
  2067. // *pool = (struct xrp_private_pool){
  2068. // .pool = {
  2069. // .ops = &xrp_dma_buf_pool_ops,
  2070. // },
  2071. // .start = xrp_dma_buf.paddr ,
  2072. // .size = xrp_dma_buf.size,
  2073. // .free_list = NULL,
  2074. // };
  2075. // xrp_allocation->pool = pool;
  2076. // xrp_allocation->start = xrp_dma_buf.paddr;
  2077. // xrp_allocation->size = xrp_dma_buf.size;
  2078. // xrp_allocation_queue(xvp_file, xrp_allocation);
  2079. // xrp_dma_buf.addr = vm_mmap(filp, 0, xrp_allocation->size,
  2080. // PROT_READ | PROT_WRITE, MAP_SHARED,
  2081. // xrp_dam_buf_offset(xrp_allocation));
struct file *export_filp = fget(xrp_dma_buf.fd);
if (!export_filp)
goto Two_Err;
xrp_dma_buf.addr = vm_mmap(export_filp, 0, xrp_dma_buf.size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
fput(export_filp);
  2086. dev_dbg(xvp->dev,
  2087. "%s: import dma-buf phy addr:0x%lx,user addr:0x%lx,size:%d\n",
  2088. __func__,xrp_dma_buf.paddr,xrp_dma_buf.addr,xrp_dma_buf.size);
  2089. if (copy_to_user(p, &xrp_dma_buf, sizeof(*p))) {
  2090. dma_buf_put(dmabuf);
  2091. vm_munmap(xrp_dma_buf.addr , xrp_dma_buf.size);
  2092. goto Two_Err;
  2093. }
  2094. return 0;
  2095. Two_Err:
  2096. xrp_release_dma_buf_item(dma_buf_item);
  2097. One_Err:
  2098. dma_buf_put(dmabuf);
  2099. return -EINVAL;
  2100. }
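/* Look up an imported dma-buf entry by fd; returns NULL if not imported. */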
  2101. static struct xrp_dma_buf_item * xrp_search_dma_buf( struct list_head *list,int fd)
  2102. {
  2103. struct xrp_dma_buf_item *loop;
  2104. struct xrp_dma_buf_item *dma_buf_item=NULL;
  2105. struct dma_buf *dmabuf = NULL;
  2106. // pr_debug("%s: fd %d,entry\n", __func__,fd);
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
return NULL;
spin_lock(&xrp_dma_buf_lock);
  2109. list_for_each_entry(loop,list, link)
  2110. {
  2111. if(loop->dmabuf == dmabuf)
  2112. {
  2113. dma_buf_item = loop;
  2114. break;
  2115. }
  2116. }
  2117. spin_unlock(&xrp_dma_buf_lock);
  2118. dma_buf_put(dmabuf);
  2119. pr_debug("%s: %p exit\n", __func__,fd,dma_buf_item);
  2120. return dma_buf_item;
  2121. }
  2122. static long xrp_ioctl_dma_buf_release(struct file *filp,
  2123. int __user *p)
  2124. {
  2125. int fd;
  2126. struct xvp_file *xvp_file = filp->private_data;
  2127. struct xvp *xvp = xvp_file->xvp;
struct xrp_dma_buf_item *dma_buf_item = NULL;
  2131. if (copy_from_user(&fd, p, sizeof(*p)))
  2132. {
  2133. return -EFAULT;
  2134. }
  2135. // dmabuf = dma_buf_get(fd);
  2136. // spin_lock(&xrp_dma_buf_lock);
  2137. // list_for_each_entry_safe(loop, temp, &xvp->dma_buf_list, link)
  2138. // {
  2139. // if(loop->dmabuf == dmabuf)
  2140. // {
  2141. // dma_buf_item = loop;
  2142. // if((--dma_buf_item->ref)==0)
  2143. // list_del(&dma_buf_item);
  2144. // break;
  2145. // }
  2146. // }
  2147. // spin_unlock(&xrp_dma_buf_lock);
  2148. // dma_buf_put(dmabuf);
  2149. dma_buf_item = xrp_search_dma_buf(&xvp->dma_buf_list,fd);
  2150. if(dma_buf_item == NULL)
  2151. {
  2152. return -EFAULT;
  2153. }
  2154. dma_buf_unmap_attachment(dma_buf_item->attachment, dma_buf_item->sgt, DMA_BIDIRECTIONAL);
  2155. dma_buf_detach(dma_buf_item->dmabuf, dma_buf_item->attachment);
  2156. dma_buf_put(dma_buf_item->dmabuf);
  2157. xrp_release_dma_buf_item(dma_buf_item);
  2158. return 0;
  2159. }
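/*
* XRP_IOCTL_DMABUF_SYNC: cache maintenance on an imported dma-buf.
* READ syncs for the CPU, WRITE syncs for the device, READ_WRITE does both.
*/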
  2160. static long xrp_ioctl_dma_buf_sync(struct file *filp,
  2161. struct xrp_dma_buf __user *p)
  2162. {
  2163. struct xvp_file *xvp_file = filp->private_data;
  2164. struct xvp *xvp = xvp_file->xvp;
  2165. struct xrp_dma_buf xrp_dma_buf;
  2166. struct xrp_dma_buf_item *dma_buf_item=NULL;
  2167. if (copy_from_user(&xrp_dma_buf, p, sizeof(*p)))
  2168. {
  2169. return -EFAULT;
  2170. }
  2171. dma_buf_item = xrp_search_dma_buf(&xvp->dma_buf_list,xrp_dma_buf.fd);
  2172. if(dma_buf_item == NULL)
  2173. {
  2174. return -EFAULT;
  2175. }
  2176. switch(xrp_dma_buf.flags)
  2177. {
  2178. case XRP_FLAG_READ:
  2179. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2180. xrp_dma_direction(xrp_dma_buf.flags));
  2181. break;
  2182. case XRP_FLAG_WRITE:
  2183. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2184. xrp_dma_direction(xrp_dma_buf.flags));
  2185. break;
  2186. case XRP_FLAG_READ_WRITE:
  2187. dma_sync_single_for_cpu(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr), xrp_dma_buf.size,
  2188. xrp_dma_direction(xrp_dma_buf.flags));
  2189. dma_sync_single_for_device(xvp->dev, phys_to_dma(xvp->dev, xrp_dma_buf.paddr),xrp_dma_buf.size,
  2190. xrp_dma_direction(xrp_dma_buf.flags));
  2191. break;
  2192. default:
  2193. dev_dbg(xvp->dev,"%s: invalid type%x\n", __func__, xrp_dma_buf.flags);
  2194. return -EFAULT;
  2195. }
  2196. return 0;
  2197. }
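/* ioctl entry point: dispatch XRP_IOCTL_* commands to their handlers. */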
  2198. static long xvp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  2199. {
  2200. long retval;
  2201. pr_debug("%s: %x\n", __func__, cmd);
  2202. switch(cmd){
  2203. case XRP_IOCTL_ALLOC:
  2204. retval = xrp_ioctl_alloc(filp,
  2205. (struct xrp_ioctl_alloc __user *)arg);
  2206. break;
  2207. case XRP_IOCTL_FREE:
  2208. retval = xrp_ioctl_free(filp,
  2209. (struct xrp_ioctl_alloc __user *)arg);
  2210. break;
  2211. case XRP_IOCTL_QUEUE:
  2212. case XRP_IOCTL_QUEUE_NS:
  2213. retval = xrp_ioctl_submit_sync(filp,
  2214. (struct xrp_ioctl_queue __user *)arg);
  2215. break;
  2216. case XRP_IOCTL_REPORT_CREATE:
  2217. retval = xrp_ioctl_alloc_report(filp,
  2218. (struct xrp_ioctl_alloc __user *)arg);
  2219. break;
  2220. case XRP_IOCTL_REPORT_RELEASE:
  2221. retval = xrp_ioctl_release_report(filp,
  2222. (struct xrp_ioctl_alloc __user *)arg);
  2223. break;
  2224. case XRP_IOCTL_DMABUF_IMPORT:
  2225. retval = xrp_ioctl_dma_buf_import(filp,
  2226. (struct xrp_dma_buf __user *)arg);
  2227. break;
  2228. case XRP_IOCTL_DMABUF_RELEASE:
  2229. retval = xrp_ioctl_dma_buf_release(filp,
  2230. (int __user *)arg);
  2231. break;
  2232. case XRP_IOCTL_DMABUF_SYNC:
  2233. retval = xrp_ioctl_dma_buf_sync(filp,
  2234. (struct xrp_dma_buf __user *)arg);
  2235. break;
  2236. default:
  2237. retval = -EINVAL;
  2238. break;
  2239. }
  2240. return retval;
  2241. }
  2242. static void xvp_vm_open(struct vm_area_struct *vma)
  2243. {
  2244. // pr_debug("%s\n", __func__);
  2245. xrp_allocation_get(vma->vm_private_data);
  2246. }
  2247. static void xvp_vm_close(struct vm_area_struct *vma)
  2248. {
  2249. // pr_debug("%s\n", __func__);
  2250. xrp_allocation_put(vma->vm_private_data);
  2251. }
  2252. static const struct vm_operations_struct xvp_vm_ops = {
  2253. .open = xvp_vm_open,
  2254. .close = xvp_vm_close,
  2255. };
  2256. static int xvp_mmap(struct file *filp, struct vm_area_struct *vma)
  2257. {
  2258. int err;
  2259. struct xvp_file *xvp_file = filp->private_data;
  2260. unsigned long pfn = vma->vm_pgoff;// + PFN_DOWN(xvp_file->xvp->pmem);
  2261. struct xrp_allocation *xrp_allocation;
  2262. xrp_allocation = xrp_allocation_dequeue(filp->private_data,
  2263. pfn << PAGE_SHIFT,
  2264. vma->vm_end - vma->vm_start);
  2265. if (xrp_allocation) {
  2266. struct xvp *xvp = xvp_file->xvp;
  2267. pgprot_t prot = vma->vm_page_prot;
  2268. if (!xrp_cacheable(xvp, pfn,
  2269. PFN_DOWN(vma->vm_end - vma->vm_start))) {
  2270. prot = pgprot_writecombine(prot);
  2271. // prot = pgprot_noncached(prot);
  2272. vma->vm_page_prot = prot;
  2273. dev_dbg(xvp->dev,"%s cache atribution set \n", __func__);
  2274. }
  2275. err = remap_pfn_range(vma, vma->vm_start, pfn,
  2276. vma->vm_end - vma->vm_start,
  2277. prot);
  2278. vma->vm_private_data = xrp_allocation;
  2279. vma->vm_ops = &xvp_vm_ops;
  2280. } else {
  2281. pr_err("%s no valid xrp allocate for %lx:\n", __func__,pfn);
  2282. err = -EINVAL;
  2283. }
  2284. return err;
  2285. }
  2286. static int xvp_open(struct inode *inode, struct file *filp)
  2287. {
  2288. struct xvp *xvp = container_of(filp->private_data,
  2289. struct xvp, miscdev);
  2290. struct xvp_file *xvp_file;
  2291. int rc;
  2292. dev_dbg(xvp->dev,"%s\n", __func__);
  2293. rc = pm_runtime_get_sync(xvp->dev);
  2294. if (rc < 0)
  2295. {
  2296. dev_err(xvp->dev,"%s:pm_runtime_get_sync fail:%d\n", __func__,rc);
  2297. return rc;
  2298. }
  2299. xvp_file = devm_kzalloc(xvp->dev, sizeof(*xvp_file), GFP_KERNEL);
  2300. if (!xvp_file) {
  2301. dev_err(xvp->dev,"%s:malloc fail\n", __func__);
  2302. pm_runtime_put_sync(xvp->dev);
  2303. return -ENOMEM;
  2304. }
  2305. xvp_file->xvp = xvp;
  2306. spin_lock_init(&xvp_file->busy_list_lock);
  2307. filp->private_data = xvp_file;
  2308. xrp_add_known_file(filp);
  2309. return 0;
  2310. }
  2311. static int xvp_close(struct inode *inode, struct file *filp)
  2312. {
  2313. struct xvp_file *xvp_file = filp->private_data;
  2314. pr_debug("%s\n", __func__);
  2315. xrp_report_fasync_release(filp);
  2316. xrp_remove_known_file(filp);
  2317. pm_runtime_put_sync(xvp_file->xvp->dev);
  2318. devm_kfree(xvp_file->xvp->dev, xvp_file);
  2319. return 0;
  2320. }
  2321. static inline int xvp_enable_dsp(struct xvp *xvp)
  2322. {
  2323. if (loopback < LOOPBACK_NOMMIO &&
  2324. xvp->hw_ops->enable)
  2325. return xvp->hw_ops->enable(xvp->hw_arg);
  2326. else
  2327. return 0;
  2328. }
  2329. static inline void xvp_disable_dsp(struct xvp *xvp)
  2330. {
  2331. if (loopback < LOOPBACK_NOMMIO &&
  2332. xvp->hw_ops->disable)
  2333. xvp->hw_ops->disable(xvp->hw_arg);
  2334. }
  2335. static inline void xvp_remove_proc(struct xvp *xvp)
  2336. {
  2337. if( xvp->proc_dir)
  2338. {
  2339. if(xvp->panic_log)
  2340. {
  2341. xrp_remove_panic_log_proc(xvp->panic_log);
  2342. xvp->panic_log =NULL;
  2343. }
  2344. // remove_proc_entry(xvp->proc_dir,NULL);
  2345. proc_remove(xvp->proc_dir);
  2346. }
  2347. }
  2348. static inline void xrp_set_resetVec(struct xvp *xvp,u32 addr)
  2349. {
  2350. if (loopback < LOOPBACK_NOMMIO &&
  2351. xvp->hw_ops->set_reset_vector)
  2352. xvp->hw_ops->set_reset_vector(xvp->hw_arg,addr);
  2353. }
  2354. static inline void xrp_reset_dsp(struct xvp *xvp)
  2355. {
  2356. if (loopback < LOOPBACK_NOMMIO &&
  2357. xvp->hw_ops->reset)
  2358. xvp->hw_ops->reset(xvp->hw_arg);
  2359. }
  2360. static inline void xrp_halt_dsp(struct xvp *xvp)
  2361. {
  2362. if (loopback < LOOPBACK_NOMMIO &&
  2363. xvp->hw_ops->halt)
  2364. xvp->hw_ops->halt(xvp->hw_arg);
  2365. }
  2366. static inline void xrp_release_dsp(struct xvp *xvp)
  2367. {
  2368. if (loopback < LOOPBACK_NOMMIO &&
  2369. xvp->hw_ops->release)
  2370. xvp->hw_ops->release(xvp->hw_arg);
  2371. }
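/*
* Boot (or reboot) the DSP: in LOAD_MODE_AUTO halt the core, load the
* firmware, program its entry point as the reset vector and reset; then
* release the core and run the synchronization handshake over shared memory.
*/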
  2372. static int xrp_boot_firmware(struct xvp *xvp)
  2373. {
  2374. int ret;
  2375. u32 fm_entry_point=0;
  2376. struct xrp_dsp_sync_v1 __iomem *shared_sync = xvp->comm;
  2377. // dev_dbg(xvp->dev,"%s",__func__);
  2378. //#if 1 //LOAD_MODE_MANUAL load release dsp by xplorer
  2379. if(load_mode == LOAD_MODE_AUTO)
  2380. {
  2381. xrp_halt_dsp(xvp);
  2382. //xrp_reset_dsp(xvp);
  2383. if (xvp->firmware_name) {
  2384. if (loopback < LOOPBACK_NOFIRMWARE) {
  2385. ret = xrp_request_firmware(xvp,&fm_entry_point);
  2386. if (ret < 0)
  2387. return ret;
  2388. }
  2389. if (loopback < LOOPBACK_NOIO) {
  2390. xrp_comm_write32(&shared_sync->sync, XRP_DSP_SYNC_IDLE);
  2391. mb();
  2392. }
  2393. // fm_entry_point = xrp_get_firmware_entry_addr(xvp);
  2394. dev_dbg(xvp->dev,"%s,firmware entry point :%x\n",__func__,fm_entry_point);
  2395. if(fm_entry_point)
  2396. {
  2397. xrp_set_resetVec(xvp,fm_entry_point);
  2398. }
  2399. }
  2400. xrp_reset_dsp(xvp);
  2401. }
  2402. xrp_release_dsp(xvp);
  2403. //#endif
  2404. if (loopback < LOOPBACK_NOIO) {
  2405. ret = xrp_synchronize(xvp);
  2406. if (ret < 0) {
  2407. xrp_halt_dsp(xvp);
  2408. dev_err(xvp->dev,
  2409. "%s: couldn't synchronize with the DSP core\n",
  2410. __func__);
  2411. dev_err(xvp->dev,
  2412. "XRP device will not use the DSP until the driver is rebound to this device\n");
  2413. xvp->off = true;
  2414. return ret;
  2415. }
  2416. }
  2417. return 0;
  2418. }
  2419. static const struct file_operations xvp_fops = {
  2420. .owner = THIS_MODULE,
  2421. .llseek = no_llseek,
  2422. .unlocked_ioctl = xvp_ioctl,
  2423. #ifdef CONFIG_COMPAT
  2424. .compat_ioctl = xvp_ioctl,
  2425. #endif
  2426. .mmap = xvp_mmap,
  2427. .open = xvp_open,
  2428. .fasync = xrp_report_fasync,
  2429. .release = xvp_close,
  2430. };
  2431. int xrp_runtime_suspend(struct device *dev)
  2432. {
  2433. struct xvp *xvp = dev_get_drvdata(dev);
  2434. xrp_halt_dsp(xvp);
  2435. xrp_reset_dsp(xvp);
  2436. xvp_disable_dsp(xvp);
  2437. // release_firmware(xvp->firmware);
  2438. return 0;
  2439. }
  2440. EXPORT_SYMBOL(xrp_runtime_suspend);
  2441. int xrp_runtime_resume(struct device *dev)
  2442. {
  2443. struct xvp *xvp = dev_get_drvdata(dev);
  2444. unsigned i;
  2445. int ret = 0;
  2446. for (i = 0; i < xvp->n_queues; ++i)
  2447. mutex_lock(&xvp->queue[i].lock);
  2448. if (xvp->off)
  2449. goto out;
  2450. ret = xvp_enable_dsp(xvp);
  2451. if (ret < 0) {
  2452. dev_err(xvp->dev, "couldn't enable DSP\n");
  2453. goto out;
  2454. }
  2455. ret = xrp_boot_firmware(xvp);
  2456. if (ret < 0)
  2457. xvp_disable_dsp(xvp);
  2458. out:
  2459. for (i = 0; i < xvp->n_queues; ++i)
  2460. mutex_unlock(&xvp->queue[i].lock);
  2461. return ret;
  2462. }
  2463. EXPORT_SYMBOL(xrp_runtime_resume);
  2464. static int xrp_init_regs_v0(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2465. {
  2466. struct resource res;
  2467. struct device_node *np;
  2468. int ret = 0;
  2469. np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
  2470. if (!np) {
  2471. dev_err(&pdev->dev, "No memory-region specified\n");
  2472. return -EINVAL;
  2473. }
ret = of_address_to_resource(np, 0, &res);
if (ret)
return -ENODEV;
dev_dbg(xvp->dev, "%s: dsp running addr 0x%llx, size: 0x%x\n", __func__,
res.start, resource_size(&res));
  2477. ret = of_address_to_resource(np, 1, &res);
  2478. if (ret)
  2479. {
  2480. dev_dbg(xvp->dev,"%s:get comm region fail\n", __func__);
  2481. return -ENODEV;
  2482. }
  2483. xvp->comm_phys = res.start;
  2484. xvp->comm = devm_ioremap_resource(&pdev->dev, &res);
  2485. dev_dbg(xvp->dev,"%s:xvp->comm =0x%p, phy_addr base=0x%llx\n", __func__,
  2486. xvp->comm, xvp->comm_phys);
  2487. // mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx);
  2488. ret = of_address_to_resource(np, 2, &res);
  2489. if(ret)
  2490. {
  2491. dev_dbg(xvp->dev,"%s:get paic region fail:%d\n", __func__,ret);
  2492. }else
  2493. {
  2494. xvp->panic_phy = res.start;
  2495. xvp->panic = devm_ioremap_resource(&pdev->dev, &res);
  2496. xvp->panic_size = resource_size(&res);
  2497. if(xvp->panic)
  2498. {
  2499. dev_dbg(xvp->dev,"%s:panic=0x%p, panic phy base=0x%llx,size:%d\n", __func__,
  2500. xvp->panic, xvp->panic_phy,xvp->panic_size);
  2501. }else
  2502. {
dev_warn(xvp->dev, "%s: map panic region fail\n", __func__);
  2504. }
  2505. }
  2506. ret = of_address_to_resource(np, 3, &res);
  2507. if (ret)
  2508. {
  2509. dev_dbg(xvp->dev,"%s:get memory pool region fail\n", __func__);
  2510. return -ENODEV;
  2511. }
  2512. xvp->pmem = res.start;
  2513. xvp->shared_size = resource_size(&res);
  2514. dev_dbg(xvp->dev,"%s,memory pool phy_addr base=0x%llx,size:0x%x\n", __func__,
  2515. xvp->pmem, xvp->shared_size);
  2516. return xrp_init_private_pool(&xvp->pool, xvp->pmem,
  2517. xvp->shared_size);
  2518. }
  2519. static int xrp_init_regs_v1(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2520. {
  2521. struct resource *mem;
  2522. struct resource r;
  2523. mem = platform_get_resource(pdev, IORESOURCE_MEM, mem_idx);
  2524. if (!mem)
  2525. return -ENODEV;
  2526. if (resource_size(mem) < 2 * PAGE_SIZE) {
  2527. dev_err(xvp->dev,
  2528. "%s: shared memory size is too small\n",
  2529. __func__);
  2530. return -ENOMEM;
  2531. }
  2532. xvp->comm_phys = mem->start;
  2533. xvp->pmem = mem->start + PAGE_SIZE;
  2534. xvp->shared_size = resource_size(mem) - PAGE_SIZE;
  2535. r = *mem;
  2536. r.end = r.start + PAGE_SIZE;
  2537. xvp->comm = devm_ioremap_resource(&pdev->dev, &r);
  2538. return xrp_init_private_pool(&xvp->pool, xvp->pmem,
  2539. xvp->shared_size);
  2540. }
  2541. static bool xrp_translate_base_mimo_to_dsp(struct xvp *xvp)
  2542. {
  2543. if(!xvp->hw_ops->get_base_mimo || !xvp->hw_ops->get_hw_sync_data )
  2544. {
  2545. return true;
  2546. }
  2547. phys_addr_t mimo_addr = xvp->hw_ops->get_base_mimo(xvp->hw_arg);
  2548. u32 device_mimo_addr = xrp_translate_to_dsp(&xvp->address_map, mimo_addr);
if (device_mimo_addr == XRP_NO_TRANSLATION)
{
dev_err(xvp->dev,
"%s: failed to translate %pap to a DSP address\n",
__func__, &mimo_addr);
  2554. return false;
  2555. }
  2556. xvp->hw_ops->update_device_base(xvp->hw_arg,device_mimo_addr);
  2557. dev_dbg(xvp->dev,
  2558. "%s: Base mimo translate to dsp address \n",__func__);
  2559. return true;
  2560. }
  2561. static int xrp_init_regs_cma(struct platform_device *pdev, struct xvp *xvp,int mem_idx)
  2562. {
  2563. dma_addr_t comm_phys;
  2564. if (of_reserved_mem_device_init(xvp->dev) < 0)
  2565. return -ENODEV;
  2566. xvp->comm = dma_alloc_attrs(xvp->dev, PAGE_SIZE, &comm_phys,
  2567. GFP_KERNEL, 0);
  2568. if (!xvp->comm)
  2569. return -ENOMEM;
  2570. xvp->comm_phys = dma_to_phys(xvp->dev, comm_phys);
  2571. return xrp_init_cma_pool(&xvp->pool, xvp->dev);
  2572. }
  2573. static int compare_queue_priority(const void *a, const void *b)
  2574. {
  2575. const void * const *ppa = a;
  2576. const void * const *ppb = b;
  2577. const struct xrp_comm *pa = *ppa, *pb = *ppb;
  2578. if (pa->priority == pb->priority)
  2579. return 0;
  2580. else
  2581. return pa->priority < pb->priority ? -1 : 1;
  2582. }
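/*
* Common probe path: allocate and fill the xvp device, map the
* communication/panic/pool regions via the given xrp_init_regs callback,
* set up queues by priority, create the proc panic log, enable runtime PM,
* boot the firmware once and register the misc device node.
*/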
  2583. static long xrp_init_common(struct platform_device *pdev,
  2584. enum xrp_init_flags init_flags,
  2585. const struct xrp_hw_ops *hw_ops, void *hw_arg,
  2586. int mem_idx,
  2587. int (*xrp_init_regs)(struct platform_device *pdev,
  2588. struct xvp *xvp,int mem_idx))
  2589. {
  2590. long ret;
  2591. char nodename[sizeof("xvp") + 3 * sizeof(int)];
  2592. struct xvp *xvp;
  2593. int nodeid;
  2594. unsigned i;
  2595. u32 value;
  2596. char dir_name[32];
  2597. xvp = devm_kzalloc(&pdev->dev, sizeof(*xvp), GFP_KERNEL);
  2598. if (!xvp) {
  2599. ret = -ENOMEM;
  2600. goto err;
  2601. }
  2602. xvp->reporter = NULL;
  2603. xvp->dev = &pdev->dev;
  2604. xvp->hw_ops = hw_ops;
  2605. xvp->hw_arg = hw_arg;
  2606. if (init_flags & XRP_INIT_USE_HOST_IRQ)
  2607. xvp->host_irq_mode = true;
  2608. platform_set_drvdata(pdev, xvp);
  2609. ret = xrp_init_regs(pdev, xvp,mem_idx);
  2610. if (ret < 0)
  2611. goto err;
  2612. dev_dbg(xvp->dev,"%s: comm = %pap/%p\n", __func__, &xvp->comm_phys, xvp->comm);
  2613. dev_dbg(xvp->dev,"%s: xvp->pmem = %pap\n", __func__, &xvp->pmem);
  2614. // writel(0xdeadbeef,xvp->comm+0x4);
  2615. // value = readl(xvp->comm+0x4);
  2616. // pr_debug("offset=04, value is:0x%08x\n",value);
  2617. ret = xrp_init_address_map(xvp->dev, &xvp->address_map);
  2618. if (ret < 0)
  2619. goto err_free_pool;
if (!xrp_translate_base_mimo_to_dsp(xvp))
{
ret = -EINVAL;
goto err_free_map;
}
  2624. ret = device_property_read_u32_array(xvp->dev, "queue-priority",
  2625. NULL, 0);
  2626. if (ret > 0) {
  2627. xvp->n_queues = ret;
  2628. xvp->queue_priority = devm_kmalloc(&pdev->dev,
  2629. ret * sizeof(u32),
  2630. GFP_KERNEL);
if (xvp->queue_priority == NULL) {
ret = -ENOMEM;
goto err_free_pool;
}
  2633. ret = device_property_read_u32_array(xvp->dev,
  2634. "queue-priority",
  2635. xvp->queue_priority,
  2636. xvp->n_queues);
  2637. if (ret < 0)
  2638. goto err_free_pool;
  2639. dev_dbg(xvp->dev,
  2640. "multiqueue (%d) configuration, queue priorities:\n",
  2641. xvp->n_queues);
  2642. for (i = 0; i < xvp->n_queues; ++i)
  2643. dev_dbg(xvp->dev, " %d\n", xvp->queue_priority[i]);
  2644. } else {
  2645. xvp->n_queues = 1;
  2646. }
  2647. xvp->queue = devm_kmalloc(&pdev->dev,
  2648. xvp->n_queues * sizeof(*xvp->queue),
  2649. GFP_KERNEL);
  2650. xvp->queue_ordered = devm_kmalloc(&pdev->dev,
  2651. xvp->n_queues * sizeof(*xvp->queue_ordered),
  2652. GFP_KERNEL);
if (xvp->queue == NULL ||
xvp->queue_ordered == NULL) {
ret = -ENOMEM;
goto err_free_pool;
}
  2656. for (i = 0; i < xvp->n_queues; ++i) {
  2657. mutex_init(&xvp->queue[i].lock);
  2658. xvp->queue[i].comm = xvp->comm + XRP_DSP_CMD_STRIDE * i;
  2659. init_completion(&xvp->queue[i].completion);
  2660. if (xvp->queue_priority)
  2661. xvp->queue[i].priority = xvp->queue_priority[i];
  2662. xvp->queue_ordered[i] = xvp->queue + i;
  2663. }
  2664. sort(xvp->queue_ordered, xvp->n_queues, sizeof(*xvp->queue_ordered),
  2665. compare_queue_priority, NULL);
  2666. if (xvp->n_queues > 1) {
  2667. dev_dbg(xvp->dev, "SW -> HW queue priority mapping:\n");
  2668. for (i = 0; i < xvp->n_queues; ++i) {
  2669. dev_dbg(xvp->dev, " %d -> %d\n",
  2670. i, xvp->queue_ordered[i]->priority);
  2671. }
  2672. }
  2673. ret = device_property_read_string(xvp->dev, "firmware-name",
  2674. &xvp->firmware_name);
  2675. if (ret == -EINVAL || ret == -ENODATA) {
  2676. dev_dbg(xvp->dev,
  2677. "no firmware-name property, not loading firmware\n");
  2678. } else if (ret < 0) {
  2679. dev_err(xvp->dev, "invalid firmware name (%ld)\n", ret);
  2680. goto err_free_map;
  2681. }
  2682. nodeid = ida_simple_get(&xvp_nodeid, 0, 0, GFP_KERNEL);
  2683. if (nodeid < 0) {
  2684. ret = nodeid;
  2685. goto err_free_map;
  2686. }
  2687. sprintf(dir_name,"dsp%d_proc",nodeid);
  2688. xvp->proc_dir = proc_mkdir(dir_name, NULL);
  2689. if (NULL != xvp->proc_dir)
  2690. {
  2691. xvp->panic_log = xrp_create_panic_log_proc(xvp->proc_dir,xvp->panic,xvp->panic_size);
  2692. }
else
{
dev_err(xvp->dev, "create %s fail\n", dir_name);
ret = -ENOMEM;
goto err_free_id;
}
  2698. pm_runtime_enable(xvp->dev);
  2699. if (!pm_runtime_enabled(xvp->dev)) {
  2700. ret = xrp_runtime_resume(xvp->dev);
  2701. if (ret)
  2702. goto err_pm_disable;
  2703. }else
  2704. {
  2705. ret = xrp_runtime_resume(xvp->dev);
  2706. if (ret)
  2707. goto err_proc_remove;
  2708. // xvp_enable_dsp(xvp);
  2709. xrp_runtime_suspend(xvp->dev);
  2710. }
  2711. xvp->nodeid = nodeid;
  2712. sprintf(nodename, "xvp%u", nodeid);
  2713. xvp->miscdev = (struct miscdevice){
  2714. .minor = MISC_DYNAMIC_MINOR,
  2715. .name = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  2716. .nodename = devm_kstrdup(&pdev->dev, nodename, GFP_KERNEL),
  2717. .fops = &xvp_fops,
  2718. };
  2719. ret = misc_register(&xvp->miscdev);
  2720. if (ret < 0)
  2721. goto err_pm_disable;
  2722. // xrp_device_heartbeat_init(xvp);
  2723. INIT_LIST_HEAD(&xvp->dma_buf_list);
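/*
* Success: return the xvp pointer cast to long; callers such as xrp_probe()
* recover it with ERR_PTR() (the ACPI path uses it to patch the address map).
*/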
  2724. return PTR_ERR(xvp);
  2725. err_pm_disable:
  2726. pm_runtime_disable(xvp->dev);
  2727. err_proc_remove:
  2728. xvp_remove_proc(xvp);
  2729. err_free_id:
  2730. ida_simple_remove(&xvp_nodeid, nodeid);
  2731. err_free_map:
  2732. xrp_free_address_map(&xvp->address_map);
  2733. err_free_pool:
  2734. xrp_free_pool(xvp->pool);
  2735. if (xvp->comm_phys && !xvp->pmem) {
  2736. dma_free_attrs(xvp->dev, PAGE_SIZE, xvp->comm,
  2737. phys_to_dma(xvp->dev, xvp->comm_phys), 0);
  2738. }
  2739. err:
  2740. dev_err(&pdev->dev, "%s: ret = %ld\n", __func__, ret);
  2741. return ret;
  2742. }
  2743. typedef long xrp_init_function(struct platform_device *pdev,
  2744. enum xrp_init_flags flags,
  2745. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx);
  2746. xrp_init_function xrp_init;
  2747. long xrp_init(struct platform_device *pdev, enum xrp_init_flags flags,
  2748. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2749. {
  2750. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_v0);
  2751. }
  2752. EXPORT_SYMBOL(xrp_init);
  2753. xrp_init_function xrp_init_v1;
  2754. long xrp_init_v1(struct platform_device *pdev, enum xrp_init_flags flags,
  2755. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2756. {
  2757. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_v1);
  2758. }
  2759. EXPORT_SYMBOL(xrp_init_v1);
  2760. xrp_init_function xrp_init_cma;
  2761. long xrp_init_cma(struct platform_device *pdev, enum xrp_init_flags flags,
  2762. const struct xrp_hw_ops *hw_ops, void *hw_arg,int mem_idx)
  2763. {
  2764. return xrp_init_common(pdev, flags, hw_ops, hw_arg, mem_idx,xrp_init_regs_cma);
  2765. }
  2766. EXPORT_SYMBOL(xrp_init_cma);
  2767. int xrp_deinit(struct platform_device *pdev)
  2768. {
  2769. struct xvp *xvp = platform_get_drvdata(pdev);
  2770. pm_runtime_disable(xvp->dev);
  2771. if (!pm_runtime_status_suspended(xvp->dev))
  2772. xrp_runtime_suspend(xvp->dev);
  2773. // xvp_clear_dsp(xvp);
  2774. xvp_remove_proc(xvp);
  2775. dev_dbg(xvp->dev,"%s:phase 1\n",__func__);
  2776. misc_deregister(&xvp->miscdev);
  2777. dev_dbg(xvp->dev,"%s:phase 2\n",__func__);
  2778. // release_firmware(xvp->firmware);
  2779. // dev_dbg(xvp->dev,"%s:phase 3\n",__func__);
  2780. xrp_free_pool(xvp->pool);
  2781. if (xvp->comm_phys && !xvp->pmem) {
  2782. dma_free_attrs(xvp->dev, PAGE_SIZE, xvp->comm,
  2783. phys_to_dma(xvp->dev, xvp->comm_phys), 0);
  2784. }
  2785. dev_dbg(xvp->dev,"%s:phase 3\n",__func__);
  2786. xrp_free_address_map(&xvp->address_map);
  2787. dev_dbg(xvp->dev,"%s:phase 4\n",__func__);
  2788. if(!ida_is_empty(&xvp_nodeid))
  2789. {
  2790. ida_simple_remove(&xvp_nodeid, xvp->nodeid);
  2791. dev_dbg(xvp->dev,"%s:phase 5\n",__func__);
  2792. }
  2793. return 0;
  2794. }
  2795. EXPORT_SYMBOL(xrp_deinit);
  2796. int xrp_deinit_hw(struct platform_device *pdev, void **hw_arg)
  2797. {
  2798. if (hw_arg) {
  2799. struct xvp *xvp = platform_get_drvdata(pdev);
  2800. *hw_arg = xvp->hw_arg;
  2801. }
  2802. return xrp_deinit(pdev);
  2803. }
  2804. EXPORT_SYMBOL(xrp_deinit_hw);
  2805. static void *get_hw_sync_data(void *hw_arg, size_t *sz)
  2806. {
  2807. void *p = kzalloc(64, GFP_KERNEL);
  2808. *sz = 64;
  2809. return p;
  2810. }
  2811. static const struct xrp_hw_ops hw_ops = {
  2812. .get_hw_sync_data = get_hw_sync_data,
  2813. };
  2814. #ifdef CONFIG_OF
  2815. static const struct of_device_id xrp_of_match[] = {
  2816. {
  2817. .compatible = "cdns,xrp",
  2818. .data = xrp_init,
  2819. }, {
  2820. .compatible = "cdns,xrp,v1",
  2821. .data = xrp_init_v1,
  2822. }, {
  2823. .compatible = "cdns,xrp,cma",
  2824. .data = xrp_init_cma,
  2825. }, {},
  2826. };
  2827. MODULE_DEVICE_TABLE(of, xrp_of_match);
  2828. #endif
  2829. #ifdef CONFIG_ACPI
  2830. static const struct acpi_device_id xrp_acpi_match[] = {
  2831. { "CXRP0001", 0 },
  2832. { },
  2833. };
  2834. MODULE_DEVICE_TABLE(acpi, xrp_acpi_match);
  2835. #endif
  2836. static int xrp_probe(struct platform_device *pdev)
  2837. {
  2838. long ret = -EINVAL;
  2839. #ifdef CONFIG_OF
  2840. const struct of_device_id *match;
  2841. match = of_match_device(xrp_of_match, &pdev->dev);
  2842. if (match) {
  2843. xrp_init_function *init = match->data;
  2844. ret = init(pdev, 0, &hw_ops, NULL,0);
  2845. return IS_ERR_VALUE(ret) ? ret : 0;
  2846. } else {
  2847. pr_debug("%s: no OF device match found\n", __func__);
  2848. }
  2849. #endif
  2850. #ifdef CONFIG_ACPI
  2851. ret = xrp_init_v1(pdev, 0, &hw_ops, NULL,2);
  2852. if (!IS_ERR_VALUE(ret)) {
  2853. struct xrp_address_map_entry *entry;
  2854. struct xvp *xvp = ERR_PTR(ret);
  2855. ret = 0;
  2856. /*
  2857. * On ACPI system DSP can currently only access
  2858. * its own shared memory.
  2859. */
  2860. entry = xrp_get_address_mapping(&xvp->address_map,
  2861. xvp->comm_phys);
  2862. if (entry) {
  2863. entry->src_addr = xvp->comm_phys;
  2864. entry->dst_addr = (u32)xvp->comm_phys;
  2865. entry->size = (u32)xvp->shared_size + PAGE_SIZE;
  2866. } else {
  2867. dev_err(xvp->dev,
  2868. "%s: couldn't find mapping for shared memory\n",
  2869. __func__);
  2870. ret = -EINVAL;
  2871. }
  2872. }
  2873. #endif
  2874. return ret;
  2875. }
  2876. static int xrp_remove(struct platform_device *pdev)
  2877. {
  2878. return xrp_deinit(pdev);
  2879. }
  2880. static const struct dev_pm_ops xrp_pm_ops = {
  2881. SET_RUNTIME_PM_OPS(xrp_runtime_suspend,
  2882. xrp_runtime_resume, NULL)
  2883. };
  2884. static struct platform_driver xrp_driver = {
  2885. .probe = xrp_probe,
  2886. .remove = xrp_remove,
  2887. .driver = {
  2888. .name = DRIVER_NAME,
  2889. .of_match_table = of_match_ptr(xrp_of_match),
  2890. .acpi_match_table = ACPI_PTR(xrp_acpi_match),
  2891. .pm = &xrp_pm_ops,
  2892. },
  2893. };
  2894. module_platform_driver(xrp_driver);
  2895. MODULE_AUTHOR("T-HEAD");
  2896. MODULE_DESCRIPTION("XRP: Linux device driver for Xtensa Remote Processing");
  2897. MODULE_LICENSE("Dual MIT/GPL");