vha_common.c

  1. /*
  2. *****************************************************************************
  3. * Copyright (c) Imagination Technologies Ltd.
  4. *
  5. * The contents of this file are subject to the MIT license as set out below.
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a
  8. * copy of this software and associated documentation files (the "Software"),
  9. * to deal in the Software without restriction, including without limitation
  10. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  11. * and/or sell copies of the Software, and to permit persons to whom the
  12. * Software is furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  20. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23. * THE SOFTWARE.
  24. *
  25. * Alternatively, the contents of this file may be used under the terms of the
  26. * GNU General Public License Version 2 ("GPL") in which case the provisions of
  27. * GPL are applicable instead of those above.
  28. *
  29. * If you wish to allow use of your version of this file only under the terms
  30. * of GPL, and not to allow others to use your version of this file under the
  31. * terms of the MIT license, indicate your decision by deleting the provisions
  32. * above and replace them with the notice and other provisions required by GPL
  33. * as set out in the file called "GPLHEADER" included in this distribution. If
  34. * you do not delete the provisions above, a recipient may use your version of
  35. * this file under the terms of either the MIT license or GPL.
  36. *
  37. * This License is also included in this distribution in the file called
  38. * "MIT_COPYING".
  39. *
  40. *****************************************************************************/
  41. #include <linux/slab.h>
  42. #include <linux/device.h>
  43. #include <linux/gfp.h>
  44. #include <linux/sched.h>
  45. #include <linux/moduleparam.h>
  46. #include <linux/jiffies.h>
  47. #include <linux/list.h>
  48. #include <linux/delay.h>
  49. #include <linux/pm_runtime.h>
  50. #include <linux/debugfs.h>
  51. #include <linux/crc32.h>
  52. #include <uapi/vha.h>
  53. #include "vha_common.h"
  54. #include "vha_plat.h"
  55. #include <vha_regs.h>
  56. #ifdef KERNEL_DMA_FENCE_SUPPORT
  57. #include <linux/dma-fence.h>
  58. #include <linux/dma-fence-array.h>
  59. #include <linux/sync_file.h>
  60. #include <linux/file.h>
  61. #include <linux/kernel.h>
  62. #endif
  63. #if !defined(HW_AX2) && !defined(HW_AX3)
  64. #error No HW architecture series defined. Either HW_AX2 or HW_AX3 must be defined
  65. #elif defined(HW_AX2) && defined(HW_AX3)
  66. #error Invalid HW architecture series define. Only one of HW_AX2 or HW_AX3 must be defined.
  67. #endif
  68. #define MIN_ONCHIP_MAP 1
  69. #define MAX_ONCHIP_MAP 128
  70. static uint8_t mmu_mode = VHA_MMU_40BIT;
  71. module_param(mmu_mode, byte, 0444);
  72. MODULE_PARM_DESC(mmu_mode,
  73. "MMU mode: 0=no-MMU, 1=direct (1:1) mappings or 40=40bit (default)");
  74. static uint32_t mmu_ctx_default;
  75. module_param(mmu_ctx_default, uint, 0444);
  76. MODULE_PARM_DESC(mmu_ctx_default, "MMU default context id(0:31) to be used");
  77. static uint32_t mmu_page_size; /* 0-4kB */
  78. module_param(mmu_page_size, uint, 0444);
  79. MODULE_PARM_DESC(mmu_page_size,
  80. "MMU page size: 0-4kB, 1-16kB, 2-64kB, 3-256kB, 4-1MB; 5-2MB");
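/* Usage sketch (illustrative; the module file name below is an assumption,
 * real deployments use their own packaging): the MMU behaviour is selected
 * purely via module parameters at load time, e.g.
 *
 *   insmod vha.ko mmu_mode=40 mmu_ctx_default=0 mmu_page_size=0
 *
 * Note that mmu_page_size is an index into mmu_page_size_kb_lut[] defined
 * later in this file, not a size in bytes.
 */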
  81. static bool no_clock_disable = false;
  82. module_param(no_clock_disable, bool, 0444);
  83. MODULE_PARM_DESC(no_clock_disable,
  84. "if Y, the device is not disabled when inactive, otherwise APM is used");
  85. static int pm_delay = 100;
  86. module_param(pm_delay, int, S_IRUSR | S_IRGRP);
  87. MODULE_PARM_DESC(pm_delay, "Delay, in ms, before powering off the core that's idle");
  88. static int freq_khz = -1;
  89. module_param(freq_khz, int, 0444);
  90. MODULE_PARM_DESC(freq_khz,
  91. "core frequency in kHz, -1=start self measurement during driver load, 0=use platform defined value, otherwise (>0) declared value is used");
  92. static uint32_t hw_bypass;
  93. module_param(hw_bypass, uint, 0444);
  94. MODULE_PARM_DESC(hw_bypass,
  95. "Number of CNN kicks (segments) to be bypassed within the session, 0=none");
  96. static uint32_t slc_bypass;
  97. module_param(slc_bypass, uint, 0444);
  98. MODULE_PARM_DESC(slc_bypass, "SLC bypass mode");
  99. #if defined(HW_AX2) || defined(CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME)
  100. static uint32_t low_latency = VHA_LL_SW_KICK;
  101. #elif defined(HW_AX3) && defined(VHA_USE_LO_PRI_SUB_SEGMENTS)
  102. static uint32_t low_latency = VHA_LL_DISABLED;
  103. #else
  104. static uint32_t low_latency = VHA_LL_SELF_KICK;
  105. #endif
  106. module_param(low_latency, uint, 0444);
  107. MODULE_PARM_DESC(low_latency, "Low latency mode: 0-disabled, 1-sw kick, 2-self kick");
  108. static bool zero_buffers;
  109. module_param(zero_buffers, bool, 0444);
  110. MODULE_PARM_DESC(zero_buffers, "fill every allocated buffer with zeros");
  111. static bool dump_buff_digest = 0;
  112. module_param(dump_buff_digest, bool, 0444);
  113. MODULE_PARM_DESC(dump_buff_digest, "Calculate & dump digest for in/out buffers. This is crc32");
  114. static unsigned long onchipmem_phys_start = VHA_OCM_ADDR_START;
  115. module_param(onchipmem_phys_start, ulong, 0444);
  116. MODULE_PARM_DESC(onchipmem_phys_start,
  117. "Physical address of start of on-chip RAM. An all-0xF value (~0) means OCM is disabled");
  118. static uint32_t onchipmem_size;
  119. module_param(onchipmem_size, uint, 0444);
  120. MODULE_PARM_DESC(onchipmem_size,
  121. "Size of on-chip memory in bytes");
  122. /* bringup test: force MMU fault with MMU base register */
  123. static bool test_mmu_base_pf;
  124. module_param(test_mmu_base_pf, bool, 0444);
  125. MODULE_PARM_DESC(test_mmu_base_pf,
  126. "Bringup test: force MMU page fault on first access");
  127. /* bringup test: do not map into the device after the Nth buffer */
  128. static int32_t test_mmu_no_map_count = -1;
  129. module_param(test_mmu_no_map_count, int, 0444);
  130. MODULE_PARM_DESC(test_mmu_no_map_count,
  131. "Bringup test: force MMU page faults if count >= 0");
  132. #ifdef VHA_SCF
  133. static bool parity_disable = false;
  134. module_param(parity_disable, bool, 0444);
  135. MODULE_PARM_DESC(parity_disable,
  136. "if Y, the core parity feature will be disabled, if it is supported");
  137. static bool confirm_config_reg = false;
  138. module_param(confirm_config_reg, bool, 0444);
  139. MODULE_PARM_DESC(confirm_config_reg,
  140. "Enables confirmation of register writes");
  141. #endif
  142. static bool test_without_bvnc_check;
  143. module_param(test_without_bvnc_check, bool, 0444);
  144. MODULE_PARM_DESC(test_without_bvnc_check,
  145. "When set, the BVNC check is ignored, allowing the HW to be kicked");
  146. /* Fault inject parameter is only applicable when
  147. * kernel fault injection feature is enabled
  148. * in the kernel options -> CONFIG_FAULT_INJECTION=y
  149. * See Documentation/fault-injection/
  150. */
  151. static uint8_t fault_inject;
  152. module_param(fault_inject, byte, 0444);
  153. MODULE_PARM_DESC(fault_inject,
  154. "Enable fault injection using bitwise value: 1-open,2-read,4-write,8-ioctl,16-mmap,32-cmd worker,64-irq worker,128-user space");
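/* Example (illustrative values): fault_inject=6 (i.e. 2|4) selects the read
 * and write paths only; fault_inject=255 selects all of the paths listed
 * above. Injection still requires CONFIG_FAULT_INJECTION, as noted above. */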
  155. /* Interval in milliseconds for testing/simulating system suspend/resume functionality */
  156. static uint8_t suspend_interval_msec;
  157. module_param(suspend_interval_msec, byte, 0444);
  158. MODULE_PARM_DESC(suspend_interval_msec,
  159. "Test suspend/resume interval, 0=disabled, otherwise defines interval in milliseconds");
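/* Note: this is a byte parameter, so the simulated suspend/resume interval is
 * limited to 255 ms. */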
  160. #ifdef VHA_SCF
  161. static bool cnn_combined_crc_enable = true;
  162. #else
  163. static bool cnn_combined_crc_enable = false;
  164. #endif
  165. module_param(cnn_combined_crc_enable, bool, 0444);
  166. MODULE_PARM_DESC(cnn_combined_crc_enable,
  167. "Enables the combined CRC feature");
  168. #ifdef VHA_SCF
  169. static u32 swd_period = 10;
  170. module_param(swd_period, uint, 0444);
  171. MODULE_PARM_DESC(swd_period,
  172. "The timer expiration period in milliseconds, 0=disable");
  173. static unsigned long swd_timeout_default = 0;
  174. module_param(swd_timeout_default, ulong, 0444);
  175. MODULE_PARM_DESC(swd_timeout_default,
  176. "The default expected execution time in us, 0=use MBS values only");
  177. static u32 swd_timeout_m0 = 100;
  178. module_param(swd_timeout_m0, uint, 0444);
  179. MODULE_PARM_DESC(swd_timeout_m0,
  180. "The m0 value in the expected execution time equation: T = (T0 * m0)/100 + m1");
  181. static u32 swd_timeout_m1 = 10000;
  182. module_param(swd_timeout_m1, uint, 0444);
  183. MODULE_PARM_DESC(swd_timeout_m1,
  184. "The m1 value in the expected execution time equation: T = (T0 * m0)/100 + m1");
  185. #endif
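/* A minimal sketch of the software watchdog timeout equation quoted above,
 * T = (T0 * m0)/100 + m1. T0 is taken here to be the expected execution time
 * in microseconds (an assumption based on the parameter descriptions); the
 * helper below is illustrative only and is not part of the driver. */
#if 0
static inline u64 swd_expected_time_us(u64 t0_us)
{
	/* With the defaults m0=100, m1=10000 this reduces to t0_us + 10000 us. */
	return div_u64(t0_us * swd_timeout_m0, 100) + swd_timeout_m1;
}
#endif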
  186. /* Event observers, to be notified when significant events occur */
  187. struct vha_observers vha_observers;
  188. /* Driver context */
  189. static struct {
  190. /* Available driver memory heaps. List of <struct vha_heap> */
  191. struct list_head heaps;
  192. /* Memory Management context for driver */
  193. struct mem_ctx *mem_ctx;
  194. /* List of associated <struct vha_dev> */
  195. struct list_head devices;
  196. unsigned int num_devs;
  197. int initialised;
  198. } drv;
  199. /* Session id counter. */
  200. static uint32_t vha_session_id_cnt = 0;
  201. static void cmd_worker(struct work_struct *work);
  202. static const size_t mmu_page_size_kb_lut[] =
  203. { 4096, 16384, 65536, 262144, 1048576, 2097152};
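/* Note: despite the "_kb" suffix, the entries above are sizes in bytes
 * (4 kB up to 2 MB); the mmu_page_size module parameter is used as an index
 * into this table, e.g. mmu_page_size=2 selects 65536-byte (64 kB) pages. */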
  204. #ifdef CONFIG_FUNCTION_ERROR_INJECTION
  205. noinline int __IOPOLL64_RET(int ret) {
  206. return ret;
  207. }
  208. #include <asm-generic/error-injection.h>
  209. /* this is the placeholder function to support error code injection from
  210. * all IOPOLL_PDUMP* macros
  211. */
  212. ALLOW_ERROR_INJECTION(__IOPOLL64_RET, ERRNO);
  213. #ifdef VHA_EVENT_INJECT
  214. /*
  215. * called in __handle_event_injection()
  216. * under normal circumstances, return 0 and do not inject EVENT
  217. * otherwise, return -errno
  218. */
  219. noinline int __EVENT_INJECT(void) {
  220. return 0;
  221. }
  222. ALLOW_ERROR_INJECTION(__EVENT_INJECT, ERRNO);
  223. #endif /* VHA_EVENT_INJECT */
  224. #endif
  225. /* Calculate current timespan for the given timestamp */
  226. bool get_timespan_us(struct TIMESPEC *from, struct TIMESPEC *to, uint64_t *result)
  227. {
  228. long long total = 0;
  229. if (!TIMESPEC_VALID(from) || !TIMESPEC_VALID(to))
  230. return false;
  231. if (TIMESPEC_COMPARE(from, to) >= 0)
  232. return false;
  233. total = NSEC_PER_SEC * to->tv_sec +
  234. to->tv_nsec;
  235. total -= NSEC_PER_SEC * from->tv_sec +
  236. from->tv_nsec;
  237. do_div(total, 1000UL);
  238. *result = total;
  239. return true;
  240. }
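/* Usage sketch (illustrative; the local variable names are assumptions):
 * computing the span between two timestamps captured elsewhere with the
 * driver's TIMESPEC helpers. */
#if 0
{
	struct TIMESPEC start, end; /* filled in by the caller */
	uint64_t span_us = 0;

	if (get_timespan_us(&start, &end, &span_us))
		pr_debug("operation took %llu us\n", span_us);
}
#endif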
  241. /* Used for simulating system level suspend/resume functionality */
  242. static void suspend_test_worker(struct work_struct *work)
  243. {
  244. struct vha_dev *vha = container_of(work, struct vha_dev, suspend_dwork.work);
  245. int ret;
  246. /* Make resume/suspend cycle */
  247. ret = vha_suspend_dev(vha->dev);
  248. WARN_ON(ret != 0);
  249. vha_resume_dev(vha->dev);
  250. mutex_lock(&vha->lock);
  251. /* Retrigger suspend worker */
  252. schedule_delayed_work(&vha->suspend_dwork,
  253. msecs_to_jiffies(vha->suspend_interval_msec));
  254. mutex_unlock(&vha->lock);
  255. }
  256. /*
  257. * Initialize common platform (driver) memory heaps.
  258. * device (cluster) heaps are initialized in vha_init()
  259. */
  260. int vha_init_plat_heaps(const struct heap_config heap_configs[], int heaps)
  261. {
  262. int i;
  263. int ret = 0;
  264. /* Initialise memory management component */
  265. for (i = 0; i < heaps; i++) {
  266. struct vha_heap *heap;
  267. pr_debug("%s: adding platform heap of type %d\n",
  268. __func__, heap_configs[i].type);
  269. heap = kzalloc(sizeof(struct vha_heap), GFP_KERNEL);
  270. if (!heap) {
  271. ret = -ENOMEM;
  272. goto drv_heap_add_failed;
  273. }
  274. heap->global = true;
  275. ret = img_mem_add_heap(&heap_configs[i], &heap->id);
  276. if (ret < 0) {
  277. pr_err("%s: failed to init platform heap (type %d)!\n",
  278. __func__, heap_configs[i].type);
  279. kfree(heap);
  280. goto drv_heap_add_failed;
  281. }
  282. list_add(&heap->list, &drv.heaps);
  283. }
  284. return ret;
  285. drv_heap_add_failed:
  286. while (!list_empty(&drv.heaps)) {
  287. struct vha_heap *heap;
  288. heap = list_first_entry(&drv.heaps, struct vha_heap, list);
  289. list_del(&heap->list);
  290. img_mem_del_heap(heap->id);
  291. kfree(heap);
  292. }
  293. return ret;
  294. }
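/* A minimal sketch of how a platform file might register a driver-wide heap
 * before any core is probed. The heap type used here is an assumption; real
 * platform code supplies its own heap_config table. */
#if 0
static struct heap_config example_plat_heaps[] = {
	{ .type = IMG_MEM_HEAP_TYPE_UNIFIED, },
};

static int example_plat_init(void)
{
	return vha_init_plat_heaps(example_plat_heaps,
			ARRAY_SIZE(example_plat_heaps));
}
#endif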
  295. int vha_early_init(void)
  296. {
  297. int ret;
  298. INIT_LIST_HEAD(&drv.heaps);
  299. INIT_LIST_HEAD(&drv.devices);
  300. /* Create memory management context for HW buffers */
  301. ret = img_mem_create_proc_ctx(&drv.mem_ctx);
  302. if (ret) {
  303. pr_err("%s: failed to create mem context (err:%d)!\n",
  304. __func__, ret);
  305. drv.mem_ctx = NULL;
  306. }
  307. return ret;
  308. }
  309. /*
  310. * Lazy initialization of main driver context (when first core is probed)
  311. */
  312. static int vha_init(struct vha_dev *vha,
  313. const struct heap_config heap_configs[], int heaps)
  314. {
  315. struct device *dev = vha->dev;
  316. int ret, i;
  317. #ifdef CONFIG_HW_MULTICORE
  318. ret = vha_dev_scheduler_init(vha);
  319. if (ret != 0) {
  320. dev_err(dev, "%s: failed initializing scheduler!\n", __func__);
  321. return ret;
  322. }
  323. if (!vha_dev_dbg_params_init(vha)) {
  324. dev_err(dev, "%s: invalid debug params detected!\n", __func__);
  325. return -EINVAL;
  326. }
  327. #endif
  328. /* Initialise local device (cluster) heaps */
  329. for (i = 0; i < heaps; i++) {
  330. struct vha_heap *heap;
  331. dev_dbg(dev, "%s: adding device heap of type %d\n",
  332. __func__, heap_configs[i].type);
  333. heap = kzalloc(sizeof(struct vha_heap), GFP_KERNEL);
  334. if (!heap) {
  335. ret = -ENOMEM;
  336. goto heap_add_failed;
  337. }
  338. ret = img_mem_add_heap(&heap_configs[i], &heap->id);
  339. if (ret < 0) {
  340. dev_err(dev, "%s: failed to init device heap (type %d)!\n",
  341. __func__, heap_configs[i].type);
  342. kfree(heap);
  343. goto heap_add_failed;
  344. }
  345. list_add(&heap->list, &vha->heaps);
  346. }
  347. /* Now copy platform (global) heap IDs to the device vha_heap list; the global heap IDs
  348. * are not owned by vha_dev anyway (heap->global=true).
  349. * This is done so that vha_ioctl_query_heaps() can report both platform
  350. * and device heaps easily. */
  351. {
  352. struct list_head* pos;
  353. list_for_each_prev(pos, &drv.heaps) {
  354. struct vha_heap* heap = list_entry(pos, struct vha_heap, list);
  355. struct vha_heap* heap_copy = kmemdup(heap, sizeof(*heap), GFP_KERNEL);
  356. if(!heap_copy) {
  357. ret = -ENOMEM;
  358. goto heap_add_failed;
  359. }
  360. INIT_LIST_HEAD(&heap_copy->list);
  361. list_add(&heap_copy->list, &vha->heaps);
  362. }
  363. }
  364. /* initialize local ocm cluster heaps */
  365. if (vha->hw_props.locm_size_bytes && onchipmem_phys_start == ~0)
  366. dev_warn(dev, "%s: Onchip memory physical address not set!\n",
  367. __func__);
  368. /* OCM heap type is automatically appended */
  369. if (vha->hw_props.locm_size_bytes && onchipmem_phys_start != ~0) {
  370. struct heap_config heap_cfg;
  371. struct vha_heap *heap;
  372. memset(&heap_cfg, 0, sizeof(heap_cfg));
  373. heap_cfg.type = IMG_MEM_HEAP_TYPE_OCM;
  374. heap_cfg.options.ocm.phys = onchipmem_phys_start;
  375. heap_cfg.options.ocm.size = vha->hw_props.locm_size_bytes;
  376. heap_cfg.options.ocm.hattr = IMG_MEM_HEAP_ATTR_LOCAL;
  377. dev_dbg(dev, "%s: adding heap of type %d\n",
  378. __func__, heap_cfg.type);
  379. heap = kzalloc(sizeof(struct vha_heap), GFP_KERNEL);
  380. if (!heap) {
  381. ret = -ENOMEM;
  382. goto heap_add_failed;
  383. }
  384. ret = img_mem_add_heap(&heap_cfg, &heap->id);
  385. if (ret < 0) {
  386. dev_err(dev, "%s: failed to init heap (type %d)!\n",
  387. __func__, heap_cfg.type);
  388. kfree(heap);
  389. goto heap_add_failed;
  390. }
  391. list_add(&heap->list, &vha->heaps);
  392. }
  393. #ifdef CONFIG_HW_MULTICORE
  394. if (vha->hw_props.socm_size_bytes && onchipmem_phys_start != ~0) {
  395. struct heap_config heap_cfg;
  396. struct vha_heap *heap;
  397. memset(&heap_cfg, 0, sizeof(heap_cfg));
  398. heap_cfg.type = IMG_MEM_HEAP_TYPE_OCM;
  399. heap_cfg.options.ocm.phys = onchipmem_phys_start +
  400. vha->hw_props.locm_size_bytes + IMG_MEM_VA_GUARD_GAP;
  401. heap_cfg.options.ocm.size = vha->hw_props.socm_size_bytes;
  402. heap_cfg.options.ocm.hattr = IMG_MEM_HEAP_ATTR_SHARED;
  403. dev_dbg(dev, "%s: adding heap of type %d\n",
  404. __func__, heap_cfg.type);
  405. heap = kzalloc(sizeof(struct vha_heap), GFP_KERNEL);
  406. if (!heap) {
  407. ret = -ENOMEM;
  408. goto heap_add_failed;
  409. }
  410. ret = img_mem_add_heap(&heap_cfg, &heap->id);
  411. if (ret < 0) {
  412. dev_err(dev, "%s: failed to init heap (type %d)!\n",
  413. __func__, heap_cfg.type);
  414. kfree(heap);
  415. goto heap_add_failed;
  416. }
  417. list_add(&heap->list, &vha->heaps);
  418. }
  419. #endif
  420. {
  421. /* now get the last entry and make it responsible for internal allocations
  422. * use last entry because list_add() inserts at the head
  423. * When choosing the internal alloc heap, the device local heaps take precedence over
  424. * global platform heaps */
  425. struct vha_heap* heap = list_last_entry(&vha->heaps, struct vha_heap, list);
  426. if(!heap) {
  427. dev_err(dev, "%s: failed to locate heap for internal alloc\n",
  428. __func__);
  429. ret = -EINVAL;
  430. /* Loop registered heaps just for sanity */
  431. goto heap_add_failed;
  432. }
  433. vha->int_heap_id = heap->id;
  434. dev_dbg(dev, "%s: using heap %d for internal alloc\n",
  435. __func__, vha->int_heap_id);
  436. }
  437. /* Do not proceed if internal heap not defined */
  438. drv.initialised = 1;
  439. dev_dbg(dev, "%s: vha drv init done\n", __func__);
  440. return 0;
  441. heap_add_failed:
  442. while (!list_empty(&vha->heaps)) {
  443. struct vha_heap *heap;
  444. heap = list_first_entry(&vha->heaps, struct vha_heap, list);
  445. list_del(&heap->list);
  446. if(!heap->global)
  447. img_mem_del_heap(heap->id);
  448. kfree(heap);
  449. }
  450. return ret;
  451. }
  452. int vha_deinit(void)
  453. {
  454. /* Destroy memory management context */
  455. if (drv.mem_ctx) {
  456. size_t mem_usage;
  457. uint32_t MB, bytes, kB;
  458. img_mem_get_usage(drv.mem_ctx, &mem_usage, NULL);
  459. MB = mem_usage / (1024 * 1024);
  460. bytes = mem_usage - (MB * (1024 * 1024));
  461. kB = (bytes * 1000) / (1024 * 1024);
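/* 'kB' above is really the fractional-MB remainder scaled to thousandths for
 * the "%u.%u MB" print below; it is not a kilobyte count. */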
  462. pr_debug("%s: Total kernel memory used: %u.%u MB\n",
  463. __func__, MB, kB);
  464. img_mem_destroy_proc_ctx(drv.mem_ctx);
  465. drv.mem_ctx = NULL;
  466. }
  467. /* Deinitialize memory management component */
  468. while (!list_empty(&drv.heaps)) {
  469. struct vha_heap *heap;
  470. heap = list_first_entry(&drv.heaps, struct vha_heap, list);
  471. BUG_ON(!heap->global);
  472. list_del(&heap->list);
  473. img_mem_del_heap(heap->id);
  474. kfree(heap);
  475. }
  476. drv.initialised = 0;
  477. return 0;
  478. }
  479. /*
  480. * Returns: true if hardware has required capabilities, false otherwise.
  481. * Implementation is a simple check of expected BVNC against hw CORE_ID
  482. */
  483. bool vha_dev_check_hw_capab(struct vha_dev* vha, uint64_t expected_hw_capab)
  484. {
  485. uint64_t __maybe_unused hw = vha->hw_props.core_id
  486. & VHA_CR_CORE_ID_BVNC_CLRMSK;
  487. uint64_t __maybe_unused mbs = expected_hw_capab
  488. & VHA_CR_CORE_ID_BVNC_CLRMSK;
  489. if (!test_without_bvnc_check) {
  490. img_pdump_printf(
  491. "IF SKIP_COREID_CHECK\n"
  492. "COM Skip COREID Check\n"
  493. "ELSE SKIP_COREID_CHECK\n"
  494. "COM CHECKING CORE_ID: expecting BVNC:%llu.%llu.%llu.%llu\n",
  495. core_id_quad(expected_hw_capab));
  496. IOPOLL64_PDUMP(expected_hw_capab, 1, 1,
  497. VHA_CR_CORE_ID_BVNC_CLRMSK,
  498. VHA_CR_CORE_ID);
  499. img_pdump_printf(
  500. "FI SKIP_COREID_CHECK\n");
  501. }
  502. if ((expected_hw_capab >> 48) != HW_SERIES) {
  503. dev_err(vha->dev,
  504. "%s: network was compiled for incorrect hardware series: expected %llu / found %u\n",
  505. __func__,
  506. (expected_hw_capab >> 48), HW_SERIES);
  507. return false;
  508. }
  509. #ifndef CONFIG_VHA_DUMMY
  510. if (hw != mbs) {
  511. dev_warn(vha->dev,
  512. "%s: network was compiled for an incorrect hardware variant (BVNC): "
  513. "found %llu.%llu.%llu.%llu, expected %llu.%llu.%llu.%llu\n",
  514. __func__,
  515. core_id_quad(vha->hw_props.core_id),
  516. core_id_quad(expected_hw_capab));
  517. /* Conditionally allow the hw to be kicked */
  518. if (test_without_bvnc_check)
  519. dev_warn(vha->dev, "%s: trying to kick the hw ... ", __func__);
  520. else {
  521. dev_err(vha->dev, "%s: can't kick the hardware!", __func__);
  522. return false;
  523. }
  524. }
  525. #endif
  526. return true;
  527. }
  528. /* notify the user space if a response msg is ready */
  529. void vha_cmd_notify(struct vha_cmd *cmd)
  530. {
  531. struct vha_session *session = cmd->session;
  532. struct vha_rsp *rsp = cmd->rsp;
  533. dev_dbg(session->vha->dev, "%s: 0x%08x/%u\n",
  534. __func__, cmd->user_cmd.cmd_id, session->id);
  535. if (rsp) {
  536. cmd->rsp = NULL;
  537. list_add_tail(&rsp->list, &session->rsps);
  538. }
  539. wake_up(&session->wq);
  540. /* we are done with this cmd, let's free it */
  541. list_del(&cmd->list[cmd->user_cmd.priority]);
  542. kfree(cmd);
  543. }
  544. static void vha_measure_core_freq(struct vha_dev *vha)
  545. {
  546. if (vha->stats.last_proc_us) {
  547. uint64_t proc = vha->stats.last_proc_us;
  548. do_div(proc, 1000UL);
  549. if (proc) {
  550. uint64_t cycles = vha->calibration_cycles;
  551. do_div(cycles, proc);
  552. vha->freq_khz = cycles;
  553. dev_info(vha->dev,
  554. "%s: Measured core clock frequency[kHz]: %u\n",
  555. __func__, vha->freq_khz);
  556. return;
  557. }
  558. }
  559. dev_info(vha->dev,
  560. "%s: Can't measure core clock frequency!\n",
  561. __func__);
  562. }
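/* Worked example (illustrative numbers): if the calibration kick ran
 * calibration_cycles = 2,000,000 core cycles in last_proc_us = 5,000 us (5 ms),
 * the code above yields freq_khz = 2,000,000 / 5 = 400,000 kHz, i.e. 400 MHz. */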
  563. bool vha_check_calibration(struct vha_dev *vha)
  564. {
  565. if (vha->stats.last_proc_us) {
  566. /* Core may have been kicked to
  567. * measure frequency */
  568. if (vha->do_calibration) {
  569. vha_dev_stop(vha, true);
  570. vha_measure_core_freq(vha);
  571. vha->do_calibration = false;
  572. /* Something may have been scheduled in
  573. * the middle so poke the worker */
  574. vha_chk_cmd_queues(vha, false);
  575. return true;
  576. }
  577. }
  578. return false;
  579. }
  580. /*
  581. * A session represents a single device and a set of buffers
  582. * to be used for inferences.
  583. * If required, buffers will be allocated for hardware CRC and DEBUG.
  584. */
  585. int vha_add_session(struct vha_session *session)
  586. {
  587. struct vha_dev *vha = session->vha;
  588. int ret;
  589. struct mmu_config mmu_config;
  590. int ctx_id;
  591. uint8_t pri;
  592. img_pdump_printf("-- OPEN_BEGIN\n");
  593. img_pdump_printf("-- VHA driver session started\n");
  594. ret = mutex_lock_interruptible(&vha->lock);
  595. if (ret)
  596. return ret;
  597. #ifdef CONFIG_VHA_DUMMY
  598. if (list_empty(&vha->sessions) && !vha->do_calibration)
  599. vha_dev_start(vha);
  600. #endif
  601. idr_init(&session->onchip_maps);
  602. memset(&mmu_config, 0, sizeof(mmu_config));
  603. /* Create a memory context for this session */
  604. if (vha->mmu_mode == VHA_MMU_DISABLED) {
  605. /* if MMU is disabled,
  606. * bypass the mmu hw layer,
  607. * but still need to do the buffer
  608. * allocation through img_mem api
  609. */
  610. mmu_config.bypass_hw = true;
  611. #ifdef CONFIG_HW_MULTICORE
  612. mmu_config.bypass_offset = IMG_MEM_VA_HEAP1_BASE;
  613. #endif
  614. }
  615. #ifdef VHA_SCF
  616. /* Do not calculate parity when core does not support it,
  617. * or we forced the core to disable it */
  618. if (vha->hw_props.supported.parity &&
  619. !vha->parity_disable) {
  620. mmu_config.use_pte_parity = true;
  621. dev_dbg(vha->dev,
  622. "%s: Enabling MMU parity protection!\n",
  623. __func__);
  624. }
  625. #endif
  626. mmu_config.addr_width = vha->hw_props.mmu_width;
  627. mmu_config.alloc_attr = IMG_MEM_ATTR_MMU | /* Indicate MMU allocation */
  628. IMG_MEM_ATTR_WRITECOMBINE;
  629. mmu_config.page_size = mmu_page_size_kb_lut[vha->mmu_page_size];
  630. img_pdump_printf("-- MMU context: using %zukB MMU pages, %lukB CPU pages\n",
  631. mmu_page_size_kb_lut[vha->mmu_page_size]/1024, PAGE_SIZE/1024);
  632. /* Update current MMU page size, so that the correct
  633. * granularity is used when generating virtual addresses */
  634. vha->hw_props.mmu_pagesize = mmu_config.page_size;
  635. /* Update clock frequency stored in props */
  636. vha->hw_props.clock_freq = vha->freq_khz;
  637. for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
  638. ret = img_mmu_ctx_create(vha->dev, &mmu_config,
  639. session->mem_ctx, vha->int_heap_id,
  640. vha_mmu_callback, session,
  641. &session->mmu_ctxs[ctx_id].ctx);
  642. if (ret < 0) {
  643. dev_err(vha->dev, "%s: failed to create sw mmu context%d!\n",
  644. __func__, ctx_id);
  645. goto out_unlock;
  646. }
  647. if (vha->mmu_mode != VHA_MMU_DISABLED) {
  648. /* Store mmu context id */
  649. session->mmu_ctxs[ctx_id].id = ret;
  650. ret = img_mmu_get_pc(session->mmu_ctxs[ctx_id].ctx,
  651. &session->mmu_ctxs[ctx_id].pc_baddr,
  652. &session->mmu_ctxs[ctx_id].pc_bufid);
  653. if (ret) {
  654. dev_err(vha->dev, "%s: failed to get PC for context%d!\n",
  655. __func__, ctx_id);
  656. ret = -EFAULT;
  657. goto out_free_mmu_ctx;
  658. }
  659. }
  660. }
  661. #ifndef CONFIG_HW_MULTICORE
  662. if (vha->hw_props.locm_size_bytes && onchipmem_phys_start != ~0) {
  663. /* OCM data is considered IO (or shared) */
  664. ret = img_mmu_init_cache(session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx,
  665. onchipmem_phys_start, vha->hw_props.locm_size_bytes
  666. #if defined(CFG_SYS_VAGUS)
  667. + sizeof(uint32_t)
  668. #endif
  669. );
  670. if (ret < 0) {
  671. dev_err(vha->dev, "%s: failed to init cache!\n",
  672. __func__);
  673. goto out_free_mmu_ctx;
  674. }
  675. vha_dev_ocm_configure(vha);
  676. }
  677. #endif
  678. /* enable CRC and DEBUG registers */
  679. ret = vha_dbg_create_hwbufs(session);
  680. if (ret)
  681. goto out_free_mmu_ctx;
  682. img_pdump_printf("-- OPEN_END\n");
  683. /* Used for simulating system level suspend/resume functionality */
  684. if (list_empty(&vha->sessions) && vha->suspend_interval_msec) {
  685. INIT_DELAYED_WORK(&vha->suspend_dwork, suspend_test_worker);
  686. /* Start suspend worker */
  687. schedule_delayed_work(&vha->suspend_dwork,
  688. msecs_to_jiffies(vha->suspend_interval_msec));
  689. }
  690. /* Assign session id. */
  691. session->id = vha_session_id_cnt++;
  692. list_add_tail(&session->list, &vha->sessions);
  693. for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
  694. struct vha_session *aux_head = list_prev_entry(session, list);
  695. list_add(&session->sched_list[pri], &aux_head->sched_list[pri]);
  696. }
  697. /* All MMU contexts were successfully created,
  698. so it is safe to increment the counters and assign IDs. */
  699. if (vha->mmu_mode != VHA_MMU_DISABLED)
  700. for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
  701. uint8_t hw_ctxid = 0;
  702. /* Assign mmu hardware context */
  703. hw_ctxid = VHA_MMU_GET_CTXID(session);
  704. hw_ctxid += (VHA_MMU_AUX_HW_CTX_SHIFT*ctx_id);
  705. vha->mmu_ctxs[hw_ctxid]++;
  706. session->mmu_ctxs[ctx_id].hw_id = hw_ctxid;
  707. }
  708. dev_dbg(vha->dev,
  709. "%s: %p ctxid:%d\n", __func__, session,
  710. session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id);
  711. mutex_unlock(&vha->lock);
  712. return ret;
  713. out_free_mmu_ctx:
  714. for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
  715. if (session->mmu_ctxs[ctx_id].ctx)
  716. img_mmu_ctx_destroy(session->mmu_ctxs[ctx_id].ctx);
  717. out_unlock:
  718. mutex_unlock(&vha->lock);
  719. return ret;
  720. }
  721. static void vha_clean_onchip_maps(struct vha_session *session, struct vha_buffer *buf)
  722. {
  723. struct vha_onchip_map *onchip_map = NULL, *tmp = NULL;
  724. WARN_ON(!buf);
  725. WARN_ON(!session);
  726. list_for_each_entry_safe(onchip_map, tmp, &buf->onchip_maps, list) {
  727. idr_remove(&session->onchip_maps, onchip_map->mapid);
  728. list_del(&onchip_map->list);
  729. kfree(onchip_map);
  730. }
  731. }
  732. #ifdef KERNEL_DMA_FENCE_SUPPORT
  733. void vha_rm_buf_fence(struct vha_session *session, struct vha_buffer *buf)
  734. {
  735. struct vha_buf_sync_info *sync_info = &buf->sync_info;
  736. img_mem_remove_fence(session->mem_ctx, buf->id);
  737. if (sync_info->in_fence) {
  738. if (!dma_fence_is_signaled(sync_info->in_fence))
  739. dma_fence_remove_callback(sync_info->in_fence, &sync_info->in_sync_cb);
  740. if (sync_info->in_sync_file) {
  741. fput(sync_info->in_sync_file);
  742. sync_info->in_sync_file = NULL;
  743. }
  744. sync_info->in_sync_fd = VHA_SYNC_NONE;
  745. dma_fence_put(sync_info->in_fence);
  746. sync_info->in_fence = NULL;
  747. memset(&sync_info->in_sync_cb, 0, sizeof(struct dma_fence_cb));
  748. }
  749. }
  750. #endif
  751. #if defined(VHA_SCF) && defined(CONFIG_HW_MULTICORE)
  752. void vha_start_swd(struct vha_dev *vha, int cmd_idx)
  753. {
  754. if (vha->swd_period) {
  755. schedule_delayed_work(&vha->swd_dwork, msecs_to_jiffies(vha->swd_period));
  756. }
  757. }
  758. #endif
  759. void vha_rm_session(struct vha_session *session)
  760. {
  761. struct vha_dev *vha = session->vha;
  762. struct vha_session *cur_session, *tmp_session;
  763. struct vha_rsp *cur_rsp, *tmp_rsp;
  764. struct vha_buffer *cur_buf, *tmp_buf;
  765. bool reschedule = false;
  766. int ctx_id;
  767. uint8_t pri;
  768. mutex_lock(&vha->lock);
  769. img_pdump_printf("-- FREE_END\n");
  770. session->freeing = false;
  771. img_pdump_printf("-- CLOSE_BEGIN\n");
  772. /* Remove pend/queued session commands. */
  773. reschedule = vha_rm_session_cmds(session);
  774. /* Remove responses for session related commands. */
  775. list_for_each_entry_safe(cur_rsp, tmp_rsp, &session->rsps, list) {
  776. dev_warn(vha->dev,
  777. "Removing a session while the rsp is still pending\n");
  778. list_del(&cur_rsp->list);
  779. kfree(cur_rsp);
  780. }
  781. /* Disable CRC and DEBUG capture. */
  782. #ifdef CONFIG_HW_MULTICORE
  783. vha_dbg_stop_hwbufs(session, vha->full_core_mask);
  784. #else
  785. vha_dbg_stop_hwbufs(session, 0);
  786. #endif
  787. vha_dbg_destroy_hwbufs(session);
  788. list_for_each_entry_safe(cur_buf, tmp_buf, &session->bufs, list) {
  789. dev_warn(vha->dev,
  790. "Removing a session while the buffer wasn't freed\n");
  791. #ifdef KERNEL_DMA_FENCE_SUPPORT
  792. vha_rm_buf_fence(session, cur_buf);
  793. #endif
  794. vha_clean_onchip_maps(session, cur_buf);
  795. list_del(&cur_buf->list);
  796. kfree(cur_buf);
  797. }
  798. /* Remove link from VHA's list. */
  799. list_for_each_entry_safe(cur_session, tmp_session,
  800. &vha->sessions, list) {
  801. if (cur_session == session)
  802. list_del(&cur_session->list);
  803. }
  804. for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
  805. list_for_each_entry_safe(cur_session, tmp_session,
  806. &vha->sched_sessions[pri], sched_list[pri]) {
  807. if (cur_session == session)
  808. list_del(&cur_session->sched_list[pri]);
  809. }
  810. }
  811. /* Reset hardware if required. */
  812. if ((list_empty(&vha->sessions) && !vha->do_calibration)
  813. || reschedule
  814. )
  815. vha_dev_stop(vha, reschedule);
  816. #ifndef CONFIG_HW_MULTICORE
  817. img_mmu_clear_cache(session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx);
  818. #endif
  819. /* Delete session's MMU memory contexts. */
  820. for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
  821. img_mmu_ctx_destroy(session->mmu_ctxs[ctx_id].ctx);
  822. if (vha->mmu_mode != VHA_MMU_DISABLED) {
  823. uint8_t hw_ctxid = session->mmu_ctxs[ctx_id].hw_id;
  824. WARN_ON(!vha->mmu_ctxs[hw_ctxid]);
  825. if (vha->mmu_ctxs[hw_ctxid])
  826. vha->mmu_ctxs[hw_ctxid]--;
  827. }
  828. }
  829. /* Update mem stats - max memory usage in this session. */
  830. img_mem_get_usage(session->mem_ctx,
  831. (size_t *)&vha->stats.mem_usage_last, NULL);
  832. {
  833. uint32_t MB = vha->stats.mem_usage_last / (1024 * 1024);
  834. uint32_t bytes = vha->stats.mem_usage_last -
  835. (MB * (1024 * 1024));
  836. uint32_t kB = (bytes * 1000) / (1024 * 1024);
  837. dev_dbg(vha->dev,
  838. "%s: Total user memory used in session: %u.%u MB\n",
  839. __func__, MB, kB);
  840. }
  841. img_mmu_get_usage(session->mem_ctx,
  842. (size_t *)&vha->stats.mmu_usage_last, NULL);
  843. vha->active_mmu_ctx = VHA_INVALID_ID;
  844. img_pdump_printf("-- VHA driver session complete\n");
  845. img_pdump_printf("-- CLOSE_END\n");
  846. /* Used for simulating system level suspend/resume functionality */
  847. if (list_empty(&vha->sessions) && vha->suspend_interval_msec) {
  848. mutex_unlock(&vha->lock);
  849. flush_scheduled_work();
  850. cancel_delayed_work_sync(&vha->suspend_dwork);
  851. mutex_lock(&vha->lock);
  852. }
  853. mutex_unlock(&vha->lock);
  854. /* Reschedule once the session is removed. */
  855. if (reschedule)
  856. vha_chk_cmd_queues(vha, true);
  857. }
  858. static int vha_alloc_common(struct vha_dev *vha)
  859. {
  860. #if 0
  861. img_pdump_printf("-- INIT_BEGIN\n");
  862. img_pdump_printf("-- INIT_END\n");
  863. #endif
  864. return 0;
  865. }
  866. static ssize_t
  867. BVNC_show(struct device *dev, struct device_attribute *attr, char *buf)
  868. {
  869. struct vha_dev *vha = vha_dev_get_drvdata(dev);
  870. struct vha_hw_props *props = &vha->hw_props;
  871. return snprintf(buf, 4*6, "%hu.%hu.%hu.%hu\n",
  872. (unsigned short)(props->core_id >> 48),
  873. (unsigned short)(props->core_id >> 32),
  874. (unsigned short)(props->core_id >> 16),
  875. (unsigned short)props->core_id);
  876. }
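/* The CORE_ID register packs B.V.N.C as four 16-bit fields (bits 63:48 down
 * to 15:0), which is what the shifts above extract; reading the sysfs node
 * therefore returns a dotted quad such as "1.2.3.4" (value illustrative). */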
  877. static DEVICE_ATTR_RO(BVNC);
  878. static struct attribute *vha_sysfs_entries[] = {
  879. &dev_attr_BVNC.attr,
  880. NULL,
  881. };
  882. static const struct attribute_group vha_attr_group = {
  883. .name = NULL, /* put in device directory */
  884. .attrs = vha_sysfs_entries,
  885. };
  886. void vha_sched_apm(struct vha_dev *vha, struct vha_apm_work *apm_work)
  887. {
  888. unsigned long work_at = jiffies + msecs_to_jiffies(apm_work->delay_ms);
  889. int ret;
  890. dev_dbg(vha->dev, "%s: core_mask:%#x delay:%d\n",
  891. __func__, apm_work->core_mask, apm_work->delay_ms);
  892. /*
  893. * Try to queue the work.
  894. */
  895. ret = schedule_delayed_work(&apm_work->dwork,
  896. work_at - jiffies);
  897. if (!ret) {
  898. /* Work is already in the queue.
  899. * Canceling & rescheduling might be problematic,
  900. * so just modify to postpone.
  901. */
  902. mod_delayed_work(system_wq, &apm_work->dwork,
  903. work_at - jiffies);
  904. }
  905. }
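/* Note on the pattern above: schedule_delayed_work() returns false when the
 * work item is already pending and leaves its timer untouched, so
 * mod_delayed_work() is then used to push the expiry out to the new deadline
 * instead of cancelling and re-queuing the work. */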
  906. static void vha_apm_worker(struct work_struct *work)
  907. {
  908. struct vha_apm_work *apm_work =
  909. container_of(work, struct vha_apm_work, dwork.work);
  910. struct vha_dev *vha = apm_work->vha;
  911. mutex_lock(&vha->lock);
  912. dev_dbg(vha->dev, "%s: apm expired! core_mask:%#x\n",
  913. __func__, apm_work->core_mask);
  914. vha_dev_apm_stop(vha, apm_work);
  915. mutex_unlock(&vha->lock);
  916. }
  917. int vha_add_dev(struct device *dev,
  918. const struct heap_config heap_configs[], const int heaps,
  919. void *plat_data, void __iomem *reg_base, uint32_t reg_size)
  920. {
  921. struct vha_dev_common* vha_common;
  922. struct vha_dev *vha;
  923. int ret;
  924. uint8_t id, pri;
  925. /* Validate module params. */
  926. ret = -EINVAL;
  927. if (low_latency > VHA_LL_SELF_KICK) {
  928. dev_err(dev, "%s: Unsupported low latency mode %u!\n", __func__, low_latency);
  929. goto out_validate_params;
  930. } else if ((mmu_mode != VHA_MMU_DISABLED) &&
  931. (mmu_mode != VHA_MMU_DIRECT) &&
  932. (mmu_mode != VHA_MMU_40BIT)) {
  933. dev_err(dev, "%s: Unsupported MMU mode %u!\n", __func__, mmu_mode);
  934. goto out_validate_params;
  935. } else if (mmu_ctx_default >= VHA_MMU_MAX_HW_CTXS) {
  936. dev_err(dev, "%s: Unsupported MMU context id %u!\n", __func__, mmu_ctx_default);
  937. goto out_validate_params;
  938. } else if (mmu_page_size >= ARRAY_SIZE(mmu_page_size_kb_lut)) {
  939. dev_err(dev, "%s: Unsupported MMU page size %u!\n", __func__, mmu_page_size);
  940. goto out_validate_params;
  941. }
  942. ret = 0;
  943. vha_common = devm_kzalloc(dev, sizeof(struct vha_dev_common), GFP_KERNEL);
  944. if (!vha_common)
  945. return -ENOMEM;
  946. vha = devm_kzalloc(dev, sizeof(struct vha_dev), GFP_KERNEL);
  947. if (!vha) {
  948. ret = -ENOMEM;
  949. goto out_free_dev;
  950. }
  951. vha_common->vha_dev = vha;
  952. dev_dbg(dev, "%s: allocated vha_dev @ %px\n", __func__, vha);
  953. vha->dev = dev;
  954. vha->reg_base = reg_base;
  955. vha->reg_size = reg_size;
  956. vha->plat_data = plat_data;
  957. vha->fault_inject = fault_inject;
  958. vha->suspend_interval_msec = suspend_interval_msec;
  959. vha->hw_bypass = hw_bypass;
  960. vha->low_latency = low_latency;
  961. vha->no_clock_disable = no_clock_disable;
  962. vha->pm_delay = pm_delay;
  963. vha->mmu_mode = mmu_mode;
  964. vha->mmu_ctx_default = mmu_ctx_default;
  965. vha->mmu_page_size = mmu_page_size;
  966. vha->mmu_base_pf_test = test_mmu_base_pf;
  967. vha->mmu_no_map_count = test_mmu_no_map_count;
  968. vha->ocm_paddr = onchipmem_phys_start;
  969. #ifdef VHA_SCF
  970. vha->parity_disable = parity_disable;
  971. vha->confirm_config_reg = confirm_config_reg;
  972. #endif
  973. vha->cnn_combined_crc_enable = cnn_combined_crc_enable;
  974. vha->active_mmu_ctx = VHA_INVALID_ID;
  975. vha->dump_buff_digest = dump_buff_digest;
  976. /* Enable and configure pm_runtime*/
  977. if (!pm_runtime_enabled(vha->dev))
  978. pm_runtime_enable(vha->dev);
  979. pm_runtime_set_autosuspend_delay(vha->dev, VHA_CORE_SUSPEND_DELAY);
  980. pm_runtime_use_autosuspend(vha->dev);
  981. /* Resume device so that we can read the core props */
  982. if (pm_runtime_status_suspended(vha->dev))
  983. pm_runtime_get_sync(vha->dev);
  984. /* Read HW properties */
  985. ret = vha_dev_get_props(vha, onchipmem_size);
  986. if (ret) {
  987. dev_err(dev, "%s: could not get vha properties at %px\n",
  988. __func__, (__force void *)vha->reg_base);
  989. pm_runtime_put_sync_suspend(vha->dev);
  990. goto out_free_dev;
  991. }
  992. if (test_without_bvnc_check)
  993. vha->hw_props.skip_bvnc_check = true;
  994. mutex_init(&vha->lock);
  995. spin_lock_init(&vha->irq_lock);
  996. INIT_LIST_HEAD(&vha->sessions);
  997. for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++)
  998. INIT_LIST_HEAD(&vha->sched_sessions[pri]);
  999. INIT_LIST_HEAD(&vha->heaps);
  1000. ret = vha_init(vha, heap_configs, heaps);
  1001. if (ret) {
  1002. dev_err(dev, "%s: main component initialisation failed!",
  1003. __func__);
  1004. goto out_free_dev;
  1005. }
  1006. /* Initialise command data pump worker */
  1007. INIT_WORK(&vha->worker, cmd_worker);
  1008. #ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
  1009. /* Initialise hw processing time simulation worker */
  1010. #ifdef CONFIG_HW_MULTICORE
  1011. {
  1012. for (id = 0; id < vha->hw_props.num_cnn_core_devs; id ++) {
  1013. INIT_DELAYED_WORK(&vha->dummy_dworks[id].dummy_dwork,
  1014. vha_dummy_worker);
  1015. vha->dummy_dworks[id].wm_id = id;
  1016. vha->dummy_dworks[id].vha = vha;
  1017. }
  1018. }
  1019. #else
  1020. INIT_DELAYED_WORK(&vha->dummy_dwork, vha_dummy_worker);
  1021. #endif
  1022. #endif
  1023. dev_set_drvdata(dev, vha_common);
  1024. ret = vha_api_add_dev(dev, vha, drv.num_devs);
  1025. if (ret) {
  1026. dev_err(dev, "%s: failed to add UM node!", __func__);
  1027. goto out_add_dev;
  1028. }
  1029. vha_dbg_init(vha);
  1030. ret = vha_pdump_init(vha, &vha_common->pdump);
  1031. if (ret == 0)
  1032. vha->hw_props.use_pdump = true;
  1033. if (ret == -EPERM)
  1034. goto out_alloc_common;
  1035. else
  1036. ret = 0;
  1037. ret = vha_alloc_common(vha);
  1038. if (ret) {
  1039. dev_err(dev, "%s: failed to allocate common dev buffers!",
  1040. __func__);
  1041. goto out_alloc_common;
  1042. }
  1043. pm_runtime_put_sync_autosuspend(vha->dev);
  1044. /* Add device to driver context */
  1045. list_add(&vha->list, &drv.devices);
  1046. drv.num_devs++;
  1047. if (sysfs_create_group(&dev->kobj, &vha_attr_group))
  1048. dev_err(dev, "failed to create sysfs entries\n");
  1049. vha->freq_khz = freq_khz;
  1050. #ifndef CONFIG_VHA_DUMMY
  1051. if (vha->freq_khz < 0)
  1052. vha->do_calibration = true; /* ??? OS0 ? */
  1053. if (vha->freq_khz <= 0)
  1054. vha->freq_khz = VHA_CORE_CLOCK_MHZ * 1000;
  1055. if (vha->do_calibration)
  1056. dev_info(dev, "%s: Core freq[kHz]: to be calibrated",
  1057. __func__);
  1058. else
  1059. dev_info(dev, "%s: Core freq[kHz]: %u",
  1060. __func__, vha->freq_khz);
  1061. #else
  1062. # ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
  1063. vha->freq_khz = VHA_CORE_CLOCK_MHZ * 1000;
  1064. dev_info(dev, "%s: Core freq[kHz]: %u (faked for DUMMY device)",
  1065. __func__, vha->freq_khz);
  1066. # endif
  1067. #endif
  1068. for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
  1069. vha->apm_dworks[id].vha = vha;
  1070. vha->apm_dworks[id].core_mask = 1 << id;
  1071. vha->apm_dworks[id].delay_ms = vha->pm_delay;
  1072. INIT_DELAYED_WORK(&vha->apm_dworks[id].dwork, vha_apm_worker);
  1073. }
  1074. #if defined(VHA_SCF) && defined(CONFIG_HW_MULTICORE)
  1075. /* Initialise the SW watchdog */
  1076. INIT_DELAYED_WORK(&vha->swd_dwork, wd_timer_callback);
  1077. vha->swd_period = swd_period;
  1078. vha->swd_timeout_default = swd_timeout_default;
  1079. vha->swd_timeout_m0 = swd_timeout_m0;
  1080. vha->swd_timeout_m1 = swd_timeout_m1;
  1081. #endif
  1082. return ret;
  1083. out_alloc_common:
  1084. vha_api_rm_dev(dev, vha);
  1085. vha_dbg_deinit(vha);
  1086. out_add_dev:
  1087. dev_set_drvdata(dev, NULL);
  1088. vha_deinit();
  1089. out_free_dev:
  1090. devm_kfree(dev, vha);
  1091. devm_kfree(dev, vha_common);
  1092. out_validate_params:
  1093. return ret;
  1094. }
  1095. static void vha_free_common(struct vha_dev *vha)
  1096. {
  1097. if (vha->fp_bufid) {
  1098. img_mem_free(drv.mem_ctx, vha->fp_bufid);
  1099. vha->fp_bufid = VHA_INVALID_ID;
  1100. }
  1101. }
  1102. void vha_rm_dev(struct device *dev)
  1103. {
  1104. struct vha_dev *vha;
  1105. struct vha_dev_common* vha_common;
  1106. int ret;
  1107. uint8_t id, pri;
  1108. vha_common = dev_get_drvdata(dev);
  1109. BUG_ON(vha_common == NULL);
  1110. vha = vha_common->vha_dev;
  1111. if (!vha) {
  1112. pr_err("%s: vha ptr is invalid!\n", __func__);
  1113. return;
  1114. }
  1115. if (dev != vha->dev) {
  1116. pr_err("%s: vha->dev is not properly initialised! (%p!=%p)\n", __func__, dev, vha->dev);
  1117. return;
  1118. }
  1119. flush_scheduled_work();
  1120. for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
  1121. cancel_delayed_work_sync(&vha->apm_dworks[id].dwork);
  1122. #if defined(VHA_SCF) && defined(CONFIG_HW_MULTICORE)
  1123. cancel_delayed_work_sync(&vha->swd_dwork);
  1124. #endif
  1125. #ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
  1126. #ifdef CONFIG_HW_MULTICORE
  1127. {
  1128. for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
  1129. cancel_delayed_work_sync(&vha->dummy_dworks[id].dummy_dwork);
  1130. }
  1131. #else
  1132. cancel_delayed_work_sync(&vha->dummy_dwork);
  1133. #endif
  1134. #endif
  1135. if (!pm_runtime_status_suspended(vha->dev))
  1136. pm_runtime_put_sync_suspend(vha->dev);
  1137. pm_runtime_dont_use_autosuspend(vha->dev);
  1138. pm_runtime_disable(vha->dev);
  1139. vha_free_common(vha);
  1140. #ifdef CONFIG_HW_MULTICORE
  1141. vha_dev_scheduler_deinit(vha);
  1142. #endif
  1143. while (!list_empty(&vha->heaps)) {
  1144. struct vha_heap *heap = list_first_entry(&vha->heaps, struct vha_heap, list);
  1145. list_del(&heap->list);
  1146. if(!heap->global) /* remove only device heaps */
  1147. img_mem_del_heap(heap->id);
  1148. kfree(heap);
  1149. }
  1150. ret = vha_api_rm_dev(dev, vha);
  1151. if (ret)
  1152. dev_err(dev, "%s: failed to remove UM node!\n", __func__);
  1153. list_del(&vha->sessions);
  1154. for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++)
  1155. list_del(&vha->sched_sessions[pri]);
  1156. list_del(&vha->list);
  1157. list_del(&vha->heaps);
  1158. BUG_ON(!drv.num_devs--);
  1159. sysfs_remove_group(&dev->kobj, &vha_attr_group);
  1160. vha_dbg_deinit(vha);
  1161. vha_pdump_deinit(&vha_common->pdump);
  1162. dev_set_drvdata(dev, NULL);
  1163. devm_kfree(dev, vha);
  1164. devm_kfree(dev, vha_common);
  1165. }
  1166. /* performs device self test operations */
  1167. int vha_dev_calibrate(struct device *dev, uint32_t cycles)
  1168. {
  1169. int ret = 0;
  1170. struct vha_dev *vha = vha_dev_get_drvdata(dev);
  1171. if (!vha) {
  1172. WARN_ON(1);
  1173. return -EFAULT;
  1174. }
  1175. mutex_lock(&vha->lock);
  1176. if (vha->do_calibration) {
  1177. vha->calibration_cycles = cycles;
  1178. dev_info(dev, "%s: Starting core frequency measurement (%d)...",
  1179. __func__, cycles);
  1180. ret = vha_dev_start(vha);
  1181. if (ret)
  1182. goto calib_err;
  1183. #if (defined(HW_AX2) || defined(CONFIG_HW_MULTICORE))
  1184. vha_cnn_start_calib(vha);
  1185. #endif
  1186. }
  1187. calib_err:
  1188. mutex_unlock(&vha->lock);
  1189. return ret;
  1190. }
  1191. /* map buffer into the device */
  1192. int vha_map_to_onchip(struct vha_session *session,
  1193. uint32_t buf_id, uint64_t virt_addr, uint32_t page_size,
  1194. unsigned int num_pages, uint32_t page_idxs[], uint32_t *mapid)
  1195. {
  1196. struct vha_dev *vha = session->vha;
  1197. struct vha_onchip_map *onchip_map = NULL;
  1198. struct vha_buffer *buf = NULL;
  1199. int map_id = *mapid;
  1200. int ret = 0;
  1201. int i = 0;
  1202. ret = mutex_lock_interruptible(&vha->lock);
  1203. if (ret)
  1204. return ret;
  1205. buf = vha_find_bufid(session, buf_id);
  1206. if (!buf) {
  1207. pr_err("%s: buffer id %d not found\n", __func__, buf_id);
  1208. ret = -EINVAL;
  1209. goto out_unlock;
  1210. }
  1211. if (map_id == 0) {
  1212. onchip_map = kzalloc(sizeof(struct vha_onchip_map), GFP_KERNEL);
  1213. if (!onchip_map) {
  1214. ret = -ENOMEM;
  1215. goto out_unlock;
  1216. }
  1217. map_id = idr_alloc(&session->onchip_maps, onchip_map,
  1218. MIN_ONCHIP_MAP, MAX_ONCHIP_MAP, GFP_KERNEL);
  1219. if (map_id < 0) {
  1220. pr_err("%s: idr_alloc failed\n", __func__);
  1221. ret = map_id;
  1222. goto alloc_id_failed;
  1223. }
  1224. ret = img_mmu_map(session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx,
  1225. session->mem_ctx, buf_id,
  1226. virt_addr, IMG_MMU_PTE_FLAG_NONE);
  1227. if (ret) {
  1228. dev_err(vha->dev, "%s: map failed!\n", __func__);
  1229. ret = -EFAULT;
  1230. goto mmu_map_failed;
  1231. }
  1232. onchip_map->devvirt = virt_addr;
  1233. onchip_map->mapid = map_id;
  1234. onchip_map->bufid = buf_id;
  1235. list_add(&onchip_map->list, &buf->onchip_maps);
  1236. *mapid = map_id;
  1237. } else {
  1238. onchip_map = idr_find(&session->onchip_maps, map_id);
  1239. if (!onchip_map) {
  1240. pr_err("%s: idr_find failed\n", __func__);
  1241. ret = -EINVAL;
  1242. goto out_unlock;
  1243. }
  1244. }
  1245. for (i = 0; i < num_pages; i++) {
  1246. ret = img_mmu_move_pg_to_cache(
  1247. session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx,
  1248. session->mem_ctx, buf_id,
  1249. onchip_map->devvirt, page_size, page_idxs[i]);
  1250. if (ret) {
  1251. dev_warn(vha->dev, "%s: moving a page to on chip ram failed!\n", __func__);
  1252. goto out_unlock;
  1253. }
  1254. }
  1255. dev_dbg(vha->dev, "%s: mapped buf %s (%u) to %#llx, num_pages: %d\n",
  1256. __func__, buf->name, buf_id, virt_addr, num_pages);
  1257. mutex_unlock(&vha->lock);
  1258. return 0;
  1259. mmu_map_failed:
  1260. idr_remove(&session->onchip_maps, map_id);
  1261. alloc_id_failed:
  1262. kfree(onchip_map);
  1263. out_unlock:
  1264. mutex_unlock(&vha->lock);
  1265. return ret;
  1266. }
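/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * vha_map_to_onchip() is used in two phases: a first call with *mapid == 0
 * creates the on-chip mapping and returns its id, later calls with that id
 * only move further pages into on-chip RAM. The helper name, buffer id,
 * device virtual address and page indices are hypothetical examples.
 */
static int __maybe_unused example_onchip_map(struct vha_session *session,
		uint32_t buf_id, uint64_t devvirt)
{
	uint32_t page_idxs[2] = { 0, 1 };
	uint32_t mapid = 0; /* 0 requests a brand new mapping */
	int ret;

	ret = vha_map_to_onchip(session, buf_id, devvirt, PAGE_SIZE,
			2, page_idxs, &mapid);
	if (ret)
		return ret;

	/* Move one more page using the map id returned above. */
	page_idxs[0] = 2;
	return vha_map_to_onchip(session, buf_id, devvirt, PAGE_SIZE,
			1, page_idxs, &mapid);
}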
  1267. /* map buffer into the device */
  1268. int vha_map_buffer(struct vha_session *session,
  1269. uint32_t buf_id, uint64_t virt_addr,
  1270. uint32_t map_flags)
  1271. {
  1272. struct vha_dev *vha = session->vha;
  1273. uint32_t flags = IMG_MMU_PTE_FLAG_NONE;
  1274. struct vha_buffer *buf = NULL;
  1275. int ret = 0;
  1276. ret = mutex_lock_interruptible(&vha->lock);
  1277. if (ret)
  1278. return ret;
  1279. if ((map_flags & (VHA_MAP_FLAG_READ_ONLY|VHA_MAP_FLAG_WRITE_ONLY)) ==
  1280. (VHA_MAP_FLAG_READ_ONLY|VHA_MAP_FLAG_WRITE_ONLY)) {
  1281. dev_err(vha->dev, "%s: invalid mapping flags combination: 0x%x\n",
  1282. __func__, map_flags);
  1283. ret = -EINVAL;
  1284. goto out_unlock;
  1285. }
  1286. /* Convert permission flags to internal definitions */
  1287. if (map_flags & VHA_MAP_FLAG_READ_ONLY)
  1288. flags |= IMG_MMU_PTE_FLAG_READ_ONLY;
  1289. /* Note: VHA_MAP_FLAG_WRITE_ONLY is not supported by the mmuv3 hw */
  1290. /* Direct 1:1 mappings */
  1291. if (vha->mmu_mode == VHA_MMU_DIRECT) {
  1292. uint64_t *phys = img_mem_get_page_array(session->mem_ctx,
  1293. buf_id);
  1294. WARN_ON(!phys);
  1295. /* Override virtual address,
  1296. * only applicable for physically contiguous memory regions */
  1297. if (phys && phys[0]) {
  1298. virt_addr = phys[0];
  1299. dev_dbg(vha->dev,
  1300. "%s: using direct mapping!\n",
  1301. __func__);
  1302. } else {
  1303. dev_err(vha->dev,
  1304. "%s: not contiguous memory!\n",
  1305. __func__);
  1306. }
  1307. }
buf = vha_find_bufid(session, buf_id);
if (buf == NULL) {
dev_err(vha->dev, "%s: buffer id %d not found\n", __func__, buf_id);
ret = -EINVAL;
goto out_unlock;
}
#ifdef CONFIG_HW_MULTICORE
if (buf->attr & IMG_MEM_ATTR_OCM) {
  1311. uint64_t *phys = img_mem_get_page_array(session->mem_ctx,
  1312. buf_id);
  1313. /* Virtual == physical */
  1314. buf->devvirt = phys[0];
  1315. dev_dbg(vha->dev,
  1316. "%s: buf %s (%u), is OCM buffer, no MMU mapping needed!\n",
  1317. __func__, buf->name, buf_id);
  1318. goto out_unlock;
  1319. }
  1320. #endif
  1321. /* force MMU fault after N buffer map operations */
  1322. if (vha->mmu_no_map_count != 0) {
  1323. int ctx_id;
  1324. if (map_flags & VHA_MAP_FLAG_MODEL) {
  1325. ctx_id = VHA_MMU_REQ_MODEL_CTXID;
  1326. buf->req_type = VHA_REQ_MODEL;
  1327. } else if (map_flags & VHA_MAP_FLAG_IO) {
  1328. ctx_id = VHA_MMU_REQ_IO_CTXID;
  1329. buf->req_type = VHA_REQ_IO;
  1330. } else {
  1331. WARN_ONCE(1, "No requestor flags!");
  1332. ctx_id = VHA_MMU_REQ_IO_CTXID;
  1333. buf->req_type = VHA_REQ_IO;
  1334. }
  1335. ret = img_mmu_map(session->mmu_ctxs[ctx_id].ctx,
  1336. session->mem_ctx, buf_id, virt_addr, flags);
if (ret) {
  1338. dev_err(vha->dev, "%s: map failed!\n", __func__);
  1339. goto out_unlock;
  1340. }
  1341. if (vha->mmu_no_map_count >= 0)
  1342. --vha->mmu_no_map_count;
  1343. } else
  1344. dev_info(vha->dev, "Bringup test: MMU no map count = %d\n",
  1345. vha->mmu_no_map_count);
  1346. buf->devvirt = virt_addr;
  1347. dev_dbg(vha->dev, "%s: mapped buf %s (%u) to %#llx, flags: 0x%x\n",
  1348. __func__, buf->name, buf_id, virt_addr, map_flags);
  1349. out_unlock:
  1350. mutex_unlock(&vha->lock);
  1351. return ret;
  1352. }
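/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * A simple map/unmap round trip for a read-only model buffer; the device
 * virtual address is chosen by the caller. The helper name is hypothetical
 * and vha_unmap_buffer() is assumed to be declared in the driver's common
 * header, as it is defined further below in this file.
 */
static int __maybe_unused example_map_round_trip(struct vha_session *session,
		uint32_t buf_id, uint64_t devvirt)
{
	int ret = vha_map_buffer(session, buf_id, devvirt,
			VHA_MAP_FLAG_READ_ONLY | VHA_MAP_FLAG_MODEL);
	if (ret)
		return ret;

	/* ... queue work that references the buffer here ... */

	return vha_unmap_buffer(session, buf_id);
}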
  1353. /* unmap buffer from the device */
  1354. int vha_unmap_buffer(struct vha_session *session,
  1355. uint32_t buf_id)
  1356. {
  1357. struct vha_dev *vha = session->vha;
  1358. struct vha_buffer *buf = NULL;
  1359. int ret = 0;
  1360. int ctx_id;
  1361. ret = mutex_lock_interruptible(&vha->lock);
  1362. if (ret)
  1363. return ret;
buf = vha_find_bufid(session, buf_id);
if (buf == NULL) {
dev_err(vha->dev, "%s: buffer id %d not found\n", __func__, buf_id);
ret = -EINVAL;
goto out_unlock;
}
#ifdef CONFIG_HW_MULTICORE
if (buf->attr & IMG_MEM_ATTR_OCM) {
  1367. dev_dbg(vha->dev,
  1368. "%s: buf %s (%u) is OCM buffer, no MMU unmapping needed!\n",
  1369. __func__, buf->name, buf_id);
  1370. buf->devvirt = ~0ULL;
  1371. goto out_unlock;
  1372. }
  1373. #endif
  1374. if (buf->req_type == VHA_REQ_MODEL)
  1375. ctx_id = VHA_MMU_REQ_MODEL_CTXID;
  1376. else
  1377. ctx_id = VHA_MMU_REQ_IO_CTXID;
  1378. ret = img_mmu_unmap(session->mmu_ctxs[ctx_id].ctx,
  1379. session->mem_ctx, buf_id);
if (ret) {
  1381. dev_err(vha->dev, "%s: unmap failed!\n", __func__);
  1382. goto out_unlock;
  1383. }
  1384. buf->devvirt = 0ULL;
  1385. vha_clean_onchip_maps(session, buf);
  1386. dev_dbg(vha->dev, "%s: unmapped buf %s(%u)\n",
  1387. __func__, buf->name, buf_id);
  1388. out_unlock:
  1389. mutex_unlock(&vha->lock);
  1390. return ret;
  1391. }
  1392. /*
  1393. * return either dev virtual address or physical address of buffer
  1394. * phys address only applicable if contiguous memory
  1395. * virtual address only if MMU enabled
  1396. */
  1397. uint64_t vha_buf_addr(struct vha_session *session, struct vha_buffer *buf)
  1398. {
  1399. struct vha_dev *vha = session->vha;
  1400. if (vha->mmu_mode == VHA_MMU_DISABLED) {
  1401. uint64_t *phys;
  1402. /* no-MMU mode */
  1403. if (vha->hw_props.dummy_dev)
  1404. return 0; /* no-MMU: dummy hardware */
  1405. phys = img_mem_get_page_array(session->mem_ctx, buf->id);
  1406. if (phys)
  1407. /*
  1408. * no-MMU: carveout memory
  1409. * Get the address that dev expects.
  1410. */
  1411. return img_mem_get_dev_addr(session->mem_ctx,
  1412. buf->id, phys[0]);
  1413. dev_err(vha->dev, "%s: ERROR: buffer %x is not contiguous\n",
  1414. __func__, buf->id);
  1415. return 0; /* no-MMU: system memory */
  1416. }
  1417. /* mmu mode */
  1418. if (buf == NULL)
  1419. return 0; /* error */
  1420. return buf->devvirt; /* MMU mode: virt address */
  1421. }
  1422. struct vha_buffer *vha_find_bufid(const struct vha_session *session, uint32_t buf_id)
  1423. {
  1424. struct vha_buffer *buf;
  1425. list_for_each_entry(buf, &session->bufs, list) {
  1426. if (buf_id == buf->id)
  1427. return buf;
  1428. }
  1429. return NULL;
  1430. }
  1431. struct vha_buffer *vha_find_bufvaddr(const struct vha_session *session,
  1432. uint64_t virt_addr)
  1433. {
  1434. struct vha_buffer *buf;
  1435. list_for_each_entry(buf, &session->bufs, list) {
  1436. /* check if virtual address belongs to specific buffer */
  1437. if (virt_addr >= buf->devvirt &&
  1438. virt_addr < (buf->devvirt + buf->size))
  1439. return buf;
  1440. }
  1441. return NULL;
  1442. }
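/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * vha_find_bufvaddr() resolves a device virtual address to the buffer that
 * contains it; the offset inside that buffer then follows from buf->devvirt.
 * The helper name is hypothetical.
 */
static __maybe_unused struct vha_buffer *example_lookup_addr(
		const struct vha_session *session, uint64_t virt_addr,
		uint64_t *offset)
{
	struct vha_buffer *buf = vha_find_bufvaddr(session, virt_addr);

	if (buf && offset)
		*offset = virt_addr - buf->devvirt;
	return buf;
}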
  1443. /* when a buffer is allocated or imported, it is added to session.bufs */
  1444. int vha_add_buf(struct vha_session *session,
  1445. uint32_t buf_id, size_t size, const char *name, enum img_mem_attr attr)
  1446. {
  1447. struct vha_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  1448. struct vha_dev *vha = session->vha;
  1449. int ret = 0;
  1450. dev_dbg(vha->dev, "%s buf '%.*s' id:%d\n", __func__,
  1451. (int)(sizeof(buf->name))-1, name, buf_id);
  1452. if (buf == NULL)
  1453. return -ENOMEM;
  1454. buf->id = buf_id;
  1455. buf->size = size;
  1456. strncpy(buf->name, name, sizeof(buf->name)-1);
  1457. buf->attr = attr;
  1458. buf->status = VHA_BUF_UNFILLED;
  1459. buf->session = session;
  1460. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1461. buf->sync_info.in_sync_fd = VHA_SYNC_NONE;
  1462. #endif
  1463. list_add(&buf->list, &session->bufs);
  1464. INIT_LIST_HEAD(&buf->onchip_maps);
  1465. if (!(attr & IMG_MEM_ATTR_OCM))
  1466. img_pdump_printf("-- <-- New buffer name: %s\n", buf->name);
  1467. if (zero_buffers && !(buf->attr & IMG_MEM_ATTR_NOMAP)) {
  1468. ret = img_mem_map_km(session->mem_ctx, buf_id);
  1469. if (ret) {
  1470. dev_err(session->vha->dev, "failed to map buff %x to km: %d\n",
  1471. buf_id, ret);
  1472. ret = -EFAULT;
  1473. goto out_err;
  1474. }
  1475. buf->kptr = img_mem_get_kptr(session->mem_ctx, buf_id);
  1476. {
  1477. void *ptr = buf->kptr;
  1478. int max_chunk = 1 * 1024 * 1024;
  1479. while (size) {
  1480. int chunk_size = size > max_chunk ?
  1481. max_chunk : size;
  1482. pr_debug("memset buf chunk %d!\n", chunk_size);
  1483. memset(ptr, 0, chunk_size);
  1484. ptr += chunk_size;
  1485. size -= chunk_size;
  1486. schedule();
  1487. }
  1488. }
  1489. ret = img_mem_unmap_km(session->mem_ctx, buf->id);
  1490. if (ret) {
  1491. dev_err(session->vha->dev,
  1492. "%s: failed to unmap buff %x from km: %d\n",
  1493. __func__, buf->id, ret);
  1494. ret = -EFAULT;
  1495. goto out_err;
  1496. }
  1497. buf->kptr = NULL;
  1498. }
  1499. return 0;
  1500. out_err:
  1501. list_del(&buf->list);
  1502. kfree(buf);
  1503. return ret;
  1504. }
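/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Typical bookkeeping around an allocated/imported buffer: register it with
 * the session, mark it as filled by software once user data has landed, and
 * remove it again. The helper name and the "example" buffer name are
 * hypothetical; vha_set_buf_status() and vha_rm_buf() are defined further
 * below and assumed to be declared in the driver's common header.
 */
static int __maybe_unused example_buf_lifecycle(struct vha_session *session,
		uint32_t buf_id, size_t size, enum img_mem_attr attr)
{
	int ret = vha_add_buf(session, buf_id, size, "example", attr);

	if (ret)
		return ret;

	/* No input fence and no output fence to signal in this sketch. */
	ret = vha_set_buf_status(session, buf_id, VHA_BUF_FILLED_BY_SW,
			VHA_SYNC_NONE, false);
	if (ret) {
		vha_rm_buf(session, buf_id);
		return ret;
	}

	return vha_rm_buf(session, buf_id);
}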
  1505. /* remove buffer from the session */
  1506. int vha_rm_buf(struct vha_session *session, uint32_t buf_id)
  1507. {
  1508. struct vha_buffer *buf = vha_find_bufid(session, buf_id);
  1509. dev_dbg(session->vha->dev, "%s buf_id:%d\n", __func__, buf_id);
  1510. if (buf == NULL) {
  1511. dev_err(session->vha->dev, "%s: could not find buf %x\n",
  1512. __func__, buf_id);
  1513. return -EINVAL;
  1514. }
  1515. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1516. vha_rm_buf_fence(session, buf);
  1517. #endif
  1518. vha_clean_onchip_maps(session, buf);
  1519. list_del(&buf->list);
  1520. kfree(buf);
  1521. return 0;
  1522. }
  1523. /* process the cmd if everything is ready */
  1524. enum do_cmd_status vha_do_cmd(struct vha_cmd *cmd)
  1525. {
  1526. struct vha_session *session = cmd->session;
  1527. struct vha_dev* vha = session->vha;
  1528. /* already submitted, wait until processed */
  1529. if (cmd->in_hw)
  1530. return CMD_IN_HW;
  1531. /* check all input buffers are filled and ready to go */
  1532. if (vha_is_waiting_for_inputs(session, cmd))
  1533. return CMD_WAIT_INBUFS;
  1534. #if !defined(CONFIG_VHA_DUMMY) && !defined(CONFIG_HW_MULTICORE)
  1535. if (!session->vha->is_ready)
  1536. return CMD_HW_BUSY;
  1537. #endif
  1538. /* check hw availability (if needed) */
  1539. #ifdef CONFIG_HW_MULTICORE
  1540. /* Attempt to schedule command on available cores. */
  1541. if (vha_dev_schedule_cmd(session->vha, cmd) != 0)
  1542. #else
  1543. /* Check if the core's queue is full. */
  1544. if (vha_is_queue_full(session->vha, cmd))
  1545. #endif
  1546. return CMD_HW_BUSY;
  1547. if (cmd->user_cmd.cmd_type == VHA_CMD_CNN_SUBMIT &&
  1548. !session->vha->stats.cnn_kicks)
  1549. img_pdump_printf("-- ALLOC_END\n");
  1550. /* at this point we should be able to process the cmd */
  1551. if (vha_do_cnn_cmd(cmd) != 0)
  1552. return CMD_DONE;
  1553. return CMD_OK;
  1554. }
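/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * One possible way for a scheduling loop to react to the status codes
 * returned by vha_do_cmd(): keep walking the queue unless the hardware
 * reports it is busy. The helper name is hypothetical and this is not the
 * driver's actual scheduler logic.
 */
static bool __maybe_unused example_continue_scheduling(enum do_cmd_status status)
{
	switch (status) {
	case CMD_OK:
	case CMD_DONE:
	case CMD_WAIT_INBUFS:
	case CMD_IN_HW:
		return true;  /* nothing blocking, try the next command */
	case CMD_HW_BUSY:
	default:
		return false; /* no room in the hardware, stop for now */
	}
}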
  1555. /* check if there is any work to be done */
  1556. static void cmd_worker(struct work_struct *work)
  1557. {
  1558. struct vha_dev *vha = container_of(work, struct vha_dev, worker);
  1559. dev_dbg(vha->dev, "%s\n", __func__);
  1560. mutex_lock(&vha->lock);
  1561. #ifdef CONFIG_FAULT_INJECTION
  1562. if (task_pid_nr(current) != vha->irq_bh_pid) {
  1563. if (vha->fault_inject & VHA_FI_CMD_WORKER)
  1564. current->make_it_fail = true;
  1565. else
  1566. current->make_it_fail = false;
  1567. }
  1568. #endif
  1569. if (vha->do_calibration) {
  1570. /* Postpone any worker tasks. */
  1571. dev_dbg(vha->dev, "%s: Postpone worker task!\n", __func__);
  1572. goto exit;
  1573. }
  1574. /* Execute the main scheduling loop. */
  1575. vha_scheduler_loop(vha);
  1576. exit:
  1577. #ifdef CONFIG_FAULT_INJECTION
  1578. if (task_pid_nr(current) != vha->irq_bh_pid) {
  1579. if (vha->fault_inject & VHA_FI_CMD_WORKER)
  1580. current->make_it_fail = false;
  1581. }
  1582. #endif
  1583. mutex_unlock(&vha->lock);
  1584. }
/* wrapper for scheduling the command worker task */
  1586. void vha_chk_cmd_queues(struct vha_dev *vha, bool threaded)
  1587. {
  1588. dev_dbg(vha->dev, "%s threaded:%u\n", __func__, threaded);
  1589. if (threaded) {
  1590. /* If work has been already scheduled from other context,
  1591. * the below call does nothing (returns false).
  1592. * However the worker is only used as command data pump,
  1593. * so it is not necessary to do any kind of rescheduling,
  1594. * as it will be executed anyway!
  1595. */
  1596. schedule_work(&vha->worker); /* call asynchronously */
  1597. } else {
  1598. /* Direct calls must be always invoked
  1599. * with vha_dev.lock == locked
  1600. */
  1601. BUG_ON(!mutex_is_locked(&vha->lock));
  1602. mutex_unlock(&vha->lock);
  1603. cmd_worker(&vha->worker); /* call synchronously */
  1604. mutex_lock(&vha->lock);
  1605. }
  1606. }
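/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * Calling convention for vha_chk_cmd_queues(): a threaded poke may be issued
 * from any context, while a direct (synchronous) call must be made with
 * vha->lock already held, as the BUG_ON() above enforces. The helper name is
 * hypothetical.
 */
static void __maybe_unused example_poke_queues(struct vha_dev *vha)
{
	/* Fire-and-forget, e.g. from an interrupt bottom half. */
	vha_chk_cmd_queues(vha, true);

	/* Synchronous pump while already holding the device lock. */
	mutex_lock(&vha->lock);
	vha_chk_cmd_queues(vha, false);
	mutex_unlock(&vha->lock);
}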
  1607. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1608. /* input buffer sync callback */
  1609. static void _vha_in_buf_sync_cb(struct dma_fence *fence,
  1610. struct dma_fence_cb *cb)
  1611. {
  1612. struct vha_buffer *buf = container_of(cb, struct vha_buffer, sync_info.in_sync_cb);
  1613. vha_set_buf_status(buf->session, buf->id, VHA_BUF_FILLED_BY_SW,
  1614. VHA_SYNC_NONE, false);
  1615. fput(buf->sync_info.in_sync_file);
  1616. dma_fence_put(fence);
  1617. memset(&buf->sync_info, 0, sizeof(struct vha_buf_sync_info));
  1618. buf->sync_info.in_sync_fd = VHA_SYNC_NONE;
  1619. }
  1620. #endif
  1621. /* set buffer status per user request: either filled or unfilled */
  1622. int vha_set_buf_status(struct vha_session *session, uint32_t buf_id,
  1623. enum vha_buf_status status, int in_sync_fd, bool out_sync_sig)
  1624. {
  1625. struct vha_buffer *buf = vha_find_bufid(session, buf_id);
  1626. if (buf == NULL) {
  1627. dev_err(session->vha->dev, "%s: invalid buf id:%d\n",
  1628. __func__, buf_id);
  1629. return -EINVAL;
  1630. }
  1631. dev_dbg(session->vha->dev, "%s: id:%d curr:%d new:%d sig:%d\n",
  1632. __func__, buf->id, buf->status, status, out_sync_sig);
/* If the buffer has been filled by HW, mark that it may need
* invalidation; not always, as it can be the input of the next
* hw segment and may not even be mapped by the UM */
  1637. if (buf->status != VHA_BUF_FILLED_BY_HW &&
  1638. status == VHA_BUF_FILLED_BY_HW) {
  1639. buf->inval = true;
  1640. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1641. buf->status = status;
  1642. #endif
  1643. }
  1644. /* If buffer has been filled by SW,
  1645. * mark that it needs flushing */
  1646. if (buf->status == VHA_BUF_UNFILLED &&
  1647. status == VHA_BUF_FILLED_BY_SW) {
  1648. buf->flush = true;
  1649. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1650. if (in_sync_fd > 0) {
  1651. if (buf->sync_info.in_sync_fd < 0) {
  1652. int ret = 0;
  1653. struct file *sync_file;
  1654. struct dma_fence *fence;
  1655. sync_file = fget(in_sync_fd);
  1656. if (sync_file == NULL) {
  1657. dev_err(session->vha->dev, "%s: could not get file for fd=%d and buf %d\n",
  1658. __func__, in_sync_fd, buf_id);
  1659. return -EINVAL;
  1660. }
  1661. fence = sync_file_get_fence(in_sync_fd);
  1662. if (!fence) {
  1663. fput(sync_file);
  1664. dev_err(session->vha->dev, "%s: could not get fence for fd=%d and buf %d\n",
  1665. __func__, in_sync_fd, buf_id);
  1666. return -EINVAL;
  1667. }
  1668. ret = dma_fence_add_callback(fence, &buf->sync_info.in_sync_cb,
  1669. _vha_in_buf_sync_cb);
  1670. if (ret) {
  1671. if (dma_fence_is_signaled(fence)) {
  1672. dma_fence_put(fence);
  1673. buf->status = status;
  1674. } else
  1675. dev_err(session->vha->dev, "%s: could not set cb for fd=%d and buf %x\n",
  1676. __func__, in_sync_fd, buf_id);
  1677. fput(sync_file);
  1678. return ret;
  1679. }
  1680. buf->sync_info.in_fence = fence;
  1681. buf->sync_info.in_sync_file = sync_file;
  1682. buf->sync_info.in_sync_fd = in_sync_fd;
  1683. } else if (in_sync_fd != buf->sync_info.in_sync_fd) {
  1684. dev_err(session->vha->dev, "%s: buf %d has already assigned sync file fd=%d\n",
  1685. __func__, buf_id, in_sync_fd);
  1686. return -EINVAL;
  1687. }
  1688. }
  1689. else {
  1690. if (out_sync_sig)
  1691. img_mem_signal_fence(session->mem_ctx, buf->id);
  1692. buf->status = status;
  1693. }
  1694. #endif
  1695. }
/* If the buffer has been filled by SW after previously being
* filled by the HW, flush it too */
  1698. if (buf->status == VHA_BUF_FILLED_BY_HW &&
  1699. status == VHA_BUF_FILLED_BY_SW) {
  1700. buf->flush = true;
  1701. }
  1702. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1703. if (status != VHA_BUF_FILLED_BY_SW)
  1704. #endif
  1705. buf->status = status;
  1706. /* Poke the command queue only when filled by SW */
  1707. if (status == VHA_BUF_FILLED_BY_SW) {
  1708. /* We are already locked!
  1709. * Run in separate thread
  1710. */
  1711. vha_chk_cmd_queues(session->vha, true);
  1712. }
  1713. return 0;
  1714. }
  1715. bool vha_buf_needs_inval(struct vha_session *session, uint32_t buf_id)
  1716. {
  1717. struct vha_buffer *buf = vha_find_bufid(session, buf_id);
  1718. bool inval;
  1719. if (buf == NULL) {
  1720. dev_err(session->vha->dev, "%s: invalid buf id:%d\n",
  1721. __func__, buf_id);
  1722. return false;
  1723. }
  1724. /* Buffer that has been allocated as HW access only
  1725. * does not need invalidation */
  1726. if (buf->attr & (IMG_MEM_ATTR_NOMAP|IMG_MEM_ATTR_NOSYNC)) {
  1727. dev_dbg(session->vha->dev, "%s: id:%d (skip)\n",
  1728. __func__, buf->id);
  1729. return false;
  1730. }
  1731. dev_dbg(session->vha->dev, "%s: id:%d (%d)\n",
  1732. __func__, buf->id, buf->inval);
  1733. inval = buf->inval;
  1734. buf->inval = false;
  1735. return inval;
  1736. }
  1737. bool vha_buf_needs_flush(struct vha_session *session, uint32_t buf_id)
  1738. {
  1739. struct vha_buffer *buf = vha_find_bufid(session, buf_id);
  1740. bool flush;
  1741. if (buf == NULL) {
  1742. dev_err(session->vha->dev, "%s: invalid buf id:%d\n",
  1743. __func__, buf_id);
  1744. return false;
  1745. }
  1746. dev_dbg(session->vha->dev, "%s: id:%d (%d)\n",
  1747. __func__, buf->id, buf->flush);
  1748. flush = buf->flush;
  1749. buf->flush = false;
  1750. return flush;
  1751. }
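/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * vha_buf_needs_flush()/vha_buf_needs_inval() are consume-once queries: they
 * report the pending cache maintenance and clear the flag. A submission path
 * would typically check them per buffer and then perform the actual cache
 * operation through the memory manager (omitted here). The helper name is
 * hypothetical.
 */
static void __maybe_unused example_check_cache_ops(struct vha_session *session,
		const uint32_t *buf_ids, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (vha_buf_needs_flush(session, buf_ids[i]))
			pr_debug("buf %u: CPU cache flush needed\n", buf_ids[i]);
		if (vha_buf_needs_inval(session, buf_ids[i]))
			pr_debug("buf %u: CPU cache invalidate needed\n", buf_ids[i]);
	}
}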
  1752. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1753. struct vha_sync_cb_data {
  1754. struct dma_fence_cb cb;
  1755. union {
  1756. struct sync_file *sync_file;
  1757. struct file *file;
  1758. };
  1759. };
  1760. static void _vha_out_sync_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
  1761. {
  1762. struct vha_sync_cb_data *cb_data =
  1763. container_of(cb, struct vha_sync_cb_data, cb);
  1764. fput(cb_data->sync_file->file);
  1765. dma_fence_put(fence);
  1766. kfree(cb_data);
  1767. }
  1768. int vha_create_output_sync(struct vha_session *session, uint32_t buf_id_count,
  1769. uint32_t *buf_ids)
  1770. {
  1771. int i;
  1772. int ret = -ENOMEM;
  1773. int sync_fd = VHA_SYNC_NONE;
  1774. struct device *dev = session->vha->dev;
  1775. struct dma_fence_array *fence_array = NULL;
  1776. struct vha_sync_cb_data *cb_data = NULL;
struct dma_fence **fences =
kmalloc_array(buf_id_count, sizeof(struct dma_fence *),
GFP_KERNEL);
  1780. if (fences == NULL) {
  1781. dev_err(dev, "%s: failed allocating fence container for %u buffers\n",
  1782. __func__, buf_id_count);
  1783. return -ENOMEM;
  1784. }
  1785. cb_data = kzalloc(sizeof(struct vha_sync_cb_data), GFP_KERNEL);
  1786. if (cb_data == NULL) {
  1787. dev_err(dev, "%s: failed allocating fence callback for %u buffers\n",
  1788. __func__, buf_id_count);
  1789. kfree(fences);
  1790. return -ENOMEM;
  1791. }
  1792. for (i = 0; i < buf_id_count; i++) {
  1793. fences[i] = img_mem_add_fence(session->mem_ctx, buf_ids[i]);
  1794. if (!fences[i]) {
  1795. dev_err(dev, "%s: failed allocating fence for buffer id=%u\n",
  1796. __func__, buf_ids[i]);
  1797. goto err_fences;
  1798. }
  1799. }
  1800. fence_array = dma_fence_array_create(buf_id_count, fences,
  1801. dma_fence_context_alloc(1), 1, false);
  1802. if (fence_array == NULL) {
  1803. dev_err(dev, "%s: failed allocating fence array for %u buffers\n",
  1804. __func__, buf_id_count);
  1805. goto err_fences;
  1806. }
  1807. cb_data->sync_file = sync_file_create(&fence_array->base);
  1808. if (cb_data->sync_file == NULL) {
  1809. dev_err(dev, "%s: failed creating sync file for %u buffers\n",
  1810. __func__, buf_id_count);
  1811. goto error_sf;
  1812. }
  1813. sync_fd = get_unused_fd_flags(O_CLOEXEC);
  1814. if (sync_fd < 0) {
  1815. dev_err(dev, "%s: failed creating file descriptor for %u buffers\n",
  1816. __func__, buf_id_count);
  1817. ret = sync_fd;
  1818. goto error_fd;
  1819. }
  1820. ret = dma_fence_add_callback(&fence_array->base, &cb_data->cb,
  1821. _vha_out_sync_cb);
  1822. if (ret < 0) {
  1823. dev_err(dev, "%s: failed adding callback file descriptor for %u buffers\n",
  1824. __func__, buf_id_count);
  1825. goto error_fd;
  1826. }
  1827. fd_install(sync_fd, cb_data->sync_file->file);
  1828. fget(sync_fd);
  1829. return sync_fd;
  1830. error_fd:
  1831. fput(cb_data->sync_file->file);
  1832. dma_fence_put(&fence_array->base);
  1833. error_sf:
  1834. dma_fence_put(&fence_array->base);
  1835. err_fences:
  1836. i--;
  1837. for (; i >= 0; i--) {
  1838. img_mem_remove_fence(session->mem_ctx, buf_ids[i]);
  1839. }
  1840. kfree(cb_data);
  1841. return ret;
  1842. }
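/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * vha_create_output_sync() returns a ready-to-use file descriptor (or a
 * negative errno). An ioctl handler would typically just hand that fd back
 * to user space, where it can be waited on with poll()/epoll like any other
 * sync_file descriptor. The helper name is hypothetical.
 */
static int __maybe_unused example_get_output_sync_fd(struct vha_session *session,
		uint32_t *buf_ids, uint32_t buf_count)
{
	int fd = vha_create_output_sync(session, buf_count, buf_ids);

	if (fd < 0)
		pr_debug("no output sync fd created: %d\n", fd);
	return fd;
}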
  1843. /* input sync callback */
  1844. static void _vha_in_sync_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
  1845. {
  1846. struct vha_sync_cb_data *cb_data =
  1847. container_of(cb, struct vha_sync_cb_data, cb);
  1848. fput(cb_data->file);
  1849. dma_fence_put(fence);
  1850. kfree(cb_data);
  1851. }
  1852. /* merged input sync callback */
  1853. static void _vha_in_merged_sync_cb(struct dma_fence *fence,
  1854. struct dma_fence_cb *cb)
  1855. {
  1856. struct vha_sync_cb_data *cb_data =
  1857. container_of(cb, struct vha_sync_cb_data, cb);
  1858. fput(cb_data->sync_file->file);
  1859. dma_fence_put(fence);
  1860. }
  1861. int vha_merge_input_syncs(struct vha_session *session, uint32_t in_sync_fd_count,
  1862. int *in_sync_fds)
  1863. {
  1864. struct device *dev = session->vha->dev;
  1865. int i, actual_count = 0;
  1866. int ret = -ENOMEM;
  1867. int sync_fd = VHA_SYNC_NONE;
  1868. struct dma_fence_array *fence_array = NULL;
  1869. struct vha_sync_cb_data *cb_data = NULL;
  1870. struct vha_sync_cb_data *in_sync_cbs = NULL;
  1871. struct dma_fence **fences;
  1872. void *dma_fence_mem;
  1873. struct file *f;
  1874. /* Special cases. */
  1875. if (in_sync_fd_count == 0) {
  1876. dev_err(dev, "%s: requested 0 sync_fds to merge\n", __func__);
  1877. return -EINVAL;
  1878. } else if (in_sync_fd_count == 1) {
  1879. struct file *f;
  1880. struct dma_fence *fence;
  1881. f = fget(in_sync_fds[0]);
  1882. if (f == NULL) {
  1883. dev_err(dev, "%s: could not get file for input sync fd=%d\n",
  1884. __func__, in_sync_fds[0]);
  1885. return -EINVAL;
  1886. }
  1887. fence = sync_file_get_fence(in_sync_fds[0]);
  1888. if (!fence) {
  1889. fput(f);
  1890. dev_err(dev, "%s: could not get fence for input sync fd=%d\n",
  1891. __func__, in_sync_fds[0]);
  1892. return -EINVAL;
  1893. }
  1894. cb_data = kmalloc(sizeof(struct vha_sync_cb_data), GFP_KERNEL);
  1895. if (cb_data == NULL) {
  1896. fput(f);
  1897. dma_fence_put(fence);
  1898. dev_err(dev, "%s: failed allocating callback data for input sync fd=%d\n",
  1899. __func__, in_sync_fds[0]);
  1900. return -ENOMEM;
  1901. }
  1902. if (dma_fence_add_callback(fence, &cb_data->cb, _vha_in_sync_cb)) {
  1903. if (dma_fence_is_signaled(fence)) {
  1904. dev_warn(dev, "%s: input sync fd=%d already signalled\n",
  1905. __func__, in_sync_fds[0]);
  1906. ret = -EINVAL;
  1907. } else {
  1908. dev_err(dev, "%s: could not add fence callback for input sync fd=%d\n",
  1909. __func__, in_sync_fds[0]);
  1910. ret = -EFAULT;
  1911. }
  1912. fput(f);
  1913. dma_fence_put(fence);
  1914. kfree(cb_data);
  1915. return ret;
  1916. }
  1917. cb_data->file = f;
  1918. return in_sync_fds[0];
  1919. }
dma_fence_mem =
kmalloc((sizeof(struct dma_fence*) + sizeof(struct vha_sync_cb_data)) *
in_sync_fd_count + sizeof(struct vha_sync_cb_data), GFP_KERNEL);
  1924. if (dma_fence_mem == NULL) {
  1925. dev_err(dev, "%s: failed allocating fence container for %u buffers\n",
  1926. __func__, in_sync_fd_count);
  1927. return -ENOMEM;
  1928. }
  1929. fences = (struct dma_fence**)dma_fence_mem;
  1930. in_sync_cbs = (struct vha_sync_cb_data *)(dma_fence_mem +
  1931. sizeof(struct dma_fence*) * in_sync_fd_count);
  1932. cb_data = (struct vha_sync_cb_data *)(dma_fence_mem +
  1933. (sizeof(struct dma_fence*) + sizeof(struct vha_sync_cb_data)) *
  1934. in_sync_fd_count);
  1935. for (i = 0; i < in_sync_fd_count; i++) {
  1936. struct dma_fence *fence;
  1937. f = fget(in_sync_fds[i]);
  1938. if (f == NULL) {
  1939. dev_warn(dev, "%s: could not get file for fd=%d; will not use it\n",
  1940. __func__, in_sync_fds[i]);
  1941. continue;
  1942. }
  1943. fence = sync_file_get_fence(in_sync_fds[i]);
  1944. if (!fence) {
  1945. fput(f);
  1946. dev_warn(dev, "%s: could not get fence for fd=%d; will not use it\n",
  1947. __func__, in_sync_fds[i]);
  1948. continue;
  1949. }
  1950. if (dma_fence_add_callback(fence, &in_sync_cbs[actual_count].cb,
  1951. _vha_in_sync_cb)) {
  1952. if (dma_fence_is_signaled(fence)) {
  1953. dev_warn(dev, "%s: input sync fd=%d already signalled\n",
  1954. __func__, in_sync_fds[i]);
  1955. } else {
  1956. dev_err(dev, "%s: could not add fence callback for input sync fd=%d;"
  1957. " will not use it\n", __func__, in_sync_fds[i]);
  1958. }
  1959. fput(f);
  1960. dma_fence_put(fence);
  1961. continue;
  1962. }
  1963. dma_fence_get(fence); /* should be freed in dma_fence_array_release() */
  1964. in_sync_cbs[actual_count].file = f;
  1965. fences[actual_count] = fence;
  1966. actual_count++;
  1967. }
  1968. if (actual_count == 0) {
  1969. dev_err(dev, "%s: failed merging input fences\n", __func__);
  1970. kfree(dma_fence_mem);
  1971. return -EINVAL;
  1972. }
  1973. fence_array = dma_fence_array_create(actual_count, fences,
  1974. dma_fence_context_alloc(1), 1, false);
  1975. if (fence_array == NULL) {
  1976. dev_err(dev, "%s: failed allocating fence array for %u buffers\n",
  1977. __func__, in_sync_fd_count);
  1978. kfree(dma_fence_mem);
  1979. return -ENOMEM;
  1980. }
  1981. cb_data->sync_file = sync_file_create(&fence_array->base);
  1982. if (cb_data->sync_file == NULL) {
  1983. dev_err(dev, "%s: failed creating sync file for %u buffers\n",
  1984. __func__, in_sync_fd_count);
  1985. goto error_sf;
  1986. }
  1987. sync_fd = get_unused_fd_flags(O_CLOEXEC);
  1988. if (sync_fd < 0) {
  1989. dev_err(dev, "%s: failed creating file descriptor for %u buffers\n",
  1990. __func__, in_sync_fd_count);
  1991. ret = sync_fd;
  1992. goto error_fd;
  1993. }
  1994. ret = dma_fence_add_callback(&fence_array->base, &cb_data->cb,
  1995. _vha_in_merged_sync_cb);
  1996. if (ret < 0) {
  1997. dev_err(dev, "%s: failed adding callback file descriptor for %u buffers\n",
  1998. __func__, in_sync_fd_count);
  1999. goto error_fd;
  2000. }
  2001. fd_install(sync_fd, cb_data->sync_file->file);
  2002. fget(sync_fd);
  2003. return sync_fd;
  2004. error_fd:
  2005. fput(cb_data->sync_file->file);
  2006. dma_fence_put(&fence_array->base);
  2007. error_sf:
  2008. for (i = 0; i < actual_count; i++) {
fput(in_sync_cbs[i].file);
dma_fence_put(fences[i]);
  2011. }
  2012. dma_fence_put(&fence_array->base);
  2013. return ret;
  2014. }
  2015. int vha_release_syncs(struct vha_session *session, uint32_t buf_id_count,
  2016. uint32_t *buf_ids)
  2017. {
  2018. struct device *dev = session->vha->dev;
  2019. int i;
  2020. for (i = 0; i < buf_id_count; i++) {
  2021. struct vha_buffer *buf = vha_find_bufid(session, buf_ids[i]);
  2022. if (buf == NULL) {
  2023. dev_warn(dev, "%s: could not find buf %u\n", __func__, buf_ids[i]);
  2024. } else {
  2025. vha_rm_buf_fence(session, buf);
  2026. }
  2027. }
  2028. return 0;
  2029. }
  2030. #endif
  2031. /* validate and queue a message from a user
  2032. * called with mutex locked */
  2033. int vha_add_cmd(struct vha_session *session, struct vha_cmd *cmd)
  2034. {
  2035. uint32_t i;
  2036. struct device *dev = session->vha->dev;
  2037. struct vha_user_cmd *user_cmd = &cmd->user_cmd;
  2038. /* number of words in vha_user_cmd->data[0] */
  2039. uint32_t num_params = (cmd->size - sizeof(struct vha_user_cmd))/sizeof(uint32_t);
  2040. uint32_t pri_q_count = 1;
  2041. #ifdef CONFIG_HW_MULTICORE
  2042. if (user_cmd->cmd_type == VHA_CMD_CNN_SUBMIT) {
  2043. dev_err(dev, "%s: invalid cmd type 0x%x\n", __func__, user_cmd->cmd_type);
  2044. return -EINVAL;
  2045. }
  2046. #endif
  2047. if (user_cmd->num_bufs > num_params * sizeof(uint32_t)) {
  2048. dev_err(dev, "%s: invalid number of buffers in message: in:%x total:%x>%lx\n",
  2049. __func__, user_cmd->num_inbufs, user_cmd->num_bufs,
  2050. num_params * sizeof(uint32_t));
  2051. return -EINVAL;
  2052. }
  2053. if (user_cmd->num_bufs > VHA_MAX_ALT_ADDRS) {
  2054. dev_err(dev, "%s: invalid number of buffers in message: %x max:%x\n",
  2055. __func__, user_cmd->num_bufs, VHA_MAX_ALT_ADDRS);
  2056. return -EINVAL;
  2057. }
  2058. if (!session->vha->cnn_combined_crc_enable && (cmd->user_cmd.flags & VHA_CHECK_CRC)) {
dev_err(dev, "%s: Trying to perform CRC check while combined CRCs are disabled!"
" Try cnn_combined_crc_enable=1\n", __func__);
  2061. return -EINVAL;
  2062. }
  2063. if (user_cmd->priority >= VHA_MAX_PRIORITIES) {
  2064. #if defined(CONFIG_HW_MULTICORE) || (defined(HW_AX3) && defined(VHA_USE_LO_PRI_SUB_SEGMENTS))
  2065. dev_warn(dev, "%s: Priority %u too high. Setting to max supported priority: %u.\n",
  2066. __func__, user_cmd->priority, VHA_MAX_PRIORITIES - 1);
  2067. user_cmd->priority = VHA_MAX_PRIORITIES - 1;
  2068. #else
  2069. dev_warn_once(dev, "%s: Priorities not supported.\n", __func__);
  2070. user_cmd->priority = VHA_DEFAULT_PRI;
  2071. #endif
  2072. }
  2073. switch(cmd->user_cmd.cmd_type) {
  2074. case VHA_CMD_CNN_SUBMIT:
  2075. {
  2076. struct vha_user_cnn_submit_cmd* submit_cmd =
  2077. (struct vha_user_cnn_submit_cmd*)user_cmd;
  2078. /* subsegments cannot be handled with low latency enabled */
  2079. if ((submit_cmd->subseg_num > 1) && (session->vha->low_latency != VHA_LL_DISABLED)) {
  2080. dev_err(dev, "%s: Subsegments are not supported with low latency enabled\n", __func__);
  2081. return -EINVAL;
  2082. }
  2083. /* include subsegments in priority counters */
  2084. pri_q_count = submit_cmd->subseg_num;
  2085. /* check input and output buffers are valid */
  2086. for (i = 0; i < user_cmd->num_bufs; i++) {
  2087. uint32_t buf_id = user_cmd->data[i];
  2088. if (vha_find_bufid(session, buf_id) == NULL) {
  2089. dev_err(dev, "%s: unrecognised buf id[%u]:%x\n",
  2090. __func__, i, buf_id);
  2091. return -EINVAL;
  2092. }
  2093. }
/* send out an event notification when the submit is enqueued */
  2095. if (vha_observers.enqueued)
  2096. vha_observers.enqueued(session->vha->id, session->id,
  2097. cmd->user_cmd.cmd_id, cmd->user_cmd.priority);
  2098. break;
  2099. }
  2100. case VHA_CMD_CNN_SUBMIT_MULTI:
  2101. {
  2102. uint32_t num_cmd_bufs = 0;
  2103. /* check if command stream buffers are valid */
  2104. for (i = 0; i < VHA_MAX_CORES; i++) {
  2105. uint32_t buf_id = user_cmd->data[i];
  2106. if (buf_id == 0)
  2107. break;
  2108. if (vha_find_bufid(session, buf_id) == NULL) {
  2109. dev_err(dev, "%s: unrecognised cmdstr buf id[%u]:%x\n",
  2110. __func__, i, buf_id);
  2111. return -EINVAL;
  2112. }
  2113. num_cmd_bufs++;
  2114. }
  2115. /* check input and output buffers are valid */
  2116. for (i = VHA_MAX_CORES; i < (user_cmd->num_bufs - 1); i++) {
  2117. uint32_t buf_id = user_cmd->data[i];
  2118. if (vha_find_bufid(session, buf_id) == NULL) {
  2119. dev_err(dev, "%s: unrecognised buf id[%u]:%x\n",
  2120. __func__, i, buf_id);
  2121. return -EINVAL;
  2122. }
  2123. }
/* send out an event notification when the submit is enqueued */
  2125. if (vha_observers.enqueued)
  2126. vha_observers.enqueued(session->vha->id, session->id,
  2127. cmd->user_cmd.cmd_id, cmd->user_cmd.priority);
  2128. break;
  2129. }
  2130. case VHA_CMD_CNN_PDUMP_MSG:
  2131. {
  2132. struct pdump_descr* pdump = vha_pdump_dev_get_drvdata(dev);
  2133. if (!img_pdump_enabled(pdump)) {
  2134. kfree(cmd);
  2135. /* Silently ignore this pdump message */
  2136. return 0;
  2137. }
  2138. }
  2139. }
  2140. /* add the command to the pending list */
  2141. list_add_tail(&cmd->list[cmd->user_cmd.priority], &session->cmds[cmd->user_cmd.priority]);
  2142. GETNSTIMEOFDAY(&cmd->submit_ts);
  2143. session->vha->pri_q_counters[cmd->user_cmd.priority] += pri_q_count;
  2144. /* We are already locked!
  2145. * Run in separate thread
  2146. */
  2147. vha_chk_cmd_queues(session->vha, true);
  2148. return 0;
  2149. }
  2150. int vha_suspend_dev(struct device *dev)
  2151. {
  2152. struct vha_dev *vha = vha_dev_get_drvdata(dev);
  2153. int ret;
  2154. mutex_lock(&vha->lock);
  2155. dev_dbg(dev, "%s: taking a nap!\n", __func__);
  2156. ret = vha_dev_suspend_work(vha);
  2157. mutex_unlock(&vha->lock);
  2158. return ret;
  2159. }
  2160. int vha_resume_dev(struct device *dev)
  2161. {
  2162. struct vha_dev *vha = vha_dev_get_drvdata(dev);
  2163. mutex_lock(&vha->lock);
  2164. dev_dbg(dev, "%s: waking up!\n", __func__);
  2165. /* Call the worker */
  2166. vha_chk_cmd_queues(vha, true);
  2167. mutex_unlock(&vha->lock);
  2168. return 0;
  2169. }
  2170. void vha_dump_digest(struct vha_session *session, struct vha_buffer *buf,
  2171. struct vha_cmd *cmd)
  2172. {
  2173. struct vha_dev *vha = session->vha;
  2174. int ret;
  2175. if (!vha->dump_buff_digest)
  2176. return;
  2177. if (!(buf->attr & IMG_MEM_ATTR_NOMAP)) {
  2178. ret = img_mem_map_km(session->mem_ctx, buf->id);
  2179. if (ret) {
  2180. dev_err(session->vha->dev, "failed to map buff %x to km: %d\n",
  2181. buf->id, ret);
  2182. return;
  2183. }
  2184. buf->kptr = img_mem_get_kptr(session->mem_ctx, buf->id);
  2185. dev_info(vha->dev, "%s: buff id:%d name:%s digest is [crc32]:%#x\n",
  2186. __func__, buf->id, buf->name, crc32(0, buf->kptr, buf->size));
  2187. ret = img_mem_unmap_km(session->mem_ctx, buf->id);
  2188. if (ret) {
  2189. dev_err(session->vha->dev,
  2190. "%s: failed to unmap buff %x from km: %d\n",
  2191. __func__, buf->id, ret);
  2192. }
  2193. buf->kptr = NULL;
  2194. }
  2195. }
  2196. /*
  2197. * register event observers.
  2198. * only a SINGLE observer for each type of event.
  2199. * unregister by passing NULL parameter
  2200. */
  2201. void vha_observe_event_enqueue(void (*func)(uint32_t devid,
  2202. uint32_t sessionid,
  2203. uint32_t cmdid,
  2204. uint32_t priority))
  2205. {
  2206. if (func && vha_observers.enqueued)
  2207. pr_warn("%s: vha_observer for ENQUEUED events is already set to '%pf'\n",
  2208. __func__, vha_observers.enqueued);
  2209. vha_observers.enqueued = func;
  2210. }
  2211. EXPORT_SYMBOL(vha_observe_event_enqueue);
  2212. void vha_observe_event_submit(void (*func)(uint32_t devid,
  2213. uint32_t sessionid,
  2214. uint32_t cmdid,
  2215. bool last_subsegment,
  2216. uint32_t priority))
  2217. {
  2218. if (func && vha_observers.submitted)
  2219. pr_warn("%s: vha_observer for SUBMITTED events is already set to '%pf'\n",
  2220. __func__, vha_observers.submitted);
  2221. vha_observers.submitted = func;
  2222. }
  2223. EXPORT_SYMBOL(vha_observe_event_submit);
  2224. void vha_observe_event_complete(void (*func)(uint32_t devid,
  2225. uint32_t sessionid,
  2226. uint32_t cmdid,
  2227. uint64_t status,
  2228. uint64_t cycles,
  2229. uint64_t mem_usage,
  2230. uint32_t priority))
  2231. {
  2232. if (func && vha_observers.completed)
  2233. pr_warn("%s: vha_observer for COMPLETED events is already set to '%pf'\n",
  2234. __func__, vha_observers.completed);
  2235. vha_observers.completed = func;
  2236. }
  2237. EXPORT_SYMBOL(vha_observe_event_complete);
  2238. void vha_observe_event_cancel(void (*func)(uint32_t devid,
  2239. uint32_t sessionid,
  2240. uint32_t cmdid,
  2241. uint32_t priority))
  2242. {
  2243. if (func && vha_observers.canceled)
  2244. pr_warn("%s: vha_observer for CANCELED events is already set to '%pf'\n",
  2245. __func__, vha_observers.canceled);
  2246. vha_observers.canceled = func;
  2247. }
  2248. EXPORT_SYMBOL(vha_observe_event_cancel);
  2249. void vha_observe_event_error(void (*func)(uint32_t devid,
  2250. uint32_t sessionid,
  2251. uint32_t cmdid,
  2252. uint64_t status))
  2253. {
  2254. if (func && vha_observers.error)
  2255. pr_warn("%s: vha_observer for ERROR events is already set to '%pf'\n",
  2256. __func__, vha_observers.error);
  2257. vha_observers.error = func;
  2258. }
  2259. EXPORT_SYMBOL(vha_observe_event_error);
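/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * A client module (for example a profiling layer) registers a single
 * completion observer and unregisters it again by passing NULL, as described
 * in the comment above the registration helpers. The callback and helper
 * names are hypothetical.
 */
static void __maybe_unused example_on_complete(uint32_t devid, uint32_t sessionid,
		uint32_t cmdid, uint64_t status, uint64_t cycles,
		uint64_t mem_usage, uint32_t priority)
{
	pr_debug("vha dev%u session%u cmd%u done: status=%llu cycles=%llu\n",
		devid, sessionid, cmdid,
		(unsigned long long)status, (unsigned long long)cycles);
}

static void __maybe_unused example_register_observer(bool enable)
{
	vha_observe_event_complete(enable ? example_on_complete : NULL);
}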