vha_dbg.c

/*
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/moduleparam.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/version.h> /* for the KERNEL_VERSION checks below */

#include <uapi/vha.h>
#include "vha_common.h"
#include "vha_plat.h"
#include "vha_io.h"

#ifdef CONFIG_DEBUG_FS

#define VHA_DBG_COMBINED_CRC_BUF_SIZE 0x1000
#define VHA_DBG_CRC_BUF_SIZE 0x2000

static uint32_t cnn_crc_size_kB;
static uint32_t cnn_dbg_size_kB;
static bool cnn_dbg_pdump_enable = true;
module_param(cnn_crc_size_kB, uint, 0444);
module_param(cnn_dbg_size_kB, uint, 0444);
module_param(cnn_dbg_pdump_enable, bool, 0444);
MODULE_PARM_DESC(cnn_crc_size_kB, "size of hw CRC buffer");
MODULE_PARM_DESC(cnn_dbg_size_kB, "size of hw DEBUG buffer");
MODULE_PARM_DESC(cnn_dbg_pdump_enable,
	"DEBUG buffer is captured into pdump file");

static uint32_t cnn_crc_mode;
static uint32_t cnn_dbg_modes[2];
module_param(cnn_crc_mode, uint, 0444);
module_param_array(cnn_dbg_modes, uint, NULL, 0444);
MODULE_PARM_DESC(cnn_crc_mode,
	"CRC CONTROL: mode for CNN_CRC_ENABLE: 0=disable 1=stream 2=layer 3=pass");
MODULE_PARM_DESC(cnn_dbg_modes,
	"DEBUG CONTROL: modes for PERF and BAND_ENABLE: 0=disable 1=stream 2=layer 3=pass");

#ifdef HW_AX3
static uint32_t cnn_crc_mask = 0;
module_param(cnn_crc_mask, uint, 0444);
MODULE_PARM_DESC(cnn_crc_mask,
	"CRC MASK: 0=no mask 1=debug silicon 2=safety critical 3=reserved");
#endif

static uint32_t cnn_pdump_flush_dbg = 1;
module_param(cnn_pdump_flush_dbg, uint, 0444);
MODULE_PARM_DESC(cnn_pdump_flush_dbg,
	"PDUMP: flushing debug buffs: 0:session,1:stream(default)");

static unsigned long vaa_offset = 0;
module_param(vaa_offset, ulong, 0444);
MODULE_PARM_DESC(vaa_offset,
	"Page aligned offset in virtual address allocator space for kernel buffers."
	" NOTE: given offset decreases the size of the vaa heap accordingly");

struct vha_dbgfs_ctx {
	struct dentry *debugfs_dir;
#if defined VHA_EVENT_INJECT
	struct dentry *event_inject_dir;
#endif
#if defined VHA_FUNCT_CTRL
	struct dentry *funct_ctrl_dir;
#endif
	struct vha_regset regset;
	uint64_t rtm_ctrl;
	uint64_t ioreg_addr;
};

/* MMU PTE dump info */
struct vha_ptedump {
	struct vha_session *session;
	/* Actual address */
	uint64_t vaddr;
	/* Selected mmu sw context to be dumped */
	unsigned cur_cid;
	/* Configuration info */
	size_t page_size;
	size_t virt_size;
};

static void *vha_mmu_ptedump_start(struct seq_file *seq, loff_t *pos)
{
	struct vha_ptedump *ctx = seq->private;
	struct vha_session *session;

	if (!ctx)
		return NULL;

	session = ctx->session;
	if (!session)
		return NULL;

	/* Get mmu configuration info - the same one the tables were built with */
	img_mmu_get_conf(&ctx->page_size, &ctx->virt_size);
	ctx->vaddr = *pos * ctx->page_size;
	if (*pos == 0) {
		uint64_t pc_addr = img_mem_get_single_page(session->mem_ctx,
				session->mmu_ctxs[ctx->cur_cid-1].pc_bufid, 0);
		seq_printf(seq, " Session hw_ctxid:%x -> PC addr:%#llx\n",
				session->mmu_ctxs[ctx->cur_cid-1].hw_id, pc_addr);
		seq_printf(seq, " [ virtaddr ] [ physaddr ] [flags]\n");
	}
	return ctx;
}

static void *vha_mmu_ptedump_next(struct seq_file *seq, void *priv, loff_t *pos)
{
	struct vha_ptedump *ctx = priv;
	struct vha_session *session;

	if (!ctx)
		return NULL;

	session = ctx->session;
	(*pos)++;
	ctx->vaddr = *pos * ctx->page_size;
	if (ctx->vaddr <= (1ULL << ctx->virt_size) - ctx->page_size)
		return ctx;

	if (ctx->cur_cid < ARRAY_SIZE(session->mmu_ctxs)) {
		/* Switch to next context & reset position */
		ctx->cur_cid++;
		*pos = 0;
		vha_mmu_ptedump_start(seq, pos);
		return ctx;
	}
	return NULL;
}

static int vha_mmu_ptedump_show(struct seq_file *seq, void *priv)
{
	struct vha_ptedump *ctx = priv;
	struct vha_session *session;
	phys_addr_t paddr;
	uint8_t flags;
	int ret = 0;

	if (!ctx)
		return -EINVAL;

	session = ctx->session;
	if (!session)
		return -EINVAL;

	if (ctx->vaddr > (1ULL << ctx->virt_size) - ctx->page_size)
		return SEQ_SKIP;

	paddr = img_mmu_get_paddr(session->mmu_ctxs[ctx->cur_cid-1].ctx,
			ctx->vaddr, &flags);
	if (flags) {
		struct vha_buffer *buf = vha_find_bufvaddr(session, ctx->vaddr);

		seq_printf(seq, " 0x%010llx 0x%010llx 0x%04x (%s)\n",
				ctx->vaddr, paddr, flags, buf ? buf->name : "???");
	} else if (!(ctx->vaddr % 0x40000000)) {
		/* Give some time to others, to avoid soft lockup warnings.
		 * Call yield() for every GB boundary of virtual address space */
		yield();
	}
	return ret;
}

static void vha_mmu_ptedump_stop(struct seq_file *seq, void *priv)
{
	/* Nothing to do */
}

static const struct seq_operations vha_mmu_ptedump_sops = {
	.start = vha_mmu_ptedump_start,
	.next  = vha_mmu_ptedump_next,
	.show  = vha_mmu_ptedump_show,
	.stop  = vha_mmu_ptedump_stop
};

static int vha_mmu_ptedump_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &vha_mmu_ptedump_sops);
	if (!ret) {
		struct vha_session *session;
		struct seq_file *seq;
		struct vha_ptedump *ctx;

		seq = file->private_data;
		session = inode->i_private;
		if (!session)
			return -EINVAL;

		ctx = kzalloc(sizeof(struct vha_ptedump), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->session = session;
		seq->private = ctx;
		ctx->cur_cid = 1;
		ret = mutex_lock_interruptible(&session->vha->lock);
	}
	return ret;
}

static int vha_mmu_ptedump_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct vha_session *session;
	struct vha_ptedump *ctx;

	if (!seq)
		return -EINVAL;

	ctx = seq->private;
	session = ctx->session;
	if (session)
		mutex_unlock(&session->vha->lock);
	kfree(ctx);
	return seq_release(inode, file);
}

static const struct file_operations vha_mmu_ptedump_fops = {
	.owner   = THIS_MODULE,
	.open    = vha_mmu_ptedump_open,
	.llseek  = seq_lseek,
	.release = vha_mmu_ptedump_release,
	.read    = seq_read,
};
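
/*
 * Example (assumed paths): with debugfs mounted at /sys/kernel/debug and
 * the device node named "vha0", a session's PTE dump is read with:
 *
 *   cat /sys/kernel/debug/vha0/sessionN/pte_dump
 *
 * Each line pairs a device virtual address with its physical address and
 * PTE flags, as formatted by vha_mmu_ptedump_show() above. "vha0" and
 * "sessionN" are placeholder node names.
 */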

static void *vha_buffer_dump_start(struct seq_file *seq, loff_t *pos)
{
	struct vha_session *session = seq->private;
	int ret;

	if (session == NULL) {
		pr_warn("Invalid VHA session pointer...\n");
		return NULL;
	}
	if (list_empty(&session->bufs))
		return NULL;

	ret = mutex_lock_interruptible(&session->vha->lock);
	if (ret) {
		pr_warn("Error while trying to get vha lock (%d)...\n", ret);
		return NULL;
	}
	seq_printf(seq, "Allocated buffers:\n");
	seq_printf(seq, "ID Name Size Attributes Status Kptr DevVirt Inval? Flush?\n");
	/* 6005 012345678 123456789 CUWSNM SW filled (null) 0x40002001 n Y\n" */
	/* Then first buffer from it */
	return seq_list_start(&session->bufs, *pos);
}

static void *vha_buffer_dump_next(struct seq_file *seq, void *priv, loff_t *pos)
{
	struct vha_session *session = seq->private;

	return seq_list_next(priv, &session->bufs, pos);
}

static void vha_buffer_dump_stop(struct seq_file *seq, void *priv)
{
	struct vha_session *session = seq->private;

	mutex_unlock(&session->vha->lock);
	seq_printf(seq, "Attributes: Cached;Uncached;Writecombine;Secure;Nomap;Mmu\n");
}

static const char *BufferStatus[] = {
	"Unfilled ",
	"SW filled",
	"HW filled"
};

static int vha_buffer_dump_show(struct seq_file *seq, void *priv)
{
	const struct vha_buffer *buf = list_entry(priv, struct vha_buffer, list);

	/* ID Name Size Attributes Status Kptr DevVirt Inval? Flush? */
	seq_printf(seq, "%04u %9s %9zu %c%c%c%c%c%c %s %p 0x%08llX %c %c\n",
		buf->id,
		buf->name,
		buf->size,
		(buf->attr & IMG_MEM_ATTR_CACHED)       ? 'C' : '.',
		(buf->attr & IMG_MEM_ATTR_UNCACHED)     ? 'U' : '.',
		(buf->attr & IMG_MEM_ATTR_WRITECOMBINE) ? 'W' : '.',
		(buf->attr & IMG_MEM_ATTR_SECURE)       ? 'S' : '.',
		(buf->attr & IMG_MEM_ATTR_NOMAP)        ? 'N' : '.',
		(buf->attr & IMG_MEM_ATTR_MMU)          ? 'M' : '.',
		BufferStatus[buf->status],
		buf->kptr,
		buf->devvirt,
		buf->inval ? 'Y' : 'n',
		buf->flush ? 'Y' : 'n');
	return 0;
}

static const struct seq_operations vha_buffer_dump_sops = {
	.start = vha_buffer_dump_start,
	.next  = vha_buffer_dump_next,
	.show  = vha_buffer_dump_show,
	.stop  = vha_buffer_dump_stop
};

static int vha_buffer_dump_open(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	int err;

	err = seq_open(file, &vha_buffer_dump_sops);
	if (err)
		return err;

	s = file->private_data;
	/* i_private contains a pointer to the vha_session structure */
	s->private = inode->i_private;
	return 0;
}

static const struct file_operations vha_buffer_dump_fops = {
	.owner   = THIS_MODULE,
	.open    = vha_buffer_dump_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
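
/*
 * Example output line (illustrative; values are made up but follow the
 * sample given in vha_buffer_dump_start() and the format string in
 * vha_buffer_dump_show() above):
 *
 *   6005 012345678 123456789 CUWSNM SW filled (null) 0x40002001 n Y
 *
 * Attribute letters are replaced by '.' when the attribute is not set.
 */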

static ssize_t vha_session_mem_max_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_session *session = file->private_data;
	char mem_usage[25] = { 0 };
	size_t mem_val = 0;
	size_t size;

	img_mem_get_usage(session->mem_ctx, &mem_val, NULL);
	size = snprintf(mem_usage, sizeof(mem_usage), "%zu\n", mem_val);

	return simple_read_from_buffer(buf, count, ppos, mem_usage, size);
}

static const struct file_operations vha_session_mem_max_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_session_mem_max_read,
};

static ssize_t vha_session_mem_curr_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_session *session = file->private_data;
	char mem_usage[25] = { 0 };
	size_t mem_val = 0;
	size_t size;

	img_mem_get_usage(session->mem_ctx, NULL, &mem_val);
	size = snprintf(mem_usage, sizeof(mem_usage), "%zu\n", mem_val);

	return simple_read_from_buffer(buf, count, ppos, mem_usage, size);
}

static const struct file_operations vha_session_mem_curr_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_session_mem_curr_read,
};

static ssize_t vha_session_mmu_max_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_session *session = file->private_data;
	char mem_usage[25] = { 0 };
	size_t mem_val = 0;
	size_t size;

	img_mmu_get_usage(session->mem_ctx, &mem_val, NULL);
	size = snprintf(mem_usage, sizeof(mem_usage), "%zu\n", mem_val);

	return simple_read_from_buffer(buf, count, ppos, mem_usage, size);
}

static const struct file_operations vha_session_mmu_max_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_session_mmu_max_read,
};

static ssize_t vha_session_mmu_curr_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_session *session = file->private_data;
	char mem_usage[25] = { 0 };
	size_t mem_val = 0;
	size_t size;

	img_mmu_get_usage(session->mem_ctx, NULL, &mem_val);
	size = snprintf(mem_usage, sizeof(mem_usage), "%zu\n", mem_val);

	return simple_read_from_buffer(buf, count, ppos, mem_usage, size);
}

static const struct file_operations vha_session_mmu_curr_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_session_mmu_curr_read,
};

struct dbgfs_buf_info {
	struct vha_session *session;
	struct vha_buffer  *buf;
	struct dentry      *dbgfs; /* file in debugfs */
};

/* debugfs read a buffer */
static ssize_t dbgfs_buf_read(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dbgfs_buf_info *info = file->private_data;
	struct vha_buffer *buf = info->buf;
	struct vha_session *session = info->session;
	int ret;

	ret = mutex_lock_interruptible(&session->vha->lock);
	if (!ret) {
		if (buf->attr & IMG_MEM_ATTR_NOMAP) {
			ret = -ENOMEM;
			dev_err(session->vha->dev, "can't read non mappable buff %x (%d)\n",
					buf->id, ret);
			goto exit;
		}
		ret = img_mem_map_km(session->mem_ctx, buf->id);
		if (ret) {
			dev_err(session->vha->dev, "failed to map buff %x to km: %d\n",
					buf->id, ret);
			ret = -ENOMEM;
			goto exit;
		}
		buf->kptr = img_mem_get_kptr(session->mem_ctx, buf->id);
		ret = simple_read_from_buffer(user_buf, count, ppos,
				buf->kptr, buf->size);
		if (ret < 0)
			dev_err(session->vha->dev, "failed to read buff %x from km: %d\n",
					buf->id, ret);
		if (img_mem_unmap_km(session->mem_ctx, buf->id))
			dev_err(session->vha->dev,
					"%s: failed to unmap buff %x from km: %d\n",
					__func__, buf->id, ret);
exit:
		buf->kptr = NULL;
		mutex_unlock(&session->vha->lock);
	}
	return ret;
}

static const struct file_operations dbgfs_buf_fops = {
	.read   = dbgfs_buf_read,
	.open   = simple_open,
	.llseek = default_llseek,
};
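
/*
 * Usage sketch (assumed paths): dbg_add_buf() below publishes every named
 * buffer as a read-only "<name>.bin" debugfs file, so a CRC capture can be
 * pulled from userspace with e.g.:
 *
 *   cp /sys/kernel/debug/vha0/session0/CRC_0.bin /tmp/
 *
 * "vha0" and "session0" are placeholder node names.
 */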

static void dbg_add_buf(struct vha_session *session, struct vha_buffer *buf)
{
	if (buf->name[0] && session->dbgfs) {
		char name[13] = { 0 };
		struct dbgfs_buf_info *info = kzalloc(sizeof(struct dbgfs_buf_info),
				GFP_KERNEL);
		if (!info) {
			dev_err(session->vha->dev, "%s: alloc info failed!\n", __func__);
			return;
		}
		snprintf(name, sizeof(name)-1, "%s.bin", buf->name);
		info->buf = buf;
		info->session = session;
		info->dbgfs = debugfs_create_file(name,
				S_IRUGO, session->dbgfs,
				info, &dbgfs_buf_fops);
		if (!info->dbgfs)
			dev_warn(session->vha->dev,
					"%s: failed to create debugfs entry for '%s'!\n",
					__func__, name);
		buf->dbgfs_priv = (void *)info;
	}
}

static void dbg_rm_buf(struct vha_session *session, uint32_t buf_id)
{
	struct vha_buffer *buf = vha_find_bufid(session, buf_id);
	struct dbgfs_buf_info *info;

	if (!buf)
		return;

	info = (struct dbgfs_buf_info *)buf->dbgfs_priv;
	if (info) {
		/* remove the debugfs file. NULL is safe */
		debugfs_remove(info->dbgfs);
		kfree(info);
	}
	buf->dbgfs_priv = NULL;
}

/*
 * create buffers for CRC and DEBUG (PERF and BAND).
 * Configure the hardware to use them.
 * Buffers are mapped into the device mmu on demand (when map=true)
 */
int vha_dbg_alloc_hwbuf(struct vha_session *session, size_t size,
		struct vha_buffer **buffer,
		const char *name, bool map)
{
	struct vha_dev *vha = session->vha;
	struct vha_buffer *buf;
	int buf_id, ret;
	uint32_t vaddr = 0;
	size_t page_size;

	if (list_empty(&session->bufs))
		img_pdump_printf("-- ALLOC_BEGIN\n");

	img_mmu_get_conf(&page_size, NULL);
	size = ALIGN(size, page_size);
	ret = img_mem_alloc(vha->dev,
			session->mem_ctx,
			vha->int_heap_id,
			size,
			IMG_MEM_ATTR_WRITECOMBINE,
			&buf_id);
	if (ret)
		return ret;

	ret = vha_add_buf(session, buf_id, size,
			name, IMG_MEM_ATTR_WRITECOMBINE);
	if (ret) {
		dev_err(vha->dev, "%s: add failed!\n", __func__);
		goto out_add_failed;
	}
	buf = vha_find_bufid(session, buf_id);
	if (buf == NULL)
		goto out_no_buf;

	if (vha->mmu_mode) {
		ret = img_mmu_vaa_alloc(session->vaa_ctx,
				buf->size, &vaddr);
		if (ret) {
			dev_err(vha->dev, "%s: vaa alloc failed!\n", __func__);
			goto out_vaa_failed;
		}
		if (map) {
			ret = img_mmu_map(
					session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx,
					session->mem_ctx, buf_id,
					vaddr, 0);
			if (ret) {
				dev_err(vha->dev,
						"%s: map failed!\n",
						__func__);
				goto out_map_failed;
			}
			buf->devvirt = vaddr;
			dev_dbg(vha->dev,
					"%s: mapped buf %s (%u) to %#llx:%zu\n",
					__func__,
					buf->name, buf_id,
					buf->devvirt, buf->size);
		}
	}
	*buffer = buf;
	return 0;

out_map_failed:
	img_mmu_vaa_free(session->vaa_ctx, vaddr, buf->size);
out_vaa_failed:
out_no_buf:
	vha_rm_buf(session, buf_id);
out_add_failed:
	img_mem_free(session->mem_ctx, buf_id);
	return -EFAULT;
}
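
/*
 * Minimal call sketch (illustration only; mirrors the callers below).
 * The requested size is rounded up to the MMU page size and, when
 * map=true, the buffer is also mapped into the device MMU IO context:
 *
 *	struct vha_buffer *buf;
 *	int ret = vha_dbg_alloc_hwbuf(session, 8 * 1024, &buf, "CRC_0", true);
 *	if (ret)
 *		return ret;	(allocation is already cleaned up on failure)
 */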

/* create CNN_CRC and CNN_DEBUG capture into buffers */
int vha_dbg_create_hwbufs(struct vha_session *session)
{
	struct vha_dev *vha = session->vha;
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
	int ret;

	if (vha->cnn_combined_crc_enable) {
		session->cnn_dbg.cnn_crc_size_kB = cnn_crc_size_kB ? cnn_crc_size_kB :
				VHA_DBG_CRC_BUF_SIZE;
		session->cnn_dbg.cnn_crc_mode = 1; /* stream mode */
#ifdef HW_AX3
		session->cnn_dbg.cnn_crc_mask = 2; /* safety mode */
#endif
	} else {
		session->cnn_dbg.cnn_crc_mode = cnn_crc_mode;
		session->cnn_dbg.cnn_crc_size_kB = cnn_crc_size_kB;
#ifdef HW_AX3
		session->cnn_dbg.cnn_crc_mask = cnn_crc_mask;
#endif
	}
	memcpy(session->cnn_dbg.cnn_dbg_modes, cnn_dbg_modes, sizeof(cnn_dbg_modes));
	session->cnn_dbg.cnn_dbg_size_kB = cnn_dbg_size_kB;
	session->cnn_dbg.cnn_dbg_flush = cnn_pdump_flush_dbg;
	session->cnn_dbg.cnn_dbg_pdump_enable = cnn_dbg_pdump_enable;

	if (vha->mmu_mode &&
		((session->cnn_dbg.cnn_crc_mode > 0 && session->cnn_dbg.cnn_crc_size_kB > 0) ||
		(session->cnn_dbg.cnn_dbg_size_kB > 0) || vha->cnn_combined_crc_enable)) {
		if (vaa_offset & (PAGE_SIZE-1)) {
			dev_err(vha->dev, "%s: given vaa offset is not page aligned!\n",
					__func__);
			return -EINVAL;
		}
		ret = img_mmu_vaa_create(vha->dev,
				IMG_MEM_VA_HEAP1_BASE + vaa_offset,
				IMG_MEM_VA_HEAP1_SIZE - vaa_offset,
				&session->vaa_ctx);
		if (ret) {
			dev_err(vha->dev, "%s: failed to allocate vaa heap\n",
					__func__);
			return ret;
		}
	}

	/* Create debugfs dir and populate entries */
	if (ctx->debugfs_dir) {
		char name[15] = { 0 };

		snprintf(name, sizeof(name)-1, "%s%d",
				"session",
				session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id);
		session->dbgfs =
				debugfs_create_dir(name, ctx->debugfs_dir);
		if (session->dbgfs) {
			if (!debugfs_create_file("pte_dump", S_IRUGO, session->dbgfs,
					session, &vha_mmu_ptedump_fops))
				dev_warn(vha->dev,
						"%s: failed to create pte_dump!\n",
						__func__);
			if (!debugfs_create_file("mem_usage_max", S_IRUGO, session->dbgfs,
					session, &vha_session_mem_max_fops))
				dev_warn(vha->dev,
						"%s: failed to create mem_usage_max!\n",
						__func__);
			if (!debugfs_create_file("mem_usage_curr", S_IRUGO, session->dbgfs,
					session, &vha_session_mem_curr_fops))
				dev_warn(vha->dev,
						"%s: failed to create mem_usage_curr!\n",
						__func__);
			if (!debugfs_create_file("mmu_usage_max", S_IRUGO, session->dbgfs,
					session, &vha_session_mmu_max_fops))
				dev_warn(vha->dev,
						"%s: failed to create mmu_usage_max!\n",
						__func__);
			if (!debugfs_create_file("mmu_usage_curr", S_IRUGO, session->dbgfs,
					session, &vha_session_mmu_curr_fops))
				dev_warn(vha->dev,
						"%s: failed to create mmu_usage_curr!\n",
						__func__);
			if (!debugfs_create_file("buffer_dump", S_IRUGO, session->dbgfs,
					session, &vha_buffer_dump_fops))
				dev_warn(vha->dev,
						"%s: failed to create buffer_dump!\n",
						__func__);
		}
	}

	if (session->cnn_dbg.cnn_crc_mode > 0 && session->cnn_dbg.cnn_crc_size_kB > 0) {
		struct vha_buffer *buf;
		size_t size = session->cnn_dbg.cnn_crc_size_kB * 1024;
		int id;

		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			char name[10] = { 0 };

			snprintf(name, sizeof(name)-1, "CRC_%u", id);
			ret = vha_dbg_alloc_hwbuf(session, size, &buf, name, true);
			if (ret) {
				dev_err(vha->dev, "%s: failed to allocate buffer for CNN_CRC\n",
						__func__);
				goto out_disable;
			}
			session->cnn_dbg.cnn_crc_buf[id] = buf;
			dbg_add_buf(session, buf);
		}
	}
	if (cnn_dbg_size_kB > 0) {
		struct vha_buffer *buf;
		size_t size = cnn_dbg_size_kB * 1024;
		int id;

		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			char name[10] = { 0 };

			snprintf(name, sizeof(name)-1, "DBG_%u", id);
			ret = vha_dbg_alloc_hwbuf(session, size, &buf, name, true);
			if (ret) {
				dev_err(vha->dev, "%s: failed to allocate buffer for CNN_DEBUG\n",
						__func__);
				goto out_disable;
			}
			session->cnn_dbg.cnn_dbg_buf[id] = buf;
			dbg_add_buf(session, buf);
		}
	}
	if (vha->cnn_combined_crc_enable) {
		struct vha_buffer *buf;

		ret = vha_dbg_alloc_hwbuf(session, VHA_DBG_COMBINED_CRC_BUF_SIZE, &buf,
				"CRC_Cmb", true);
		if (ret) {
			dev_err(vha->dev, "%s: failed to allocate buffer for CRC_Cmb\n",
					__func__);
			goto out_disable;
		}
		session->cnn_dbg.cnn_combined_crc = buf;
		dbg_add_buf(session, buf);
		if (buf->kptr == NULL) {
			ret = img_mem_map_km(session->mem_ctx, buf->id);
			if (ret) {
				dev_err(session->vha->dev,
						"%s: failed to map buff %x to km: %d\n",
						__func__, buf->id, ret);
				return ret;
			}
			buf->kptr = img_mem_get_kptr(session->mem_ctx, buf->id);
		}
	}
	return 0;

out_disable:
	vha_dbg_destroy_hwbufs(session);
	return ret;
}

void vha_dbg_hwbuf_cleanup(struct vha_session *session,
		struct vha_buffer *buf)
{
	struct vha_dev *vha = session->vha;

	if (buf == NULL)
		return;

	if (vha->mmu_mode) {
		img_mmu_vaa_free(session->vaa_ctx, buf->devvirt, buf->size);
		img_mmu_unmap(session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].ctx,
				session->mem_ctx, buf->id);
	}
	dbg_rm_buf(session, buf->id);
	vha_rm_buf(session, buf->id);
	img_mem_free(session->mem_ctx, buf->id);
}

/* free the CRC and DEBUG buffers */
void vha_dbg_destroy_hwbufs(struct vha_session *session)
{
	struct vha_dev *vha = session->vha;

	if (session->cnn_dbg.cnn_combined_crc)
		vha_dbg_hwbuf_cleanup(session, session->cnn_dbg.cnn_combined_crc);

	if (session->cnn_dbg.cnn_crc_buf[0]) {
		int id;

		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[id];

			vha_dbg_hwbuf_cleanup(session, buf);
		}
	}
	if (session->cnn_dbg.cnn_dbg_buf[0]) {
		int id;

		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[id];

			vha_dbg_hwbuf_cleanup(session, buf);
		}
	}
	if (vha->mmu_mode && session->vaa_ctx)
		img_mmu_vaa_destroy(session->vaa_ctx);

	/* remove debugfs directory. NULL is safe */
	debugfs_remove_recursive(session->dbgfs);
}

static int _show_vha_regset(struct seq_file *s, void *data)
{
	struct vha_regset *regset = s->private;
	struct vha_dev *vha = regset->vha;
	const struct vha_reg *reg = regset->regs;
	char str[150];
	int i;
	int ret;

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;
#ifndef VHA_FORCE_IO_DEBUG
	if (vha->state == VHA_STATE_OFF) {
		dev_err(vha->dev, "%s: can't access disabled device!!\n", __func__);
		mutex_unlock(&vha->lock);
		return -EIO;
	}
#endif
	for (i = 0; i < regset->nregs; i++, reg++) {
		uint64_t val;

		if (reg->name == NULL)
			break;
		val = IOREAD64(vha->reg_base, reg->offset);
		sprintf(str, "%s(0x%04x) = 0x%016llx",
				reg->name, reg->offset, val);
		if (val & ~reg->mask)
			strcat(str, " Bogus register value detected !!!");
		strcat(str, "\n");
		seq_puts(s, str);
	}
	mutex_unlock(&vha->lock);
	return 0;
}

static int _open_vha_regset(struct inode *inode, struct file *file)
{
	return single_open(file, _show_vha_regset, inode->i_private);
}

static const struct file_operations vha_regset_fops = {
	.open    = _open_vha_regset,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* List of predefined registers to be shown in debugfs */
extern const struct vha_reg vha_regs[];
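
/*
 * Note: vha_regset_fops walks the vha_regs[] table defined elsewhere in
 * the driver; the debugfs node it is bound to is created further down in
 * vha_dbg_init() (outside this excerpt). Reading it prints one
 * "NAME(0xOFFS) = 0xVALUE" line per register, as formatted above.
 */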

static ssize_t vha_cnn_utilization_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	char utilization[20] = { 0 };
	size_t bytes = 0;

	if (*ppos)
		return 0;

	snprintf(utilization, sizeof(utilization)-1, "%d.%d[%%]\n",
			vha->stats.cnn_utilization / 10,
			vha->stats.cnn_utilization % 10);
	/* Check the user buffer once the string has actually been formatted */
	if (count < strlen(utilization))
		return -EINVAL;

	if (copy_to_user(buf, utilization,
			strlen(utilization))) {
		dev_err(vha->dev, "%s: cnn_utilization read: copy to user failed\n",
				__func__);
		return -EFAULT;
	}
	bytes = strlen(utilization);
	*ppos = bytes;
	return bytes;
}

static const struct file_operations vha_cnn_utilization_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_cnn_utilization_read,
};

static ssize_t vha_cnn_last_cycles_read(struct file *file, char __user *buf, size_t len,
		loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	char cycles[16];
	size_t size;

#if defined(HW_AX2)
	/* For Mirage cnn_last_cycles holds a valid value only
	 * when we set WDT per segment */
#define WDT_CTRL_MASK (3)
#define WDT_CTRL_KICK_PASS (1)
	if ((vha->wdt_mode & WDT_CTRL_MASK) ==
			WDT_CTRL_KICK_PASS)
		size = snprintf(cycles, sizeof(cycles), "n/a\n");
	else
#elif defined(HW_AX3) && !defined(CONFIG_HW_MULTICORE)
	/* For Aura cnn_last_cycles holds a valid value only
	 * when debug mode is turned on to collect performance data per segment
	 * VHA_CR_CNN_DEBUG_CTRL_STREAM */
#define DEBUG_CTRL_STREAM (1)
	if (cnn_dbg_modes[0] != DEBUG_CTRL_STREAM)
		size = snprintf(cycles, sizeof(cycles), "n/a\n");
	else
#endif
		size = snprintf(cycles, sizeof(cycles), "%lld\n",
				vha->stats.cnn_last_cycles);

	return simple_read_from_buffer(buf, len, ppos, cycles, size);
}

static const struct file_operations vha_cnn_last_cycles_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_cnn_last_cycles_read,
};

static ssize_t vha_bvnc_read(struct file *file, char __user *buf, size_t len,
		loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	char bvnc[4*6];
	size_t size = snprintf(bvnc, sizeof(bvnc), "%llu.%llu.%llu.%llu\n",
			core_id_quad(vha->hw_props.core_id));

	return simple_read_from_buffer(buf, len, ppos, bvnc, size);
}

static const struct file_operations vha_bvnc_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_bvnc_read,
};

static ssize_t vha_pri_q_counters_read(struct file *file, char __user *buf, size_t len,
		loff_t *ppos)
{
#define MAX_ENTRY_LEN 20
	struct vha_dev *vha = file->private_data;
	int ret;
	char pri_q_counters[VHA_MAX_PRIORITIES * MAX_ENTRY_LEN + 1] = "";
	char pri_q_counter[MAX_ENTRY_LEN] = "";
	char *str = pri_q_counters;

	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		size_t size = 0;
		uint8_t pri;

		for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
			size += snprintf(pri_q_counter, MAX_ENTRY_LEN,
					"pri %u: %u\n", pri, vha->pri_q_counters[pri]);
			strncat(str, pri_q_counter, MAX_ENTRY_LEN);
		}
		mutex_unlock(&vha->lock);
		return simple_read_from_buffer(buf, len, ppos, pri_q_counters, size);
	}
	return ret;
#undef MAX_ENTRY_LEN
}

static const struct file_operations vha_pri_q_counters_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_pri_q_counters_read,
};

/* Real Time Monitor facilities.
 * They allow peeking at hw internals. Please refer to the TRM. */
static ssize_t vha_rtm_read(struct file *file, char __user *buf, size_t len,
		loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
	int ret;
	char rtm[23];
	uint64_t rtm_data;

	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		size_t size;
#ifndef VHA_FORCE_IO_DEBUG
		if (vha->state == VHA_STATE_OFF) {
			dev_err(vha->dev, "%s: can't access disabled device!!\n", __func__);
			mutex_unlock(&vha->lock);
			return -EIO;
		}
#endif
		rtm_data = vha_dbg_rtm_read(vha, ctx->rtm_ctrl);
		size = snprintf(rtm, sizeof(rtm), "%#.8llx %#.8llx\n",
				ctx->rtm_ctrl, rtm_data);
		mutex_unlock(&vha->lock);
		return simple_read_from_buffer(buf, len, ppos, rtm, size);
	}
	return ret;
}

static const struct file_operations vha_rtm_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_rtm_read,
};

/* Generic IO access facilities.
 * They allow reading/writing any register in the address space. */
static ssize_t vha_ioreg_read(struct file *file, char __user *buf, size_t len,
		loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
	char data[32];
	uint64_t io_data;
	size_t size;
	int ret;

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	if (ctx->ioreg_addr >= vha->reg_size) {
		dev_err(vha->dev,
				"%s: read attempt beyond reg space (%#llx >= %#llx)!\n",
				__func__, ctx->ioreg_addr, vha->reg_size);
		mutex_unlock(&vha->lock);
		return -EINVAL;
	}
#ifndef VHA_FORCE_IO_DEBUG
	if (vha->state == VHA_STATE_OFF) {
		dev_err(vha->dev, "%s: can't access disabled device!!\n", __func__);
		mutex_unlock(&vha->lock);
		return -EIO;
	}
#endif
	/* Read the data */
	io_data = IOREAD64(vha->reg_base, ctx->ioreg_addr);
	mutex_unlock(&vha->lock);
	size = snprintf(data, sizeof(data), "%#.8llx::%#.16llx\n",
			ctx->ioreg_addr, io_data);

	return simple_read_from_buffer(buf, len, ppos, data, size);
}

static ssize_t vha_ioreg_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
	uint64_t io_data;
	int ret = kstrtou64_from_user(buf, len, 16, &io_data);

	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&vha->lock);
	if (ret)
		return ret;

	if (ctx->ioreg_addr >= vha->reg_size) {
		dev_err(vha->dev,
				"%s: write attempt beyond reg space (%#llx >= %#llx)!\n",
				__func__, ctx->ioreg_addr, vha->reg_size);
		mutex_unlock(&vha->lock);
		return -EINVAL;
	}
#ifndef VHA_FORCE_IO_DEBUG
	if (vha->state == VHA_STATE_OFF) {
		dev_err(vha->dev, "%s: can't access disabled device!!\n", __func__);
		mutex_unlock(&vha->lock);
		return -EIO;
	}
#endif
	/* Write the data */
	IOWRITE64(vha->reg_base, ctx->ioreg_addr, io_data);
	mutex_unlock(&vha->lock);

	return len;
}

static const struct file_operations vha_ioreg_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_ioreg_read,
	.write = vha_ioreg_write,
};
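
/*
 * Two-step usage sketch (assumed node names): ctx->ioreg_addr is exposed
 * as a writable debugfs value elsewhere in vha_dbg_init(), so a register
 * peek looks like:
 *
 *   echo 0x100 > /sys/kernel/debug/vha0/ioreg_addr    (select offset)
 *   cat /sys/kernel/debug/vha0/ioreg                  (read "addr::value")
 *
 * "vha0", "ioreg_addr" and "ioreg" are placeholder names.
 */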

static ssize_t vha_stats_reset_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;

	memset(&vha->stats, 0, sizeof(struct vha_stats));

	return count;
}

static const struct file_operations vha_stats_reset_fops = {
	.owner = THIS_MODULE,
	.write = vha_stats_reset_write,
	.open  = simple_open,
};

#ifdef CONFIG_HW_MULTICORE
/* Per core scheduling stats. */
static ssize_t vha_cnn_kicks_per_core_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
#define MAX_CORE_REPORT_LEN 76
#define MAX_REPORT_LEN ((2 * VHA_NUM_CORES + 1) * MAX_CORE_REPORT_LEN)
#define MAX_STAT_NUM 5
	struct vha_dev *vha = file->private_data;
	int ret;
	char *kicks_per = kmalloc(MAX_REPORT_LEN, GFP_KERNEL);

	if (kicks_per == NULL) {
		dev_err(vha->dev,
				"%s: failed to allocate memory for stats!\n", __func__);
		return -ENOMEM;
	}
	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		char report_line[MAX_CORE_REPORT_LEN];
		char core_report_fmt[MAX_CORE_REPORT_LEN] = "core%u: %10u";
		char wm_report_fmt[MAX_CORE_REPORT_LEN] = "WM%u: %10u";
		size_t size = 0;
		uint8_t id;
		uint8_t stat_id;
		char *include_queued = "";
		char *include_cancels = "";
		char *include_aborts = "";
		uint32_t stats[MAX_STAT_NUM] = {0};
		ssize_t read_ret;

		/* Init stats message. */
		kicks_per[0] = 0;
		/* Check if queued WLs need to be included. */
		switch (vha->low_latency) {
		case VHA_LL_SW_KICK:
			include_queued = " queued";
			break;
		case VHA_LL_SELF_KICK:
			include_queued = " selfkicked";
			break;
		default:
			break;
		}
		if (strlen(include_queued) > 0) {
			strncat(core_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
			strncat(wm_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
		}
		/* Check if any cancels were recorded. */
		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
			if (vha->stats.wm_stats[id].kicks_cancelled > 0) {
				include_cancels = " cancelled";
				strncat(core_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
				strncat(wm_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
				break;
			}
		/* Check if any aborts were recorded. */
		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++)
			if (vha->stats.wm_stats[id].kicks_aborted > 0) {
				include_aborts = " aborted";
				strncat(core_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
				strncat(wm_report_fmt, " %10u", MAX_CORE_REPORT_LEN);
				break;
			}
		/* Add completed WLs. */
		strncat(core_report_fmt, " %10u\n", MAX_CORE_REPORT_LEN);
		strncat(wm_report_fmt, " %10u\n", MAX_CORE_REPORT_LEN);
		/* Create report header. */
		size += snprintf(kicks_per, MAX_REPORT_LEN,
				" total%s%s%s completed\n",
				include_queued, include_cancels, include_aborts);
		/* Create core report. */
		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			stat_id = 0;
			stats[stat_id] = vha->stats.core_stats[id].kicks;
			stat_id++;
			if (strlen(include_queued) > 0) {
				stats[stat_id] = vha->stats.core_stats[id].kicks_queued;
				stat_id++;
			}
			if (strlen(include_cancels) > 0) {
				stats[stat_id] = vha->stats.core_stats[id].kicks_cancelled;
				stat_id++;
			}
			if (strlen(include_aborts) > 0) {
				stats[stat_id] = vha->stats.core_stats[id].kicks_aborted;
				stat_id++;
			}
			stats[stat_id] = vha->stats.core_stats[id].kicks_completed;
			size += snprintf(report_line, MAX_CORE_REPORT_LEN,
					core_report_fmt, id,
					stats[0], stats[1], stats[2], stats[3], stats[4]);
			strncat(kicks_per, report_line, MAX_REPORT_LEN);
		}
		/* Create WM report. */
		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			stat_id = 0;
			stats[stat_id] = vha->stats.wm_stats[id].kicks;
			stat_id++;
			if (strlen(include_queued) > 0) {
				stats[stat_id] = vha->stats.wm_stats[id].kicks_queued;
				stat_id++;
			}
			if (strlen(include_cancels) > 0) {
				stats[stat_id] = vha->stats.wm_stats[id].kicks_cancelled;
				stat_id++;
			}
			if (strlen(include_aborts) > 0) {
				stats[stat_id] = vha->stats.wm_stats[id].kicks_aborted;
				stat_id++;
			}
			stats[stat_id] = vha->stats.wm_stats[id].kicks_completed;
			size += snprintf(report_line, MAX_CORE_REPORT_LEN,
					wm_report_fmt, id,
					stats[0], stats[1], stats[2], stats[3], stats[4]);
			strncat(kicks_per, report_line, MAX_REPORT_LEN);
		}
		mutex_unlock(&vha->lock);
		read_ret = simple_read_from_buffer(buf, len, ppos, kicks_per, size);
		kfree(kicks_per);
		return read_ret;
	}
#undef MAX_CORE_REPORT_LEN
#undef MAX_REPORT_LEN
#undef MAX_STAT_NUM
	/* Lock acquisition failed; don't leak the report buffer */
	kfree(kicks_per);
	return ret;
}

static const struct file_operations vha_cnn_kicks_per_core_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_cnn_kicks_per_core_read,
};
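
/*
 * Sample report shape (illustrative values; optional columns appear only
 * when the corresponding events were recorded):
 *
 *          total     queued  completed
 *   core0:    12          3         12
 *   WM0:      12          3         12
 */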

/* Per core utilization stats. */
static ssize_t vha_cnn_utilization_per_core_read(struct file *file,
		char __user *buf, size_t len, loff_t *ppos)
{
#define MAX_CORE_REPORT_LEN 24
#define MAX_REPORT_LEN ((2 * VHA_NUM_CORES) * MAX_CORE_REPORT_LEN)
	struct vha_dev *vha = file->private_data;
	int ret;
	char utilization_per[MAX_REPORT_LEN] = "";

	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		char core_report_line[MAX_CORE_REPORT_LEN];
		size_t size = 0;
		uint8_t id;

		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			size += snprintf(core_report_line, MAX_CORE_REPORT_LEN,
					"core%u: %d.%d[%%]\n",
					id,
					vha->stats.core_stats[id].utilization / 10,
					vha->stats.core_stats[id].utilization % 10);
			strcat(utilization_per, core_report_line);
		}
		for (id = 0; id < vha->hw_props.num_cnn_core_devs; id++) {
			size += snprintf(core_report_line, MAX_CORE_REPORT_LEN,
					"WM%u: %d.%d[%%]\n",
					id,
					vha->stats.wm_stats[id].utilization / 10,
					vha->stats.wm_stats[id].utilization % 10);
			strcat(utilization_per, core_report_line);
		}
		mutex_unlock(&vha->lock);
		return simple_read_from_buffer(buf, len, ppos, utilization_per, size);
	}
#undef MAX_CORE_REPORT_LEN
#undef MAX_REPORT_LEN
	return ret;
}

static const struct file_operations vha_cnn_utilization_per_core_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_cnn_utilization_per_core_read,
};

/* Last processed WL stats. */
static ssize_t vha_wl_last_stats_read(struct file *file,
		char __user *buf, size_t len, loff_t *ppos)
{
#define MAX_WL_REPORT_LEN (16 * 30 + 11)
	struct vha_dev *vha = file->private_data;
	int ret;
	char wl_stats_txt[MAX_WL_REPORT_LEN] = "";

	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		size_t size = 0;

		size = snprintf(wl_stats_txt, MAX_WL_REPORT_LEN,
				"cycles: %llu\n"
				"LOCM rd trans: %u\n"
				"LOCM wr trans: %u\n"
				"LOCM mwr trans: %u\n"
				"SOCM rd trans: %u\n"
				"SOCM wr trans: %u\n"
				"SOCM mwr trans: %u\n"
				"DDR rd trans: %u\n"
				"DDR wr trans: %u\n"
				"DDR mwr trans: %u\n"
				"LOCM read words: %u\n"
				"LOCM write words: %u\n"
				"SOCM read words: %u\n"
				"SOCM write words: %u\n"
				"DDR read words: %u\n"
				"DDR write words: %u\n",
				vha->stats.cnn_last_cycles,
				vha->stats.last_mem_stats.locm_rd_transactions,
				vha->stats.last_mem_stats.locm_wr_transactions,
				vha->stats.last_mem_stats.locm_mwr_transactions,
				vha->stats.last_mem_stats.socm_rd_transactions,
				vha->stats.last_mem_stats.socm_wr_transactions,
				vha->stats.last_mem_stats.socm_mwr_transactions,
				vha->stats.last_mem_stats.ddr_rd_transactions,
				vha->stats.last_mem_stats.ddr_wr_transactions,
				vha->stats.last_mem_stats.ddr_mwr_transactions,
				vha->stats.last_mem_stats.locm_rd_words,
				vha->stats.last_mem_stats.locm_wr_words,
				vha->stats.last_mem_stats.socm_rd_words,
				vha->stats.last_mem_stats.socm_wr_words,
				vha->stats.last_mem_stats.ddr_rd_words,
				vha->stats.last_mem_stats.ddr_wr_words);
		mutex_unlock(&vha->lock);
		return simple_read_from_buffer(buf, len, ppos, wl_stats_txt, size);
	}
#undef MAX_WL_REPORT_LEN
	return ret;
}

static const struct file_operations vha_wl_last_stats_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_wl_last_stats_read,
};

/* Scheduling stats. */
static ssize_t vha_scheduling_stats_read(struct file *file,
		char __user *buf, size_t len, loff_t *ppos)
{
#define MAX_PRI_SCHED_REPORT_LEN 40
#define MAX_SCHED_REPORT_LEN ((VHA_MAX_PRIORITIES + 1) * MAX_PRI_SCHED_REPORT_LEN)
	struct vha_dev *vha = file->private_data;
	int ret;
	char sched_stats_txt[MAX_SCHED_REPORT_LEN] = "";
	char sched_pri_txt[MAX_PRI_SCHED_REPORT_LEN] = "";
	uint8_t pri;

	ret = mutex_lock_interruptible(&vha->lock);
	if (!ret) {
		size_t size = 0;

		size = snprintf(sched_stats_txt, MAX_SCHED_REPORT_LEN,
				"mean time from submit to kick [ns]:\n");
		for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
			size += snprintf(sched_pri_txt, MAX_PRI_SCHED_REPORT_LEN,
					"priority %u: %10llu\n",
					pri, vha->stats.sched_stats.mt_submit_to_kick_ns[pri]);
			strcat(sched_stats_txt, sched_pri_txt);
		}
		mutex_unlock(&vha->lock);
		return simple_read_from_buffer(buf, len, ppos, sched_stats_txt, size);
	}
#undef MAX_PRI_SCHED_REPORT_LEN
#undef MAX_SCHED_REPORT_LEN
	return ret;
}

static const struct file_operations vha_scheduling_stats_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_scheduling_stats_read,
};

static ssize_t vha_sched_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
#define MAX_ENTRY_LEN 8
	struct vha_dev *vha = file->private_data;
	char entries[VHA_MC_SCHED_SEQ_LEN_MAX * MAX_ENTRY_LEN] = { 0 };
	char entry[MAX_ENTRY_LEN] = { 0 };
	char *str = entries;
	size_t size = 0;
	int i;

	for (i = 0; i < vha->scheduling_sequence_len; i++) {
		size += snprintf(entry, MAX_ENTRY_LEN, "0x%04x%c",
				vha->scheduling_sequence[i],
				i == vha->scheduling_sequence_len - 1 ? '\n' : ',');
		str = strncat(str, entry, MAX_ENTRY_LEN);
	}
#undef MAX_ENTRY_LEN

	return simple_read_from_buffer(buf, len, ppos, entries, size);
}

static ssize_t vha_sched_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	char *str, *str_aux, *s;
	int ret, i = 0;

	/* +1 keeps the user data NUL terminated for the parsing below */
	str = kzalloc(len + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	if (copy_from_user(str, buf, len)) {
		ret = -EFAULT;
		goto exit;
	}

	str_aux = str;
	/* Expected format 0x0003, 0x010c, 0x000A ...
	 * Zero value stops parsing */
	while ((s = strsep(&str_aux, ",")) != NULL) {
		uint16_t entry;

		ret = kstrtou16(s, 16, &entry);
		if (ret)
			goto exit;
		if (entry == 0)
			break;
		if (i < VHA_MC_SCHED_SEQ_LEN_MAX)
			vha->scheduling_sequence[i++] = entry;
		else {
			ret = -EINVAL;
			goto exit;
		}
	}
	if (!vha_dev_dbg_params_check(vha)) {
		ret = -EINVAL;
		goto exit;
	}
	vha->scheduling_sequence_len = i;
	vha->scheduling_counter = 0;
	kfree(str);
	return len;
exit:
	vha->scheduling_sequence_len = 0;
	kfree(str);
	return ret;
}

static const struct file_operations vha_sched_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = vha_sched_read,
	.write = vha_sched_write,
};
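
/*
 * Write-side usage sketch (format taken from the parser above; the node
 * name is a placeholder, as the debugfs binding lives in vha_dbg_init()):
 * a comma-separated list of hex entries, a zero entry ending the parse:
 *
 *   echo "0x0003,0x010c,0x000a" > /sys/kernel/debug/vha0/sched_seq
 */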
  1320. static ssize_t vha_stalling_read(struct file *file, char __user *buf,
  1321. size_t len, loff_t *ppos)
  1322. {
  1323. #define MAX_STALLING_DATA_TXT_LEN 20
  1324. struct vha_dev *vha = file->private_data;
  1325. char stalling_str[MAX_STALLING_DATA_TXT_LEN] = { 0 };
  1326. size_t size = 0;
  1327. size = snprintf(stalling_str, MAX_STALLING_DATA_TXT_LEN, "0x%04x,0x%08x\n",
  1328. vha->stalling_sysbus_host_stall_ratio,
  1329. vha->stalling_membus_sys_stall_ratio);
  1330. #undef MAX_STALLING_DATA_TXT_LEN
  1331. return simple_read_from_buffer(buf, len, ppos, stalling_str, size);
  1332. }
static ssize_t vha_stalling_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct vha_dev *vha = file->private_data;
	char *stalling_str, *stalling_str_aux, *stalling_token;
	uint32_t stalling_data = 0;
	int ret;

	/* Allocate one extra byte so the user data is always NUL terminated
	 * for strsep()/kstrtou32() below. */
	stalling_str = kzalloc(len + 1, GFP_KERNEL);
	if (!stalling_str)
		return -ENOMEM;

	if (copy_from_user(stalling_str, buf, len)) {
		ret = -EFAULT;
		goto exit;
	}
	stalling_str_aux = stalling_str;

	/* Expected format: 0x0000,0x00000000 */
	if ((stalling_token = strsep(&stalling_str_aux, ",")) != NULL) {
		ret = kstrtou32(stalling_token, 16, &stalling_data);
		if (ret)
			goto exit;
		vha->stalling_sysbus_host_stall_ratio = stalling_data;
	}
	if ((stalling_token = strsep(&stalling_str_aux, ",")) != NULL) {
		ret = kstrtou32(stalling_token, 16, &stalling_data);
		if (ret)
			goto exit;
		vha->stalling_membus_sys_stall_ratio = stalling_data;
	}

	kfree(stalling_str);
	return len;
exit:
	vha->stalling_sysbus_host_stall_ratio = 0;
	vha->stalling_membus_sys_stall_ratio = 0;
	kfree(stalling_str);
	return ret;
}
static const struct file_operations vha_stalling_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = vha_stalling_read,
	.write = vha_stalling_write,
};
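/*
 * Usage sketch for the matching "stalling" node (same caveats as
 * scheduling_sequence above): two hex ratios, comma separated, printed
 * back by the read handler as 0x%04x,0x%08x:
 *
 *   echo "0x0004,0x00000010" > \
 *       /sys/kernel/debug/<misc-name>/FUNCT_CTRL/stalling
 *
 * The first token sets the sysbus-host stall ratio, the second the
 * membus-sys stall ratio; any parse failure resets both ratios to 0.
 */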
#endif
void vha_dbg_init(struct vha_dev *vha)
{
	struct vha_dbgfs_ctx *ctx = devm_kzalloc(vha->dev,
			sizeof(struct vha_dbgfs_ctx), GFP_KERNEL);

	if (!ctx) {
		dev_err(vha->dev,
				"%s: Out of memory when creating debugfs context!\n",
				__func__);
		return;
	}

	/* Create userspace node */
	ctx->debugfs_dir = debugfs_create_dir(vha->miscdev.name, NULL);
	if (!ctx->debugfs_dir) {
		dev_warn(vha->dev,
				"%s: Probably debugfs not enabled in this kernel!\n",
				__func__);
		return;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)
#define VHA_DBGFS_CREATE_(_type_, _name_, _vha_dev_member_, flags, dir) \
	{ \
		struct dentry *dentry; \
		debugfs_create_##_type_(_name_, \
				(flags), ctx->dir, \
				&vha->_vha_dev_member_); \
		dentry = debugfs_lookup(_name_, ctx->dir); \
		if (!dentry) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} else { \
			dput(dentry); \
		} \
	}
#else
#define VHA_DBGFS_CREATE_(_type_, _name_, _vha_dev_member_, flags, dir) \
	{ \
		if (!debugfs_create_##_type_(_name_, \
				(flags), ctx->dir, \
				&vha->_vha_dev_member_)) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} \
	}
#endif
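/*
 * Two variants of VHA_DBGFS_CREATE_ are needed because, from around the
 * 5.7 cutoff used above, the debugfs_create_<type> helpers no longer
 * return a dentry that can be checked for failure; creation is instead
 * verified after the fact with debugfs_lookup(), whose reference must be
 * dropped again with dput().
 */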
#define VHA_DBGFS_CREATE_RO(_type_, _name_, _vha_dev_member_, dir) \
	VHA_DBGFS_CREATE_(_type_, _name_, _vha_dev_member_, S_IRUGO, dir)
#define VHA_DBGFS_CREATE_RW(_type_, _name_, _vha_dev_member_, dir) \
	VHA_DBGFS_CREATE_(_type_, _name_, _vha_dev_member_, S_IWUSR|S_IRUGO, dir)
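/*
 * Expansion sketch: a call such as
 *   VHA_DBGFS_CREATE_RO(u32, "core_freq_khz", freq_khz, debugfs_dir);
 * pastes into (5.7+ variant, lookup/dput check omitted here)
 *   debugfs_create_u32("core_freq_khz", S_IRUGO, ctx->debugfs_dir,
 *                      &vha->freq_khz);
 * i.e. a read-only (0444) node backed directly by the vha_dev member,
 * while the _RW variant adds owner write permission (S_IWUSR).
 */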
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)
#define CTX_DBGFS_CREATE_RW(_type_, _name_, _ctx_dev_member_, dir) \
	{ \
		struct dentry *dentry; \
		debugfs_create_##_type_(_name_, \
				S_IWUSR|S_IRUGO, ctx->dir, \
				&ctx->_ctx_dev_member_); \
		dentry = debugfs_lookup(_name_, ctx->dir); \
		if (!dentry) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} else { \
			dput(dentry); \
		} \
	}
#else
#define CTX_DBGFS_CREATE_RW(_type_, _name_, _ctx_dev_member_, dir) \
	{ \
		if (!debugfs_create_##_type_(_name_, \
				S_IWUSR|S_IRUGO, ctx->dir, \
				&ctx->_ctx_dev_member_)) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} \
	}
#endif
#define VHA_DBGFS_CREATE_FILE(_perm_, _name_, _fops_) \
	{ \
		if (!debugfs_create_file(_name_, \
				_perm_, ctx->debugfs_dir, vha, \
				&vha_##_fops_##_fops)) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} \
	}
#define VHA_DBGFS_CREATE_FILE_IN_DIR(_perm_, _name_, _fops_, dir) \
	{ \
		if (!debugfs_create_file(_name_, \
				_perm_, ctx->dir, vha, \
				&vha_##_fops_##_fops)) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} \
	}
#define CTX_DBGFS_CREATE_FILE(_perm_, _name_, _fops_) \
	{ \
		if (!debugfs_create_file(_name_, \
				_perm_, ctx->debugfs_dir, &ctx->_fops_, \
				&vha_##_fops_##_fops)) { \
			dev_warn(vha->dev, \
					"%s: failed to create %s dbg file!\n", \
					__func__, _name_); \
		} \
	}
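/*
 * Note the token pasting above: CTX_DBGFS_CREATE_FILE reuses its _fops_
 * argument twice, once as the ctx member passed as the file's private
 * data and once as the vha_<name>_fops suffix. So
 *   CTX_DBGFS_CREATE_FILE(S_IRUGO, "regdump", regset);
 * binds &ctx->regset to vha_regset_fops, whereas the VHA_DBGFS_CREATE_FILE
 * variants pass the vha_dev itself as private data.
 */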
	/* and some registers for debug */
	if (vha->reg_base) {
		ctx->regset.regs = vha_regs;
		ctx->regset.nregs = vha->reg_size / sizeof(uint64_t);
		ctx->regset.vha = vha;
		CTX_DBGFS_CREATE_FILE(S_IRUGO, "regdump", regset);
	}
	VHA_DBGFS_CREATE_RO(u32, "core_freq_khz", freq_khz, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "core_state", state, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "core_uptime_ms", stats.uptime_ms, debugfs_dir);
#ifndef CONFIG_HW_MULTICORE
	VHA_DBGFS_CREATE_RO(u64, "core_last_proc_us", stats.last_proc_us, debugfs_dir);
#endif
	VHA_DBGFS_CREATE_RO(u32, "cnn_kicks", stats.cnn_kicks, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "cnn_kicks_queued", stats.cnn_kicks_queued, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "cnn_kicks_completed", stats.cnn_kicks_completed, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "cnn_kicks_cancelled", stats.cnn_kicks_cancelled, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "cnn_kicks_aborted", stats.cnn_kicks_aborted, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "cnn_total_proc_us", stats.cnn_total_proc_us, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "cnn_last_proc_us", stats.cnn_last_proc_us, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "cnn_avg_proc_us", stats.cnn_avg_proc_us, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "cnn_last_est_proc_us", stats.cnn_last_est_proc_us, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u64, "cnn_avg_est_proc_us", stats.cnn_avg_est_proc_us, debugfs_dir);
#ifdef CONFIG_HW_MULTICORE
	VHA_DBGFS_CREATE_RO(u8, "num_cores", hw_props.num_cnn_core_devs, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "socm_bytes", hw_props.socm_size_bytes, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "socm_core_bytes", hw_props.socm_core_size_bytes, debugfs_dir);
#endif
	VHA_DBGFS_CREATE_RO(u32, "locm_bytes", hw_props.locm_size_bytes, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "mem_usage_last", stats.mem_usage_last, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "mmu_usage_last", stats.mmu_usage_last, debugfs_dir);
	VHA_DBGFS_CREATE_RO(u32, "total_failures", stats.total_failures, debugfs_dir);

	if (vha->hw_props.supported.rtm) {
		CTX_DBGFS_CREATE_RW(u64, "rtm_ctrl", rtm_ctrl, debugfs_dir);
		VHA_DBGFS_CREATE_FILE(S_IRUGO, "rtm_data", rtm);
	}
	CTX_DBGFS_CREATE_RW(u64, "ioreg_addr", ioreg_addr, debugfs_dir);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "ioreg_data", ioreg);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "cnn_utilization", cnn_utilization);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "cnn_last_cycles", cnn_last_cycles);
	VHA_DBGFS_CREATE_FILE(S_IWUSR, "stats_reset", stats_reset);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "BVNC", bvnc);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "pri_q_counters", pri_q_counters);
#ifdef CONFIG_HW_MULTICORE
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "cnn_kicks_per_core", cnn_kicks_per_core);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "cnn_utilization_per_core", cnn_utilization_per_core);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "wl_last_stats", wl_last_stats);
	VHA_DBGFS_CREATE_FILE(S_IRUGO, "scheduling_stats", scheduling_stats);
#endif
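	/*
	 * All of the read-only nodes above are plain debugfs attributes and
	 * can simply be read, e.g. (assuming the usual debugfs mount point):
	 *   cat /sys/kernel/debug/<misc-name>/cnn_kicks
	 */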
#ifdef VHA_FUNCT_CTRL
	ctx->funct_ctrl_dir = debugfs_create_dir("FUNCT_CTRL", ctx->debugfs_dir);
	if (ctx->funct_ctrl_dir) {
		VHA_DBGFS_CREATE_RW(u32, "pm_delay", pm_delay, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u8, "mmu_mode", mmu_mode, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u8, "mmu_ctx_default", mmu_ctx_default, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u32, "mmu_page_size", mmu_page_size, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(bool, "mmu_base_pf_test", mmu_base_pf_test, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u32, "mmu_no_map_count", mmu_no_map_count, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u8, "low_latency", low_latency, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u32, "suspend_interval_msec", suspend_interval_msec, funct_ctrl_dir);
		VHA_DBGFS_CREATE_RW(u8, "fault_inject", fault_inject, funct_ctrl_dir);
#ifdef CONFIG_HW_MULTICORE
		VHA_DBGFS_CREATE_FILE_IN_DIR(S_IRUGO, "scheduling_sequence", sched, funct_ctrl_dir);
		VHA_DBGFS_CREATE_FILE_IN_DIR(S_IRUGO, "stalling", stalling, funct_ctrl_dir);
#endif
	}
#endif
#ifdef VHA_EVENT_INJECT
	ctx->event_inject_dir = debugfs_create_dir("EVENT_INJECT", ctx->debugfs_dir);
	if (ctx->event_inject_dir) {
#ifdef CONFIG_HW_MULTICORE
		VHA_DBGFS_CREATE_RW(u64, "VHA_CR_CORE_EVENT", injection.vha_cr_core_event, event_inject_dir);
		VHA_DBGFS_CREATE_RW(u64, "VHA_CR_SYS_EVENT", injection.vha_cr_sys_event, event_inject_dir);
		VHA_DBGFS_CREATE_RW(u64, "VHA_CR_INTERCONNECT_EVENT", injection.vha_cr_interconnect_event, event_inject_dir);
		VHA_DBGFS_CREATE_RW(u64, "VHA_CR_WM_EVENT", injection.vha_cr_wm_event, event_inject_dir);
		VHA_DBGFS_CREATE_RW(u64, "CONF_ERR", injection.conf_err, event_inject_dir);
		VHA_DBGFS_CREATE_RW(u64, "PARITY_POLL_ERR", injection.parity_poll_err_reg, event_inject_dir);
#else
		VHA_DBGFS_CREATE_RW(u64, "VHA_CR_EVENT", injection.vha_cr_event, event_inject_dir);
#endif
	}
#endif /* VHA_EVENT_INJECT */
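/*
 * The EVENT_INJECT nodes above are plain debugfs u64 attributes, so a
 * register-style event mask can be poked in directly, e.g.
 *   echo 0x1 > /sys/kernel/debug/<misc-name>/EVENT_INJECT/VHA_CR_CORE_EVENT
 * (debugfs integer attributes parse with base 0, so 0x prefixes should
 * work). How the injected value is consumed is handled by the event
 * processing code, not shown here.
 */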
#undef CTX_DBGFS_CREATE_FILE
#undef VHA_DBGFS_CREATE_FILE
#undef VHA_DBGFS_CREATE_FILE_IN_DIR
#undef CTX_DBGFS_CREATE_RW
#undef VHA_DBGFS_CREATE_RO
#undef VHA_DBGFS_CREATE_RW
#undef VHA_DBGFS_CREATE_

#if defined(VHA_SCF) && defined(CONFIG_HW_MULTICORE)
	vha_sc_dbg_init(vha, ctx->debugfs_dir);
#endif /* VHA_SCF */

	vha->dbgfs_ctx = (void *)ctx;
}
void vha_dbg_deinit(struct vha_dev *vha)
{
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
#if defined(VHA_SCF) && defined(CONFIG_HW_MULTICORE)
	vha_sc_dbg_deinit(vha);
#endif /* VHA_SCF */
	/* ctx->debugfs_dir==NULL is safe */
	debugfs_remove_recursive(ctx->debugfs_dir);
}
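/* Note: despite the "sysfs" in its name, the accessor below returns the
 * debugfs directory created in vha_dbg_init(), presumably so other parts
 * of the driver can attach their own entries under it. */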
struct dentry *vha_dbg_get_sysfs(struct vha_dev *vha)
{
	struct vha_dbgfs_ctx *ctx =
			(struct vha_dbgfs_ctx *)vha->dbgfs_ctx;
	return ctx->debugfs_dir;
}
#else // CONFIG_DEBUG_FS
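/* Stubs: keep the external API linkable and harmlessly callable when
 * debugfs support is compiled out. */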
void vha_dbg_init(struct vha_dev *vha) {}
void vha_dbg_deinit(struct vha_dev *vha) {}
struct dentry *vha_dbg_get_sysfs(struct vha_dev *vha) { return NULL; }
int vha_dbg_create_hwbufs(struct vha_session *session) { return 0; }
void vha_dbg_destroy_hwbufs(struct vha_session *session) {}
int vha_dbg_alloc_hwbuf(struct vha_session *session, size_t size,
		struct vha_buffer **buffer, const char *name, bool map) { return 0; }
void vha_dbg_hwbuf_cleanup(struct vha_session *session,
		struct vha_buffer *buf) {}
#endif // CONFIG_DEBUG_FS