vha_dev.c

/*
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include <uapi/vha.h>
#include "vha_common.h"
#include "vha_plat.h"
#include "vha_regs.h"

#if defined(CFG_SYS_VAGUS)
#include <hwdefs/nn_sys_cr_vagus.h>
#endif

#define ERR_EVENT_DESC(b) VHA_CR_OS(VHA_EVENT_STATUS_VHA_##b##_EN), __stringify(b)

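/* Mask off and clear all event sources before powering down or resetting,
 * so that no stale interrupt can fire while the core is being torn down. */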
static void vha_dev_disable_events(struct vha_dev *vha)
{
	img_pdump_printf("-- Clear CNN events\n");
	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_CLEAR));
	img_pdump_printf("-- Disable CNN events\n");
	IOWRITE64_PDUMP(0, VHA_CR_OS(VHA_EVENT_ENABLE));
	/* Clear the START bit!
	 * Note: It is stated that writing 0 to this bit has no effect,
	 * however in error cases, some hw blocks may start
	 * to process previous requests after turning on the clocks
	 * which were previously disabled */
	IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_CONTROL));
	/* Disable core events */
	img_pdump_printf("-- Disable CORE events\n");
	IOWRITE64_PDUMP(0, VHA_CR_OS(VHA_EVENT_ENABLE));
}

__maybe_unused
static void vha_dev_enable_clocks(struct vha_dev *vha)
{
	uint64_t __maybe_unused sys_clks = 0;
	uint64_t __maybe_unused main_clks = 0;

	/* Always AUTO gating when needed */
	sys_clks = VHA_SYS_CLOCKS_DEFAULT(AUTO);
	main_clks = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
	/* Enable sys clocks! */
	img_pdump_printf("-- Enable SYS clocks\n");
	IOWRITE64_PDUMP(sys_clks, VHA_CR_SYS_CLK_CTRL0);
	/* Enable main clocks! */
	img_pdump_printf("-- Enable MAIN clocks\n");
	IOWRITE64_PDUMP(main_clks, VHA_CR_CLK_CTRL0);
#if defined(CFG_SYS_VAGUS)
	img_pdump_printf("-- Enable NN_SYS clocks\n");
	IOWRITE64_PDUMP_REGIO(NN_SYS_CR_CLK_CTRL_MODE_AUTO,
			NN_SYS_CR_BASE, NN_SYS_CR_CLK_CTRL, "REG_NNSYS");
#endif
}

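/* Arm the core for processing: wait for it to become idle, then enable and
 * clear all events and bring the status/PERF registers to a known state. */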
static void vha_dev_ready(struct vha_dev *vha)
{
#ifndef CONFIG_VHA_DUMMY
	if (!vha->is_ready)
		return;
#endif
	dev_dbg(vha->dev, "%s\n", __func__);

	vha_dev_wait(vha);

	/* Finally enable ALL events */
	img_pdump_printf("-- Enable ALL events\n");
	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_ENABLE));
	img_pdump_printf("-- Clear ALL events\n");
	IOWRITE64_PDUMP(VHA_EVNTS_DEFAULT, VHA_CR_OS(VHA_EVENT_CLEAR));
#ifdef HW_AX2
	img_pdump_printf("-- Clear CNN status\n");
	IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_STATUS));
#endif
	img_pdump_printf("-- Clear MMU fault status\n");
	IOWRITE64_PDUMP(0, VHA_CR_OS(MMU_FAULT_STATUS1));
	img_pdump_printf("-- Clear SLC debug status\n");
	IOWRITE64_PDUMP(0, VHA_CR_SLC_STATUS_DEBUG);
	img_pdump_printf("-- Reset PERF counters\n");
	IOWRITE64_PDUMP(0, VHA_CR_PERF_RESET_FULL);
}

__maybe_unused
static int vha_dev_reset(struct vha_dev *vha)
{
	img_pdump_printf("-- Set RESET bits\n");
#if defined(CFG_SYS_VAGUS)
	IOWRITE64_PDUMP_REGIO(NN_SYS_CR_RESET_CTRL_NN_SYS_EN,
			NN_SYS_CR_BASE, NN_SYS_CR_RESET_CTRL, "REG_NNSYS");
#endif
	/* Perform reset procedure */
	IOWRITE64_PDUMP(VHA_RESET_DEFAULT, VHA_CR_RESET_CTRL);

	/* Poll for reset deassertion
	 * count=16, delay=256 cycles
	 */
	img_pdump_printf("-- Wait for RESET deassertion\n");
#if defined(CFG_SYS_VAGUS)
	IOPOLL64_PDUMP_REGIO(0, 16, 256, NN_SYS_CR_RESET_CTRL_MASKFULL,
			NN_SYS_CR_BASE, NN_SYS_CR_RESET_CTRL, "REG_NNSYS");
#endif
	IOPOLL64_PDUMP(0, 16, 256, VHA_CR_RESET_CTRL_MASKFULL,
			VHA_CR_RESET_CTRL);

	return 0;
}

__maybe_unused
static int vha_dev_disable_clocks(struct vha_dev *vha)
{
	/* If auto gating was turned on, wait for clocks idle state */
	img_pdump_printf("-- Wait for clocks IDLE state\n");
	IOPOLL64_PDUMP(0, 1000, 1000,
			VHA_CR_CLK_STATUS0_MASKFULL,
			VHA_CR_CLK_STATUS0);
#if defined(CFG_SYS_VAGUS)
	IOPOLL64_PDUMP_REGIO(0, 100, 1000, NN_SYS_CR_CLK_STATUS_MASKFULL,
			NN_SYS_CR_BASE, NN_SYS_CR_CLK_STATUS, "REG_NNSYS");
#endif
	/* Wait for MMU, CCM, RDI, XBAR IDLE state */
	img_pdump_printf("-- Wait for memory bus interface IDLE state\n");
	IOPOLL64_PDUMP(0xFFFF, 100, 1000, VHA_CR_SLC_IDLE_MASKFULL,
			VHA_CR_SLC_IDLE);

	/* Finally disable clocks */
	img_pdump_printf("-- Disable MAIN clocks\n");
	IOWRITE64_PDUMP(0, VHA_CR_CLK_CTRL0); /* main */
	img_pdump_printf("-- Disable SYS clocks\n");
	IOWRITE64_PDUMP(0, VHA_CR_SYS_CLK_CTRL0); /* sys */
#if defined(CFG_SYS_VAGUS)
	img_pdump_printf("-- Disable NN_SYS clocks\n");
	IOWRITE64_PDUMP_REGIO(0, NN_SYS_CR_BASE,
			NN_SYS_CR_CLK_CTRL, "REG_NNSYS"); /* nn_sys */
#endif
	return 0;
}

/* start the device */
int vha_dev_start(struct vha_dev *vha)
{
	int ret = 0;

	/* Cancel APM request if new inference comes */
	cancel_delayed_work(&vha->apm_dworks[0].dwork);

	if (vha->state == VHA_STATE_ON)
		return 0; /* not an error */

	dev_dbg(vha->dev, "%s\n", __func__);

/* Assuming OS0 is the privileged one */
#if _OSID_ == 0 /* For HW_AX2 this is always true */
	pm_runtime_get_sync(vha->dev);
	/////////////// POWER ON //////////////////////////
	img_pdump_printf("-- POWER_ON_BEGIN\n");

	/* Prepare device ... */
	ret = vha_dev_prepare(vha);
	if (ret) {
		dev_err(vha->dev, "%s: Error preparing device!\n", __func__);
		return ret;
	}
	/* Reset device */
	ret = vha_dev_reset(vha);
	if (ret) {
		dev_err(vha->dev, "%s: Error resetting device!\n", __func__);
		return ret;
	}
	/* Enable device clocks */
	vha_dev_enable_clocks(vha);
	img_pdump_printf("-- POWER_ON_END\n");
	/* Call device specific setup */
	vha_dev_setup(vha);
	/////////////////////////////////////////////////////
#endif

	vha_dev_ready(vha);
	vha->state = VHA_STATE_ON;
	/* Remember the time hw is powered on */
	GETNSTIMEOFDAY(&vha->stats.hw_start);

	return ret;
}

/* stop the device */
int vha_dev_stop(struct vha_dev *vha, bool reset)
{
	int ret = 0;

	if (vha->state == VHA_STATE_OFF)
		return 0; /* not an error */

	/* Cancel APM request if we are about to power off the core */
	cancel_delayed_work(&vha->apm_dworks[0].dwork);

	dev_dbg(vha->dev, "%s\n", __func__);

	/* Disable events at first */
	vha_dev_disable_events(vha);

	vha->is_ready = false;

/* Assuming OS0 is the privileged one */
#if _OSID_ == 0 /* For HW_AX2 */
	/////////////// POWER_OFF //////////////////////////
	img_pdump_printf("-- POWER_OFF_BEGIN\n");
	/* Reset core in case of error or pending inference */
	if (reset) {
		/* ensure that clocks are set to AUTO before reset */
		vha_dev_enable_clocks(vha);
		ret = vha_dev_reset(vha);
	}
	if (ret)
		dev_warn(vha->dev,
				"%s: Problem with resetting device!\n",
				__func__);

	/* Disable device clocks */
	ret = vha_dev_disable_clocks(vha);
	if (ret)
		dev_warn(vha->dev,
				"%s: Problem with disabling clocks!\n",
				__func__);

	img_pdump_printf("-- POWER_OFF_END\n");
	/////////////////////////////////////////////////////

	if (reset) {
		pm_runtime_mark_last_busy(vha->dev);
		pm_runtime_put_sync_autosuspend(vha->dev);
	} else {
		pm_runtime_put_sync(vha->dev);
	}
#endif

	vha->state = VHA_STATE_OFF;
	/* Update the up time of the core */
	if (!vha->do_calibration) {
		uint64_t tmp = 0;
		struct TIMESPEC now;

		GETNSTIMEOFDAY(&now);
		if (get_timespan_us(&vha->stats.hw_start, &now, &tmp)) {
			do_div(tmp, 1000UL);
			vha->stats.uptime_ms += tmp;
			if (vha->stats.uptime_ms)
				vha_update_utilization(vha);
			else
				dev_dbg(vha->dev,
					"%s Too short execution time to calculate utilization!\n",
					__func__);
		} else
			WARN_ON(1);
	}

	vha->active_mmu_ctx = VHA_INVALID_ID;

	spin_lock_irq(&vha->irq_lock);
	vha->irq_status = 0;
	vha->irq_count = 0;
	vha->stream_count = 0;
	spin_unlock_irq(&vha->irq_lock);

	return ret;
}

void vha_update_utilization(struct vha_dev *vha)
{
	uint64_t tmp;

	tmp = vha->stats.cnn_total_proc_us;
	do_div(tmp, vha->stats.uptime_ms);
	vha->stats.cnn_utilization = tmp;
}

#ifdef VHA_EVENT_INJECT
/*
 * Inject EVENT_STATUS bits, requested by respective debugfs nodes, to
 * the status register.
 */
static inline void __inject_event_regs(struct vha_dev *vha, uint64_t *event_status)
{
	if (!__EVENT_INJECT())
		return;

	if (*event_status & (1 << VHA_CR_VHA_EVENT_STATUS_TYPE_VHA_CNN0_COMPLETE_SHIFT))
		*event_status |= vha->injection.vha_cr_event;
}
#endif

/* Top half */
irqreturn_t vha_handle_irq(struct device *dev)
{
	struct vha_dev *vha = vha_dev_get_drvdata(dev);
	int ret = IRQ_HANDLED;
	uint64_t event_status;

	if (!vha)
		return IRQ_NONE;

	event_status = IOREAD64(vha->reg_base, VHA_CR_OS(VHA_EVENT_STATUS));
	event_status &= IOREAD64(vha->reg_base, VHA_CR_OS(VHA_EVENT_ENABLE));
	/* On fpga platform it is possible to get
	 * a spurious interrupt when the hw died.
	 * Do not proceed, just throw a warning */
	if (event_status == VHA_DEAD_HW || event_status == ~0) {
		WARN_ONCE(1, "Hardware is dead!");
		return IRQ_NONE;
	}

#ifdef VHA_EVENT_INJECT
	__inject_event_regs(vha, &event_status);
#endif

#ifdef VHA_SCF
	if (vha->hw_props.supported.parity &&
			!vha->parity_disable) {
		bool par_bit = img_mem_calc_parity(event_status &
				~VHA_CR_BITMASK(VHA_EVENT_STATUS_TYPE, PARITY));
		if (par_bit !=
				VHA_CR_GETBITS(VHA_EVENT_STATUS_TYPE, PARITY,
						event_status)) {
			dev_err(dev, "Event status register parity error!\n");
			/* Use the real event to indicate the error */
			event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN);
		}
		/* Clear the PARITY bit - it's not a valid event */
		VHA_CR_CLEARBITS(event_status, VHA_EVENT_STATUS_TYPE, PARITY);
	}
#endif

	if (event_status & VHA_EVNTS_DEFAULT) {
		uint64_t cnn_status;
		uint8_t count;

		/* clear the interrupt:
		 * best not to write pdump in interrupts */
		IOWRITE64(vha->reg_base, VHA_CR_OS(VHA_EVENT_CLEAR),
				event_status & VHA_EVNTS_DEFAULT);

		/* Read the CNN status register; the stream count is needed
		 * as a single IRQ may be raised for multiple kicks */
		cnn_status = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_STATUS));

#ifdef VHA_SCF
		if (vha->hw_props.supported.parity &&
				!vha->parity_disable) {
			bool par_bit = img_mem_calc_parity(cnn_status &
					~VHA_CR_BITMASK_OS(CNN_STATUS, PARITY));
			if (par_bit != VHA_CR_GETBITS_OS(CNN_STATUS, PARITY, cnn_status)) {
				dev_err(dev, "CNN status register parity error!\n");
				/* Use the real event to indicate the error */
				event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_PARITY_ERROR_EN);
			}
		}
#endif
		if (vha->is_ready) {
			/* Post check for AXI bus errors */
			uint64_t ace_status = IOREAD64(vha->reg_base, VHA_CR_ACE_STATUS);

			if (ace_status) {
				dev_err(vha->dev, "AXI bus protocol error: %#llx\n",
						ace_status);
				/* Use AXI error event to indicate that */
				event_status |= VHA_CR_OS(VHA_EVENT_STATUS_VHA_AXI_ERROR_EN);
			}
		}

		/* Read the stream count as a single IRQ may be raised for multiple kicks */
		count = VHA_CR_GETBITS_OS(CNN_STATUS, STREAM_COUNT, cnn_status);

		spin_lock(&vha->irq_lock);
		/* store the status to be processed later */
		if (vha->do_calibration ||
				vha_is_busy(vha)) {
			vha->irq_status |= event_status;

			if (vha->low_latency == VHA_LL_SELF_KICK)
				/* Two separate IRQs may be raised for multiple kicks */
				vha->irq_count += count - vha->stream_count;
			else
				/* Only single IRQ may be raised otherwise ... */
				vha->irq_count = count - vha->stream_count;

			vha->stream_count = count;
			/* Record hw processing end timestamps */
			vha->stats.hw_proc_end_prev = vha->stats.hw_proc_end;
			GETNSTIMEOFDAY(&vha->stats.hw_proc_end);
		} else {
			/* Command may have been aborted before this handler is executed */
			vha->irq_status = 0;
			vha->irq_count = 0;
			vha->stream_count = 0;
		}
		spin_unlock(&vha->irq_lock);

		ret = IRQ_WAKE_THREAD;
	} else
		return IRQ_NONE;

	dev_dbg(dev, "IRQ 0x%08llx\n", event_status);

	return ret;
}

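/* Roll back any command that was kicked or queued on the hw but not completed:
 * clear the hw pending/queued slots and return the kicks to the priority
 * queue counters. The commands themselves stay on their session lists, so
 * they will be rescheduled later. Returns true if anything was in flight. */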
static bool vha_rollback_cnn_cmds(struct vha_dev *vha)
{
	bool processing = false;

	/* Not processed commands are still on the pending list
	 * of each session, so just mark the hw pending lists as empty */
	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
		struct vha_cmd *pendcmd = vha->pendcmd[VHA_CNN_CMD].cmd;

		pendcmd->in_hw = false;
		pendcmd->queued = false;
		pendcmd->rolled_back = true;
		processing = true;
		vha->stats.cnn_kicks_aborted += pendcmd->subseg_current;
		vha->stats.cnn_kicks_completed -= pendcmd->subsegs_completed;
		vha->pri_q_counters[pendcmd->user_cmd.priority] += pendcmd->subseg_current;
		pendcmd->subseg_current = 0;
		pendcmd->subsegs_completed = 0;
		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
	}
	/* Low latency mode: roll back the queued command as well */
	if (vha->queuedcmd[VHA_CNN_CMD].cmd) {
		struct vha_cmd *queuedcmd = vha->queuedcmd[VHA_CNN_CMD].cmd;

		queuedcmd->in_hw = false;
		queuedcmd->queued = false;
		queuedcmd->rolled_back = true;
		vha->stats.cnn_kicks_aborted += queuedcmd->subseg_current;
		vha->stats.cnn_kicks_completed -= queuedcmd->subsegs_completed;
		vha->pri_q_counters[queuedcmd->user_cmd.priority] += queuedcmd->subseg_current;
		queuedcmd->subseg_current = 0;
		queuedcmd->subsegs_completed = 0;
		vha->queuedcmd[VHA_CNN_CMD].cmd = NULL;
	}
	dev_dbg(vha->dev, "%s: (%d)\n", __func__, processing);

	return processing;
}

bool vha_rollback_cmds(struct vha_dev *vha)
{
	return vha_rollback_cnn_cmds(vha);
}

static bool vha_is_processing(struct vha_dev *vha)
{
	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
}

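/* Used on the suspend path: stop the hw (with reset if something was being
 * processed) and roll back in-flight commands so they can be resubmitted
 * later. */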
int vha_dev_suspend_work(struct vha_dev *vha)
{
	bool processing = false;
	int ret;

	/* Check if anything is being processed right now. */
	processing = vha_is_processing(vha);
	/* Forcing hardware disable. */
	ret = vha_dev_stop(vha, processing);
	/* Rollback commands after hw is stopped. */
	vha_rollback_cmds(vha);

	return ret;
}

/*
 * handles the command already processed by the hw.
 */
static bool vha_handle_cmd(struct vha_dev *vha, int status)
{
	struct vha_cmd *cmd = NULL;

	cmd = vha->pendcmd[VHA_CNN_CMD].cmd;
	if (unlikely(!cmd)) {
		dev_dbg(vha->dev, "No command. Probably it has been aborted\n");
		return false;
	}

	{
		uint64_t proc_time = 0;
		struct TIMESPEC *from = &cmd->hw_proc_start;
		struct TIMESPEC *to = &vha->stats.hw_proc_end;

		/* If the previous command finished after this one was kicked
		 * (low latency), measure from the previous end timestamp */
		if (TIMESPEC_COMPARE(&vha->stats.hw_proc_end_prev, &cmd->hw_proc_start) >= 0)
			from = &vha->stats.hw_proc_end_prev;

		if (get_timespan_us(from, to, &proc_time)) {
			vha->stats.last_proc_us = proc_time;
		} else {
			vha->stats.last_proc_us = 0;
		}
		/* Update cnn stats */
		vha_cnn_update_stats(vha);
		/* Update cmd stats. */
		cmd->proc_us += vha->stats.cnn_last_proc_us;
		cmd->hw_cycles += vha->stats.cnn_last_cycles;
	}

	/* Mark this subsegment as completed. */
	if (status == 0)
		vha->pendcmd[VHA_CNN_CMD].cmd->subsegs_completed++;

	/* If this isn't the last subsegment, just return to process the next one. */
	if ((cmd->subseg_current < VHA_CMD_SUBSEG_NUM(cmd)) && (status == 0)) {
		vha->pendcmd[VHA_CNN_CMD].cmd->in_hw = false;
		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
		return true;
	}

	vha_cnn_cmd_completed(cmd, status);

	if (status) {
		/* Rollback any queued command ... */
		vha_rollback_cnn_cmds(vha);
		/* Adjust for just rolled back pending cmd. */
		vha->pri_q_counters[cmd->user_cmd.priority] -= VHA_CMD_SUBSEG_NUM(cmd);
		/* Notify immediately current command */
		vha_cmd_notify(cmd);

		return false;
	}

	if (vha->queuedcmd[VHA_CNN_CMD].cmd)
		vha->pendcmd[VHA_CNN_CMD].cmd = vha->queuedcmd[VHA_CNN_CMD].cmd;
	else
		vha->pendcmd[VHA_CNN_CMD].cmd = NULL;
	vha->queuedcmd[VHA_CNN_CMD].cmd = NULL;

	dev_dbg(vha->dev,
			"%s: %p -> new pending %p\n",
			__func__, cmd, vha->pendcmd[VHA_CNN_CMD].cmd);

	vha_cmd_notify(cmd);

	return true;
}

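/* Kick the command queued behind the pending one. Skipped when low latency
 * is disabled, when the hw self-kicks, or when the command has not actually
 * been queued yet. */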
static void vha_do_queued_cmd(struct vha_dev *vha)
{
	struct vha_cmd *cmd, *pend;

	cmd = vha->queuedcmd[VHA_CNN_CMD].cmd;

	dev_dbg(vha->dev,
			"%s: queued %p pending %p\n",
			__func__, cmd, vha->pendcmd[VHA_CNN_CMD].cmd);

	if (!cmd || (cmd &&
			((vha->low_latency == VHA_LL_DISABLED ||
			vha->low_latency == VHA_LL_SELF_KICK) ||
			!cmd->queued))) {
		dev_dbg(vha->dev, "%s: skipping!\n", __func__);
		return;
	}

	/* store actual pending command as it will be modified */
	pend = vha->pendcmd[VHA_CNN_CMD].cmd;

	/* at this point we should be able to process the cmd */
	vha_do_cnn_cmd(cmd);

	/* restore pending */
	vha->pendcmd[VHA_CNN_CMD].cmd = pend;
}

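/* Translate error bits from the event status register into a human readable
 * log and a POSIX error code, and notify any registered error observer. */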
static int vha_report_failure(struct vha_dev *vha, uint64_t status,
		const struct vha_biterr bits[], int bits_size)
{
	int error = 0;
	int i;
	int cmdid = -1;
	int sesid = -1;

	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
		cmdid = vha->pendcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id;
		sesid = vha->pendcmd[VHA_CNN_CMD].cmd->session->id;
	}

	if (vha_observers.error)
		vha_observers.error(vha->id, sesid, cmdid, status);

	/* event status in human readable form */
	for (i = 0; i < bits_size; i++) {
		if (status & bits[i].b) {
			dev_err(vha->dev,
					" event status: %s\n",
					bits[i].s);
			/* convert from register bits into POSIX errno
			 * if multiple errors, then arbitrary errno choice */
			error = bits[i].e;
		}
	}

	return error;
}

/* if the vha event register reports CNN events, handle them here */
static int vha_handle_cnn_event(struct vha_dev *vha, uint64_t event_status)
{
	int err = 0;

	if (vha_check_calibration(vha))
		return 0;

	if (event_status & VHA_CNN_ERR_EVNTS) {
		static const struct vha_biterr err_bits[] = {
			{-ETIMEDOUT, ERR_EVENT_DESC(CNN0_MEM_WDT)},
#ifdef HW_AX2
			{-ETIMEDOUT, ERR_EVENT_DESC(CNN0_WDT)},
#endif
			{-EIO, ERR_EVENT_DESC(CNN0_ERROR)}
		};

		err = vha_report_failure(vha,
				event_status, err_bits, ARRAY_SIZE(err_bits));

		vha_cnn_dump_status(vha);
	}

	/* Poke the hw if there was already
	 * a command queued in the hw */
	if (!err)
		vha_do_queued_cmd(vha);

	/* Handle actual command */
	if (vha_handle_cmd(vha, err) == false)
		err = -ENOENT;

	return err;
}

#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
/* Simulating hw execution time by scheduling this delayed work. */
void vha_dummy_worker(struct work_struct *work)
{
	struct vha_dev *vha = container_of(work, struct vha_dev, dummy_dwork.work);

	mutex_lock(&vha->lock);

	if (vha->pendcmd[VHA_CNN_CMD].cmd) {
		/* Record hw processing end timestamps */
		vha->stats.hw_proc_end_prev = vha->stats.hw_proc_end;
		GETNSTIMEOFDAY(&vha->stats.hw_proc_end);
		/* Handle current pending command */
		vha_handle_cnn_event(vha, VHA_CNN_CMPLT_EVNT);
		vha->stats.cnn_kicks_completed++;
		/* Schedule following commands */
		vha_chk_cmd_queues(vha, true);
	}

	mutex_unlock(&vha->lock);
}
#endif

/* Bottom half */
irqreturn_t vha_handle_thread_irq(struct device *dev)
{
	struct vha_dev *vha = vha_dev_get_drvdata(dev);
	irqreturn_t ret = IRQ_HANDLED;
	uint64_t status;
	uint8_t count, c = 0;
	int err = 0;

	if (!vha)
		return IRQ_NONE;

	mutex_lock(&vha->lock);

#ifdef CONFIG_FAULT_INJECTION
	if (!vha->irq_bh_pid)
		vha->irq_bh_pid = task_pid_nr(current);

	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
		current->make_it_fail = true;
	else
		current->make_it_fail = false;
#endif

	spin_lock_irq(&vha->irq_lock);
	status = vha->irq_status;
	vha->irq_status = 0;
	count = vha->irq_count;
	vha->irq_count = 0;
	if (!count) {
		uint64_t proc_time = 0;

		if (get_timespan_us(&vha->stats.hw_proc_start, &vha->stats.hw_proc_end,
				&proc_time)) {
			vha->stats.last_proc_us = proc_time;
		} else {
			vha->stats.last_proc_us = 0;
		}
	}
	spin_unlock_irq(&vha->irq_lock);

	/* Command may have been aborted before this handler is executed */
	if (!status)
		goto exit;

	/* There can be two inferences already finished in self kick mode,
	 * otherwise only a single inference at a time */
	if ((vha->low_latency == VHA_LL_SELF_KICK && count > 2) ||
			(vha->low_latency != VHA_LL_SELF_KICK && count > 1))
		WARN_ON(1);

	dev_dbg(dev, "%s: status:%llx count:%d\n",
			__func__, status, count);

	do {
		if (status & VHA_CORE_EVNTS) {
			static const struct vha_biterr err_bits[] = {
				{-EIO, ERR_EVENT_DESC(AXI_ERROR)},
				{-EFAULT, ERR_EVENT_DESC(MMU_PAGE_FAULT)},
#ifdef HW_AX3
#ifdef VHA_SCF
				{-EIO, ERR_EVENT_DESC(MMU_PARITY_ERROR)},
				{-EIO, ERR_EVENT_DESC(PARITY_ERROR)},
				{-EIO, ERR_EVENT_DESC(LOCKSTEP_ERROR)},
#endif
				{-ETIMEDOUT, ERR_EVENT_DESC(HL_WDT)},
				{-EIO, ERR_EVENT_DESC(ERROR)}
#endif
			};

#ifdef HW_AX3
			if (status & VHA_EVENT_TYPE(HL_WDT)
					&& vha->is_ready)
				if (vha_check_calibration(vha))
					break;

			if ((status & VHA_CORE_EVNTS) ==
					VHA_EVENT_TYPE(READY)
					&& !vha->is_ready) {
				vha->is_ready = true;
				vha_dev_ready(vha);
				if (vha->do_calibration) {
					vha_cnn_start_calib(vha);
					break;
				} else
					vha_chk_cmd_queues(vha, true);
			}
#endif

			err = vha_report_failure(vha, status,
					err_bits, ARRAY_SIZE(err_bits));

			if (err) {
				dev_err(vha->dev, "NNA hw failure: %llx\n", status);
				dev_err(vha->dev, " CLK_STATUS0:%llx ",
						IOREAD64(vha->reg_base, VHA_CR_CLK_STATUS0));
				dev_err(vha->dev, " VHA_EVENT_STATUS:%llx ", status);
			}

			if (status & VHA_EVENT_TYPE(MMU_PAGE_FAULT))
				/* dump mmu status */
				vha_mmu_status(vha);
		}

		/* If no core level error process cnn events */
		if (!err && status & VHA_CNN_EVNTS)
			err = vha_handle_cnn_event(vha, status);
#ifdef HW_AX3
		else if (status == VHA_EVENT_TYPE(ERROR)) {
			/* Resubmit command next time if no CNN error detected
			 * and only ERROR bit is set.
			 * That means other OS caused the error */
			vha_rollback_cnn_cmds(vha);
		}
#endif
		else if (err && vha->is_ready) { /* Core level error */
			if (vha_handle_cmd(vha, err) == false)
				err = -ENOENT;
		}

		c++;
	} while (c < count && !err);

	if (err) {
		vha->stats.total_failures += count ? count : 1;
		vha_dev_stop(vha, true);
		/* Check queues ... */
		vha_chk_cmd_queues(vha, true);
	} else {
		/* Run in BH context! */
		vha_chk_cmd_queues(vha, false);
	}
	vha->stats.cnn_kicks_completed += count;

exit:
#ifdef CONFIG_FAULT_INJECTION
	if (vha->fault_inject & VHA_FI_IRQ_WORKER)
		current->make_it_fail = false;
#endif
	mutex_unlock(&vha->lock);

	return ret;
}

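/* Remove all commands belonging to a session that is being torn down.
 * Rolls back anything the session still had pending/queued on the hw and
 * returns true if the scheduler needs to be rerun. */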
bool vha_rm_session_cmds(struct vha_session *session)
{
	struct vha_dev *vha = session->vha;
	bool pend_removed = false;
	bool queued_removed = false;
	bool reschedule = false;
	struct vha_cmd *cur_cmd, *tmp_cmd;
	uint8_t pri;

	/* Check if pend/queued commands will be removed. */
	if (vha->pendcmd[VHA_CNN_CMD].cmd &&
			vha->pendcmd[VHA_CNN_CMD].cmd->session == session) {
		dev_warn(vha->dev,
				"Removing a session while cnn cmd is still pending\n");
		pend_removed = true;
#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
		cancel_delayed_work(&vha->dummy_dwork);
#endif
	}
	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
			vha->queuedcmd[VHA_CNN_CMD].cmd->session == session) {
		dev_warn(vha->dev,
				"Removing a session while cnn cmd is still queued\n");
		queued_removed = true;
	}

	/* Update session scheduling. */
	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
			(pend_removed && !queued_removed)) {
		uint8_t pri = vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.priority;

		if (vha->queuedcmd[VHA_CNN_CMD].cmd->session !=
				list_entry(&vha->sched_sessions[pri], struct vha_session,
						sched_list[pri]))
			while (list_first_entry(&vha->sched_sessions[pri], struct vha_session,
					sched_list[pri]) != vha->queuedcmd[VHA_CNN_CMD].cmd->session)
				list_rotate_left(&vha->sched_sessions[pri]);
	}

	/* Remove pend/queued commands if needed. */
	if (pend_removed || queued_removed) {
		vha_rollback_cnn_cmds(vha);
		/* Need to reschedule too. */
		reschedule = true;
	}

	/* Remove session related commands. */
	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
			/* rsp didn't make it to rsps list, free it now */
			kfree(cur_cmd->rsp);

			list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
			vha->pri_q_counters[cur_cmd->user_cmd.priority] -=
					(VHA_CMD_SUBSEG_NUM(cur_cmd) - cur_cmd->subseg_current);
			if (vha_observers.canceled)
				vha_observers.canceled(vha->id, session->id, cur_cmd->user_cmd.cmd_id,
						cur_cmd->user_cmd.priority);
			kfree(cur_cmd);
		}
	}

	return reschedule;
}

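/* Same as above, but only for commands whose id matches cmd_id under
 * cmd_id_mask; used to cancel a specific command (or group of commands)
 * within a session. */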
bool vha_rm_session_cmds_masked(struct vha_session *session, uint32_t cmd_id,
		uint32_t cmd_id_mask)
{
	struct vha_dev *vha = session->vha;
	bool reschedule = false;
	bool pend_removed = false;
	uint32_t pend_aborted_kicks_adj_val = 0;
	bool queued_removed = false;
	uint32_t queued_aborted_kicks_adj_val = 0;

	/* Check if pend/queued commands will be removed. */
	if (vha->pendcmd[VHA_CNN_CMD].cmd &&
			(vha->pendcmd[VHA_CNN_CMD].cmd->session == session) &&
			(vha->pendcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id & cmd_id_mask)
				== cmd_id) {
		pend_removed = true;
		vha->stats.cnn_kicks_cancelled += vha->pendcmd[VHA_CNN_CMD].cmd->subseg_current;
		pend_aborted_kicks_adj_val = vha->pendcmd[VHA_CNN_CMD].cmd->subseg_current;
#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
		cancel_delayed_work(&vha->dummy_dwork);
#endif
	}
	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
			(vha->queuedcmd[VHA_CNN_CMD].cmd->session == session) &&
			(vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.cmd_id & cmd_id_mask)
				== cmd_id) {
		queued_removed = true;
		vha->stats.cnn_kicks_cancelled += vha->queuedcmd[VHA_CNN_CMD].cmd->subseg_current;
		queued_aborted_kicks_adj_val = vha->queuedcmd[VHA_CNN_CMD].cmd->subseg_current;
	}

	/* Update session scheduling. */
	if (vha->queuedcmd[VHA_CNN_CMD].cmd &&
			(pend_removed && !queued_removed)) {
		uint8_t pri = vha->queuedcmd[VHA_CNN_CMD].cmd->user_cmd.priority;

		if (vha->queuedcmd[VHA_CNN_CMD].cmd->session !=
				list_entry(&vha->sched_sessions[pri], struct vha_session,
						sched_list[pri]))
			while (list_first_entry(&vha->sched_sessions[pri], struct vha_session,
					sched_list[pri]) != vha->queuedcmd[VHA_CNN_CMD].cmd->session)
				list_rotate_left(&vha->sched_sessions[pri]);
	}

	/* Remove pend/queued commands if needed. */
	if (pend_removed || queued_removed) {
		vha_rollback_cnn_cmds(vha);
		/* Correct aborted stats. */
		if (queued_removed)
			vha->stats.cnn_kicks_aborted -= queued_aborted_kicks_adj_val;
		if (pend_removed)
			vha->stats.cnn_kicks_aborted -= pend_aborted_kicks_adj_val;
		reschedule = true;
	}

	return reschedule;
}

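/* Remove all commands (and pending responses) matching cmd_id/cmd_id_mask
 * from a session, releasing any dma fences attached to their buffers, and
 * optionally post a "cancel" response so user space waiting on the session
 * queue is woken up. */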
int vha_rm_cmds(struct vha_session *session, uint32_t cmd_id,
		uint32_t cmd_id_mask, bool respond)
{
	struct vha_dev *vha = session->vha;
	struct vha_cmd *cur_cmd, *tmp_cmd;
	struct vha_rsp *cur_rsp, *tmp_rsp;
	bool reschedule = false;
	bool respond_aux = false;
	int ret = 0;
	uint8_t pri;

	mutex_lock(&vha->lock);

	/* Remove pend/queued session commands that match the cmd_id. */
	reschedule = vha_rm_session_cmds_masked(session, cmd_id, cmd_id_mask);

	/* Remove session related commands matching command id template. */
	for (pri = 0; pri < VHA_MAX_PRIORITIES; pri++) {
		list_for_each_entry_safe(cur_cmd, tmp_cmd, &session->cmds[pri], list[pri]) {
			if ((cur_cmd->user_cmd.cmd_id & cmd_id_mask) == cmd_id) {
#ifdef KERNEL_DMA_FENCE_SUPPORT
				switch (cur_cmd->user_cmd.cmd_type)
				{
				case VHA_CMD_CNN_SUBMIT:
				{
					struct vha_user_cnn_submit_cmd *cnn_cmd =
							(struct vha_user_cnn_submit_cmd *)&cur_cmd->user_cmd;
					int j;

					for (j = 0; j < (cnn_cmd->msg.num_bufs - 1); j++) {
						struct vha_buffer *buf = vha_find_bufid(session, cnn_cmd->bufs[j]);

						if (buf == NULL) {
							dev_warn(vha->dev, "%s: could not find buf %x\n", __func__,
									cnn_cmd->bufs[j]);
						} else {
							vha_rm_buf_fence(session, buf);
						}
					}
					break;
				}
				default:
					dev_warn(vha->dev, "%s: invalid cmd type %x\n", __func__,
							cur_cmd->user_cmd.cmd_type);
					break;
				}
#endif
				/* rsp didn't make it to rsps list; free it now. */
				kfree(cur_cmd->rsp);

				list_del(&cur_cmd->list[cur_cmd->user_cmd.priority]);
				vha->pri_q_counters[cur_cmd->user_cmd.priority] -=
						(VHA_CMD_SUBSEG_NUM(cur_cmd) - cur_cmd->subseg_current);
				if (vha_observers.canceled)
					vha_observers.canceled(vha->id, session->id, cur_cmd->user_cmd.cmd_id,
							cur_cmd->user_cmd.priority);
				kfree(cur_cmd);

				/* There were commands matching command id template in the list,
				 * so respond to wake user space. */
				respond_aux = true;
			}
		}
	}

	/* Remove responses for session related commands
	 * matching command id template. */
	list_for_each_entry_safe(cur_rsp, tmp_rsp, &session->rsps, list) {
		if ((cur_rsp->user_rsp.cmd_id & cmd_id_mask) == cmd_id) {
			list_del(&cur_rsp->list);
			kfree(cur_rsp);
			respond_aux = true;
		}
	}

	/* Reset hardware if required. */
	if (reschedule)
		ret = vha_dev_stop(vha, reschedule);

	/* Generate "cancel" response if any commands matching command id template
	 * were removed. */
	if (respond_aux && respond) {
		/* Calculate space for the response. */
		size_t sz = sizeof(struct vha_rsp)
			+ sizeof(struct vha_user_cnn_submit_rsp)
			- sizeof(struct vha_user_rsp);
		/* Allocate space for standard response. */
		struct vha_rsp *rsp = kzalloc(sz, GFP_KERNEL);

		if (rsp == NULL) {
			dev_crit(session->vha->dev,
					"Failed to allocate memory to notify cancel for cmds 0x%08x\n", cmd_id);
			session->oom = true;
		} else {
			rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
			rsp->user_rsp.cmd_id = cmd_id;
			list_add_tail(&rsp->list, &session->rsps);
		}
		wake_up(&session->wq);
	}

	mutex_unlock(&vha->lock);

	/* Just return in case of oom. */
	if (session->oom)
		return -ENOMEM;

	/* Reschedule once all commands matching command id template are removed. */
	if (reschedule)
		vha_chk_cmd_queues(vha, true);

	return ret;
}

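/* The hw is considered busy if a command is pending (or, in low latency
 * modes, queued behind it), or while the core has not reported READY yet. */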
bool vha_is_busy(struct vha_dev *vha)
{
#ifndef CONFIG_VHA_DUMMY
	if (!vha->is_ready)
		return true;
#endif
	if (vha->low_latency != VHA_LL_DISABLED) {
		return vha->pendcmd[VHA_CNN_CMD].cmd != NULL ||
				vha->queuedcmd[VHA_CNN_CMD].cmd != NULL;
	}
	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
}

/* returns true if the cmd queue is full */
bool vha_is_queue_full(struct vha_dev *vha, struct vha_cmd *cmd)
{
	if (vha->low_latency != VHA_LL_DISABLED) {
		if (vha->low_latency == VHA_LL_SELF_KICK
#ifdef HW_AX3
			/* if the command we are trying to queue belongs to a different session than the pending one */
			&& (vha->pendcmd[VHA_CNN_CMD].cmd != NULL && cmd != NULL &&
				vha->pendcmd[VHA_CNN_CMD].cmd->session != cmd->session)
			/* if the session of the command we are trying to queue shares the hw mmu ctx with the session of the pending cmd */
			&& (cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id ==
				vha->pendcmd[VHA_CNN_CMD].cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id)
			/* Sanity check that the hw mmu ctx is really shared at this point */
			&& (vha->mmu_ctxs[cmd->session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1)
			) {
#else
			) {
			dev_warn(vha->dev, "%s: LL=2 not supported!\n", __func__);
#endif
			/* skip low latency mode */
			return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
		}

		return vha->pendcmd[VHA_CNN_CMD].cmd != NULL &&
				vha->queuedcmd[VHA_CNN_CMD].cmd != NULL;
	}

	return vha->pendcmd[VHA_CNN_CMD].cmd != NULL;
}

/* check all input buffers are filled and ready to go */
bool vha_is_waiting_for_inputs(struct vha_session *session,
		struct vha_cmd *cmd)
{
	if (!cmd->inbufs_ready) {
		const struct vha_user_cnn_submit_cmd *user_cmd =
				(struct vha_user_cnn_submit_cmd *)&cmd->user_cmd;
		int i;

		for (i = 0; i < cmd->user_cmd.num_inbufs - 1; i++) {
			struct vha_buffer *buf = vha_find_bufid(session, user_cmd->bufs[i]);

			if (buf && buf->status == VHA_BUF_UNFILLED) {
				dev_dbg(session->vha->dev,
						"%s: cmd %u waiting for input "
						"buf %d to be ready\n",
						__func__,
						cmd->user_cmd.cmd_id,
						buf->id);
				return true;
			}
		}
	}

	cmd->inbufs_ready = true;
	return false;
}

static bool vha_can_schedule(struct vha_dev *vha)
{
#ifndef CONFIG_VHA_DUMMY
	if (!vha->is_ready)
		return false;
#endif
	if (vha->low_latency != VHA_LL_DISABLED) {
		return vha->pendcmd[VHA_CNN_CMD].cmd == NULL ||
				vha->queuedcmd[VHA_CNN_CMD].cmd == NULL;
	}
	return vha->pendcmd[VHA_CNN_CMD].cmd == NULL;
}

static void vha_scheduler_set_starting_session(struct vha_dev *vha,
		uint8_t priority, struct vha_session *session, bool set_next)
{
	/* Rotate scheduling list to the current session
	 * to make it a starting point for the next scheduling round. */
	if (session != list_entry(&vha->sched_sessions[priority],
				struct vha_session, sched_list[priority]))
		while (list_first_entry(&vha->sched_sessions[priority],
				struct vha_session, sched_list[priority]) != session)
			list_rotate_left(&vha->sched_sessions[priority]);

	/* Set a starting point session for the next scheduling round
	 * to next to the current one if requested. */
	if (set_next)
		list_rotate_left(&vha->sched_sessions[priority]);
}

static uint8_t vha_scheduler_get_priority(struct vha_dev *vha)
{
	uint8_t pri;

	/* Find the highest priority that still has workloads to schedule. */
	for (pri = VHA_MAX_PRIORITIES - 1; (int8_t)pri >= 0; pri--)
		if (vha->pri_q_counters[pri] > 0)
			return pri;

	/* If there's no priority with WLs to schedule, return the invalid priority marker. */
	return VHA_INVALID_PRI;
}

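/* Main scheduling loop, run from the worker context. Picks the highest
 * priority with outstanding workloads, walks its sessions round-robin and
 * tries to kick (or queue, in low latency modes) their commands, then powers
 * the core down or schedules active power management when nothing is left. */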
void vha_scheduler_loop(struct vha_dev *vha)
{
	struct vha_cmd *cmd, *tmp;
	struct vha_session *session = NULL;
	enum do_cmd_status cmd_status = CMD_OK;
	bool scheduled = false;
	uint8_t current_pri = VHA_DEFAULT_PRI;

	if (vha_is_queue_full(vha, NULL)) {
		/* Postpone worker task if command queue is full. */
		dev_dbg(vha->dev, "%s Queue full. Postpone worker task!\n", __func__);
		return;
	}

	do {
		scheduled = false;
		current_pri = vha_scheduler_get_priority(vha);
		if (current_pri == VHA_INVALID_PRI)
			break;

		list_for_each_entry(session, &vha->sched_sessions[current_pri], sched_list[current_pri]) {
			list_for_each_entry_safe(cmd, tmp, &session->cmds[current_pri], list[current_pri]) {
				/* For hw commands... */
				if (CMD_EXEC_ON_HW(cmd)) {
					if (!VHA_IS_DUMMY(vha)) {
						/* Start device. */
						if (vha_dev_start(vha))
							return;
					}
				}

				/* Skip this workload as it's already scheduled. */
				if (cmd->queued || cmd->in_hw)
					continue;

				dev_dbg(vha->dev, "%s cur_prio=<%d>\n", __func__, current_pri);

				/* Attempt to schedule command for execution. */
				cmd_status = vha_do_cmd(cmd);

				/* Update scheduling loop based on command scheduling status. */
				if ((cmd_status == CMD_OK) || (cmd_status == CMD_HW_BUSY)) {
					bool set_next = false;

					if (cmd_status == CMD_OK) {
						scheduled = true;
						if (cmd->subseg_current == VHA_CMD_SUBSEG_NUM(cmd))
							set_next = true;
					}
					vha_scheduler_set_starting_session(vha, current_pri, session, set_next);
					goto exit_session_loop;
				}
			}
		}
exit_session_loop:;
	/* Iterate as long as a workload was scheduled and another one can still be accepted. */
	} while (vha_can_schedule(vha) && scheduled);

	if (!VHA_IS_DUMMY(vha)) {
		/* Schedule APM if needed */
		if (!vha_is_busy(vha) &&
				!vha->no_clock_disable) {
			if (!vha->pm_delay) {
				if (vha_dev_stop(vha, false)) {
					dev_warn(vha->dev, "%s: Failed to soft stop device. Trying with reset",
							__func__);
					if (vha_dev_stop(vha, true))
						dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
				}
			} else {
				vha->apm_dworks[0].delay_ms = vha->pm_delay;
				vha_sched_apm(vha, &vha->apm_dworks[0]);
			}
		}
	}
}

void vha_dev_apm_stop(struct vha_dev *vha, struct vha_apm_work *apm_work)
{
	if (!vha->do_calibration &&
			(vha->pendcmd[VHA_CNN_CMD].cmd == NULL &&
			 vha->queuedcmd[VHA_CNN_CMD].cmd == NULL))
		if (vha_dev_stop(vha, false)) {
			dev_warn(vha->dev, "%s: Failed to soft stop device. Trying with reset",
					__func__);
			if (vha_dev_stop(vha, true))
				dev_err(vha->dev, "%s: Failed to stop device with reset!", __func__);
		}
}

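/* Read the hw configuration registers (or fabricate them for the dummy
 * driver) and fill in vha->hw_props: product/core id, MMU version and width,
 * supported features and the amount of on-chip memory. Returns -ENODEV if
 * the detected BVNC does not match the series the module was built for. */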
int vha_dev_get_props(struct vha_dev *vha, uint32_t onchipmem_size)
{
	struct vha_hw_props *props = &vha->hw_props;
	uint64_t ip_config;
	uint32_t ocm_size_kb = 0;

	memset(props, 0, sizeof(*props));

#ifdef CONFIG_VHA_DUMMY
	/* Note: dummy dev always reads zeroes from registers */
	props->product_id = 0x8070605040302010ULL;
	props->core_id = (long)HW_SERIES << (int)VHA_CR_CORE_ID_BRANCH_ID_SHIFT;
	props->core_id += 0x010203040505ULL; /* provide a dummy core id */
	props->dummy_dev = true;
	props->num_cnn_core_devs = 1;
#else
	props->product_id = IOREAD64(vha->reg_base, VHA_CR_PRODUCT_ID);
	props->core_id = IOREAD64(vha->reg_base, VHA_CR_CORE_ID);
#endif
	props->skip_bvnc_check = false;
	/*
	 * New mmu version 3 and onwards operates on 40bit physical & virtual addresses
	 */
	props->mmu_width = 40;

	/* HW from 1.1 onwards */
	ip_config = IOREAD64(vha->reg_base, VHA_CR_CORE_IP_CONFIG);
#ifdef HW_AX3
	props->mmu_ver = VHA_CR_GETBITS(CORE_IP_CONFIG, MMU_VERSION, ip_config);
#endif
	/* Mirage uses MMU version 3 hardware */
	if (!props->mmu_ver)
		props->mmu_ver = 3;
	if (VHA_CR_GETBITS(CORE_IP_CONFIG, CNN_SUPPORTED, ip_config))
		props->num_cnn_core_devs = 1;
	if (VHA_CR_GETBITS(CORE_IP_CONFIG, RTM_SUPPORTED, ip_config))
		props->supported.rtm = 1;
#ifdef HW_AX3
	if (VHA_CR_GETBITS(CORE_IP_CONFIG, PARITY_REGISTERS, ip_config))
		props->supported.parity = 1;
#if defined(CONFIG_VHA_DUMMY) && defined(VHA_SCF)
	/* Force parity for pdump generation */
	props->supported.parity = 1;
#endif
#endif

	if ((props->num_cnn_core_devs == 0)
			|| VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id) != HW_SERIES) {
		dev_err(vha->dev, "%s: Wrong core configuration detected. "
				"Expected BVNC %d.x.x.x, got %llu.x.x.x. "
				"Maybe kernel module was built with wrong params.\n",
				__func__, HW_SERIES,
				VHA_CR_GETBITS(CORE_ID, BRANCH_ID, props->core_id));
		return -ENODEV;
	}

	props->soc_axi = IOREAD64(vha->reg_base, VHA_CR_SOC_AXI);

	dev_info(vha->dev, "%s: Product id: %#llx\n",
			__func__, props->product_id);
	dev_info(vha->dev, "%s: Core id: %#llx\n",
			__func__, props->core_id);
	dev_info(vha->dev, "%s: MMU version:%d (%dbit)\n",
			__func__, props->mmu_ver, props->mmu_width);
	dev_dbg(vha->dev, "%s: supported: %#x\n",
			__func__, props->features);
	dev_dbg(vha->dev, "%s: soc_axi: %#llx\n",
			__func__, props->soc_axi);
	{
		uint64_t tmp = IOREAD64(vha->reg_base,
				VHA_CR_CORE_IP_INTEGRATOR_ID);
		dev_dbg(vha->dev, "%s: ip integrator id: %#llx\n",
				__func__, tmp);
		tmp = IOREAD64(vha->reg_base, VHA_CR_CORE_IP_CHANGELIST);
		dev_dbg(vha->dev, "%s: ip change list: %llu\n", __func__, tmp);
	}

#if defined(CFG_SYS_VAGUS)
	ocm_size_kb = IOREAD64(vha->reg_base, NN_SYS_CR(CORE_IP_CONFIG)) &
			~NN_SYS_CR_CORE_IP_CONFIG_NN_SYS_OCM_RAM_SIZE_4KB_CLRMSK;
	ocm_size_kb *= 4;
#endif

	if (ocm_size_kb) {
		vha->hw_props.locm_size_bytes = ocm_size_kb * 1024;
		/* User may want to limit OCM ... */
		if (onchipmem_size) {
			if (onchipmem_size < vha->hw_props.locm_size_bytes) {
				dev_warn(vha->dev, "%s: Limiting onchip memory to %u bytes (available:%u)\n",
						__func__, onchipmem_size, vha->hw_props.locm_size_bytes);
				vha->hw_props.locm_size_bytes = onchipmem_size;
			} else if (onchipmem_size > vha->hw_props.locm_size_bytes) {
				dev_err(vha->dev, "%s: User defined onchip memory size exceeded (%u > %u)\n",
						__func__, onchipmem_size, vha->hw_props.locm_size_bytes);
			}
		}
	} else {
		vha->hw_props.locm_size_bytes = onchipmem_size;
	}

	dev_info(vha->dev, "%s: Total onchip memory: %u [kB]\n",
			__func__, vha->hw_props.locm_size_bytes / 1024);

	dev_info(vha->dev, "%s: Devices: DUMMY:%u CNN:%u\n", __func__,
			props->dummy_dev ? props->num_cnn_core_devs : 0,
			props->dummy_dev ? 0 : props->num_cnn_core_devs);

	return 0;
}

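/* Program the NN_SYS NOC lower/upper address registers with the OCM physical
 * address range (Vagus system configuration only). */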
void vha_dev_ocm_configure(struct vha_dev *vha)
{
#if defined(CFG_SYS_VAGUS)
	dev_dbg(vha->dev, "%s: OCM address range: %#lx - %#lx\n",
			__func__, vha->ocm_paddr,
			vha->ocm_paddr + vha->hw_props.locm_size_bytes - 1);

	IOWRITE64(vha->reg_base, NN_SYS_CR(NOC_LOWER_ADDR1), vha->ocm_paddr);
	IOWRITE64(vha->reg_base, NN_SYS_CR(NOC_UPPER_ADDR1),
			vha->ocm_paddr + vha->hw_props.locm_size_bytes - 1);

	img_pdump_printf("-- Setup NN_SYS OCM phys address range\n"
			"WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:0x0\n"
			"WRW64 :REG_NNSYS:%#x "_PMEM_":$0\n"
			"WRW "_PMEM_":$0 :OCM:BLOCK_CACHE:%#x\n"
			"WRW64 :REG_NNSYS:%#x "_PMEM_":$0\n",
			NN_SYS_CR_NOC_LOWER_ADDR1, vha->hw_props.locm_size_bytes-1,
			NN_SYS_CR_NOC_UPPER_ADDR1);
#endif
}

/* prepare CRC and DEBUG data buffers */
void vha_dbg_prepare_hwbufs(struct vha_session *session, struct vha_cmd *cmd,
		struct vha_crc_config_regs *regs)
{
	struct vha_dev *vha = session->vha;
	(void)cmd;

	if (session->cnn_dbg.cnn_crc_buf[0]) {
		struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[0];
		uint64_t val64;

		/* enable CRC: address + mode */
		val64 = VHA_CR_SETBITS_OS(CNN_CRC_CONTROL, CNN_CRC_ENABLE,
				session->cnn_dbg.cnn_crc_mode);
		img_pdump_printf("-- CRC_CONTROL=%u buf 'CRC' size=%zx\n",
				session->cnn_dbg.cnn_crc_mode, buf->size);
		IOWRITE_PDUMP_BUFADDR(session, buf, 0, VHA_CR_OS(CNN_CRC_ADDRESS));
		IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_CRC_CONTROL));
#ifdef HW_AX3
		img_pdump_printf("-- CRC_MASK=%#x\n", session->cnn_dbg.cnn_crc_mask);
		IOWRITE64_PDUMP(session->cnn_dbg.cnn_crc_mask, VHA_CR_OS(CNN_CRC_MASK_CTRL));
#endif
	}
	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
		struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[0];
		uint64_t val64;

		/* enable DEBUG: address, perf mode, band mode */
		img_pdump_printf("-- DEBUG_CONTROL=%u,%u buf 'DBG' size=%zx\n",
				GET_CNN_DBG_MODE(PERF, session), GET_CNN_DBG_MODE(BAND, session),
				buf->size);
		IOWRITE_PDUMP_BUFADDR(session, buf, 0,
				VHA_CR_OS(CNN_DEBUG_ADDRESS));
		val64 = VHA_CR_ALIGN_SETBITS_OS(CNN_DEBUG_SIZE,
				CNN_DEBUG_SIZE,
				buf->size);
		IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_DEBUG_SIZE));
		/* Set the CONTROL register only if requested */
		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
			val64 = VHA_CR_SETBITS_OS(CNN_DEBUG_CONTROL, CNN_PERF_ENABLE,
					GET_CNN_DBG_MODE(PERF, session));
			val64 |= VHA_CR_SETBITS_OS(CNN_DEBUG_CONTROL, CNN_BAND_ENABLE,
					GET_CNN_DBG_MODE(BAND, session));
			IOWRITE64_PDUMP(val64, VHA_CR_OS(CNN_DEBUG_CONTROL));
		}
	}
}

/* flush CRC and DEBUG data buffers */
void vha_dbg_flush_hwbufs(struct vha_session *session, char checkpoint, uint8_t mask)
{
	struct vha_dev *vha = session->vha;
	(void)mask;

	if (session->cnn_dbg.cnn_dbg_flush != checkpoint)
		return;

	if (session->cnn_dbg.cnn_crc_buf[0]) {
		struct vha_buffer *buf = session->cnn_dbg.cnn_crc_buf[0];
		/*
		 * TOBEDONE: calculate CRC buffer size based
		 * on num passes, num layers, etc
		 */
		img_pdump_printf("-- Save signatures\n");
		img_pdump_printf("IF CHECK_CRCS\n");
		img_pdump_printf("COM Checking CRCs ...\n");
		vha_pdump_sab_buf(session, PDUMP_CRC,
				buf, 0, buf->size);
		img_pdump_printf("ELSE CHECK_CRCS\n");
		img_pdump_printf("COM Not checking CRCs!\n");
		img_pdump_printf("FI CHECK_CRCS\n");
	}
	if (session->cnn_dbg.cnn_dbg_buf[0] && session->cnn_dbg.cnn_dbg_pdump_enable) {
		struct vha_buffer *buf = session->cnn_dbg.cnn_dbg_buf[0];
		/* read the size of the DEBUG buffer */
		uint64_t size = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_DEBUG_STATUS));
		/*
		 * SAB the DBG buffer, even though "it is not deterministic"
		 */
		size = VHA_CR_GETBITS_OS(CNN_DEBUG_STATUS,
				CNN_DEBUG_OFFSET,
				size);
		img_pdump_printf("-- Save DEBUG info\n");
		vha_pdump_sab_buf(session, PDUMP_DBG, buf, 0, buf->size);
	}
}

/* stop capturing CRC and DEBUG data */
void vha_dbg_stop_hwbufs(struct vha_session *session, uint8_t mask)
{
	struct vha_dev *vha = session->vha;

	(void)mask;

	/* Flush hw debug buffers */
	vha_dbg_flush_hwbufs(session, 0, 0);

	if (session->cnn_dbg.cnn_crc_buf[0]) {
		IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_CRC_CONTROL));
	}
	if (session->cnn_dbg.cnn_dbg_buf[0]) {
		/* read the size of the DEBUG buffer */
		uint64_t size = IOREAD64(vha->reg_base, VHA_CR_OS(CNN_DEBUG_STATUS));

		if (CNN_DBG_MODE_ON(PERF, session) || CNN_DBG_MODE_ON(BAND, session)) {
			IOWRITE64_PDUMP(0, VHA_CR_OS(CNN_DEBUG_CONTROL));
			/* just give a hint in the pdump:
			 * dummy device returns 0 */
			img_pdump_printf(
					"-- POL64 :REG:%#x 0 0 0 1 1 -- DEBUG_STATUS=%llx\n",
					VHA_CR_OS(CNN_DEBUG_STATUS),
					size);
		}
	}
}

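/* Read a single RTM (real time monitor) signal: force all clocks on, select
 * the signal address, wait briefly (the number of RTM stages is not visible
 * to software) and return the captured data. */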
uint64_t vha_dbg_rtm_read(struct vha_dev *vha, uint64_t addr)
{
	/* Turn on all clocks forcefully */
	IOWRITE64(vha->reg_base, VHA_CR_SYS_CLK_CTRL0, VHA_SYS_CLOCKS_DEFAULT(ON));
	IOWRITE64(vha->reg_base, VHA_CR_CLK_CTRL0, VHA_MAIN_CLOCKS_DEFAULT(ON));

	/* Set up address of the signal */
	IOWRITE64(vha->reg_base, VHA_CR_RTM_CTRL, addr | VHA_CR_RTM_CTRL_RTM_ENABLE_EN);

	/* N_OF_RTM_STAGES is not accessible by SW,
	 * so wait 1 ms for now */
	msleep(1);

	/* Read the data */
	return IOREAD64(vha->reg_base, VHA_CR_RTM_DATA);
}

/* List of predefined registers to be shown in debugfs */
const struct vha_reg vha_regs[] = {
#define REG_DESC(reg) VHA_CR_##reg, VHA_CR_##reg##_MASKFULL
#define REG_DESC_OS(reg) VHA_CR_OS(reg), VHA_CR_OS(reg##_MASKFULL)
	{"main_clocks_control ", REG_DESC(CLK_CTRL0)},
	{"main_clocks_status ", REG_DESC(CLK_STATUS0)},
	{"sys_clocks_control ", REG_DESC(SYS_CLK_CTRL0)},
	{"sys_clocks_status ", REG_DESC(SYS_CLK_STATUS0)},
	{"product_id ", REG_DESC(PRODUCT_ID)},
	{"core_id ", REG_DESC(CORE_ID)},
	{"soc_axi ", REG_DESC(SOC_AXI)},
	{"integrator_id ", REG_DESC(CORE_IP_INTEGRATOR_ID)},
	{"ip_changelist ", REG_DESC(CORE_IP_CHANGELIST)},
	{"core_ip_config ", REG_DESC(CORE_IP_CONFIG)},
	{"reset ", REG_DESC(RESET_CTRL)},
	{"event_enable ", REG_DESC_OS(VHA_EVENT_ENABLE)},
	{"event_status ", REG_DESC_OS(VHA_EVENT_STATUS)},
	{"cnn_control ", REG_DESC_OS(CNN_CONTROL)},
	{"cnn_status ", REG_DESC_OS(CNN_STATUS)},
#ifdef HW_AX2
	{"cnn_wdt_cmpmatch ", REG_DESC(CNN_WDT_COMPAREMATCH)},
	{"cnn_wdt_control ", REG_DESC(CNN_WDT_CTRL)},
	{"cnn_wdt_timer ", REG_DESC(CNN_WDT_TIMER)},
#endif
	{"cnn_mem_wdt_cmpmatch ", REG_DESC(CNN_MEM_WDT_COMPAREMATCH)},
	{"cnn_mem_wdt_control ", REG_DESC(CNN_MEM_WDT_CTRL)},
	{"cnn_mem_wdt_timer ", REG_DESC(CNN_MEM_WDT_TIMER)},
	{"mmu_control ", REG_DESC_OS(MMU_CTRL)},
	{"mmu_context ", REG_DESC_OS(MMU_CBASE_MAPPING_CONTEXT)},
	{"mmu_mapping ", REG_DESC_OS(MMU_CBASE_MAPPING)},
	{"mmu_status ", REG_DESC(MMU_STATUS)},
	{"mmu_fault_status1 ", REG_DESC_OS(MMU_FAULT_STATUS1)},
	{"mmu_fault_status2 ", REG_DESC_OS(MMU_FAULT_STATUS2)},
	{"slc_control ", REG_DESC(SLC_CTRL)},
#if 0
	{"slc_bypass_control ", REG_DESC(SLC_BYPASS_CTRL)},
#endif
	{"slc_status1 ", REG_DESC(SLC_STATUS1)},
	{"slc_status2 ", REG_DESC(SLC_STATUS2)},
	{"slc_status3 ", REG_DESC(SLC_STATUS3)},
	{"slc_idle ", REG_DESC(SLC_IDLE)},
	{"bif_outstanding_read ", REG_DESC(BIF_OUTSTANDING_READ)},
#undef REG_DESC
#undef REG_DESC_OS
	{NULL, 0},
};