vha_wm.c
/*
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL"), in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replacing them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <uapi/vha.h>
#include <uapi/vha_errors.h>
#include "vha_common.h"
#include "vha_plat.h"
#include "vha_regs.h"

static uint32_t cnn_pdump_poll_count = 10000000;
module_param(cnn_pdump_poll_count, uint, 0444);
MODULE_PARM_DESC(cnn_pdump_poll_count,
    "PDUMP: Number of times to poll for CNN status");

static uint32_t wm_pdump_poll_count = 100;
module_param(wm_pdump_poll_count, uint, 0444);
MODULE_PARM_DESC(wm_pdump_poll_count,
    "PDUMP: Number of times to poll for WM status");

static bool cnn_preloads_disable;
module_param(cnn_preloads_disable, bool, 0444);
MODULE_PARM_DESC(cnn_preloads_disable,
    "Disables CNN preloads");

static uint32_t cnn_hl_wdt_cycles = VHA_CORE_WDT_CYCLES;
module_param(cnn_hl_wdt_cycles, uint, 0444);
MODULE_PARM_DESC(cnn_hl_wdt_cycles,
    "High level core watchdog cycles");

static uint32_t cnn_hl_wdt_mode = VHA_CR_CNN_WDT_CTRL_CNN_WDT_CTRL_KICK_PASS;
module_param(cnn_hl_wdt_mode, uint, 0444);
MODULE_PARM_DESC(cnn_hl_wdt_mode,
    "High level core watchdog mode: 1-pass; 2-layer group. See TRM");

static uint32_t cnn_mem_wdt_cycles = VHA_CORE_MEM_WDT_CYCLES;
module_param(cnn_mem_wdt_cycles, uint, 0444);
MODULE_PARM_DESC(cnn_mem_wdt_cycles,
    "Core memory watchdog cycles");

static uint32_t cnn_mem_wdt_mode = VHA_CR_CNN_MEM_WDT_CTRL_CNN_MEM_WDT_CTRL_KICK_PASS;
module_param(cnn_mem_wdt_mode, uint, 0444);
MODULE_PARM_DESC(cnn_mem_wdt_mode,
    "Core memory watchdog mode: 0-disabled; "
    "1-CMD Parser starts a pass or CMD parser is kicked; "
    "2-CMD parser is kicked. See TRM");
static bool use_estimated_cycles_for_wm_wdt = false;
module_param(use_estimated_cycles_for_wm_wdt, bool, 0444);
MODULE_PARM_DESC(use_estimated_cycles_for_wm_wdt,
    "WM workload watchdog cycles source: "
    "false-the value from the wm_wl_wdt_cycles parameter will be used; "
    "true-the value from the MBS SEGMENT_ESTIMATED_CYCLES field will be used");

static uint32_t wm_wl_wdt_estimated_cycles_margin = 0;
module_param(wm_wl_wdt_estimated_cycles_margin, uint, 0444);
MODULE_PARM_DESC(wm_wl_wdt_estimated_cycles_margin,
    "WM workload watchdog cycles margin added to the SEGMENT_ESTIMATED_CYCLES"
    " value; used only if use_estimated_cycles_for_wm_wdt==true");

static uint32_t wm_wl_wdt_cycles = VHA_WM_WDT_CYCLES;
module_param(wm_wl_wdt_cycles, uint, 0444);
MODULE_PARM_DESC(wm_wl_wdt_cycles,
    "WM workload watchdog cycles");

static uint32_t wm_wl_wdt_mode = VHA_CR_WM_WL_WDT_CTRL_WL_WDT_CTRL_KICK_WL;
module_param(wm_wl_wdt_mode, uint, 0444);
MODULE_PARM_DESC(wm_wl_wdt_mode,
    "WM workload watchdog mode: 0-disabled; 1-enabled. See TRM");

static uint32_t socm_xor_bits[2] = { 0, 0 };
module_param_array(socm_xor_bits, uint, NULL, 0444);
MODULE_PARM_DESC(socm_xor_bits,
    "SOCM Hashing: This parameter reflects the SOCM_B7_XOR_BITS & SOCM_B8_XOR_BITS "
    "hw registers. If not set, the default values are used. See TRM.");
/*
 * Internal memory layout:
 *   .onchipmem_phys_start
 *   LOCM - <onchipmem_size>
 *   4k GUARD PAGE
 *   WM0 SOCM - <shared_onchipmem_size>
 *   4k GUARD PAGE
 *   WM1 SOCM
 *   4k GUARD PAGE
 *   ...
 *   WMn SOCM
 *   4k GUARD PAGE
 *   WM0 LL SYNC buffer - 4k PAGE
 *   4k GUARD PAGE
 *   WM1 LL SYNC buffer - 4k PAGE
 *   4k GUARD PAGE
 *   ...
 *   WMn LL SYNC buffer - 4k PAGE
 *   4k GUARD PAGE
 */
#define LLSYNC_SIZE 0x1000
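
/*
 * Worked example of the layout above (sizes are illustrative, not taken from
 * any particular configuration): with onchipmem_phys_start = 0x10000000,
 * locm_size_bytes = 1 MB and socm_size_bytes = 512 KB, WM1's SOCM region
 * starts at
 *   0x10000000 + 1 MB + 4 KB (guard) + 1 * (512 KB + 4 KB),
 * and the WM1 LL SYNC page sits after all per-WM SOCM regions, at
 *   ll_sync_base + 1 * (LLSYNC_SIZE + 4 KB guard).
 */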
struct vha_config_regs {
    uint64_t core_assignment;
    uint64_t cnn_control[VHA_MAX_CORES];
    uint64_t cmd_base_addr[VHA_MAX_CORES];
    uint64_t cnn_alt_addr[VHA_CORE_MAX_ALT_ADDRS];
    uint64_t locm_base_addr;
    uint64_t socm_circ_buff_size;
    uint64_t socm_base_addr;
    uint64_t socm_buf_assignment;
    uint64_t socm_b7_xor_bits;
    uint64_t socm_b8_xor_bits;
    uint64_t low_level_sync_base_addr;
    uint64_t cnn_alt_addr_used;
    uint64_t cnn_vcore_mapping;
};
/* Note:
 * The SOCM_BUF_<X>_WM_MAPPING and the CORE_<X>_WM_MAPPING registers must be
 * configured identically, thus we use the core_mask for a given WM. */
static uint64_t wm_assign_socm(struct vha_dev *vha, uint64_t socm_buf_addr,
        uint8_t wm_id, uint8_t core_mask, uint32_t circ_buf_offs,
        struct vha_config_regs *regs)
{
    uint64_t socm_buf_assignment = IOREAD64_CR_REGIO(SOCM_BUF_ASSIGNMENT);
    uint32_t assignment_field_shift =
            VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_SHIFT -
            VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_SHIFT;
    uint64_t assignment_field_mask =
            ~VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_CLRMSK;
    uint64_t base_addr = socm_buf_addr;
    uint32_t socm_chunk_size = vha->hw_props.socm_core_size_bytes *
            VHA_CORE_MASK_TO_NUM(vha_wm_get_cores(vha, wm_id));

    /* Use a different address for each WM to make debugging easier */
    base_addr += wm_id * (vha->hw_props.socm_size_bytes + IMG_MEM_VA_GUARD_GAP);
    /* Virtual base address must be 256 byte aligned */
    base_addr = ALIGN(base_addr, 256);
    /* Chunk size used to calculate the offset must be 128 byte aligned */
    socm_chunk_size = ALIGN(socm_chunk_size, 128);
    /* circ_buf_offs == 0 means that the circular buffer is disabled */
    if (circ_buf_offs && socm_chunk_size && circ_buf_offs <= socm_chunk_size) {
        regs->socm_circ_buff_size = socm_chunk_size - circ_buf_offs;
    } else {
        regs->socm_circ_buff_size = 0;
    }
    regs->socm_base_addr = base_addr;
    dev_dbg(vha->dev, "%s: set SOCM WM%u address -> %#llx\n",
            __func__, wm_id, base_addr);

    while (core_mask != 0) {
        uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);

        core_mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
        socm_buf_assignment &=
            ~(assignment_field_mask << (curr_core_id * assignment_field_shift));
        socm_buf_assignment |= wm_id << (curr_core_id * assignment_field_shift);
    }
    regs->socm_buf_assignment = socm_buf_assignment;
    dev_dbg(vha->dev, "%s: assigned SOCM bufs for WM%u: 0x%llx\n",
            __func__, wm_id, socm_buf_assignment);

    if (socm_xor_bits[0]) {
        regs->socm_b7_xor_bits = socm_xor_bits[0];
    }
    if (socm_xor_bits[1]) {
        regs->socm_b8_xor_bits = socm_xor_bits[1];
    }
    return base_addr - socm_buf_addr;
}
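
/*
 * Illustrative example of the assignment loop above: with wm_id = 1 and
 * core_mask = 0x6 (cores 1 and 2), the loop clears the WM_MAPPING field for
 * each of those cores in SOCM_BUF_ASSIGNMENT and programs both fields to 1,
 * leaving the mappings of all other cores untouched.
 */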
static bool vha_wm_setup_config_regs_multi(struct vha_cmd *cmd,
        struct vha_config_regs *regs)
{
    int i;
    bool ret = false;
    const struct vha_user_cnn_submit_multi_cmd *user_submit_cmd =
            (struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd;
    struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    uint32_t val32 = 0;
    struct vha_buffer *buf = NULL;
    uint64_t *reg = NULL;
    uint32_t core_mask;
    uint64_t vcore_map = 0;
    uint32_t vcore_field_shift =
            VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE1_SHIFT -
            VHA_CR_OS0_CNN_VCORE_MAPPING_VCORE0_SHIFT;

    if (cmd->size != sizeof(*user_submit_cmd)) {
        dev_err(vha->dev, "%s: command buffer wrong size: %zu/%zu",
                __func__, cmd->size, sizeof(*user_submit_cmd));
        goto out_error;
    }
    if (!vha_dev_check_hw_capab(vha, user_submit_cmd->expected_ip_capab))
        goto out_error;

    /* At least num cores CMDs and IN */
    if (user_submit_cmd->msg.num_inbufs < (user_submit_cmd->num_cores + 1) ||
            /* At least OUT */
            (user_submit_cmd->msg.num_inbufs - user_submit_cmd->num_cores
                >= user_submit_cmd->msg.num_bufs) ||
            /* And maybe TMP and others */
            user_submit_cmd->msg.num_bufs > VHA_CORE_MAX_ALT_ADDRS) {
        dev_err(vha->dev, "%s: wrong number of bufs: %u,%u\n",
                __func__,
                user_submit_cmd->msg.num_inbufs,
                user_submit_cmd->msg.num_bufs);
        goto out_error;
    }
    /* Number of cores. */
    if ((user_submit_cmd->num_cores < 1) ||
            (user_submit_cmd->num_cores > vha->hw_props.num_cnn_core_devs)) {
        dev_err(vha->dev, "%s: wrong number of cores: %u\n",
                __func__,
                user_submit_cmd->num_cores);
        goto out_error;
    }
    /* Number of cmd streams must match number of cores. */
    for (i = 0; i < user_submit_cmd->num_cores; i++)
        if (user_submit_cmd->cmdbuf[i] == 0)
            break;
    if ((i < user_submit_cmd->num_cores) ||
            ((user_submit_cmd->num_cores < VHA_MAX_CORES) &&
             (user_submit_cmd->cmdbuf[i] != 0))) {
        for (; i < VHA_MAX_CORES; i++)
            if (user_submit_cmd->cmdbuf[i] == 0)
                break;
        dev_err(vha->dev, "%s: wrong number of cmd streams: %u,%u\n",
                __func__,
                i, user_submit_cmd->num_cores);
        goto out_error;
    }
    /* Make WM<->cores binding. */
    vha_wm_assign_cores(vha, sched_info->wm_id, sched_info->core_mask,
            &regs->core_assignment);
    dev_dbg(vha->dev, "%s: assigned cores for WM%u: 0x%02x\n",
            __func__, sched_info->wm_id,
            vha_wm_get_cores(vha, sched_info->wm_id));

    /* Write buffer address to each register,
     * and pdump LDB each of the input buffers */
    img_pdump_printf("-- Load inputs\n");
    /* First program cmd stream addrs. */
    core_mask = sched_info->core_mask;
    if (VHA_CORE_MASK_TO_NUM(core_mask) != user_submit_cmd->num_cores) {
        dev_err(vha->dev, "%s: invalid core_mask!\n", __func__);
        goto out_error;
    }
    for (i = 0; i < user_submit_cmd->num_cores; i++) {
        uint64_t curr_core;
        uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);

        buf = vha_find_bufid(session, user_submit_cmd->cmdbuf[i]);
        if (buf == NULL) {
            dev_err(vha->dev, "%s: invalid buffer id:%d\n",
                    __func__, user_submit_cmd->cmdbuf[i]);
            goto out_error;
        }
        if (buf->size == 0) {
            dev_err(vha->dev, "%s: invalid cmdstream size\n", __func__);
            goto out_error;
        }
        /* Choose next core from the WM set. */
        curr_core = VHA_CORE_ID_TO_MASK(curr_core_id);
        core_mask &= ~((uint32_t)curr_core);
        val32 = min(2048U, (uint32_t)buf->size)/32 - 1;
        val32 = VHA_CR_SETBITS(OS0_CNN_CONTROL, CMD_SIZE_MIN1, val32) |
            VHA_CR_SETBITS(OS0_CNN_CONTROL, CTXT_PASID,
                session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id) |
            VHA_CR_SETBITS(OS0_CNN_CONTROL, CTXT_PASID_IO,
                session->mmu_ctxs[VHA_MMU_REQ_IO_CTXID].hw_id);
        regs->cnn_control[curr_core_id] = val32;
        /* Pdump the cmd stream buffers. */
        vha_pdump_ldb_buf(session, PDUMP_PRM,
                buf, 0, buf->size,
                buf->status == VHA_BUF_FILLED_BY_SW);
        /* Write to core's cmd register.
         * In no-MMU mode, write phys address of a contig buffer.
         * In MMU mode, write virt address of buffer. */
        SET_BUFADDR(session, buf, 0, &regs->cmd_base_addr[curr_core_id]);
        /* Map this core. */
        vcore_map |= curr_core_id << (i * vcore_field_shift);
        if (vha_buf_needs_flush(session, buf->id))
            img_mem_sync_cpu_to_device(session->mem_ctx, buf->id);
    }
    /* Command stream buffers are already handled */
    for (i = 0; i < (user_submit_cmd->msg.num_bufs - 1); i++) {
        uint32_t offset;
        uint32_t size;

        buf = vha_find_bufid(session, user_submit_cmd->bufs[i]);
        if (buf == NULL) {
            dev_err(vha->dev, "%s: invalid buffer id:%d\n",
                    __func__, user_submit_cmd->bufs[i]);
            goto out_error;
        }
        /* offset can be specified for all
         * buffers except cmdstream buf */
        offset = user_submit_cmd->bufoffsets[i];
        size = user_submit_cmd->bufsizes[i];
        if (size + offset > buf->size) {
            dev_err(vha->dev, "%s: invalid size+offset: %x+%x > %zx\n",
                    __func__, size, offset, buf->size);
            goto out_error;
        }
        /* Calculate reg address */
        reg = &regs->cnn_alt_addr[user_submit_cmd->regidx[i]];
        /* Record what alt address is in use */
        regs->cnn_alt_addr_used |= 1 << user_submit_cmd->regidx[i];
        regs->cnn_alt_addr_used |= buf->req_type <<
            (VHA_CR_OS0_CNN_ALT_ADDRESS_USED_ALT_ADDR0_BUF_TYPE_SHIFT +
                user_submit_cmd->regidx[i]);
        if (user_submit_cmd->onchipram_bufs[VHA_LOCAL_OCM] == buf->id) {
            /* Check against overflow */
            if (buf->devvirt + vha->hw_props.locm_size_bytes +
                    IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
                dev_err(vha->dev, "%s: LOCM overflow!\n", __func__);
                goto out_error;
            }
            /* Setup Local OCM */
            regs->locm_base_addr = buf->devvirt;
            dev_dbg(vha->dev, "%s: set LOCM address -> %#llx\n",
                    __func__, buf->devvirt);
        }
        if (user_submit_cmd->onchipram_bufs[VHA_SHARED_OCM] == buf->id) {
            /* Check against overflow */
            if (buf->devvirt + vha->hw_props.socm_size_bytes +
                    IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
                dev_err(vha->dev, "%s: SOCM overflow!\n", __func__);
                goto out_error;
            }
            /* Setup Shared OCM */
            offset = wm_assign_socm(vha, buf->devvirt,
                    sched_info->wm_id, sched_info->core_mask,
                    user_submit_cmd->shared_circ_buf_offs, regs);
            /* Check against overflow */
            if (regs->socm_base_addr + vha->hw_props.socm_size_bytes +
                    IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
                dev_err(vha->dev, "%s: SOCM overflow!\n", __func__);
                goto out_error;
            }
        }
        /* Pdump the input buffers (not filled by the hw);
         * try to cache buffers filled by SW,
         * to avoid unnecessary LDBs */
        if (i < user_submit_cmd->msg.num_inbufs - user_submit_cmd->num_cores &&
                !(buf->status == VHA_BUF_FILLED_BY_HW))
            vha_pdump_ldb_buf(session, PDUMP_PRM,
                    buf, offset, size,
                    buf->status == VHA_BUF_FILLED_BY_SW);
        /* Write to the index register.
         * In no-MMU mode, write phys address of a contig buffer.
         * In MMU mode, write virt address of buffer. */
        SET_BUFADDR(session, buf, offset, reg);
        if (vha_buf_needs_flush(session, buf->id))
            img_mem_sync_cpu_to_device(session->mem_ctx, buf->id);
    }
    if (vha->ocm_paddr != ~0) {
        /* Low level sync buffer address.
         * It has a fixed size of 512 bytes, but we operate on 4k pages.
         * It is placed after the SOCM regions, accounting for the guard
         * page between LOCM & SOCM and the one after each SOCM.
         */
        uint64_t ll_sync_addr = vha->ocm_paddr +
            vha->hw_props.locm_size_bytes + IMG_MEM_VA_GUARD_GAP +
            vha->hw_props.num_cnn_core_devs *
                (vha->hw_props.socm_size_bytes + IMG_MEM_VA_GUARD_GAP);
        /* Add offset based on WM id */
        ll_sync_addr += sched_info->wm_id * (LLSYNC_SIZE +
                IMG_MEM_VA_GUARD_GAP);
        /* Check against overflow */
        if (ll_sync_addr + LLSYNC_SIZE +
                IMG_MEM_VA_GUARD_GAP > IMG_MEM_VA_HEAP1_BASE) {
            dev_err(vha->dev, "%s: LLSYNC overflow!\n", __func__);
            goto out_error;
        }
        /* Setup low level sync buffer address */
        regs->low_level_sync_base_addr = ll_sync_addr;
        dev_dbg(vha->dev, "%s: set LLSYNC address -> %#llx\n",
                __func__, ll_sync_addr);
    }
    ret = true;
    /* Program core mappings. */
    regs->cnn_vcore_mapping = vcore_map;
out_error:
    return ret;
}
static bool vha_wm_write_config_regs(struct vha_cmd *cmd, struct vha_config_regs *regs)
{
    struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
    uint8_t wm_id = sched_info->wm_id;
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    uint32_t reg_size = VHA_CR_OS0_CNN_ALT_ADDRESS1 - VHA_CR_OS0_CNN_ALT_ADDRESS0;
    uint32_t reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
    uint32_t reg_idx_offset = 0;
    uint32_t core_id = 0;
    int i;

    img_pdump_printf("-- Assign cores 0x%02x to WM%u\n", sched_info->core_mask, wm_id);
    IOWRITE64_CR_PDUMP(regs->core_assignment, CORE_ASSIGNMENT);
    for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
        if (sched_info->core_mask & (1 << core_id)) {
            uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);

            img_pdump_printf("-- Select core: %llu\n", curr_core);
            IOWRITE64_CR_PDUMP(curr_core, CORE_CTRL_INDIRECT);
            img_pdump_printf("-- Setup command stream for core %u\n", core_id);
            IOWRITE64_CR_PDUMP(regs->cnn_control[core_id], OS0_CNN_CONTROL);
            IOWRITE64_CR_PDUMP(regs->cmd_base_addr[core_id], OS0_CNN_CMD_BASE_ADDRESS);
        }
    }
    /* Operate only on cores assigned to this WM. */
    img_pdump_printf("-- Select only cores assigned to WM: %u\n",
            sched_info->core_mask);
    IOWRITE64_CR_PDUMP(sched_info->core_mask, CORE_CTRL_INDIRECT);
    /* Make WM<->core binding. */
    if (regs->socm_base_addr != ~0) {
        img_pdump_printf("-- Set SOCM circular buffer size for WM%d\n", wm_id);
        IOWRITE64_CR_PDUMP(regs->socm_circ_buff_size, SOCM_CIRCULAR_BUFFER_SIZE);
        img_pdump_printf("-- Set SOCM WM%u address\n", wm_id);
        IOWRITE64_CR_PDUMP(regs->socm_base_addr, SOCM_BASE_ADDR);
        img_pdump_printf("-- Assign SOCM bufs 0x%02x to WM%u\n",
                sched_info->core_mask, wm_id);
        IOWRITE64_CR_PDUMP(regs->socm_buf_assignment, SOCM_BUF_ASSIGNMENT);
        if (regs->socm_b7_xor_bits)
            IOWRITE64_CR_PDUMP(regs->socm_b7_xor_bits, SOCM_B7_XOR_BITS);
        if (regs->socm_b8_xor_bits)
            IOWRITE64_CR_PDUMP(regs->socm_b8_xor_bits, SOCM_B8_XOR_BITS);
    }
    if (regs->locm_base_addr != ~0) {
        img_pdump_printf("-- Set LOCM address\n");
        IOWRITE64_CR_PDUMP(regs->locm_base_addr, OS0_LOCM_BASE_ADDR);
    }
    for (i = 0; i < VHA_CORE_MAX_ALT_ADDRS; i++) {
        if (i >= 8) {
            reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS8;
            reg_idx_offset = 8;
        }
        if (regs->cnn_alt_addr_used & (1 << i)) {
            img_pdump_printf("-- Set ALT_%d address\n", i);
            IOWRITE64_PDUMP(regs->cnn_alt_addr[i],
                    reg_base + (i - reg_idx_offset) * reg_size);
        }
    }
    if (regs->low_level_sync_base_addr != ~0) {
        /* Setup low level sync buffer address */
        img_pdump_printf("-- Set LLSYNC address\n");
        IOWRITE64_CR_PDUMP(regs->low_level_sync_base_addr, LOW_LEVEL_SYNC_BASE_ADDR);
    }
    if (!cnn_preloads_disable) {
        /* Inform the hw what alt addresses are in use,
         * so the command decoder can prefetch */
        img_pdump_printf("-- Setup CNN prefetch register\n");
        IOWRITE64_CR_PDUMP(regs->cnn_alt_addr_used, OS0_CNN_ALT_ADDRESS_USED);
    }
    /* Program core mapping. */
    img_pdump_printf("-- Program virtual core mappings\n");
    IOWRITE64_CR_PDUMP(regs->cnn_vcore_mapping, OS0_CNN_VCORE_MAPPING);
    return true;
}
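
/*
 * Note: every register programmed in vha_wm_write_config_regs() above has a
 * matching read-back in vha_wm_confirm_config_regs() below (VHA_SCF builds),
 * which is how the driver detects configuration writes that did not land.
 */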
#ifdef VHA_SCF
#ifdef VHA_EVENT_INJECT
#define CHECK_TOP_REG(_val_, _reg_) do { \
    uint64_t val64 = IOREAD64_CR_REGIO(_reg_); \
    if ((vha->injection.conf_err & CONF_ERR_TOP) && __EVENT_INJECT()) \
        val64 = ~val64; \
    if (val64 != _val_) { \
        cmd->conf_top_error = true; \
        dev_err(vha->dev, "Confirmation writes mismatch, top register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                VHA_CR_##_reg_, (uint64_t)_val_, val64); \
        goto out_error; \
    }} while (0)
#define CHECK_CR_CORE_REG(_val_, _reg_, _core_id_) do { \
    uint64_t val64 = IOREAD64_CR_REGIO(_reg_); \
    if ((vha->injection.conf_err & CONF_ERR_BOTTOM) && __EVENT_INJECT()) \
        val64 = ~val64; \
    if (val64 != _val_) { \
        cmd->conf_core_error |= 1 << _core_id_; \
        dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                VHA_CR_##_reg_, (uint64_t)_val_, val64); \
    }} while (0)
#define CHECK_CORE_REG(_val_, _reg_, _core_id_) do { \
    uint64_t val64 = IOREAD64_REGIO(_reg_); \
    if ((vha->injection.conf_err & CONF_ERR_BOTTOM) && __EVENT_INJECT()) \
        val64 = ~val64; \
    if (val64 != _val_) { \
        cmd->conf_core_error |= 1 << _core_id_; \
        dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                _reg_, (uint64_t)_val_, val64); \
    }} while (0)
#else
#define CHECK_TOP_REG(_val_, _reg_) do { \
    uint64_t val64 = IOREAD64_CR_REGIO(_reg_); \
    if (val64 != _val_) { \
        cmd->conf_top_error = true; \
        dev_err(vha->dev, "Confirmation writes mismatch, top register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                VHA_CR_##_reg_, (uint64_t)_val_, val64); \
        goto out_error; \
    }} while (0)
#define CHECK_CR_CORE_REG(_val_, _reg_, _core_id_) do { \
    uint64_t val64 = IOREAD64_CR_REGIO(_reg_); \
    if (val64 != _val_) { \
        cmd->conf_core_error |= 1 << _core_id_; \
        dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                VHA_CR_##_reg_, (uint64_t)_val_, val64); \
    }} while (0)
#define CHECK_CORE_REG(_val_, _reg_, _core_id_) do { \
    uint64_t val64 = IOREAD64_REGIO(_reg_); \
    if (val64 != _val_) { \
        cmd->conf_core_error |= 1 << _core_id_; \
        dev_err(vha->dev, "Confirmation writes mismatch, core register: 0x%x\n" \
                "expected: 0x%016llx actual: 0x%016llx\n", \
                _reg_, (uint64_t)_val_, val64); \
    }} while (0)
#endif
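
/*
 * The VHA_EVENT_INJECT variants of the macros above deliberately invert the
 * read-back value when fault injection is armed, forcing the mismatch path so
 * the confirmation error handling can be exercised without a real hw fault.
 */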
static bool vha_wm_confirm_config_regs(struct vha_cmd *cmd, struct vha_config_regs *regs)
{
    struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    uint32_t reg_size = VHA_CR_OS0_CNN_ALT_ADDRESS1 - VHA_CR_OS0_CNN_ALT_ADDRESS0;
    uint32_t reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
    uint32_t reg_idx_offset = 0;
    uint32_t core_id = 0;
    int i;

    CHECK_TOP_REG(regs->core_assignment, CORE_ASSIGNMENT);
    for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
        reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS0;
        reg_idx_offset = 0;
        if (sched_info->core_mask & (1 << core_id)) {
            uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);

            IOWRITE64_CR_REGIO(curr_core, CORE_CTRL_INDIRECT);
            CHECK_CR_CORE_REG(regs->cnn_control[core_id], OS0_CNN_CONTROL, core_id);
            CHECK_CR_CORE_REG(regs->cmd_base_addr[core_id], OS0_CNN_CMD_BASE_ADDRESS, core_id);
            if (regs->socm_base_addr != ~0) {
                CHECK_CR_CORE_REG(regs->socm_circ_buff_size, SOCM_CIRCULAR_BUFFER_SIZE, core_id);
                CHECK_CR_CORE_REG(regs->socm_base_addr, SOCM_BASE_ADDR, core_id);
                CHECK_CR_CORE_REG(regs->socm_buf_assignment, SOCM_BUF_ASSIGNMENT, core_id);
                if (regs->socm_b7_xor_bits)
                    CHECK_CR_CORE_REG(regs->socm_b7_xor_bits, SOCM_B7_XOR_BITS, core_id);
                if (regs->socm_b8_xor_bits)
                    CHECK_CR_CORE_REG(regs->socm_b8_xor_bits, SOCM_B8_XOR_BITS, core_id);
            }
            if (regs->locm_base_addr != ~0) {
                CHECK_CR_CORE_REG(regs->locm_base_addr, OS0_LOCM_BASE_ADDR, core_id);
            }
            for (i = 0; i < VHA_CORE_MAX_ALT_ADDRS; i++) {
                if (i >= 8) {
                    reg_base = VHA_CR_OS0_CNN_ALT_ADDRESS8;
                    reg_idx_offset = 8;
                }
                if (regs->cnn_alt_addr_used & (1 << i))
                    CHECK_CORE_REG(regs->cnn_alt_addr[i],
                            reg_base + (i - reg_idx_offset) * reg_size, core_id);
            }
            if (regs->low_level_sync_base_addr != ~0) {
                CHECK_CR_CORE_REG(regs->low_level_sync_base_addr, LOW_LEVEL_SYNC_BASE_ADDR, core_id);
            }
            if (!cnn_preloads_disable)
                CHECK_CR_CORE_REG(regs->cnn_alt_addr_used, OS0_CNN_ALT_ADDRESS_USED, core_id);
            CHECK_CR_CORE_REG(regs->cnn_vcore_mapping, OS0_CNN_VCORE_MAPPING, core_id);
        }
    }
out_error:
    return cmd->conf_top_error;
}
static bool vha_wm_confirm_mmu_regs(struct vha_cmd *cmd)
{
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    uint32_t ctx_id = 0;

    if (vha->mmu_mode == VHA_MMU_DISABLED) {
        CHECK_TOP_REG(VHA_CR_OS(MMU_CTRL_BYPASS_EN), OS0_MMU_CTRL);
        return false;
    }
    for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
        IOWRITE64_CR_REGIO(session->mmu_ctxs[ctx_id].hw_id, OS0_MMU_CBASE_MAPPING_CONTEXT);
        CHECK_TOP_REG(session->mmu_ctxs[ctx_id].pc_baddr, OS0_MMU_CBASE_MAPPING);
    }
out_error:
    return cmd->conf_top_error;
}

static bool vha_wm_confirm_mh_regs(struct vha_cmd *cmd, struct vha_mh_config_regs *regs)
{
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;

    CHECK_TOP_REG(regs->cnn_preload_control, OS0_CNN_PRELOAD_CONTROL);
    CHECK_TOP_REG(regs->req_ctxt_override, REQ_CTXT_OVERRIDE);
    if (regs->slc_control)
        CHECK_TOP_REG(regs->slc_control, SLC_CTRL);
out_error:
    return cmd->conf_top_error;
}
static bool vha_wm_confirm_crc_regs(struct vha_cmd *cmd, struct vha_crc_config_regs *regs)
{
    struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    uint32_t core_id = 0;

    if (session->cnn_dbg.cnn_crc_buf[0] || vha->cnn_combined_crc_enable) {
        for (core_id = 0; core_id < VHA_MAX_CORES; core_id++) {
            if (sched_info->core_mask & (1 << core_id)) {
                uint64_t curr_core = VHA_CORE_ID_TO_MASK(core_id);

                IOWRITE64_CR_REGIO(curr_core, CORE_CTRL_INDIRECT);
                CHECK_CR_CORE_REG(regs->crc_control, OS0_CNN_CRC_CONTROL, core_id);
                CHECK_CR_CORE_REG(regs->crc_mask_ctrl, OS0_CNN_CRC_MASK_CTRL, core_id);
                if (session->cnn_dbg.cnn_crc_buf[0])
                    CHECK_CR_CORE_REG(regs->crc_address[core_id],
                            OS0_CNN_CRC_ADDRESS, core_id);
                if (vha->cnn_combined_crc_enable)
                    CHECK_CR_CORE_REG(regs->crc_combined_address[core_id],
                            OS0_COMBINED_CNN_CRC_ADDRESS, core_id);
            }
        }
    }
    return false;
}
#endif
/*
 * Submit a command stream to the CNN hardware.
 * Input buffers:
 *   command
 *   input
 *   coeff
 * Output buffers:
 *   output
 *   accum_load
 * Data:
 *   none
 */
static int do_cmd_cnn_submit(struct vha_cmd *cmd, uint64_t *rsp_err_flags)
{
    const struct vha_user_cmd *user_cmd =
        (struct vha_user_cmd *)&cmd->user_cmd;
    struct vha_session *session = cmd->session;
    struct vha_hw_sched_info *sched_info = &cmd->hw_sched_info;
    struct vha_dev *vha = session->vha;
    int ret = -EINVAL;
    struct vha_config_regs regs;
    struct vha_mh_config_regs mh_regs;
    struct vha_crc_config_regs crc_regs;
#ifdef VHA_SCF
    int i;
#endif

    memset(&regs, 0, sizeof(regs));
    memset(&mh_regs, 0, sizeof(mh_regs));
    memset(&crc_regs, 0, sizeof(crc_regs));
    regs.socm_base_addr = ~0;
    regs.locm_base_addr = ~0;
    regs.low_level_sync_base_addr = ~0;
#ifdef VHA_SCF
    /* Initialize progress counters with max values possible */
    for (i = 0; i < VHA_NUM_CORES; i++) {
        cmd->layer_count[i] = ~0;
        cmd->pass_count[i] = ~0;
    }
#endif
    if (vha->hw_bypass) {
        ret = -EAGAIN;
        dev_info(vha->dev, "%s skip\n", __func__);
        *rsp_err_flags |= VHA_RSP_ERROR(SW_SKIP_CMD);
        goto out_error;
    }
    img_pdump_printf("-- WM_SETUP_BEGIN\n");
    /* Select WM to submit this cmd to. */
    img_pdump_printf("-- Select WM%u\n", sched_info->wm_id);
    VHA_LOCK_WM();
    VHA_SELECT_WM(sched_info->wm_id);
    /* Wait for the previous kick to be accepted */
    if (vha->low_latency != VHA_LL_DISABLED) {
        /* Sanity wait for the WM kick bit to be deasserted */
        ret = IOPOLL64_CR_PDUMP(0, 1000, 10,
                (uint64_t)VHA_CR_BITMASK(WM_WL_CONTROL, WL_START),
                WM_WL_CONTROL);
        VHA_UNLOCK_WM();
        if (ret) {
            dev_err(vha->dev, "%s: WM%u kick bit read-back failed!\n",
                    __func__, sched_info->wm_id);
            *rsp_err_flags |= VHA_RSP_ERROR(SW_KICK_BIT_READ_BACK_FAILURE);
            goto out_error;
        }
        if (cmd->queued &&
                vha->low_latency == VHA_LL_SW_KICK)
            goto hw_kick;
    } else {
        VHA_UNLOCK_WM();
    }
    ret = -EINVAL;
    if (vha->pendcmd[sched_info->wm_id].cmd != NULL &&
            vha->low_latency == VHA_LL_DISABLED) {
        dev_err(vha->dev, "%s: trying to submit workload on WM%u when hw busy!\n",
                __func__, sched_info->wm_id);
        *rsp_err_flags |= VHA_RSP_ERROR(SW_HW_BUSY);
        goto out_error;
    }
    if (user_cmd->cmd_type == VHA_CMD_CNN_SUBMIT_MULTI) {
        if (!vha_wm_setup_config_regs_multi(cmd, &regs)) {
            dev_err(vha->dev, "%s: invalid cmd info\n", __func__);
            *rsp_err_flags |= VHA_RSP_ERROR(SW_INVALID_CMD_INFO);
            goto out_error;
        }
    } else {
        dev_err(vha->dev, "%s: invalid cmd type %u\n",
                __func__, user_cmd->cmd_type);
        *rsp_err_flags |= VHA_RSP_ERROR(SW_INVALID_CMD_TYPE);
        ret = -EINVAL;
        goto out_error;
    }
    vha_wm_write_config_regs(cmd, &regs);
    /* write the stream size only */
    ret = 0;
    if (vha->pendcmd[cmd->hw_sched_info.wm_id].cmd) {
        vha->queuedcmd[cmd->hw_sched_info.wm_id].cmd = cmd;
        cmd->queued = true;
        vha->stats.cnn_kicks_queued++;
        img_pdump_printf("-- WM%u already kicked, queueing!\n",
                cmd->hw_sched_info.wm_id);
        dev_dbg(vha->dev, "%s: WM%u already kicked. "
                "Queueing -> kicked: 0x%08x/%u, queueing: 0x%08x/%u\n",
                __func__, cmd->hw_sched_info.wm_id,
                vha->pendcmd[cmd->hw_sched_info.wm_id].cmd->user_cmd.cmd_id,
                vha->pendcmd[cmd->hw_sched_info.wm_id].cmd->session->id,
                cmd->user_cmd.cmd_id, session->id);
        if (vha->low_latency == VHA_LL_SW_KICK)
            return ret;
    }
hw_kick:
    /* Operate only on cores assigned to this WM. */
    img_pdump_printf("-- Select cores\n");
    IOWRITE64_CR_PDUMP(vha_wm_get_cores(vha, cmd->hw_sched_info.wm_id),
            CORE_CTRL_INDIRECT);
    /* Change mmu context */
    ret = vha_mmu_setup(cmd->session);
    if (ret) {
        dev_err(vha->dev, "%s: Error during MMU setup!\n", __func__);
        *rsp_err_flags |= VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE);
        goto out_error;
    }
    /* Setup memory stuff */
    vha_dev_mh_setup(vha, session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id, &mh_regs);
    /* Prepare debug buffer registers */
    vha_dbg_prepare_hwbufs(session, cmd, &crc_regs);
    /* Setup cnn hw watchdog before kicking the hw */
    {
        uint64_t wl_cycles, core_cycles;

        vha_wm_hwwdt_calculate(vha, cmd, &wl_cycles, &core_cycles);
        vha_wm_hwwdt_setup(vha, cmd, wl_cycles, core_cycles);
    }
    img_pdump_printf("-- Select WM%d\n", cmd->hw_sched_info.wm_id);
    /* Select WM to setup. */
    VHA_LOCK_WM();
    VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
    /* Generate and set workload id. */
    cmd->wm_cmd_id = ++vha->wm_cmd_id_count;
    cmd->wm_cmd_id = (cmd->wm_cmd_id & VHA_WL_KICK_ID_COUNT_MASK) |
        (cmd->hw_sched_info.wm_id << VHA_WL_KICK_ID_WM_ID_SHIFT);
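    /* Illustrative example of the id composition above (the field widths are
     * assumptions; see the VHA_WL_KICK_ID_* definitions for the real values):
     * with an 8-bit count field and the WM id placed above it, the 6th kick
     * on WM2 would encode as (6 & 0xff) | (2 << 8) = 0x0206. */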
    img_pdump_printf("-- Set workload id: %u\n", cmd->wm_cmd_id);
    IOWRITE64_CR_PDUMP(VHA_CR_SETBITS(WM_WL_ID, WL_ID, cmd->wm_cmd_id), WM_WL_ID);
    VHA_UNLOCK_WM();
    if (CMD_EXEC_ON_HW(cmd)) {
        cmd->in_hw = true;
        if (!cmd->queued)
            vha->pendcmd[cmd->hw_sched_info.wm_id].cmd = cmd;
    }
#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
    /* Mark kick for dummy driver */
    cmd->dummy_kicked = true;
#endif
    /* Consider this WL as kicked. */
    vha->pri_q_counters[cmd->user_cmd.priority]--;
    img_pdump_printf("-- WM_SETUP_END\n");
    /* Remember the time cnn is kicked */
    GETNSTIMEOFDAY(&cmd->hw_proc_start);
    VHA_SET_WM_STAT(vha, hw_proc_start, cmd->hw_sched_info.wm_id, cmd->hw_proc_start);
    /* Need to generate proper pdump */
    if (cmd->queued &&
            vha->low_latency == VHA_LL_SW_KICK) {
        /* Do not write to pdump;
         * this needs to be done after the irq POL */
        VHA_LOCK_WM();
        VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
        IOWRITE64_CR_REGIO(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
        VHA_UNLOCK_WM();
        VHA_INC_WL_STAT(vha, kicks_queued, cmd);
        dev_dbg(vha->dev, "%s: WM%u kick queued for cmd id 0x%08x/%u (WL kick id: 0x%08x)!\n",
                __func__, sched_info->wm_id, cmd->user_cmd.cmd_id, session->id, cmd->wm_cmd_id);
        cmd->queued = false;
    } else {
        img_pdump_printf("-- WM_KICK_BEGIN\n");
        img_pdump_printf("-- Select WM%u\n", sched_info->wm_id);
        VHA_LOCK_WM();
        VHA_SELECT_WM(cmd->hw_sched_info.wm_id);
        img_pdump_printf("-- WM kick!\n");
        IOWRITE64_CR_PDUMP(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
        VHA_UNLOCK_WM();
        if (cmd->queued)
            VHA_INC_WL_STAT(vha, kicks_queued, cmd);
        dev_dbg(vha->dev, "%s: WM%u %skick for cmd id 0x%08x/%u (WL kick id: 0x%08x)!\n",
                __func__, sched_info->wm_id, cmd->queued ? "queued " : "",
                cmd->user_cmd.cmd_id, session->id, cmd->wm_cmd_id);
        img_pdump_printf("-- WM_KICK_END\n");
    }
#ifdef VHA_SCF
    if (vha->confirm_config_reg) {
        if (vha_wm_confirm_config_regs(cmd, &regs))
            goto out_complete;
        if (vha_wm_confirm_mmu_regs(cmd))
            goto out_complete;
        if (vha_wm_confirm_mh_regs(cmd, &mh_regs))
            goto out_complete;
        vha_wm_confirm_crc_regs(cmd, &crc_regs);
out_complete:
        complete(&cmd->conf_done);
    }
#endif
    /* Update kick stats. */
    vha->stats.cnn_kicks++;
    VHA_INC_WL_STAT(vha, kicks, cmd);
    /* Notify any observers of the submit event. */
    if (vha_observers.submitted)
        vha_observers.submitted(vha->id, session->id, user_cmd->cmd_id, false, user_cmd->priority);
out_error:
    if (ret != 0) {
        /* Consider this WL as kicked for errors too. */
        vha->pri_q_counters[cmd->user_cmd.priority]--;
    }
    return ret;
}
/*
 * Append a string to the pdump TXT file.
 * Buffers:
 *   none
 * Data:
 *   string to be printed
 */
static int do_cmd_cnn_pdump_msg(const struct vha_cmd *cmd)
{
    const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    int ret = 0;

    if (user_cmd->num_inbufs != 0 || user_cmd->num_bufs != 0) {
        dev_err(session->vha->dev, ">0 buffers in cmd is wrong\n");
        ret = -EINVAL;
    }
    /* Remember the pdump message may not be null terminated */
    img_pdump_printf("%.*s\n", (int)cmd->size, (char *)user_cmd->data);
    return ret;
}
/*
 * Simple procedure that generates a watchdog interrupt.
 */
void vha_cnn_start_calib(struct vha_dev *vha)
{
    uint64_t core_mask = VHA_CALIBRATION_CORE_MASK;
    uint64_t core_assignment;
    uint64_t val64 = 0;

    /* Use WM0 and core 0. */
    vha_wm_assign_cores(vha, VHA_CALIBRATION_WM_ID, VHA_CALIBRATION_CORE_MASK, &core_assignment);
    IOWRITE64_CR_PDUMP(core_assignment, CORE_ASSIGNMENT);
    /* Operate only on core 0. */
    IOWRITE64_CR_REGIO(core_mask, CORE_CTRL_INDIRECT);
    /* Setup core WDTs. */
    IOWRITE64_CR_REGIO(vha->calibration_cycles, CNN_WDT_COMPAREMATCH);
    val64 = VHA_SET_FIELD_SIMPLE_VAL(CNN_WDT_CTRL, MODE, KICK_PASS);
    IOWRITE64_CR_REGIO(val64, CNN_WDT_CTRL);
    IOWRITE64_CR_REGIO(VHA_CORE_MEM_WDT_CYCLES, CNN_MEM_WDT_COMPAREMATCH);
    val64 = VHA_SET_FIELD_SIMPLE_VAL(CNN_MEM_WDT_CTRL, MODE, KICK_PASS);
    IOWRITE64_CR_REGIO(val64, CNN_MEM_WDT_CTRL);
    /* Disable the command decoder, so we can generate a WDT interrupt
     * without providing any buffer address. */
    val64 = IOREAD64_CR_REGIO(CLK_CTRL0);
    VHA_CR_CLEARBITS(val64, CLK_CTRL0, CNN_CMD);
    IOWRITE64_CR_REGIO(val64, CLK_CTRL0);
    /* To be sure the command decoder clock has switched off. */
    udelay(100);
    /* Enable core only events */
    IOWRITE64_CR_REGIO(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_ENABLE);
    IOWRITE64_CR_REGIO(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_HOST_CLEAR);
    /* Set minimum command stream size. */
    val64 = VHA_CR_SETBITS(OS0_CNN_CONTROL, CMD_SIZE_MIN1, (2048U/32-1));
    IOWRITE64_CR_REGIO(val64, OS0_CNN_CONTROL);
    /* Enable MMU bypass */
    IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
            VHA_CR_OS(MMU_CTRL));
    VHA_LOCK_WM();
    /* Select WM0 for calibration. */
    VHA_SELECT_WM(VHA_CALIBRATION_WM_ID);
    /* Disable WM events */
    IOWRITE64_CR_REGIO(0, WM_EVENT_ENABLE);
    /* Start WM0. */
    IOWRITE64_CR_REGIO(VHA_CR_WM_WL_CONTROL_WL_START_EN, WM_WL_CONTROL);
    VHA_UNLOCK_WM();
    /* Remember the time WM0 is kicked */
    GETNSTIMEOFDAY(&vha->stats.wm_stats[VHA_CALIBRATION_WM_ID].hw_proc_start);
}
void vha_cnn_update_stats(struct vha_dev *vha)
{
    vha->stats.cnn_last_proc_us =
        vha->stats.last_proc_us;
    vha->stats.cnn_total_proc_us +=
        vha->stats.last_proc_us;
    if (vha->stats.cnn_kicks) {
        uint64_t avg = vha->stats.cnn_total_proc_us;

        do_div(avg, vha->stats.cnn_kicks);
        vha->stats.cnn_avg_proc_us = avg;
    }
    if (vha->stats.cnn_last_cycles && vha->freq_khz) {
        uint64_t est_proc_us = 1000UL * vha->stats.cnn_last_cycles;

        do_div(est_proc_us, vha->freq_khz);
        vha->stats.cnn_last_est_proc_us = est_proc_us;
    }
    vha->stats.cnn_total_cycles += vha->stats.cnn_last_cycles;
    if (vha->stats.cnn_kicks &&
            vha->stats.cnn_total_cycles && vha->freq_khz) {
        uint64_t avg = 1000UL * vha->stats.cnn_total_cycles;

        do_div(avg, vha->stats.cnn_kicks);
        do_div(avg, vha->freq_khz);
        vha->stats.cnn_avg_est_proc_us = avg;
    }
}
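
/*
 * Worked example of the cycle-based estimate above (illustrative numbers):
 * at freq_khz = 800000 (i.e. 800 MHz), a workload of cnn_last_cycles =
 * 4000000 yields est_proc_us = 1000 * 4000000 / 800000 = 5000 us.
 */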
/*
 * A command has completed. Send notification to user.
 */
void vha_cnn_cmd_completed(struct vha_cmd *cmd, uint64_t status, int err, uint64_t rsp_err_flags)
{
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    struct vha_rsp *rsp = NULL;
    int i;
    struct vha_user_cnn_submit_rsp *cnn_submit_rsp = NULL;
    const struct vha_user_cmd *user_cmd = &cmd->user_cmd;

    switch (user_cmd->cmd_type) {
    case VHA_CMD_CNN_SUBMIT_MULTI:
    {
        size_t mem_usage;
        /* allocate sufficient space for the response */
        size_t sz = sizeof(*rsp)
            + sizeof(struct vha_user_cnn_submit_rsp)
            - sizeof(struct vha_user_rsp);
#ifdef VHA_SCF
        uint64_t wm_fifo_ready =
            VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_EN |
            VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_EN;
        uint64_t wm_fifo_mask =
            VHA_WM_EVENTS_DEFAULT | VHA_CR_WM_EVENT_STATUS_TYPE_PARITY_EN;
        uint64_t wm_fifo_status_success =
            VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN |
            VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN;
        uint64_t wm_fifo_status_mask =
            VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL;
#else
        uint64_t wm_fifo_ready =
            VHA_CR_WM_EVENT_STATUS_TYPE_RESPONSE_FIFO_READY_EN;
        uint64_t wm_fifo_mask = VHA_WM_EVENTS_DEFAULT;
        uint64_t wm_fifo_status_success =
            VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_SUCCESS_EN;
        uint64_t wm_fifo_status_mask =
            VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_MASKFULL &
            ~VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_PARITY_EN;
#endif
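        /* Note: in VHA_SCF builds the parity bit is part of the expected
         * success pattern above, while non-SCF builds mask it out of the
         * status comparison entirely. */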
  967. uint64_t wm_src_mask = VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM,
  968. VHA_WM_ID_TO_MASK(cmd->hw_sched_info.wm_id)) |
  969. VHA_SET_FIELD_SIMPLE_VAL(HOST_EVENT_SOURCE, SYS, EN) |
  970. VHA_SET_FIELD_SIMPLE_FULL(HOST_EVENT_SOURCE, CORE) |
  971. VHA_SET_FIELD_SIMPLE_FULL(HOST_EVENT_SOURCE, IC);
  972. uint32_t num_cores;
  973. uint32_t outbuf_offset;
  974. uint32_t outbuf_last_idx;
  975. uint32_t outbuf_data_offset;
  976. uint32_t* bufoffsets;
  977. uint32_t* bufsizes;
  978. struct vha_user_cnn_submit_multi_cmd *msg;
  979. rsp = kzalloc(sz, GFP_KERNEL);
  980. if (rsp == NULL) {
  981. session->oom = true;
  982. return;
  983. }
  984. cnn_submit_rsp = (struct vha_user_cnn_submit_rsp*)&rsp->user_rsp;
  985. rsp->size = sizeof(struct vha_user_cnn_submit_rsp);
  986. if (vha->hw_bypass) {
  987. vha->hw_bypass--;
  988. break;
  989. }
  990. dev_dbg(vha->dev, "%s: 0x%08x/%u\n", __func__, cmd->user_cmd.cmd_id, session->id);
  991. img_pdump_printf("-- WM_WAIT_BEGIN\n");
  992. /* pdump POL for event source change
  993. * count=cnn_pdump_poll_count, delay=1000cycles */
  994. img_pdump_printf("-- Wait for WM%u or any event source to be signalled\n"
  995. "POL :REG:%#x 0 %#llx 3 %u 1000\n",
  996. cmd->hw_sched_info.wm_id,
  997. VHA_CR_HOST_EVENT_SOURCE,
  998. wm_src_mask,
  999. cnn_pdump_poll_count);
  1000. /* quick pdump POL for the related WM source flag only:
  1001. * count=1, delay=10cycles */
  1002. img_pdump_printf("-- Check for WM%u source, all COREs/ICs & SYS\n"
  1003. "POL :REG:%#x %#llx 0x%llx 0 %u 10\n",
  1004. cmd->hw_sched_info.wm_id,
  1005. VHA_CR_HOST_EVENT_SOURCE,
  1006. VHA_CR_SETBITS(HOST_EVENT_SOURCE, WM,
  1007. VHA_WM_ID_TO_MASK(cmd->hw_sched_info.wm_id)),
  1008. wm_src_mask,
  1009. wm_pdump_poll_count);
  1010. /* quick pdump POL for the FIFO_READY flag only in related WM:
  1011. * count=1, delay=10cycles */
  1012. img_pdump_printf("-- Select WM%u\n"
  1013. "WRW64 :REG:%#x %#llx\n",
  1014. cmd->hw_sched_info.wm_id,
  1015. VHA_CR_TLC_WM_INDIRECT,
  1016. (uint64_t)cmd->hw_sched_info.wm_id);
  1017. img_pdump_printf("-- Check for WM%u FIFO_READY flag\n"
  1018. "POL :REG:%#x %#llx 0x%llx 0 1 10\n",
  1019. cmd->hw_sched_info.wm_id,
  1020. VHA_CR_WM_EVENT_STATUS,
  1021. wm_fifo_ready,
  1022. wm_fifo_mask);
  1023. /* quick pdump POL for AXI errors:
  1024. * count=1, delay=10cycles
  1025. */
  1026. img_pdump_printf("-- Post check of AXI status\n"
  1027. "POL :REG:%#x 0 0xffffffff 0 1 10\n",
  1028. VHA_CR_ACE_STATUS);
  1029. /* We do clear interrupts in the irq handler,
  1030. * but this is not recorded into pdump because
  1031. * of the irq context, so do it here */
  1032. img_pdump_printf("-- Clear SYS events\n"
  1033. "WRW64 :REG:%#x %#x\n",
  1034. VHA_CR_SYS_EVENT_CLEAR,
  1035. VHA_SYS_EVENTS_DEFAULT);
  1036. img_pdump_printf("-- Clear WM%u events\n"
  1037. "WRW64 :REG:%#x %#x\n",
  1038. cmd->hw_sched_info.wm_id,
  1039. VHA_CR_WM_EVENT_CLEAR,
  1040. VHA_WM_EVENTS_DEFAULT);
  1041. img_pdump_printf("-- Select core assigned to WM%u\n"
  1042. "WRW64 :REG:%#x %#x\n",
  1043. cmd->hw_sched_info.wm_id,
  1044. VHA_CR_CORE_CTRL_INDIRECT,
  1045. cmd->hw_sched_info.core_mask);
  1046. img_pdump_printf("-- Clear core events\n"
  1047. "WRW64 :REG:%#x %#x\n",
  1048. VHA_CR_CORE_EVENT_HOST_CLEAR,
  1049. VHA_CORE_EVENTS_DEFAULT);
  1050. img_pdump_printf("-- Check RESPONSE_FIFO status for WM%u\n"
  1051. "POL :REG:%#x %#llx 0x%llx 0 1 10\n",
  1052. cmd->hw_sched_info.wm_id,
  1053. VHA_CR_WM_RESPONSE_FIFO_WL_STATUS,
  1054. wm_fifo_status_success,
  1055. wm_fifo_status_mask);
  1056. img_pdump_printf("-- Check RESPONSE_FIFO workload id for WM%u\n"
  1057. "POL :REG:%#x %#llx 0x%llx 0 1 10\n",
  1058. cmd->hw_sched_info.wm_id,
  1059. VHA_CR_WM_RESPONSE_FIFO_WL_ID,
  1060. (uint64_t)cmd->wm_cmd_id,
  1061. VHA_CR_WM_RESPONSE_FIFO_WL_ID_MASKFULL);
  1062. /* Pop the RESPONSE_FIFO */
  1063. img_pdump_printf("-- Pop RESPONSE_FIFO for WM%u\n"
  1064. "WRW64 :REG:%#x %#x\n",
  1065. cmd->hw_sched_info.wm_id,
  1066. VHA_CR_WM_RESPONSE_FIFO_READ,
  1067. VHA_CR_WM_RESPONSE_FIFO_READ_FIFO_READ_EN);
  1068. #ifdef CONFIG_VHA_DUMMY
  1069. vha_wm_release_cores(session->vha,
  1070. cmd->hw_sched_info.core_mask, true);
  1071. #endif
  1072. /* Try to flush hw debug buffers first
  1073. * - this does pdump SAB when proper checkpoint is set */
  1074. vha_dbg_flush_hwbufs(session, 1, cmd->hw_sched_info.core_mask);
  1075. /* pdump SAB for each of the output buffers */
  1076. img_pdump_printf("-- Save outputs\n");
  1077. msg = container_of(user_cmd, struct vha_user_cnn_submit_multi_cmd, msg);
  1078. num_cores = msg->num_cores;
  1079. outbuf_offset = VHA_MAX_CORES + (user_cmd->num_inbufs - num_cores);
  1080. outbuf_last_idx = VHA_MAX_CORES + user_cmd->num_bufs - 1;
  1081. outbuf_data_offset = user_cmd->num_inbufs - num_cores;
  1082. bufoffsets = msg->bufoffsets;
  1083. bufsizes = msg->bufsizes;
  1084. /* There should be at least on output buffer */
  1085. WARN_ON(outbuf_last_idx <= outbuf_offset);
  1086. for (i = outbuf_offset; i < outbuf_last_idx; i++) {
  1087. struct vha_buffer *buf;
  1088. uint32_t offset;
  1089. uint32_t size;
  1090. buf = vha_find_bufid(session, user_cmd->data[i]);
  1091. if (buf == NULL) {
  1092. dev_err(vha->dev,
  1093. "%s: invalid buffer id:%d\n",
  1094. __func__, user_cmd->data[i]);
  1095. continue;
  1096. }
  1097. offset = bufoffsets[outbuf_data_offset];
  1098. size = bufsizes[outbuf_data_offset];
  1099. outbuf_data_offset++;
  1100. vha_pdump_sab_buf(session, PDUMP_RES, buf, offset, size);
  1101. /* Update status, do not signal fence yet,
  1102. * it's is done explicitly below, after cache invalidation */
  1103. vha_set_buf_status(session, buf->id, VHA_BUF_FILLED_BY_HW,
  1104. VHA_SYNC_NONE, false);
  1105. if (vha_buf_needs_inval(session, buf->id) && !status)
  1106. img_mem_sync_device_to_cpu(session->mem_ctx, buf->id);
  1107. #ifdef KERNEL_DMA_FENCE_SUPPORT
  1108. img_mem_signal_fence(session->mem_ctx, buf->id);
  1109. #endif
  1110. }
        if (session->vha->low_latency == VHA_LL_SW_KICK) {
            struct vha_cmd *qcmd =
                    session->vha->queuedcmd[cmd->hw_sched_info.wm_id].cmd;
            if (qcmd && qcmd->queued) {
                /* Setup kick info */
                img_pdump_printf("-- CNN kick (queued)!\n");
                img_pdump_printf("WRW64 :REG:%#x %#x\n",
                        VHA_CR_WM_WL_CONTROL, VHA_CR_WM_WL_CONTROL_WL_START_EN);
            }
        }
        img_pdump_printf("-- WM_WAIT_END\n");

        img_mem_get_usage(session->mem_ctx, NULL, &mem_usage);
        /* send out an event when submit is complete */
        if (vha_observers.completed)
            vha_observers.completed(
                    session->vha->id,
                    session->id,
                    user_cmd->cmd_id,
                    status,
                    session->vha->stats.cnn_last_cycles,
                    mem_usage,
                    user_cmd->priority);

        /* post some metrics about the hw to user space */
#ifdef MEM_USAGE_LAST_METRICS_ARE_AVAILABLE
        cnn_submit_rsp->mem_usage = mem_usage;
#else
        cnn_submit_rsp->mem_usage = ~0;
#endif
        cnn_submit_rsp->last_proc_us = session->vha->stats.cnn_last_proc_us;
        cnn_submit_rsp->hw_cycles = session->vha->stats.cnn_last_cycles;
        dev_dbg(session->vha->dev, "%s: 0x%08x/%u, hw_cycles %llx\n", __func__,
                cmd->user_cmd.cmd_id, session->id,
                session->vha->stats.cnn_last_cycles);
        if (session->vha->stats.cnn_last_cycles > (uint32_t)~0)
            dev_warn(session->vha->dev,
                    "%s: hw_cycles %llx exceeds 32bit limit\n",
                    __func__,
                    session->vha->stats.cnn_last_cycles);
        break;
    }
    case VHA_CMD_CNN_PDUMP_MSG:
    default:
        /* allocate space for standard response */
        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
        if (rsp == NULL) {
            session->oom = true;
            return;
        }
        rsp->size = sizeof(rsp->user_rsp);
        break;
    }
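    /*
     * The response is only handed back to the caller when the workload was
     * submitted with VHA_CMDFLAG_NOTIFY; otherwise it is freed here. In
     * hw_bypass mode the error code is masked to 0, since no real hardware
     * result exists to report.
     */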
    if (user_cmd->flags & VHA_CMDFLAG_NOTIFY) {
        rsp->user_rsp.cmd_id = cmd->user_cmd.cmd_id;
        rsp->user_rsp.err_no = session->vha->hw_bypass ? 0 : err;
        rsp->user_rsp.rsp_err_flags = rsp_err_flags;
        cmd->rsp = rsp;
    } else
        kfree(rsp);
}

static uint32_t get_estimated_cycles(const struct vha_user_cmd *user_cmd)
{
    const struct vha_user_cnn_submit_multi_cmd *cnn_user_cmd =
            (struct vha_user_cnn_submit_multi_cmd *)user_cmd;
    return cnn_user_cmd->estimated_cycles;
}

/*
 * Perform a command, as requested by the user.
 * Note: this function is called with vha_dev.lock held.
 */
int vha_do_cnn_cmd(struct vha_cmd *cmd)
{
    struct vha_session *session = cmd->session;
    struct vha_dev *vha = session->vha;
    const struct vha_user_cmd *user_cmd = &cmd->user_cmd;
    int err = -EINVAL;
    uint64_t rsp_err_flags = 0;

    dev_dbg(vha->dev,
            "%s: WL id:0x%08x type:%x nin:%x nbufs:%x\n",
            __func__, user_cmd->cmd_id, user_cmd->cmd_type,
            user_cmd->num_inbufs, user_cmd->num_bufs);
    print_hex_dump_debug("VHA CMD: ", DUMP_PREFIX_NONE, 4, 4,
            user_cmd, ALIGN(cmd->size, 4), false);

    switch (user_cmd->cmd_type) {
    case VHA_CMD_CNN_SUBMIT_MULTI:
        err = do_cmd_cnn_submit(cmd, &rsp_err_flags);
#ifdef CONFIG_VHA_DUMMY_SIMULATE_HW_PROCESSING_TIME
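        /*
         * Simulated execution time, for illustration (values assumed):
         * freq_khz / 1000 is the clock rate in cycles per microsecond, so
         * with estimated_cycles = 800000 and freq_khz = 800000 (800 MHz),
         * dummy_exec_time = 800000 / (800000 / 1000) = 1000 us, i.e. the
         * dummy workload "completes" after roughly 1 ms.
         */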
        if (cmd->dummy_kicked) {
            uint32_t estimated_cycles = get_estimated_cycles(user_cmd);
            if (estimated_cycles == 0)
                estimated_cycles = VHA_DUMMY_HW_PROCESSING_TIME_CYCLES;
            cmd->dummy_exec_time = (estimated_cycles / (vha->freq_khz / 1000));
            if (cmd->hw_sched_info.wm_id < vha->hw_props.num_cnn_core_devs)
                schedule_delayed_work(
                        &vha->dummy_dworks[cmd->hw_sched_info.wm_id].dummy_dwork,
                        usecs_to_jiffies(cmd->dummy_exec_time));
            cmd->dummy_kicked = false;
        }
#endif
        break;
    case VHA_CMD_CNN_PDUMP_MSG:
        err = do_cmd_cnn_pdump_msg(cmd);
        break;
    default:
        break;
    }

    /*
     * Immediately send a notification to the user if not using hw at all
     * or if submitting failed.
     */
    if (!CMD_EXEC_ON_HW(cmd) || err) {
        bool is_cnn_cmd = CMD_IS_CNN(cmd);

        vha_cnn_cmd_completed(cmd,
                err ? (uint64_t)VHA_CR_WM_RESPONSE_FIFO_WL_STATUS_WL_FAILURE_EN : 0ULL,
                err, rsp_err_flags);
        if (is_cnn_cmd) {
            if (rsp_err_flags & VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE))
                vha_wm_release_cores(vha, cmd->hw_sched_info.core_mask, false);
            /* Free current command */
            vha_dev_free_cmd_res(vha, cmd, false);
        }
        vha_cmd_notify(cmd);
        if (is_cnn_cmd) {
            if (rsp_err_flags & VHA_RSP_ERROR(SW_MMU_SETUP_FAILURE)) {
                /* Roll back commands being processed to perform a full reset */
                vha_rollback_cmds(vha);
                /* Perform stop & reset, as needed */
                vha_dev_stop(vha, true);
                /* Reschedule commands */
                vha_chk_cmd_queues(vha, true);
            }
        }
        return 1;
    }
    return 0;
}
uint8_t vha_wm_get_cores(struct vha_dev *vha, uint8_t wm_id)
{
    uint8_t core_mask = 0;
    uint64_t wm_core_assignment;

#define CHECK_CORE_ASSIGNMENT(c) \
    if (wm_id == VHA_CR_GETBITS(CORE_ASSIGNMENT, CORE_##c##_WM_MAPPING, \
            wm_core_assignment)) \
        core_mask |= (1 << c);

    wm_core_assignment = vha->wm_core_assignment;
    dev_dbg(vha->dev, "%s: %llx\n", __func__, wm_core_assignment);
    CHECK_CORE_ASSIGNMENT(0);
    CHECK_CORE_ASSIGNMENT(1);
    CHECK_CORE_ASSIGNMENT(2);
    CHECK_CORE_ASSIGNMENT(3);
    CHECK_CORE_ASSIGNMENT(4);
    CHECK_CORE_ASSIGNMENT(5);
    CHECK_CORE_ASSIGNMENT(6);
    CHECK_CORE_ASSIGNMENT(7);
#undef CHECK_CORE_ASSIGNMENT

    return core_mask;
}
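/*
 * For reference, CHECK_CORE_ASSIGNMENT(3) expands to (roughly):
 *
 *   if (wm_id == VHA_CR_GETBITS(CORE_ASSIGNMENT, CORE_3_WM_MAPPING,
 *           wm_core_assignment))
 *       core_mask |= (1 << 3);
 *
 * i.e. each core's WM_MAPPING field in the cached assignment word is
 * compared against wm_id, and the matching cores are collected into a
 * bitmask.
 */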
void vha_wm_assign_cores(struct vha_dev *vha, uint8_t wm_id, uint8_t core_mask,
        uint64_t *core_assignment)
{
    uint64_t wm_core_assignment = vha->wm_core_assignment;
    uint32_t assignment_field_shift =
            VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_SHIFT -
            VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_SHIFT;
    uint64_t assignment_field_mask =
            ~VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_CLRMSK;
    uint64_t wm_core_assignment_orig = wm_core_assignment;

    while (core_mask != 0) {
        uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(core_mask);

        core_mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
        wm_core_assignment &=
                ~(assignment_field_mask << (curr_core_id * assignment_field_shift));
        /* Widen wm_id to 64 bits so the shift cannot overflow the promoted
         * int for high core ids. */
        wm_core_assignment |=
                (uint64_t)wm_id << (curr_core_id * assignment_field_shift);
    }
    dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__,
            wm_core_assignment_orig, wm_core_assignment);
    *core_assignment = wm_core_assignment;
    vha->wm_core_assignment = wm_core_assignment;
}
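/*
 * Worked example (field width assumed purely for illustration): if each
 * WM_MAPPING field is 4 bits wide, assigning cores 1 and 2 (core_mask 0x06)
 * to WM1 first clears bits [11:4] of the assignment word, then writes 0x1
 * into each of the two 4-bit fields, i.e. those bits become 0x11 (0x110
 * counting from bit 0).
 */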
static void wm_release_socm(struct vha_dev *vha, uint8_t core_mask, bool to_pdump)
{
    uint64_t cur_assignment = IOREAD64_CR_REGIO(SOCM_BUF_ASSIGNMENT);
    uint32_t assignment_field_shift =
            VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_1_WM_MAPPING_SHIFT -
            VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_SHIFT;
    uint64_t assignment_field_mask =
            ~VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_CLRMSK;
    uint64_t new_assignment = cur_assignment;
    uint64_t mask = core_mask;

    while (mask != 0) {
        uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(mask);

        mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
        new_assignment &=
                ~(assignment_field_mask << (curr_core_id * assignment_field_shift));
        new_assignment |= VHA_CR_SOCM_BUF_ASSIGNMENT_SOCM_BUF_0_WM_MAPPING_UNALLOCATED
                << (curr_core_id * assignment_field_shift);
    }
    if (cur_assignment == new_assignment) {
        dev_dbg(vha->dev, "%s: %llx -> %llx (no change)\n", __func__,
                cur_assignment, new_assignment);
        return;
    }
    dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__,
            cur_assignment, new_assignment);
    if (to_pdump) {
        img_pdump_printf("-- Release SOCM on cores 0x%02x\n", core_mask);
        IOWRITE64_CR_PDUMP(new_assignment, SOCM_BUF_ASSIGNMENT);
    } else
        IOWRITE64_CR_REGIO(new_assignment, SOCM_BUF_ASSIGNMENT);
}
void vha_wm_release_cores(struct vha_dev *vha, uint8_t core_mask, bool to_pdump)
{
    uint64_t cur_assignment = vha->wm_core_assignment;
    uint32_t assignment_field_shift =
            VHA_CR_CORE_ASSIGNMENT_CORE_1_WM_MAPPING_SHIFT -
            VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_SHIFT;
    uint64_t assignment_field_mask =
            ~VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_CLRMSK;
    uint64_t new_assignment = cur_assignment;
    uint64_t mask = core_mask;

    wm_release_socm(vha, core_mask, to_pdump);

    while (mask != 0) {
        uint32_t curr_core_id = VHA_CORE_MASK_TO_ID(mask);

        mask &= ~(VHA_CORE_ID_TO_MASK(curr_core_id));
        new_assignment &=
                ~(assignment_field_mask << (curr_core_id * assignment_field_shift));
        new_assignment |=
                VHA_CR_CORE_ASSIGNMENT_CORE_0_WM_MAPPING_UNALLOCATED <<
                        (curr_core_id * assignment_field_shift);
    }
    if (cur_assignment == new_assignment) {
        dev_dbg(vha->dev, "%s: %llx -> %llx (no change)\n", __func__,
                cur_assignment, new_assignment);
        return;
    }
    dev_dbg(vha->dev, "%s: %llx -> %llx\n", __func__,
            cur_assignment, new_assignment);
    if (to_pdump) {
        img_pdump_printf("-- Release cores 0x%02x\n", core_mask);
        IOWRITE64_CR_PDUMP(new_assignment, CORE_ASSIGNMENT);
    } else
        IOWRITE64_CR_REGIO(new_assignment, CORE_ASSIGNMENT);
    vha->wm_core_assignment = new_assignment;
}
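/*
 * Releasing is the inverse of assignment: every WM_MAPPING field owned by
 * the released cores is rewritten to the UNALLOCATED value, first for the
 * SOCM buffers and then for the cores themselves, so a subsequent
 * vha_wm_get_cores() no longer reports those cores for any WM.
 */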
int vha_wm_reset(struct vha_dev *vha, struct vha_hw_sched_info *sched_info)
{
    uint64_t val64 = 0;
    uint64_t wm_reset_val64 = 0;
    uint8_t wm_cores_mask = 0;
    uint8_t core_mask = 0;
    uint8_t id;
    int ret = 0;

    dev_dbg(vha->dev, "%s: WM%d\n", __func__, sched_info->wm_id);
    img_pdump_printf("-- WM level RESET sequence BEGIN\n");

    /* Perform reset procedure */
    /* Operate only on cores assigned to this WM. */
    wm_cores_mask = sched_info->core_mask;

    /* Core Level Reset Assertion:
     * 4. Force global clocks on for the current cores (others set to AUTO). */
    img_pdump_printf("-- Force global clocks ON for all cores assigned to WM%u"
            " (others set to AUTO)\n", sched_info->wm_id);
    val64 = VHA_SYS_CLOCK_MODE(INTERCONNECT, ON) |
            VHA_SYS_CLOCK_MODE_MULTI(CORE, ON, wm_cores_mask) |
            VHA_SYS_CLOCK_MODE_MULTI(CORE, AUTO, (uint8_t)~wm_cores_mask) |
            VHA_SYS_CLOCK_MODE_MULTI(NOC, AUTO, ~0) |
            VHA_SYS_CLOCK_MODE_MULTI(WM, AUTO, ~0) |
            VHA_SYS_CLOCK_MODE(AXI, AUTO) |
            VHA_SYS_CLOCK_MODE(SLC, AUTO) |
            VHA_SYS_CLOCK_MODE(LSYNC, AUTO) |
            VHA_SYS_CLOCK_MODE(SOCM, AUTO) |
            VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
    IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);

    /* WM reset procedure start. */
    /* Move this WM into reset state. */
    img_pdump_printf("-- Move WM%u into reset state\n", sched_info->wm_id);
    wm_reset_val64 = VHA_CR_SETBITS(SYS_RESET_CTRL, WM,
            VHA_WM_ID_TO_MASK(sched_info->wm_id));
    IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
    /* Dummy read to avoid race conditions in the hw */
    val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);

    /* Core Level Reset Sequence */
    /* Proceed core by core. */
    while (wm_cores_mask) {
        /* Reset Assertion */
        /* 1. Select current core. */
        id = ffs(wm_cores_mask) - 1;
        img_pdump_printf("-- Select core%u\n", id);
        core_mask = VHA_CORE_ID_TO_MASK(id);
        wm_cores_mask &= ~core_mask;
        IOWRITE64_CR_PDUMP(core_mask, CORE_CTRL_INDIRECT);
        /* 3. Disable page fault interrupts for the core while resetting. */
        img_pdump_printf("-- Disable page fault interrupts for core%u\n", id);
        val64 = IOREAD64_CR_REGIO(SYS_EVENT_ENABLE);
        val64 &= ~(VHA_CR_SETBITS(SYS_EVENT_ENABLE, MMU_PAGE_FAULT, core_mask));
        IOWRITE64_CR_PDUMP(val64, SYS_EVENT_ENABLE);
        /* 5. Set all core level clocks to AUTO. */
        img_pdump_printf("-- Set all core%u level clocks to AUTO\n", id);
        val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
        IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
        /* 6. Move core into soft reset. */
        img_pdump_printf("-- Perform soft reset on core%u\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_SOFT_RESET, CORE_RESET, EN);
        IOWRITE64_CR_PDUMP(val64, CORE_SOFT_RESET);
        /* Dummy read to avoid race conditions in the hw. */
        val64 = IOREAD64_CR_PDUMP(CORE_SOFT_RESET);
        /* Clear reset. */
        IOWRITE64_CR_PDUMP(0, CORE_SOFT_RESET);
        /* 7. Wait until core memory bus reset has completed. */
        img_pdump_printf("-- Wait until core%u memory bus reset has completed\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE, EN);
        ret = IOPOLL64_CR_PDUMP(val64, 1000, 1000,
                (uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, MEMBUS_RESET_DONE),
                CORE_EVENT_HOST_STATUS);
        if (ret)
            return ret;
        /* 8. Clear core memory bus reset interrupt. */
        img_pdump_printf("-- Clear core%u memory bus reset interrupt\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, MEMBUS_RESET_DONE, EN);
        IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
        /* 9. Clear the core indirect register. */
        img_pdump_printf("-- Deselect core%u\n", id);
        IOWRITE64_CR_PDUMP(0, CORE_CTRL_INDIRECT);
        /* 10. Ensure no resets are pending. */
        img_pdump_printf("-- Ensure no resets are pending\n");
        IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
        /* 11. Move current core into full reset state. Leave WM in reset. */
        img_pdump_printf("-- Move core%u into full reset state\n", id);
        val64 = VHA_CR_SETBITS(SYS_RESET_CTRL, CORE, core_mask);
        val64 |= wm_reset_val64;
        IOWRITE64_CR_PDUMP(val64, SYS_RESET_CTRL);
        /* 12. Dummy read to avoid race conditions in the hw. */
        val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);

        /* Reset Deassertion */
        /* 1. Move current core out of reset state. */
        img_pdump_printf("-- Move core%u out of reset state\n", id);
        IOWRITE64_CR_PDUMP(wm_reset_val64, SYS_RESET_CTRL);
        /* Dummy read to avoid race conditions in the hw. */
        val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
        /* 2. Select current core again. */
        img_pdump_printf("-- Select core%u again\n", id);
        IOWRITE64_CR_PDUMP(core_mask, CORE_CTRL_INDIRECT);
        /* 5. Force core clocks to ON for everything. */
        img_pdump_printf("-- Force core clocks ON for everything\n");
        val64 = VHA_MAIN_CLOCKS_DEFAULT(ON);
        IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
        /* 6. Perform core level RAM initialisation. */
        img_pdump_printf("-- Perform core%u level RAM initialisation\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(FUSA_CONTROL, ECC_INIT_KICK, EN);
        IOWRITE64_CR_PDUMP(val64, FUSA_CONTROL);
        /* 7. Perform LOCM scrubbing. */
        img_pdump_printf("-- Perform core%u LOCM scrubbing\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(LOCM_SCRUB_CTRL, KICK, EN);
        IOWRITE64_CR_PDUMP(val64, LOCM_SCRUB_CTRL);
        /* 8. Wait until the RAM initialisation sequence has completed. */
        img_pdump_printf("-- Wait until the RAM initialisation sequence has completed\n");
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
        ret = IOPOLL64_CR_PDUMP(val64, 100, 1000,
                (uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE),
                CORE_EVENT_HOST_STATUS);
        if (ret)
            return ret;
        /* 9. Clear core RAM reset interrupt. */
        img_pdump_printf("-- Clear core%u RAM reset interrupt\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, RAM_INIT_DONE, EN);
        IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
        /* Confirm that the 'RAM_INIT_DONE' field is cleared. */
        img_pdump_printf("-- Confirm that core%u RAM reset interrupt is cleared\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, RAM_INIT_DONE, EN);
        ret = IOPOLL64_CR_PDUMP(0ULL, 10, 10, val64, CORE_EVENT_HOST_STATUS);
        if (ret)
            return ret;
        /* 10. Wait until the LOCM scrubbing sequence has completed. */
        img_pdump_printf("-- Wait until the LOCM scrubbing sequence has completed\n");
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
        ret = IOPOLL64_CR_PDUMP(val64, 1000, 1000,
                (uint64_t)VHA_CR_BITMASK(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE),
                CORE_EVENT_HOST_STATUS);
        if (ret)
            return ret;
        /* 11. Deassert core LOCM scrubbing. */
        img_pdump_printf("-- Deassert core%u LOCM scrubbing\n", id);
        IOWRITE64_CR_PDUMP(0, LOCM_SCRUB_CTRL);
        /* 12. Clear core LOCM scrub interrupt. */
        img_pdump_printf("-- Clear core%u LOCM scrub interrupt\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_CLEAR, LOCM_SCRUB_DONE, EN);
        IOWRITE64_CR_PDUMP(val64, CORE_EVENT_HOST_CLEAR);
        /* Confirm that the 'LOCM_SCRUB_DONE' field is cleared. */
        img_pdump_printf("-- Confirm that core%u LOCM scrub interrupt is cleared\n", id);
        val64 = VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_HOST_STATUS, LOCM_SCRUB_DONE, EN);
        ret = IOPOLL64_CR_PDUMP(0ULL, 10, 10, val64, CORE_EVENT_HOST_STATUS);
        if (ret)
            return ret;
        /* 13. Enable the interrupts from core to WM. */
        img_pdump_printf("-- Enable CORE events to WM\n");
        IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT, CORE_EVENT_WM_ENABLE);
        /* 14. Clear all status from CORE_EVENT_WM (clears the RAM_INIT_DONE). */
        img_pdump_printf("-- Clear CORE events on WM\n");
        IOWRITE64_CR_PDUMP(VHA_CORE_EVENTS_DEFAULT |
                VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, RAM_INIT_DONE, EN) |
                VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, LOCM_SCRUB_DONE, EN) |
                VHA_SET_FIELD_SIMPLE_VAL(CORE_EVENT_WM_CLEAR, MEMBUS_RESET_DONE, EN),
                CORE_EVENT_WM_CLEAR);
        /* 15. Enable the interrupts from interconnect to WM. */
        img_pdump_printf("-- Enable INTERCONNECT events to WM\n");
        IOWRITE64_CR_PDUMP(VHA_IC_EVENTS_DEFAULT, INTERCONNECT_EVENT_WM_ENABLE);
        /* 16. Disable all interrupts from the CORE to the HOST. */
        img_pdump_printf("-- Disable CORE events on host\n");
        IOWRITE64_CR_PDUMP(0, CORE_EVENT_HOST_ENABLE);
        /* 17. Set all core level clocks back to AUTO. */
        img_pdump_printf("-- Set all core%u level clocks back to AUTO\n", id);
        val64 = VHA_MAIN_CLOCKS_DEFAULT(AUTO);
        IOWRITE64_CR_PDUMP(val64, CLK_CTRL0);
        /* 18. Set core global clock back to AUTO. */
        img_pdump_printf("-- Set core%u global clock back to AUTO (others set to ON or AUTO)\n", id);
        if (wm_cores_mask == 0) {
            val64 = VHA_SYS_CLOCKS_DEFAULT(AUTO);
            IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
        } else {
            val64 = VHA_SYS_CLOCK_MODE(INTERCONNECT, ON) |
                    VHA_SYS_CLOCK_MODE_MULTI(CORE, ON, wm_cores_mask) |
                    VHA_SYS_CLOCK_MODE_MULTI(CORE, AUTO, (uint8_t)~wm_cores_mask) |
                    VHA_SYS_CLOCK_MODE_MULTI(NOC, AUTO, ~0) |
                    VHA_SYS_CLOCK_MODE_MULTI(WM, AUTO, ~0) |
                    VHA_SYS_CLOCK_MODE(AXI, AUTO) |
                    VHA_SYS_CLOCK_MODE(SLC, AUTO) |
                    VHA_SYS_CLOCK_MODE(LSYNC, AUTO) |
                    VHA_SYS_CLOCK_MODE(SOCM, AUTO) |
                    VHA_SYS_CLOCK_MODE(REGBANK, AUTO);
            IOWRITE64_CR_PDUMP(val64, SYS_CLK_CTRL0);
        }
        /* Setup stalling if requested. */
        if (vha->stalling_membus_sys_stall_ratio != 0)
            IOWRITE64_CR_REGIO(vha->stalling_membus_sys_stall_ratio,
                    NN_SYS2_MEMBUS_SYS_STALL_RATIO);
    }

    /* WM reset procedure end. */
    /* Move this WM out of reset state. */
    img_pdump_printf("-- Move WM%u out of reset state\n", sched_info->wm_id);
    IOWRITE64_CR_PDUMP(0ULL, SYS_RESET_CTRL);
    /* Dummy read to avoid race conditions in the hw */
    val64 = IOREAD64_CR_PDUMP(SYS_RESET_CTRL);
    img_pdump_printf("-- WM level RESET sequence END\n");
    return 0;
}
void vha_wm_hwwdt_calculate(struct vha_dev *vha, struct vha_cmd *cmd,
        uint64_t *wl_cycles, uint64_t *core_cycles)
{
    if (use_estimated_cycles_for_wm_wdt) {
        /* Using values defined in MBS */
        *wl_cycles = (uint64_t)get_estimated_cycles(&cmd->user_cmd) +
                (uint64_t)wm_wl_wdt_estimated_cycles_margin;
        *core_cycles = cnn_hl_wdt_cycles;
    } else {
        /* Using values defined as kernel params */
        *wl_cycles = wm_wl_wdt_cycles;
        *core_cycles = cnn_hl_wdt_cycles;
    }
}
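/*
 * Illustrative numbers (assumed, not defaults): with an MBS estimate of
 * 1000000 cycles and wm_wl_wdt_estimated_cycles_margin = 100000, the
 * workload watchdog fires after 1100000 cycles; the core-level watchdog
 * always uses the cnn_hl_wdt_cycles module parameter in both branches.
 */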
void vha_wm_hwwdt_setup(struct vha_dev *vha, struct vha_cmd *cmd,
        uint64_t wl_cycles, uint64_t core_cycles)
{
    uint64_t val64 = 0;
    uint64_t hw_brns =
            ((struct vha_user_cnn_submit_multi_cmd *)&cmd->user_cmd)->hw_brns;
    uint8_t wm_id = cmd->hw_sched_info.wm_id;

    img_pdump_printf("-- Set SYSTEM watchdogs\n");
    /* Setup system WDTs. */
    IOWRITE64_CR_PDUMP(VHA_SYS_MEM_WDT_CYCLES, SYS_MEM_WDT_COMPAREMATCH);
    val64 = VHA_SET_FIELD_SIMPLE_VAL(SYS_MEM_WDT_CTRL, MODE, KICK_WL);
    IOWRITE64_CR_PDUMP(val64, SYS_MEM_WDT_CTRL);

    img_pdump_printf("-- Set WM%d watchdogs\n", wm_id);
    VHA_LOCK_WM();
    VHA_SELECT_WM(wm_id);
    /* Setup WM WDTs. */
    IOWRITE64_CR_PDUMP(wl_cycles, WM_WL_WDT_COMPAREMATCH);
    /* val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_WL_WDT_CTRL, MODE, KICK_WL); */
    val64 = VHA_CR_SETBITS(WM_WL_WDT_CTRL, MODE, wm_wl_wdt_mode);
    IOWRITE64_CR_PDUMP(val64, WM_WL_WDT_CTRL);
    IOWRITE64_CR_PDUMP(VHA_WM_IDLE_WDT_CYCLES, WM_WL_IDLE_WDT_COMPAREMATCH);
    val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_WL_IDLE_WDT_CTRL, MODE, ENABLED);
    IOWRITE64_CR_PDUMP(val64, WM_WL_IDLE_WDT_CTRL);
    IOWRITE64_CR_PDUMP(VHA_WM_SOCIF_WDT_CYCLES, WM_SOCIF_WDT_COMPAREMATCH);
    val64 = VHA_SET_FIELD_SIMPLE_VAL(WM_SOCIF_WDT_CTRL, MODE, ENABLED);
    IOWRITE64_CR_PDUMP(val64, WM_SOCIF_WDT_CTRL);
    VHA_UNLOCK_WM();

    /* Operate only on cores assigned to this WM. */
    img_pdump_printf("-- Select cores\n");
    IOWRITE64_CR_PDUMP(vha_wm_get_cores(vha, wm_id),
            CORE_CTRL_INDIRECT);
    img_pdump_printf("-- Set CORE watchdogs\n");
    /* Setup core WDTs. */
    IOWRITE64_CR_PDUMP(core_cycles, CNN_WDT_COMPAREMATCH);
    val64 = VHA_CR_SETBITS(CNN_WDT_CTRL, MODE, cnn_hl_wdt_mode);
    IOWRITE64_CR_PDUMP(val64, CNN_WDT_CTRL);
    if (VHA_IS_BRN(hw_brns, 71556) ||
            VHA_IS_BRN(hw_brns, 71338))
        /* Always set the max value */
        IOWRITE64_CR_PDUMP(VHA_CR_CNN_MEM_WDT_COMPAREMATCH_MASKFULL,
                CNN_MEM_WDT_COMPAREMATCH);
    else
        IOWRITE64_CR_PDUMP(cnn_mem_wdt_cycles, CNN_MEM_WDT_COMPAREMATCH);
    val64 = VHA_CR_SETBITS(CNN_MEM_WDT_CTRL, MODE, cnn_mem_wdt_mode);
    IOWRITE64_CR_PDUMP(val64, CNN_MEM_WDT_CTRL);
    val64 = VHA_CR_SETBITS(CNN_CORE_SYNC_WDT_CTRL, ENABLE,
                    VHA_CR_CNN_CORE_SYNC_WDT_CTRL_ENABLE_EN) |
            VHA_CR_SETBITS(CNN_CORE_SYNC_WDT_CTRL, VALUE,
                    VHA_CORE_SYNC_WDT_CYCLES);
    IOWRITE64_CR_PDUMP(val64, CNN_CORE_SYNC_WDT_CTRL);
}
void vha_wm_status(struct vha_dev *vha, uint8_t wm_id, uint8_t core_mask)
{
    uint64_t wm_status;

    dev_err(vha->dev, " WM%u failure:\n", wm_id);

    /* Select the WM to read from. */
    VHA_LOCK_WM();
    VHA_SELECT_WM(wm_id);
    wm_status = IOREAD64_CR_REGIO(WM_STATUS);
    VHA_UNLOCK_WM();
    dev_err(vha->dev, " WM_STATUS: 0x%016llx\n",
            wm_status);
    dev_err(vha->dev, " LLSYNC_STATUS: 0x%016llx\n",
            IOREAD64_CR_REGIO(LOW_LEVEL_SYNC_STATUS));
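    /*
     * Per-core registers are accessed indirectly: writing a one-hot core
     * mask to CORE_CTRL_INDIRECT routes subsequent CR reads/writes to that
     * core, which is how the per-core status below is collected.
     */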
    while (core_mask != 0) {
        uint32_t core_id = VHA_CORE_MASK_TO_ID(core_mask);

        dev_err(vha->dev, " core%u:\n", core_id);
        IOWRITE64_CR_REGIO(VHA_CR_SETBITS(CORE_CTRL_INDIRECT, MASK, (1 << core_id)),
                CORE_CTRL_INDIRECT);
        dev_err(vha->dev, " CNN_STATUS: 0x%016llx\n",
                IOREAD64_CR_REGIO(OS0_CNN_STATUS));
        dev_err(vha->dev, " CNN_STATUS2: 0x%016llx\n",
                IOREAD64_CR_REGIO(OS0_CNN_STATUS2));
        {
            uint64_t reg = VHA_CR_CORE0_LAST_NNA_SYNC_ID +
                    core_id * (VHA_CR_CORE1_LAST_NNA_SYNC_ID -
                            VHA_CR_CORE0_LAST_NNA_SYNC_ID);

            dev_err(vha->dev, " LAST_NNA_SYNC_ID: 0x%016llx\n",
                    IOREAD64(vha->reg_base, reg));
            reg = VHA_CR_CORE0_LAST_MMM_SYNC_ID +
                    core_id * (VHA_CR_CORE1_LAST_MMM_SYNC_ID -
                            VHA_CR_CORE0_LAST_MMM_SYNC_ID);
            dev_err(vha->dev, " LAST_MMM_SYNC_ID: 0x%016llx\n",
                    IOREAD64(vha->reg_base, reg));
        }
        core_mask &= ~(VHA_CORE_ID_TO_MASK(core_id));
    }
}