vha_mmu.c

/*
 *****************************************************************************
 * Copyright (c) Imagination Technologies Ltd.
 *
 * The contents of this file are subject to the MIT license as set out below.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Alternatively, the contents of this file may be used under the terms of the
 * GNU General Public License Version 2 ("GPL") in which case the provisions of
 * GPL are applicable instead of those above.
 *
 * If you wish to allow use of your version of this file only under the terms
 * of GPL, and not to allow others to use your version of this file under the
 * terms of the MIT license, indicate your decision by deleting the provisions
 * above and replace them with the notice and other provisions required by GPL
 * as set out in the file called "GPLHEADER" included in this distribution. If
 * you do not delete the provisions above, a recipient may use your version of
 * this file under the terms of either the MIT license or GPL.
 *
 * This License is also included in this distribution in the file called
 * "MIT_COPYING".
 *
 *****************************************************************************/
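
/* MMU management for the VHA core: TLB/cache invalidation, per-session
 * MMU context setup and MMU fault status reporting. */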

#include <linux/moduleparam.h>
#include <linux/delay.h>

#include <uapi/vha.h>
#include "vha_common.h"
#include "vha_plat.h"
#include <vha_regs.h>
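
/* Invalidate the MMU PC/PD/PT caches for the given hardware context,
 * or for all contexts when ctx_id is VHA_INVALID_ID. */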
static void mmu_flush(const struct device *dev,
		struct vha_dev *vha, int ctx_id)
{
	uint64_t inval = VHA_CR_OS(MMU_CTRL_INVAL_PC_EN) |
			VHA_CR_OS(MMU_CTRL_INVAL_PD_EN) |
			VHA_CR_OS(MMU_CTRL_INVAL_PT_EN);

	/* No need to handle the MMU cache when the core is already offline */
	if (vha->state == VHA_STATE_OFF)
		return;

#if defined(HW_AX3)
	{
		uint64_t pend = VHA_CR_OS(MMU_CTRL_INVAL_STATUS_PENDING_EN);
#ifdef VHA_SCF
		if (vha->hw_props.supported.parity &&
				!vha->parity_disable) {
			/* If the pending bit is set then the parity bit must be set as well! */
			pend |= VHA_CR_OS(MMU_CTRL_INVAL_STATUS_PARITY_EN);
		}
#endif
		IOPOLL64_PDUMP(0, 20, 150, pend, VHA_CR_OS(MMU_CTRL_INVAL_STATUS));
	}
#endif

	if (unlikely(ctx_id == VHA_INVALID_ID))
		inval |= VHA_CR_OS(MMU_CTRL_INVAL_ALL_CONTEXTS_EN);
	else
		inval |= ctx_id << VHA_CR_OS(MMU_CTRL_INVAL_CONTEXT_SHIFT);

	dev_dbg(dev, "%s: ctx_id:%d (0x%llx)\n", __func__, ctx_id, inval);
	img_pdump_printf("-- MMU invalidate TLB caches\n");
	IOWRITE64_PDUMP(inval, VHA_CR_OS(MMU_CTRL_INVAL));
}

/* This function is called from img_mmu to handle cache maintenance:
 * it flushes the TLBs of every hardware context owned by the session. */
int vha_mmu_callback(enum img_mmu_callback_type callback_type,
		int buf_id, void *data)
{
	struct vha_session *session = data;
	struct vha_dev *vha = session->vha;
	int ctx_id;
	int ret = 0;

	if (!vha)
		return 0;

	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
		mmu_flush(vha->dev, vha, session->mmu_ctxs[ctx_id].hw_id);

#if defined(VHA_MMU_MIRRORED_CTX_SUPPORT) && defined(HW_AX3)
	{
		/* Need to flush the auxiliary hw context as well */
		int hw_id = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id +
				VHA_MMU_AUX_HW_CTX_SHIFT;

		mmu_flush(vha->dev, vha, hw_id);
	}
#endif

	return ret;
}
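
/* Program the page catalogue base address for a single MMU hardware context. */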
static void do_mmu_ctx_setup(struct vha_dev *vha,
		uint8_t hw_id, int pc_bufid, uint32_t pc_baddr)
{
	img_pdump_printf("-- Setup MMU context:%d\n", hw_id);
	IOWRITE64_PDUMP(hw_id, VHA_CR_OS(MMU_CBASE_MAPPING_CONTEXT));

	if (!vha->mmu_base_pf_test) {
		IOWRITE64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING), pc_baddr);

		/* This is a physical address, so we need to use the MEM_OS0:BLOCK tag
		 * when pdump'ing. */
		img_pdump_printf("-- Setup MMU base address\n"
				"WRW "_PMEM_":$0 "_PMEM_":BLOCK_%d:0 -- 'PC'\n"
				"SHR "_PMEM_":$0 "_PMEM_":$0 %d\n"
				"WRW64 :REG:%#x "_PMEM_":$0\n", pc_bufid,
				IMG_MMU_PC_ADDR_SHIFT,
				VHA_CR_OS(MMU_CBASE_MAPPING));
		dev_dbg(vha->dev, "%s: setting hardware ctx id:%u\n", __func__, hw_id);
	} else
		dev_info(vha->dev, "Bringup test: force MMU base page fault\n");
}
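
/* Set up the MMU for a session: enable bypass when the MMU is disabled,
 * otherwise program every hardware context with the session's page
 * catalogue and flush stale TLB entries when a context is shared. */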
int vha_mmu_setup(struct vha_session *session)
{
	struct vha_dev *vha = session->vha;
	int ctx_id;

	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++)
		dev_dbg(vha->dev,
			"%s: mode:%d session ctxid:%x active ctxid:%x\n",
			__func__, vha->mmu_mode,
			session->mmu_ctxs[ctx_id].id,
			vha->active_mmu_ctx);

	if (vha->mmu_mode == VHA_MMU_DISABLED) {
		img_pdump_printf("-- MMU bypass ON\n");
		IOWRITE64_PDUMP(VHA_CR_OS(MMU_CTRL_BYPASS_EN),
			VHA_CR_OS(MMU_CTRL));
		return 0;
	}

	/* Using the model context to track the active context */
	if (session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id == vha->active_mmu_ctx)
		return 0;

	img_pdump_printf("-- MMU_SETUP_BEGIN\n");
	img_pdump_printf("-- MMU bypass OFF\n");
	IOWRITE64_PDUMP(0, VHA_CR_OS(MMU_CTRL));

	for (ctx_id = 0; ctx_id < ARRAY_SIZE(session->mmu_ctxs); ctx_id++) {
		do_mmu_ctx_setup(vha, session->mmu_ctxs[ctx_id].hw_id,
				session->mmu_ctxs[ctx_id].pc_bufid,
				session->mmu_ctxs[ctx_id].pc_baddr);

		/* If there are multiple sessions using the same MMU hardware context
		 * we need to flush the caches for the old context (the id is the same).
		 * This happens when the number of processes is > VHA_MMU_MAX_HW_CTXS. */
		if (vha->mmu_ctxs[session->mmu_ctxs[ctx_id].hw_id] > 1)
			mmu_flush(vha->dev, vha, session->mmu_ctxs[ctx_id].hw_id);
	}

#if defined(VHA_MMU_MIRRORED_CTX_SUPPORT) && defined(HW_AX3)
	{
		/* Need to program the auxiliary hw context to
		 * point at the same page tables as the base context */
		int hw_id = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id +
				VHA_MMU_AUX_HW_CTX_SHIFT;

		do_mmu_ctx_setup(vha, hw_id,
				session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_bufid,
				session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_baddr);

		if (vha->mmu_ctxs[session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].hw_id] > 1)
			mmu_flush(vha->dev, vha, hw_id);
	}
#endif

	/* Using the model context to track the context change */
	vha->active_mmu_ctx = session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].id;
	dev_dbg(vha->dev, "%s: update ctx id active:%x pc:%#x\n",
		__func__, vha->active_mmu_ctx,
		session->mmu_ctxs[VHA_MMU_REQ_MODEL_CTXID].pc_baddr <<
		VHA_CR_OS(MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
	img_pdump_printf("-- MMU_SETUP_END\n");

	return 0;
}
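
/* Decode the MMU fault status registers and log the fault details. */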
void vha_mmu_status(struct vha_dev *vha)
{
	const char levels[][5] = {"PT", "PD", "PC", "BASE"};

	uint64_t status1 = IOREAD64(vha->reg_base,
			VHA_CR_OS(MMU_FAULT_STATUS1));
	uint64_t status2 = IOREAD64(vha->reg_base,
			VHA_CR_OS(MMU_FAULT_STATUS2));

	uint64_t addr = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, ADDRESS, status1);
	uint8_t level = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, LEVEL, status1);
	uint8_t req_id = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, REQ_ID, status1);
	uint8_t ctx = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, CONTEXT, status1);
	uint8_t rnw = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, RNW, status1);
	uint8_t type = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, TYPE, status1);
	uint8_t fault = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS1, FAULT, status1);

	uint8_t bif_id = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, BIF_ID, status2);
	uint8_t tlb_entry = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, TLB_ENTRY, status2);
	uint8_t slc_bank = VHA_CR_GETBITS_OS(MMU_FAULT_STATUS2, BANK, status2);

	uint64_t mapping = 0;

	/* Select the faulting context and read its current page catalogue base */
	IOWRITE64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING_CONTEXT), ctx);
	mapping = IOREAD64(vha->reg_base, VHA_CR_OS(MMU_CBASE_MAPPING));

	/* False alarm? */
	if (!fault)
		return;

	dev_dbg(vha->dev, "%s: MMU FAULT: s1:%llx s2:%llx\n",
			__func__, status1, status2);
	dev_warn(vha->dev, "%s: MMU fault while %s @ 0x%llx\n",
			__func__, (rnw) ? "reading" : "writing", addr << 4);
	dev_warn(vha->dev, "%s: level:%s Requestor:%x Context:%x Type:%s\n",
			__func__, levels[level], req_id, ctx,
			(type == 0) ? "VALID" :
			(type == 2) ? "READ-ONLY" :
			"UNKNOWN");
	dev_warn(vha->dev, "%s: bif_id:%x tlb_entry:%x slc_bank:%x\n",
			__func__, bif_id, tlb_entry, slc_bank);
	dev_warn(vha->dev, "%s: current mapping@context%d:%#llx\n",
			__func__, ctx,
			mapping <<
			VHA_CR_OS(MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT));
}