cache.S
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection
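
/*
 * Worked example (illustrative figures, not taken from this file): for a
 * level-1 D-cache (x0 = 0) with 64-byte lines, 4 ways and 256 sets,
 * CCSIDR_EL1.LineSize = 2, so x2 = 6; x3 = 3 (ways - 1) and clz(w3) = 30,
 * placing the way index at bits [31:30]; the set index goes at bits
 * [13:6] and the level field (x0 << 1) at bits [3:1]. The DC CISW operand
 * for way 2, set 5 is then (2 << 30) | (5 << 6) | 0 = 0x80000140.
 */
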
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection
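
/*
 * Example (hedged, value from Arm's Cortex-A53 TRM): CLIDR_EL1 reads
 * 0x0A200023, i.e. Ctype1 = 0b011 (separate I- and D-caches),
 * Ctype2 = 0b100 (unified L2) and LoC = 2, so loop_level above runs for
 * x0 = 0 and x0 = 1 and calls __asm_dcache_level for both, since the
 * cache type is >= 2 at each level.
 */
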
.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection
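
/*
 * Typical use (sketch, not part of this file): C code sees these two
 * entry points as
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *
 * and calls them, e.g., before disabling the caches or when handing
 * control to an OS, so no dirty lines are lost behind its back.
 */
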
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
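
/*
 * Worked example (illustrative): with CTR_EL0.DminLine (bits [19:16]) = 4,
 * x2 = 4 << 4 = 64, i.e. the smallest D-cache line in the system is 64
 * bytes (DminLine counts 4-byte words). A call with start = 0x80000123
 * and end = 0x80000200 aligns start down to 0x80000100, then issues
 * DC CIVAC at 0x80000100, 0x80000140, 0x80000180 and 0x800001c0.
 */
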
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection
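
/*
 * Typical use (sketch): discard stale lines over a buffer a device has
 * just written by DMA, before the CPU reads it, e.g.
 *
 *	__asm_invalidate_dcache_range((ulong)buf, (ulong)buf + size);
 *
 * buf and size should be cache-line aligned: the invalidate also drops
 * unrelated data that happens to share a line with the buffer's edges.
 */
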
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection
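
/*
 * Typical use (sketch): after copying executable code into memory, first
 * clean the D-cache so the instructions reach the point of unification,
 * then drop stale I-cache contents:
 *
 *	__asm_flush_dcache_range(start, end);
 *	__asm_invalidate_icache_all();
 *
 * IC IALLUIS invalidates the instruction caches of every core in the
 * Inner Shareable domain, so all cores fetch the new code.
 */
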
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection
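
/*
 * Note: the three stubs above are declared .weak, so a platform with an
 * outer (system-level) L3 cache can supply strongly-bound versions and
 * these no-ops are linked out. The defaults simply report success
 * (x0 = 0); an override may return a non-zero status instead.
 */
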
/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection
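
/*
 * Sequence recap (sketch): a caller would do
 *
 *	__asm_switch_ttbr((ulong)new_page_table);
 *
 * and the function (1) disables MMU, D-cache and I-cache via SCTLR,
 * (2) invalidates the TLBs, (3) writes the new TTBR0, and (4) restores
 * the saved SCTLR, with an ISB after each system-register write so the
 * new state is in effect before the next instruction executes.
 */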