/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
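	/*
	 * CTR_EL0.IDC set: D-cache cleaning to the PoU is not required for
	 * instruction-to-data coherence, so a store barrier is enough and
	 * the clean-by-line loop below can be skipped.
	 */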
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
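	/*
	 * Clean each D-cache line in [start, end) to the PoU. On CPUs with
	 * the ARM64_WORKAROUND_CLEAN_CACHE erratum, "dc cvau" is upgraded
	 * to "dc civac"; a fault on a user address branches to the fixup
	 * at 9f, which returns -EFAULT.
	 */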
1:
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
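	/*
	 * CTR_EL0.DIC set: I-cache invalidation to the PoU is not required
	 * for instruction-to-data coherence, so an isb is sufficient.
	 */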
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(__flush_icache_range)
SYM_FUNC_END(__flush_cache_user_range)
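
/*
 * Usage sketch (illustrative, not from this file): after writing
 * instructions to a buffer, callers such as the module loader make the
 * new code visible to the instruction stream with something like
 *
 *	__flush_icache_range((unsigned long)addr, (unsigned long)addr + size);
 *
 * before branching to it.
 */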

/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalid within specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(invalidate_icache_range)
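	/*
	 * CTR_EL0.DIC set: I-cache invalidation to the PoU is not required,
	 * so an isb alone is enough; return 0 without touching the range.
	 */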
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(invalidate_icache_range)

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__flush_dcache_area)
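	/*
	 * dcache_by_line_op expands to a loop applying the given op
	 * ("dc civac" here) to every line covering [x0, x0 + x1),
	 * followed by a dsb in the given domain.
	 */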
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__flush_dcache_area)

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START(__clean_dcache_area_pou)
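	/*
	 * CTR_EL0.IDC set: cleaning to the PoU is not required, so only
	 * order the stores and return.
	 */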
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(__clean_dcache_area_pou)

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
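	/*
	 * Partially covered lines may also hold neighbouring data, so they
	 * are cleaned as well as invalidated ("dc civac") to avoid
	 * discarding it; lines lying fully inside the range are simply
	 * invalidated ("dc ivac").
	 */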
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(__inval_dcache_area)
SYM_FUNC_END(__dma_inv_area)

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
SYM_FUNC_END(__dma_clean_area)

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__clean_dcache_area_pop)
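	/*
	 * Without the ARMv8.2 "dc cvap" instruction (ARM64_HAS_DCPOP),
	 * fall back to cleaning to the PoC, which is at least as far out
	 * in the hierarchy as the PoP.
	 */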
alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)

/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U line
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
SYM_FUNC_START_PI(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)

/*
 *	__dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
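	/*
	 * DMA_FROM_DEVICE: the device will write the buffer, so stale CPU
	 * lines are invalidated. For DMA_TO_DEVICE and DMA_BIDIRECTIONAL,
	 * dirty lines are cleaned to the PoC so the device observes the
	 * CPU's writes.
	 */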
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area
	b	__dma_clean_area
SYM_FUNC_END_PI(__dma_map_area)

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
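	/*
	 * DMA_TO_DEVICE needs nothing on unmap: the buffer was cleaned
	 * when it was mapped. For the other directions, invalidate so the
	 * CPU reads the device's data rather than stale cache lines.
	 */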
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
SYM_FUNC_END_PI(__dma_unmap_area)