/*
 * linux/arch/arm/mm/cache-v4wb.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif
/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *   Size   Clean (ticks)   Dirty (ticks)
 *   4096    21  20  21      53  55  54
 *   8192    40  41  40     106 100 102
 *  16384    77  77  76     140 140 138
 *  32768   150 149 150     214 216 212 <---
 *  65536   296 297 296     351 358 361
 * 131072   591 591 591     656 657 651
 *  Whole   132 136 132     221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
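/*
 * With CONFIG_CPU_SA110 this works out to CACHE_DLIMIT = 16384 * 4 =
 * 65536 bytes, and with CONFIG_CPU_SA1100 to 8192 * 4 = 32768 bytes:
 * a range must span at least four cache sizes before the whole-cache
 * path below is taken.
 */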
	.data
flush_base:
	.long	FLUSH_BASE
	.text
/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
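/*
 * StrongARM v4wb parts have no coprocessor operation to clean and
 * invalidate the entire D cache in one go, so the whole-cache flush
 * below works by loading CACHE_DSIZE bytes from a reserved area at
 * FLUSH_BASE: each load displaces one cache line, writing it back if
 * dirty.  flush_base is toggled (the EOR below) between two adjacent
 * CACHE_DSIZE areas so that consecutive flushes never hit lines left
 * cached by the previous one.
 */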
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE		@ switch to the other flush area
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32			@ displace one D cache line per load
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr
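/*
 * Note the two MCRs per line above: ARMv4 write-back parts provide
 * separate "clean D entry" and "invalidate D entry" operations but,
 * unlike later cores, no combined clean-and-invalidate, so every
 * flush loop in this file issues the pair.
 */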
/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - page - page aligned address
 */
ENTRY(v4wb_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ		@ r0/r1 = start/end for the range code below
	/* fall through */
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
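/*
 * This clean/invalidate/I-cache-invalidate sequence is what callers
 * such as flush_icache_range() rely on after writing instructions
 * (for example when loading a module or planting a breakpoint), since
 * the StrongARM I cache does not snoop the D cache.
 */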
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wb_dma_inv_range)
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
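/*
 * The boundary cleaning above preserves data that shares a line with
 * the range but lies outside it.  With 32-byte lines, for example,
 * dma_inv_range(0x1010, 0x1050) first cleans the lines at 0x1000 and
 * 0x1040, so bytes 0x1000-0x100f and 0x1050-0x105f reach memory
 * before the three covered lines are invalidated.
 */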
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wb_dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr
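/*
 * A rough sketch of how the DMA mapping layer picks between these
 * three primitives: clean before a device reads a buffer
 * (DMA_TO_DEVICE), invalidate before the CPU reads data a device has
 * written (DMA_FROM_DEVICE), and flush for bidirectional buffers.
 */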
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range
	__INITDATA

	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_page
	.long	v4wb_dma_inv_range
	.long	v4wb_dma_clean_range
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns
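/*
 * The word order above must match the member order of struct
 * cpu_cache_fns in asm/cacheflush.h.  A rough sketch of that
 * structure for kernels of this vintage (types abbreviated):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long start,
 *					 unsigned long end,
 *					 unsigned int flags);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *);
 *		void (*dma_inv_range)(unsigned long, unsigned long);
 *		void (*dma_clean_range)(unsigned long, unsigned long);
 *		void (*dma_flush_range)(unsigned long, unsigned long);
 *	};
 */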