/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
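
/*
 * Presumably this limit is just the full D cache size implied by the
 * geometry above: CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes.  Ranges at least this large cost more
 * to walk line by line than to invalidate wholesale.
 */
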
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ ensure the I cache is invalidated too
	mov	ip, #0				@ value for the whole-cache MCRs below
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr
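
/*
 * The user and kernel variants share one implementation: these caches
 * carry no address-space tags, so invalidating one address space's
 * entries can only be done by invalidating the whole cache anyway.
 */
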
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache		@ range too big: flush everything
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start to a cache line
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
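
/*
 * Note that because the D cache is write-through, anything the CPU has
 * written (e.g. newly generated code) is already visible in memory, so
 * the only work needed here is invalidating the I cache lines covering
 * the range.
 */
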
/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - page - page aligned address
 */
ENTRY(v4wt_flush_kern_dcache_page)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, #PAGE_SZ
	/* FALLTHROUGH */
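
/*
 * At this point r0 is the start of the page and r1 is its end, so
 * falling through into v4wt_dma_inv_range below invalidates every
 * D cache line in the page.
 */
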
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_dma_inv_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start to a cache line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	/* FALLTHROUGH */

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_dma_clean_range)
	mov	pc, lr				@ nothing to do: write-through, never dirty

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
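
/*
 * With a write-through cache there is never dirty data to write back,
 * so "clean" is a no-op and "clean and invalidate" reduces to a plain
 * invalidate; hence dma_flush_range is simply aliased to dma_inv_range
 * above.
 */
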
	__INITDATA

	.type	v4wt_cache_fns, #object
ENTRY(v4wt_cache_fns)
	.long	v4wt_flush_kern_cache_all
	.long	v4wt_flush_user_cache_all
	.long	v4wt_flush_user_cache_range
	.long	v4wt_coherent_kern_range
	.long	v4wt_coherent_user_range
	.long	v4wt_flush_kern_dcache_page
	.long	v4wt_dma_inv_range
	.long	v4wt_dma_clean_range
	.long	v4wt_dma_flush_range
	.size	v4wt_cache_fns, . - v4wt_cache_fns
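
/*
 * For reference, this table is consumed from C as a structure of
 * function pointers; a rough sketch of the expected layout is below
 * (the structure lives in asm/cacheflush.h; the exact prototypes
 * should be checked against the headers of this kernel version):
 *
 *	struct cpu_cache_fns {
 *		void (*flush_kern_all)(void);
 *		void (*flush_user_all)(void);
 *		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 *		void (*coherent_kern_range)(unsigned long, unsigned long);
 *		void (*coherent_user_range)(unsigned long, unsigned long);
 *		void (*flush_kern_dcache_page)(void *);
 *		void (*dma_inv_range)(unsigned long, unsigned long);
 *		void (*dma_clean_range)(unsigned long, unsigned long);
 *		void (*dma_flush_range)(unsigned long, unsigned long);
 *	};
 *
 * The .long entries above must stay in exactly this member order.
 */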