cache-uniphier.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2014 Panasonic Corporation
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/armv7.h>
#include <asm/processor.h>

#include "cache-uniphier.h"
/* control registers */
#define UNIPHIER_SSCC		0x500c0000	/* Control Register */
#define    UNIPHIER_SSCC_BST			(0x1 << 20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			(0x1 << 19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			(0x1 << 18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			(0x1 << 17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			(0x1 <<  0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x500c0030	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x500c0034	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x503c0100	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x506c0244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x506c0248
#define    UNIPHIER_SSCOQM_TID_MASK		(0x3 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_DATA		(0x0 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_INST		(0x1 << 21)
#define    UNIPHIER_SSCOQM_TID_WAY		(0x2 << 21)
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_S_WAY		(0x2 << 17)
#define    UNIPHIER_SSCOQM_CE			(0x1 << 15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CW			(0x1 << 14)
#define    UNIPHIER_SSCOQM_CM_MASK		(0x7)
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOQM_CM_PREFETCH		0x3	/* prefetch to cache */
#define    UNIPHIER_SSCOQM_CM_PREFETCH_BUF	0x4	/* prefetch to pf-buf */
#define    UNIPHIER_SSCOQM_CM_TOUCH		0x5	/* touch */
#define    UNIPHIER_SSCOQM_CM_TOUCH_ZERO	0x6	/* touch to zero */
#define    UNIPHIER_SSCOQM_CM_TOUCH_DIRTY	0x7	/* touch with dirty */
#define UNIPHIER_SSCOQAD	0x506c024c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x506c0250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOQMASK	0x506c0254	/* Cache Operation Queue Address Mask */
#define UNIPHIER_SSCOQWN	0x506c0258	/* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF	0x506c025c	/* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE		(0x1 << 1)
#define    UNIPHIER_SSCOPPQSEF_OE		(0x1 << 0)
#define UNIPHIER_SSCOLPQS	0x506c0260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			(0x1 << 2)
#define    UNIPHIER_SSCOLPQS_EST		(0x1 << 1)
#define    UNIPHIER_SSCOLPQS_QST		(0x1 << 0)

#define UNIPHIER_SSC_LINE_SIZE		128
#define UNIPHIER_SSC_RANGE_OP_MAX_SIZE	(0x00400000 - (UNIPHIER_SSC_LINE_SIZE))

#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		(((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_WAY) || \
		 ((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY))
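
/*
 * Cache maintenance requests are issued through the operation queue
 * registers defined above: the operation word is written to UNIPHIER_SSCOQM
 * (together with an address/size pair in UNIPHIER_SSCOQAD/UNIPHIER_SSCOQSZ
 * for range operations and a way mask in UNIPHIER_SSCOQWN for way
 * operations), UNIPHIER_SSCOPPQSEF reports whether the request was accepted,
 * and UNIPHIER_SSCOLPQS is polled until the operation completes.  See
 * uniphier_cache_maint_common() below for the exact sequence.
 */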

/* uniphier_cache_sync - perform a sync point for a particular cache level */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl(UNIPHIER_SSCOPE);
}

/**
 * uniphier_cache_maint_common - run a queue operation
 *
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @ways: target ways (don't care for operations other than pre-fetch, touch,
 *        and way operations)
 * @operation: flags to specify the desired cache operation
 */
static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
					u32 operation)
{
	/* clear the complete notification flag */
	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
			writel(start, UNIPHIER_SSCOQAD);
			writel(size, UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
			writel(ways, UNIPHIER_SSCOQWN);
	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
		cpu_relax();
}

static void uniphier_cache_maint_all(u32 operation)
{
	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);

	uniphier_cache_sync();
}

static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
				       u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);

	size = end - start;

	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
		/* this means cache operation for all range */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);

	while (size) {
		u32 chunk_size = min_t(u32, size,
				       UNIPHIER_SSC_RANGE_OP_MAX_SIZE);

		uniphier_cache_maint_common(start, chunk_size, ways,
					    UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	uniphier_cache_sync();
}

void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_PREFETCH);
}

void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH);
}

void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
{
	uniphier_cache_maint_range(start, end, ways,
				   UNIPHIER_SSCOQM_TID_WAY |
				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
}

void uniphier_cache_inv_way(u32 ways)
{
	uniphier_cache_maint_common(0, 0, ways,
				    UNIPHIER_SSCOQM_S_WAY |
				    UNIPHIER_SSCOQM_CM_INV);
}

void uniphier_cache_set_active_ways(int cpu, u32 active_ways)
{
	void __iomem *base = (void __iomem *)UNIPHIER_SSCC + 0xc00;

	switch (readl(UNIPHIER_SSCID)) { /* revision */
	case 0x12:	/* LD4 */
	case 0x16:	/* sld8 */
		base = (void __iomem *)UNIPHIER_SSCC + 0x840;
		break;
	default:
		base = (void __iomem *)UNIPHIER_SSCC + 0xc00;
		break;
	}

	writel(active_ways, base + 4 * cpu);
}

static void uniphier_cache_endisable(int enable)
{
	u32 tmp;

	tmp = readl(UNIPHIER_SSCC);
	if (enable)
		tmp |= UNIPHIER_SSCC_ON;
	else
		tmp &= ~UNIPHIER_SSCC_ON;
	writel(tmp, UNIPHIER_SSCC);
}

void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}

void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}
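
/*
 * The v7_outer_cache_*() functions below implement U-Boot's ARMv7
 * outer-cache hooks (declared in <asm/armv7.h>, included above), so that
 * the generic dcache flush/invalidate paths also maintain this system
 * cache when CONFIG_CACHE_UNIPHIER is enabled.
 */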
#ifdef CONFIG_CACHE_UNIPHIER
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}

void v7_outer_cache_inval_range(u32 start, u32 end)
{
	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		/* flush the first, partially covered cache line */
		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		uniphier_cache_maint_range(start,
					   start + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
		start += UNIPHIER_SSC_LINE_SIZE;
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
		/* flush the last, partially covered cache line */
		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
		uniphier_cache_maint_range(end,
					   end + UNIPHIER_SSC_LINE_SIZE, 0,
					   UNIPHIER_SSCOQM_CM_FLUSH);
	}

	if (start >= end) {
		uniphier_cache_sync();
		return;
	}

	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
}

void v7_outer_cache_enable(void)
{
	uniphier_cache_set_active_ways(0, U32_MAX);	/* activate all ways */
	uniphier_cache_enable();
}

void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
#endif

void enable_caches(void)
{
	dcache_enable();
}
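
/*
 * Usage sketch (illustrative only, not part of this driver): a board or SPL
 * file could restrict the cache to a subset of ways and pre-zero a region so
 * that it behaves as scratch memory before DRAM is available, using the
 * helpers exported above.  The function name, address, size, and way mask
 * below are made-up values for illustration.
 *
 *	void board_cache_scratch_setup(void)
 *	{
 *		uniphier_cache_set_active_ways(0, 0x3);
 *		uniphier_cache_enable();
 *		uniphier_cache_touch_zero_range(0x30000000,
 *						0x30000000 + 0x10000, 0x3);
 *	}
 */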