// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * Data cache (L1 D$ or SL$) entire invalidate operation or data cache disable
 * operation may result in unexpected behavior and data loss even if we flush
 * the data cache right before invalidation. That may happen if we store any
 * context on the stack (like we store the BLINK register on the stack before
 * a function call). The BLINK register is the register where the return
 * address is automatically saved when we do a function call with instructions
 * like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below as we store the BLINK register on the stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *       [return address is saved to BLINK register]
 *       [flush L1 D$]
 *       return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *       [return address is saved to BLINK register]
 *       [invalidate L1 D$]                      ![point 3]
 *       // Oops!!!
 *       // We lose the return address from the invalidate_dcache_all
 *       // function: we saved it to the stack and invalidated L1 D$
 *       // after that!
 *       return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory), but in memory we have the value saved in [point 1],
 *     // which is the return address from the flush_dcache_all function
 *     // (instead of the address from the current invalidate_dcache_all
 *     // function which we saved in [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we may fix that by doing flush & invalidation of D$ with a
 * single instruction (instead of a flush/invalidation instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))' gcc
 * attribute to avoid any function call (and BLINK store) between the cache
 * flush and disable.
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW but we don't support them in SW.
 *
 * Configuration 1:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|____
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 2:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|____
 *         |                      |
 *         |       L2 (SL$)       |
 *         |______________________|
 *           always on (ARCv2, HS < 3.0)
 *           on/off    (ARCv2, HS >= 3.0)
 *          ___|______________|____
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 3:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off      must be on
 *          ___|______________|____       _______
 *         |                      |      |       |
 *         |       L2 (SL$)       |------|  IOC  |
 *         |______________________|      |_______|
 *           always must be on            on/off
 *          ___|______________|____
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 */
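
/*
 * Illustrative sketch of the [ NOTE 1 ] fix (example only, not part of this
 * file's API): combine flush and invalidate into one entire-op and force
 * inlining of every helper on the path, so no BLINK value is spilled to the
 * stack between the flush and the invalidate:
 *
 *	static inline __attribute__((always_inline)) void dc_cleanup(void)
 *	{
 *		__dc_entire_op(OP_FLUSH_N_INV);	// single op, nothing between
 *	}
 *
 * The helpers below implement exactly this via the 'inlined_cachefunc'
 * macro and OP_FLUSH_N_INV.
 */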

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))

/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for details on why we need always-inlined functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
static inlined_cachefunc void __slc_entire_op(const int op);
static inlined_cachefunc bool ioc_enabled(void);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

enum slc_dis_status {
	ST_SLC_MISSING = 0,
	ST_SLC_NO_DISABLE_CTRL,
	ST_SLC_DISABLE_CTRL
};

/*
 * ARCv1                                   -> ST_SLC_MISSING
 * ARCv2 && SLC absent                     -> ST_SLC_MISSING
 * ARCv2 && SLC exists && SLC version <= 2 -> ST_SLC_NO_DISABLE_CTRL
 * ARCv2 && SLC exists && SLC version > 2  -> ST_SLC_DISABLE_CTRL
 */
static inlined_cachefunc enum slc_dis_status slc_disable_supported(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		if (sbcr.fields.ver == 0)
			return ST_SLC_MISSING;
		else if (sbcr.fields.ver <= 2)
			return ST_SLC_NO_DISABLE_CTRL;
		else
			return ST_SLC_DISABLE_CTRL;
	}

	return ST_SLC_MISSING;
}

static inlined_cachefunc bool __slc_enabled(void)
{
	return !(read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_DIS);
}

static inlined_cachefunc void __slc_enable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl &= ~SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc void __slc_disable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl |= SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc bool slc_enabled(void)
{
	enum slc_dis_status slc_status = slc_disable_supported();

	if (slc_status == ST_SLC_MISSING)
		return false;
	else if (slc_status == ST_SLC_NO_DISABLE_CTRL)
		return true;
	else
		return __slc_enabled();
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If the L1 data cache is disabled, the SL$ is bypassed and all
	 * load/store requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

void slc_enable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	if (__slc_enabled())
		return;

	__slc_enable();
}

/* TODO: warn if we are not able to disable SLC */
void slc_disable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	/* we don't support SLC disabling if we use IOC */
	if (ioc_enabled())
		return;

	if (!__slc_enabled())
		return;

	/*
	 * We need to flush the L1 D$ to guarantee that there won't be any
	 * writeback operations while the SLC is being disabled.
	 */
	__dc_entire_op(OP_FLUSH);
	__slc_entire_op(OP_FLUSH_N_INV);
	__slc_disable();
}

static inlined_cachefunc bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inlined_cachefunc bool ioc_enabled(void)
{
	/*
	 * We check only the CONFIG option instead of the IOC HW state, as
	 * IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_enabled())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

	unsigned int ctrl;
	unsigned long end;

	if (!slc_enabled())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;
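
	/*
	 * Worked example (assuming a 64-byte SLC line): paddr = 0x1000 and
	 * sz = 0x40 give end = 0x1000 + 0x40 + 0x3f = 0x107f, so the
	 * operation covers the lines at 0x1000 and 0x1040 even though
	 * paddr + sz itself is line-aligned.
	 */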

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	if (!slc_enabled())
		panic("Trying to enable IOC but SLC is disabled");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be a power of 2 and at least 4 KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);
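
	/*
	 * Worked example (hypothetical 512 MiB of DDR): ap_size / 1024 =
	 * 2 ^ 19, order_base_2() gives 19, so 19 - 2 = 17 = 0x11 is
	 * programmed, which the IOC decodes back as 2 ^ (17 + 2) KB = 512M.
	 */
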
	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support configurations where the L1 I$ or L1 D$
		 * is absent but the SL$ exists. See [ NOTE 2 ] for details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length really, as there are
	 * no per-line ops on the I$; instead we only do a full invalidation
	 * of it on relocation and right before jumping to the OS.
	 * Still, we check for an insane config with a zero-encoded line
	 * length in the presence of a version field in the I$ BCR. Just in
	 * case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
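
	/*
	 * Worked example: a line_len field of 2 decodes as 16 << 2 = 64-byte
	 * D$ lines, while the I$ field above uses an 8-byte base
	 * (8 << line_len).
	 */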
}

void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);

	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();

	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If the SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC will be bypassed for data after the L1 D$ is disabled,
	 * we need to flush it first. We also invalidate the SLC to avoid any
	 * inconsistent-data problems after enabling the L1 D$ again with the
	 * dcache_enable function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);
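
	/*
	 * Worked example (assuming 32-byte L1 lines): paddr = 0x1005 and
	 * sz = 0x40 become paddr = 0x1000 and sz = 0x45 after alignment,
	 * so num_lines = DIV_ROUND_UP(0x45, 0x20) = 3 and the loop below
	 * touches the lines at 0x1000, 0x1020 and 0x1040.
	 */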

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
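
/*
 * Usage sketch (hypothetical driver code, not part of this file): keeping a
 * DMA buffer coherent when no IOC is present.
 *
 *	// CPU -> device: push dirty lines out so the DMA master sees them
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + size);
 *	start_dma_to_device(buf, size);		// hypothetical helper
 *
 *	// device -> CPU: drop stale cached copies before reading the buffer
 *	invalidate_dcache_range((unsigned long)buf,
 *				(unsigned long)buf + size);
 *
 * With IOC enabled on ARCv2 both calls intentionally do nothing, as the
 * decision tables above describe.
 */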

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code, and as we
 * don't need a pure invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and thereby syncs the I/D caches). It
 * can be used for cleanup before launching Linux or to sync caches during
 * relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If the SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of
	 * flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}
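
/*
 * Usage sketch (illustrative): a boot path would typically call this right
 * before jumping to the OS entry point, e.g.
 *
 *	sync_n_cleanup_cache_all();	// no stale lines, I$/D$ in sync
 *	kernel_entry(...);		// hypothetical jump to the kernel
 */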