// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <asm/global_data.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-invalidate operation on the data cache (L1 D$ or SL$), or a data
 * cache disable operation, may result in unexpected behavior and data loss
 * even if we flush the data cache right before invalidation. That may happen
 * if we store any context on the stack (like we store the BLINK register on
 * the stack before a function call). BLINK is the register where the return
 * address is automatically saved when we do a function call with
 * instructions like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below because we store the BLINK register on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$] ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it on the stack and then invalidated the
 *         // L1 D$!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 D$ as we invalidated it in
 *     // [point 3], so we fetch it from the next memory level (for example
 *     // DDR memory), but there we have the value saved in [point 1], which
 *     // is the return address of the flush_dcache_all function (instead of
 *     // the address of the current invalidate_dcache_all function which we
 *     // saved in [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by doing the flush & invalidate of the D$ with
 * a single instruction (instead of a flush/invalidate instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))' gcc
 * attribute, to avoid any function call (and BLINK store) between the cache
 * flush and the disable.
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW but we don't support them in SW.
 *
 * Configuration 1:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 2:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|___
 *         |                      |
 *         |       L2 (SL$)       |
 *         |______________________|
 *          always on (ARCv2, HS < 3.0)
 *          on/off    (ARCv2, HS >= 3.0)
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 3:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off       must be on
 *          ___|______________|___       _______
 *         |                      |     |       |
 *         |       L2 (SL$)       |-----|  IOC  |
 *         |______________________|     |_______|
 *          always must be on            on/off
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 */

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))

/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for more details about why we need always-inlined functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
static inlined_cachefunc void __slc_entire_op(const int op);
static inlined_cachefunc bool ioc_enabled(void);
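
/*
 * Check whether PAE40 (40-bit physical addressing) exists in HW, based on
 * the 'pae' field of the MMU build configuration register (only meaningful
 * for MMU v4 and above).
 */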
static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}
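
/*
 * The *_exists() helpers below probe the corresponding build configuration
 * registers (BCRs): a non-zero version field means the cache is present in
 * HW. The *_enabled() helpers additionally check the DISABLE bit in the
 * corresponding cache control register.
 */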
static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

enum slc_dis_status {
	ST_SLC_MISSING = 0,
	ST_SLC_NO_DISABLE_CTRL,
	ST_SLC_DISABLE_CTRL
};

/*
 * ARCv1                                   -> ST_SLC_MISSING
 * ARCv2 && SLC absent                     -> ST_SLC_MISSING
 * ARCv2 && SLC exists && SLC version <= 2 -> ST_SLC_NO_DISABLE_CTRL
 * ARCv2 && SLC exists && SLC version > 2  -> ST_SLC_DISABLE_CTRL
 */
static inlined_cachefunc enum slc_dis_status slc_disable_supported(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		if (sbcr.fields.ver == 0)
			return ST_SLC_MISSING;
		else if (sbcr.fields.ver <= 2)
			return ST_SLC_NO_DISABLE_CTRL;
		else
			return ST_SLC_DISABLE_CTRL;
	}

	return ST_SLC_MISSING;
}
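
/*
 * Raw accessors for the SLC_CTRL DIS bit; these are only valid when the SLC
 * actually has disable control (see slc_disable_supported()).
 */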
static inlined_cachefunc bool __slc_enabled(void)
{
	return !(read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_DIS);
}

static inlined_cachefunc void __slc_enable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl &= ~SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc void __slc_disable(void)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
	ctrl |= SLC_CTRL_DIS;
	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}
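
/*
 * Report the effective SLC state: if the SLC has no disable control
 * (version <= 2) it is hardwired on, otherwise consult the SLC_CTRL DIS bit.
 */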
static inlined_cachefunc bool slc_enabled(void)
{
	enum slc_dis_status slc_status = slc_disable_supported();

	if (slc_status == ST_SLC_MISSING)
		return false;
	else if (slc_status == ST_SLC_NO_DISABLE_CTRL)
		return true;
	else
		return __slc_enabled();
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If the L1 data cache is disabled, SL$ is bypassed and all
	 * load/store requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

void slc_enable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	if (__slc_enabled())
		return;

	__slc_enable();
}

/* TODO: warn if we are not able to disable the SLC */
void slc_disable(void)
{
	if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
		return;

	/* We don't support SLC disabling if we use IOC */
	if (ioc_enabled())
		return;

	if (!__slc_enabled())
		return;

	/*
	 * We need to flush the L1 D$ to guarantee that we won't get any
	 * writebacks while the SLC is being disabled.
	 */
	__dc_entire_op(OP_FLUSH);
	__slc_entire_op(OP_FLUSH_N_INV);
	__slc_disable();
}

static inlined_cachefunc bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inlined_cachefunc bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state, as
	 * the IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}
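
/*
 * Perform an operation (flush, invalidate, or flush-n-invalidate) on the
 * entire SLC. The IM bit selects whether an invalidate also flushes dirty
 * lines first.
 */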
static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_enabled())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers
	 * exist only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	unsigned int ctrl;
	unsigned long end;

	if (!slc_enabled())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
#endif /* CONFIG_ISA_ARCV2 */
}
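
/*
 * Program and enable the IO Coherency (IOC) aperture covering DDR so that
 * IO (DMA) accesses within it are coherent with the CPU caches. See
 * [ NOTE 2 ], Configuration 3, for the HW requirements checked below.
 */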
static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	if (!slc_enabled())
		panic("Trying to enable IOC but SLC is disabled");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be a power of 2 and bigger than 4KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
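	/*
	 * Worked example (512 MiB of DDR, taken purely for illustration):
	 * ap_size / 1024 = 512 * 1024 KB, order_base_2(512 * 1024) = 19,
	 * and 19 - 2 = 17 = 0x11, i.e. the "0x11 implies 512M" case above.
	 */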
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);
	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2
	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support a configuration where L1 I$ or L1 D$ is
		 * absent but SL$ exists. See [ NOTE 2 ] for more details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}
#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length really, as there are
	 * no per-line ops on the I$; instead we only do a full invalidation
	 * of it upon relocation and right before jumping to the OS.
	 * Still we check for the insane config with a zero-encoded line
	 * length in presence of a version field in the I$ BCR. Just in case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
}
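
/*
 * One-time cache setup: read the cache geometry from the BCRs, then set up
 * the IOC and the SLC upper (PAE40) region registers where applicable.
 */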
void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);

	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data, it is used only for instructions,
	 * so we need to invalidate it too.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC is bypassed for data once the L1 D$ is disabled, we
	 * need to flush it before disabling the L1 D$. We also invalidate
	 * the SLC to avoid stale-data problems after enabling the L1 D$
	 * again with the dcache_enable() function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on the D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
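
	/*
	 * Round the region to whole cache lines: align the start address
	 * down and grow sz by the offset that was shifted off.
	 */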
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}
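
/*
 * Set the D$ invalidate mode before an operation: with the IM bit set an
 * "invalidate" command is promoted to flush-n-invalidate, which is what
 * OP_FLUSH_N_INV relies on.
 */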
static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op;
	 *                                          call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op;
	 *                                          call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we
 * don't need a pure invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}
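
/*
 * Entire-flush (writeback only) variant. Unlike a pure invalidate, a flush
 * alone doesn't discard data (such as return addresses) that the caller
 * still needs on the stack, see [ NOTE 1 ].
 */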
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches).
 * It can be used for cleanup before launching Linux or to sync caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data, it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of
	 * flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}