cache.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

/* for now: just dummy functions to satisfy the linker */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * Default implementation:
 * do a range flush for the entire range
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}

/*
 * Default implementation of enable_caches()
 * Real implementation should be in platform code
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}

__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}

__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}
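
/*
 * Verify that [start, stop) is aligned to CONFIG_SYS_CACHELINE_SIZE on
 * both ends, warning (outside of SPL) when it is not.  A caller would
 * typically gate a maintenance operation on it, roughly like this
 * (illustrative sketch only, not code from this file):
 *
 *	if (check_cache_range(start, stop))
 *		flush_dcache_range(start, stop);
 */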
int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok) {
		warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
			     start, stop);
	}

	return ok;
}

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve one MMU section worth of address space below the malloc() area that
 * will be mapped uncached.
 */
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;
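
/*
 * Carve the non-cached region out of the address space directly below
 * the malloc() arena and, unless the d-cache is disabled, mark it
 * uncached in the page tables.
 */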
void noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start, size, DCACHE_OFF);
#endif
}
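
/*
 * Simple bump allocator over the non-cached region: hand out the next
 * suitably aligned chunk and never free it.  Returns 0 once the region
 * is exhausted.  A driver might use it for DMA buffers roughly like
 * this (illustrative sketch only, not code from this file):
 *
 *	buf = noncached_alloc(len, ARCH_DMA_MINALIGN);
 *	if (!buf)
 *		return -ENOMEM;
 */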
phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return next;
}
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
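/*
 * Invalidate the entire L2 cache via an implementation-defined cp15 c15
 * operation (the form used on Cortex-A8 class cores); the isb() makes
 * sure the operation takes effect before execution continues.
 */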
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		     : : "r" (val) : "cc");
	isb();
}
#endif
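
/* Generic hook: ARM reserves space for its page tables in arm_reserve_mmu() */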
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}
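
/*
 * Default reservation: carve PGTABLE_SIZE bytes for the TLB/page table
 * out of the top of the relocation area and round the relocation
 * address down to a 64 kB boundary.
 */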
__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->tlb_addr is later
	 * overwritten with a location within secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}