cacheflush.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>
#include <asm/sbi.h>

#ifdef CONFIG_SMP

static void ipi_remote_fence_i(void *info)
{
	return local_flush_icache_all();
}
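
/*
 * Flush the instruction cache on every hart: flush the local hart directly,
 * then ask the remote harts to do the same, either through the SBI or, when
 * SBI is not available, through an IPI.
 */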
void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
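/*
 * Called when a PTE is installed for a page: flush the icache unless
 * PG_dcache_clean is already set on the backing page.  The bit records that
 * the page's contents are already visible to the instruction cache, so
 * subsequent mappings of the same page can skip the flush.
 */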
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */
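
/*
 * T-HEAD vendor-specific DMA cache maintenance helpers.  The ".long"
 * constants below hand-encode custom T-HEAD cache-management instructions,
 * since a generic toolchain does not know their mnemonics; the comment next
 * to each one names the intended instruction.  These helpers do nothing
 * until thead_dma_init() has confirmed the CPU reports T-HEAD's vendor ID.
 */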

static bool thead_dma_init_flag = false;

#define sync_is()	asm volatile (".long 0x01b0000b") /* sync.is */
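
/*
 * Write back and invalidate the D-cache lines covering [start, end).  The
 * loop variable is pinned to register a0 because the hand-encoded instruction
 * takes its address operand from a0.
 */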
void dma_wbinv_range(unsigned long start, unsigned long end)
{
	register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);

	if (!thead_dma_init_flag)
		return;

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile (".long 0x02b5000b"); /* dcache.cipa a0 */

	sync_is();
}
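
/*
 * Write back (clean, without invalidating) the D-cache lines covering
 * [start, end), so that a device reading memory via DMA sees the CPU's
 * latest data.
 */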
void dma_wb_range(unsigned long start, unsigned long end)
{
	register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);

	if (!thead_dma_init_flag)
		return;

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile (".long 0x0295000b"); /* dcache.cpa a0 */

	sync_is();
}
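
/*
 * Enable the helpers above only when the SBI reports T-HEAD's mvendorid, so
 * the custom instructions are never executed on other vendors' cores.
 */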
#define THEAD_VENDOR_ID	0x5b7

static int __init thead_dma_init(void)
{
	if (sbi_get_mvendorid() == THEAD_VENDOR_ID)
		thead_dma_init_flag = true;

	return 0;
}
arch_initcall(thead_dma_init);
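
/*
 * Illustrative sketch only (not part of this file): a streaming-DMA sync hook
 * could dispatch to the helpers above.  The hook name and signature below are
 * assumptions about the surrounding kernel version, shown purely as an
 * example of how the two ranges of cache maintenance are typically chosen:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *				      enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_TO_DEVICE)
 *			dma_wb_range(paddr, paddr + size);
 *		else
 *			dma_wbinv_range(paddr, paddr + size);
 *	}
 */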