// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
 * executing a MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        cpumask_t *mask = &mm->context.icache_stale_mask;

        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                /*
                 * Ensure the remote hart's writes are visible to this hart.
                 * This pairs with a barrier in flush_icache_mm.
                 */
                smp_mb();
                local_flush_icache_all();
        }
#endif
}
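
/*
 * For reference, a minimal sketch of the sender side of the scheme
 * described above.  The real implementation is flush_icache_mm() in
 * arch/riscv/mm/cacheflush.c; the helper names and the exact IPI call
 * below are illustrative assumptions, not code from this file.
 */
#if 0	/* illustrative sketch, not compiled */
static void ipi_flush_icache(void *unused)
{
        local_flush_icache_all();
}

static void example_flush_icache_mm(struct mm_struct *mm)
{
        unsigned int cpu;
        cpumask_t others;

        preempt_disable();

        /* Mark every hart's icache as stale for this mm... */
        cpumask_setall(&mm->context.icache_stale_mask);

        /* ...then flush the local hart right away and un-mark it. */
        cpu = smp_processor_id();
        cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
        local_flush_icache_all();

        /*
         * Order the stale-mask updates before the IPIs; this is the
         * barrier that flush_icache_deferred() pairs with.
         */
        smp_mb();

        /*
         * Only harts currently running this mm get an IPI; every other
         * hart picks up its stale bit in flush_icache_deferred() the
         * next time it switches into this context.
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        if (!cpumask_empty(&others))
                on_each_cpu_mask(&others, ipi_flush_icache, NULL, 1);

        preempt_enable();
}
#endif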

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *task)
{
        unsigned int cpu;
        unsigned long asid;

        if (unlikely(prev == next))
                return;

        /*
         * Mark the current MM context as inactive, and the next as
         * active.  This is at least used by the icache flushing
         * routines in order to determine who should be flushed.
         */
        cpu = smp_processor_id();

        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

#ifdef CONFIG_MMU
        /*
         * Each jal below writes its link register (t0) and falls
         * through to the very next label, so the sequence has no
         * control-flow effect; it is presumably a vendor workaround
         * that scrubs the hart's return-address-stack/branch-predictor
         * state before the address space changes.
         */
        __asm__ __volatile__(
                "jal t0, 1f\n\t"
                "1:\n\t"
                "jal t0, 2f\n\t"
                "2:\n\t"
                "jal t0, 3f\n\t"
                "3:\n\t"
                "jal t0, 4f\n\t"
                "4:\n\t"
                "jal t0, 5f\n\t"
                "5:\n\t"
                "jal t0, 6f\n\t"
                "6:\n\t"
                "jal t0, 7f\n\t"
                "7:\n\t"
                "jal t0, 8f\n\t"
                "8:\n\t"
                "jal t0, 9f\n\t"
                "9:\n\t"
                "jal t0, 10f\n\t"
                "10:\n\t"
                "jal t0, 11f\n\t"
                "11:\n\t"
                "jal t0, 12f\n\t"
                "12:\n\t"
                ::: "memory", "t0");

        check_and_switch_context(next, cpu);
        asid = (atomic64_read(&next->context.asid) & SATP_ASID_MASK)
                << SATP_ASID_SHIFT;

        /* Flush the uTLB before setting satp (vendor workaround). */
        local_flush_tlb_page(0);
        __asm__ __volatile__(
                "li t0, 0\n\t"
                "sfence.vma t0, t0\n\t"
                ::: "memory", "t0");

        /* satp := mode | ASID | root page-table PPN ("sptbr" is satp's legacy name). */
        csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE | asid);
#endif

        flush_icache_deferred(next);
}
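
/*
 * For reference, how the satp value written above is packed on RV64.
 * Field positions are from the RISC-V privileged spec; the helper name
 * is ours, not the kernel's.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned long example_make_satp(pgd_t *pgd, unsigned long asid)
{
        unsigned long satp;

        satp  = virt_to_pfn(pgd);		/* satp[43:0]: root page-table PPN */
        satp |= (asid & SATP_ASID_MASK)
                << SATP_ASID_SHIFT;		/* satp[59:44]: ASID */
        satp |= SATP_MODE;			/* satp[63:60]: mode, e.g. Sv39 */
        return satp;
}
#endif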

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);

struct asid_info asid_info;

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
        asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
}
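
/*
 * A sketch of the fast path such a generation-based allocator typically
 * implements.  The field names (generation, bits, active) are assumptions
 * about struct asid_info, modelled on the arm64-style shared allocator;
 * asid_check_context() itself is defined outside this file.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_asid_check_context(struct asid_info *info,
                                       atomic64_t *pasid, unsigned int cpu,
                                       struct mm_struct *mm)
{
        u64 asid = atomic64_read(pasid);
        u64 old = atomic64_read(per_cpu_ptr(info->active, cpu));

        /*
         * Fast path: the mm's ASID belongs to the current generation
         * (the bits above info->bits match) and we can install it as
         * this CPU's active ASID without racing a rollover.
         */
        if (old &&
            !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
            atomic64_cmpxchg_relaxed(per_cpu_ptr(info->active, cpu),
                                     old, asid))
                return;

        /*
         * Slow path (not shown): take the allocator lock, bump the
         * generation on rollover, flush via the registered callback
         * (asid_flush_cpu_ctxt() below), and hand out a fresh ASID.
         */
}
#endif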

static void asid_flush_cpu_ctxt(void)
{
        local_flush_tlb_all();
}

static int asids_init(void)
{
        /*
         * There must be more usable ASIDs than CPUs that can hold one,
         * or a rollover could find every ASID reserved.
         */
        BUG_ON(((1 << SATP_ASID_BITS) - 1) <= num_possible_cpus());

        if (asid_allocator_init(&asid_info, SATP_ASID_BITS, 1,
                                asid_flush_cpu_ctxt))
                panic("Unable to initialize ASID allocator for %lu ASIDs\n",
                      NUM_ASIDS(&asid_info));

        asid_info.active = &active_asids;
        asid_info.reserved = &reserved_asids;

        pr_info("ASID allocator initialised with %lu entries\n",
                NUM_CTXT_ASIDS(&asid_info));

        local_flush_tlb_all();
        return 0;
}
early_initcall(asids_init);