context.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU. RISC-V has no direct mechanism for instruction cache
 * shoot downs, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches. To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
 * executing a MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart. This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}

#endif
}
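
/*
 * For context: the producing side of this handshake is flush_icache_mm()
 * (in the RISC-V cacheflush code), which, roughly, sets every hart's bit in
 * icache_stale_mask, flushes the local icache, sends the remote fence.i/IPI
 * only to the harts currently found in mm_cpumask(mm), and leaves the
 * remaining bits set so the test above catches those harts the next time
 * they switch to this mm.
 */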

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;
	unsigned long asid;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active. This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifdef CONFIG_MMU
	check_and_switch_context(next, cpu);
	asid = (next->context.asid.counter & SATP_ASID_MASK)
		<< SATP_ASID_SHIFT;
	csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE | asid);
#endif

	flush_icache_deferred(next);
}
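
/*
 * For reference, the CSR value written in switch_mm() above is, on rv64,
 * laid out per the privileged spec as MODE in bits 63:60, a 16-bit ASID in
 * bits 59:44 (hence SATP_ASID_SHIFT) and the root page table PPN in bits
 * 43:0; rv32 instead carries a 9-bit ASID in bits 30:22.  "sptbr" is the
 * legacy name for the CSR now called satp.
 */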

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);

struct asid_info asid_info;

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
}
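
/*
 * asid_check_context(), asid_allocator_init(), NUM_ASIDS() and
 * NUM_CTXT_ASIDS() are assumed to be provided by the generic ASID allocator
 * split out of the arm64 code: on a context switch it hands the mm a valid
 * ASID, rolling over to a new generation and invoking the flush callback
 * below once the ASID space is exhausted, and tracks state in the per-CPU
 * active/reserved slots declared above.
 */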

static void asid_flush_cpu_ctxt(void)
{
	local_flush_tlb_all();
}

static int asids_init(void)
{
	BUG_ON(((1 << SATP_ASID_BITS) - 1) <= num_possible_cpus());

	if (asid_allocator_init(&asid_info, SATP_ASID_BITS, 1,
				asid_flush_cpu_ctxt))
		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
		      NUM_ASIDS(&asid_info));

	asid_info.active = &active_asids;
	asid_info.reserved = &reserved_asids;

	pr_info("ASID allocator initialised with %lu entries\n",
		NUM_CTXT_ASIDS(&asid_info));

	local_flush_tlb_all();
	return 0;
}
early_initcall(asids_init);
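
/*
 * The BUG_ON() in asids_init() above encodes the allocator's assumption
 * that there are strictly more hardware ASIDs than possible CPUs: at
 * rollover every running CPU may keep one ASID reserved, so a smaller
 * space could never make progress.  Registering this as an early_initcall
 * gets the allocator set up before user mm contexts start being switched
 * in.
 */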