context.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
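
/*
 * Allocator state for MMIDs (memory map IDs): cpu_mmid_lock serialises
 * rollover and allocation, mmid_version holds the current generation,
 * mmid_map is the bitmap of in-use MMIDs, reserved_mmids records the MMID
 * each CPU was running at the last rollover, and tlb_flush_pending marks
 * CPUs which must flush their local TLB before running with an MMID from
 * the new generation.
 */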
static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;
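
/* Compare only the version (generation) bits of two ASID/MMID values. */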
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}
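
/*
 * ASID path: advance this CPU's ASID and, when the ASID field wraps,
 * flush the local TLB (and VTag icache if present) to begin a new cycle.
 */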
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);
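
/*
 * Ensure the mm's ASID on this CPU belongs to the current generation,
 * allocating a new one if it has been invalidated by a rollover.
 */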
void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);
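
/*
 * Called on MMID generation rollover, with cpu_mmid_lock held: rebuild the
 * MMID bitmap from the MMIDs currently active on each CPU and queue a TLB
 * flush for every CPU.
 */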
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_clear(mmid_map, 0, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}
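
/*
 * Check whether mmid is one of the per-CPU reserved MMIDs noted at the
 * last rollover; if so, rewrite every matching reservation to newmmid.
 */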
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}
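
/*
 * Slow path: allocate an MMID for mm in the current generation, triggering
 * a rollover if the ID space is exhausted. Called with cpu_mmid_lock held
 * and interrupts disabled.
 */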
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}
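
/*
 * Switch the MMU context for mm on the current CPU: either the classic
 * ASID path, or, with MMIDs, a lock-free fast path that falls back to
 * get_new_mmid() under cpu_mmid_lock on a generation mismatch.
 */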
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);
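
/* Set up the MMID allocator state at boot, if the CPU supports MMIDs. */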
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));

	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);