asid.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

#define asid2idx(info, asid)	(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)	(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
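
/*
 * Worked example (illustrative values only, not required by this file):
 * with info->bits = 16 and asid_per_ctxt = 2 (so info->ctxt_shift = 1):
 *
 *   ASID_MASK(info)          == ~0xffffUL   - clears the 16 ASID bits,
 *                                             keeping the generation bits
 *                                             stored above them
 *   ASID_FIRST_VERSION(info) == 1UL << 16   - generation increment applied
 *                                             on rollover
 *   asid2idx(info, asid)     == (asid & 0xffff) >> 1
 *                                           - bitmap slot shared by an
 *                                             even/odd ASID pair
 *   idx2asid(info, idx)      == (idx << 1) & 0xffff
 *                                           - first ASID of that pair
 */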

static void flush_context(struct asid_info *info)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = reserved_asid(info, i);
                __set_bit(asid2idx(info, asid), info->map);
                reserved_asid(info, i) = asid;
        }

        /*
         * Queue a TLB invalidation for each CPU to perform on next
         * context-switch
         */
        cpumask_setall(&info->flush_pending);
}

static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
                                       u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (reserved_asid(info, cpu) == asid) {
                        hit = true;
                        reserved_asid(info, cpu) = newasid;
                }
        }

        return hit;
}

static u64 new_context(struct asid_info *info, atomic64_t *pasid,
                       struct mm_struct *mm)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(pasid);
        u64 generation = atomic64_read(&info->generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK(info));

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(info, asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                if (!__test_and_set_bit(asid2idx(info, asid), info->map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes. We
         * always count from ASID #2 (index 1), as we use ASID #0 when setting
         * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
         * pairs.
         */
        asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
        if (asid != NUM_CTXT_ASIDS(info))
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
                                                 &info->generation);
        flush_context(info);

        /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
        __set_bit(asid, info->map);
        cur_idx = asid;
        cpumask_clear(mm_cpumask(mm));
        return idx2asid(info, asid) | generation;
}

/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the asid allocator structure
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu().
 * @mm: The mm_struct of the context; mm_cpumask(mm) is updated for the
 * new ASID.
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm)
{
        unsigned long flags;
        u64 asid;

        raw_spin_lock_irqsave(&info->lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(pasid);
        if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
                asid = new_context(info, pasid, mm);
                atomic64_set(pasid, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
                info->flush_cpu_ctxt_cb();

        atomic64_set(&active_asid(info, cpu), asid);
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        raw_spin_unlock_irqrestore(&info->lock, flags);
}
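
/*
 * Hypothetical caller sketch (not part of this allocator): one way an
 * architecture's context-switch path might invoke asid_new_context().
 * "my_asid_info" and "mm_asid_ptr()" are illustrative placeholders for
 * arch-specific glue, not symbols provided by this file or <asm/asid.h>.
 */
#if 0	/* example only, never compiled */
static void my_switch_mm(struct mm_struct *mm)
{
        unsigned int cpu = get_cpu();

        /*
         * Refresh the context's ASID if it belongs to an old generation.
         * A real fast path would typically compare generations locklessly
         * and only call asid_new_context() on a mismatch; calling it
         * unconditionally keeps the sketch short.
         */
        asid_new_context(&my_asid_info, mm_asid_ptr(mm), cpu, mm);
        put_cpu();
}
#endif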

/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of ASIDs available
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 * @flush_cpu_ctxt_cb: Callback invoked on the local CPU to flush its context
 * (e.g. its TLB entries) after a rollover.
 */
int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void))
{
        info->bits = bits;
        info->ctxt_shift = ilog2(asid_per_ctxt);
        info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
        /*
         * Expect allocation after rollover to fail if we don't have at least
         * one more ASID than CPUs. ASID #0 is always reserved.
         */
        WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
        atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
        info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
                            sizeof(*info->map), GFP_KERNEL);
        if (!info->map)
                return -ENOMEM;

        raw_spin_lock_init(&info->lock);

        return 0;
}
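
/*
 * Hypothetical init sketch (not part of this allocator): registering the
 * allocator at boot. "my_asid_info" and "my_flush_local_context" are
 * illustrative placeholders, not symbols provided by this file or by
 * <asm/asid.h>.
 */
#if 0	/* example only, never compiled */
static struct asid_info my_asid_info;

/* Invoked by asid_new_context() when this CPU has a flush pending. */
static void my_flush_local_context(void)
{
        /* Arch-specific local TLB invalidation would go here. */
}

static int __init my_asid_allocator_init(void)
{
        /* 16-bit ASID space, one ASID allocated per context. */
        return asid_allocator_init(&my_asid_info, 16, 1,
                                   my_flush_local_context);
}
#endif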