swap_slots.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also an opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in map_count, which prevents it from being allocated
 * again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock, because when we search for slots with scan_swap_map,
 * we can possibly sleep.
 */
#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);

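/*
 * swap_slot_cache_enabled says whether the cache may be used at all; it is
 * cleared by disable_swap_slots_cache_lock() and re-derived from
 * has_usable_swap().  swap_slot_cache_active tracks whether enough free
 * swap remains for the per-cpu caches to be worth filling; see
 * check_cache_active().
 */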
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

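/*
 * Mark the cache inactive and flush all per-cpu slots back to the global
 * pool so the entries become available to the regular allocation path.
 */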
static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

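/* Mark the cache active again; the per-cpu caches are refilled lazily on use. */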
static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

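/*
 * Counterpart of disable_swap_slots_cache_lock(): re-enable the cache if
 * there is still usable swap, then drop the enable mutex taken there.
 */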
void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

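/*
 * Decide whether the per-cpu caches should be used: the cache is
 * reactivated once free swap exceeds THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE
 * pages per online cpu, and deactivated when it drops below
 * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE pages per online cpu.
 */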
static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of slot caches too low, deactivate cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

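/*
 * CPU hotplug "online" callback (registered in enable_swap_slots_cache()):
 * allocate this cpu's two slot arrays, one for allocations and one for
 * returned entries, and publish them under swap_slots_cache_mutex.
 */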
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);
		kvfree(slots);
		kvfree(slots_ret);
		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to
	 * acquire the corresponding lock and use the cache.  The memory
	 * barrier below ensures the assumption.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

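/*
 * Flush one cpu's cached entries back to the global pool.  @type selects
 * the allocation cache, the return cache, or both; @free_slots additionally
 * frees the backing arrays (used when the cpu goes offline).
 */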
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but has not yet been marked online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online, so we will never have
	 * filled any swap slots in the slots cache of such a cpu.
	 * There are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

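/* CPU hotplug teardown callback: drain and free this cpu's slot caches. */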
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

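/*
 * Enable the swap slots cache.  On first use this registers the cpu hotplug
 * callbacks that allocate and free the per-cpu caches; afterwards it simply
 * re-evaluates whether there is usable swap.
 */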
void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}

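/*
 * Return a freed swap entry.  When the cache is usable the entry is stashed
 * in this cpu's return cache and handed back to the global pool in batches
 * of SWAP_SLOTS_CACHE_SIZE entries; otherwise it is freed directly.
 */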
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

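/*
 * Allocate a swap entry for @page.  THP pages are allocated directly from
 * the global pool (HPAGE_PMD_NR slots at once) when CONFIG_THP_SWAP is
 * enabled; order-0 pages are served from this cpu's slot cache when it is
 * active, falling back to the global pool.  The entry is charged to the
 * page's memory cgroup before it is returned.
 */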
swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}