paddr.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"
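
/*
 * rmap_walk() callback: clear the accessed (young) bit of every PTE or PMD
 * that maps @page in @vma, so that a later __damon_pa_young() check can tell
 * whether the page was accessed in the meantime.
 */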
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = addr,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte)
                        damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
                else
                        damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
        }
        return true;
}
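
/*
 * Clear the accessed bits of all mappings of the page backing the physical
 * address @paddr.  Pages that are not mapped or have no rmap are simply
 * marked idle.  The page is locked only when the rmap walk requires it
 * (non-anonymous or KSM pages).
 */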
static void damon_pa_mkold(unsigned long paddr)
{
        struct page *page = damon_get_page(PHYS_PFN(paddr));
        struct rmap_walk_control rwc = {
                .rmap_one = __damon_pa_mkold,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page)
                return;

        if (!page_mapped(page) || !page_rmapping(page)) {
                set_page_idle(page);
                goto out;
        }

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page))
                goto out;

        rmap_walk(page, &rwc);

        if (need_lock)
                unlock_page(page);

out:
        put_page(page);
}
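
/*
 * Prepare the access check of a single region: pick a random sampling
 * address inside the region and clear the accessed bits of the page that
 * backs it.
 */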
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
                struct damon_region *r)
{
        r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

        damon_pa_mkold(r->sampling_addr);
}
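
/* Prepare the access checks of every region of every monitoring target. */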
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t)
                        __damon_pa_prepare_access_check(ctx, r);
        }
}
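
/* Result of an access check, filled in by the rmap_walk() callback below. */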
struct damon_pa_access_chk_result {
        unsigned long page_sz;
        bool accessed;
};
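
/*
 * rmap_walk() callback: report whether any PTE or PMD mapping of @page in
 * @vma was accessed since the last damon_pa_mkold(), and record the size of
 * the mapping.  The walk stops as soon as an access is found.
 */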
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
{
        struct damon_pa_access_chk_result *result = arg;
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = addr,
        };

        result->accessed = false;
        result->page_sz = PAGE_SIZE;
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
                        result->accessed = pte_young(*pvmw.pte) ||
                                !page_is_idle(page) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        result->accessed = pmd_young(*pvmw.pmd) ||
                                !page_is_idle(page) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                        result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
#else
                        WARN_ON_ONCE(1);
#endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
                }
                if (result->accessed) {
                        page_vma_mapped_walk_done(&pvmw);
                        break;
                }
        }

        /* If accessed, stop walking */
        return !result->accessed;
}
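
/*
 * Check whether the page backing the physical address @paddr was accessed
 * since the last damon_pa_mkold() on it.  On success, *@page_sz is set to
 * the size of the mapping that was checked (PAGE_SIZE or the huge page
 * size).
 */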
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
        struct page *page = damon_get_page(PHYS_PFN(paddr));
        struct damon_pa_access_chk_result result = {
                .page_sz = PAGE_SIZE,
                .accessed = false,
        };
        struct rmap_walk_control rwc = {
                .arg = &result,
                .rmap_one = __damon_pa_young,
                .anon_lock = page_lock_anon_vma_read,
        };
        bool need_lock;

        if (!page)
                return false;

        if (!page_mapped(page) || !page_rmapping(page)) {
                if (page_is_idle(page))
                        result.accessed = false;
                else
                        result.accessed = true;
                put_page(page);
                goto out;
        }

        need_lock = !PageAnon(page) || PageKsm(page);
        if (need_lock && !trylock_page(page)) {
                put_page(page);
                return false;
        }

        rmap_walk(page, &rwc);

        if (need_lock)
                unlock_page(page);
        put_page(page);

out:
        *page_sz = result.page_sz;
        return result.accessed;
}
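
/*
 * Check the access to the page of @r->sampling_addr and update
 * @r->nr_accesses accordingly.  The result for the last checked page is
 * cached in static variables and reused for regions whose sampling address
 * falls in the same page, to avoid redundant rmap walks.
 */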
static void __damon_pa_check_access(struct damon_ctx *ctx,
                struct damon_region *r)
{
        static unsigned long last_addr;
        static unsigned long last_page_sz = PAGE_SIZE;
        static bool last_accessed;

        /* If the region is in the last checked page, reuse the result */
        if (ALIGN_DOWN(last_addr, last_page_sz) ==
                        ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
                if (last_accessed)
                        r->nr_accesses++;
                return;
        }

        last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
        if (last_accessed)
                r->nr_accesses++;

        last_addr = r->sampling_addr;
}
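
/*
 * Check the accesses of all regions of all targets and return the maximum
 * nr_accesses that was observed.
 */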
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
        struct damon_target *t;
        struct damon_region *r;
        unsigned int max_nr_accesses = 0;

        damon_for_each_target(t, ctx) {
                damon_for_each_region(r, t) {
                        __damon_pa_check_access(ctx, r);
                        max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
                }
        }

        return max_nr_accesses;
}
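
/*
 * The physical address space is always a valid monitoring target, so this
 * trivially returns true.
 */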
bool damon_pa_target_valid(void *t)
{
        return true;
}
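
/*
 * Apply a DAMON-based operation scheme to region @r.  Only DAMOS_PAGEOUT is
 * supported: each page of the region has its referenced and young flags
 * cleared, is isolated from its LRU list (unevictable pages are put back),
 * and is handed to reclaim_pages().  Returns the number of bytes the action
 * was applied to.
 */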
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        unsigned long addr, applied;
        LIST_HEAD(page_list);

        if (scheme->action != DAMOS_PAGEOUT)
                return 0;

        for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                struct page *page = damon_get_page(PHYS_PFN(addr));

                if (!page)
                        continue;

                ClearPageReferenced(page);
                test_and_clear_page_young(page);
                if (isolate_lru_page(page)) {
                        put_page(page);
                        continue;
                }
                if (PageUnevictable(page)) {
                        putback_lru_page(page);
                } else {
                        list_add(&page->lru, &page_list);
                        put_page(page);
                }
        }
        applied = reclaim_pages(&page_list);
        cond_resched();
        return applied * PAGE_SIZE;
}
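
/*
 * Return the prioritization score of region @r for the given scheme.  Only
 * DAMOS_PAGEOUT has a dedicated score function; every other action gets the
 * maximum score.
 */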
static int damon_pa_scheme_score(struct damon_ctx *context,
                struct damon_target *t, struct damon_region *r,
                struct damos *scheme)
{
        switch (scheme->action) {
        case DAMOS_PAGEOUT:
                return damon_pageout_score(context, r, scheme);
        default:
                break;
        }

        return DAMOS_MAX_SCORE;
}
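
/*
 * Set the monitoring primitives of @ctx to those for the physical address
 * space.  The callbacks the physical address space does not need (init,
 * update, reset_aggregated, and cleanup) are left NULL.
 */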
void damon_pa_set_primitives(struct damon_ctx *ctx)
{
        ctx->primitive.init = NULL;
        ctx->primitive.update = NULL;
        ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
        ctx->primitive.check_accesses = damon_pa_check_accesses;
        ctx->primitive.reset_aggregated = NULL;
        ctx->primitive.target_valid = damon_pa_target_valid;
        ctx->primitive.cleanup = NULL;
        ctx->primitive.apply_scheme = damon_pa_apply_scheme;
        ctx->primitive.get_scheme_score = damon_pa_scheme_score;
}