// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_pinner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"
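
/*
 * page_pinner: report pages that stay pinned for a long time and pages
 * whose pins made migration (e.g. alloc_contig_range) fail.  Pin events
 * store a stack trace and a boottime timestamp in page_ext; offending
 * pages are copied into ring buffers that are exposed through debugfs.
 */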
#define PAGE_PINNER_STACK_DEPTH 16
#define LONGTERM_PIN_BUCKETS 4096
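
/* Per-page pin state, stored in the page_ext area of each page. */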
struct page_pinner {
	depot_stack_handle_t handle;
	s64 ts_usec;
	atomic_t count;
};
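
/*
 * Snapshot of a pin event, copied into a ring buffer so it survives
 * after the page itself is reused.  The union holds the elapsed hold
 * time for long-term records and the pin timestamp for
 * migration-failure records.
 */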
struct captured_pinner {
	depot_stack_handle_t handle;
	union {
		s64 ts_usec;
		s64 elapsed;
	};

	/* struct page fields */
	unsigned long pfn;
	int count;
	int mapcount;
	struct address_space *mapping;
	unsigned long flags;
};
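
/* Ring buffer of captured records, guarded by a spinlock. */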
struct longterm_pinner {
	spinlock_t lock;
	unsigned int index;
	struct captured_pinner pinner[LONGTERM_PIN_BUCKETS];
};

static struct longterm_pinner lt_pinner = {
	.lock = __SPIN_LOCK_UNLOCKED(lt_pinner.lock),
};
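
/* Report pins held longer than this many microseconds (default 300 ms). */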
static s64 threshold_usec = 300000;

/* alloc_contig failed pinner */
static struct longterm_pinner acf_pinner = {
	.lock = __SPIN_LOCK_UNLOCKED(acf_pinner.lock),
};

static bool page_pinner_enabled;
DEFINE_STATIC_KEY_FALSE(page_pinner_inited);

DEFINE_STATIC_KEY_TRUE(failure_tracking);
EXPORT_SYMBOL(failure_tracking);

static depot_stack_handle_t failure_handle;
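
/* Enable via the "page_pinner" kernel command-line parameter. */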
static int __init early_page_pinner_param(char *buf)
{
	page_pinner_enabled = true;
	return 0;
}
early_param("page_pinner", early_page_pinner_param);

static bool need_page_pinner(void)
{
	return page_pinner_enabled;
}
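
/*
 * Pre-save a sentinel stack during init so save_stack() has a handle to
 * return when stack_depot_save() cannot allocate memory atomically.
 */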
static noinline void register_failure_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	failure_handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static void init_page_pinner(void)
{
	if (!page_pinner_enabled)
		return;

	register_failure_stack();
	static_branch_enable(&page_pinner_inited);
}

struct page_ext_operations page_pinner_ops = {
	.size = sizeof(struct page_pinner),
	.need = need_page_pinner,
	.init = init_page_pinner,
};

static inline struct page_pinner *get_page_pinner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_pinner_ops.offset;
}
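
/*
 * Save the current stack into the stack depot, skipping the two
 * innermost frames; fall back to the pre-registered failure stack if
 * depot storage is unavailable.
 */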
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_PINNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}
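
/* Snapshot the page's fields now; the page may be reused before reporting. */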
static void capture_page_state(struct page *page,
			       struct captured_pinner *record)
{
	record->flags = page->flags;
	record->mapping = page_mapping(page);
	record->pfn = page_to_pfn(page);
	record->count = page_count(page);
	record->mapcount = page_mapcount(page);
}
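
/*
 * On unpin, compute how long the page was held and append a record to
 * the long-term ring buffer if it exceeded threshold_usec.
 */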
static void check_longterm_pin(struct page_pinner *page_pinner,
			       struct page *page)
{
	s64 now, delta = 0;
	unsigned long flags;
	unsigned int idx;
	struct captured_pinner record;

	now = ktime_to_us(ktime_get_boottime());

	/* get/put_page can race; ignore that case */
	if (page_pinner->ts_usec < now)
		delta = now - page_pinner->ts_usec;

	if (delta <= threshold_usec)
		return;

	record.handle = page_pinner->handle;
	record.elapsed = delta;
	capture_page_state(page, &record);

	spin_lock_irqsave(&lt_pinner.lock, flags);
	idx = lt_pinner.index++;
	lt_pinner.index %= LONGTERM_PIN_BUCKETS;
	lt_pinner.pinner[idx] = record;
	spin_unlock_irqrestore(&lt_pinner.lock, flags);
}
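
/*
 * Called when a page is freed or its pin dropped: close out the tracked
 * pin on each sub-page, and on free also record and clear any pending
 * migration-failure state.
 */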
void __reset_page_pinner(struct page *page, unsigned int order, bool free)
{
	struct page_pinner *page_pinner;
	struct page_ext *page_ext;
	int i;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	/* advance page_ext on every iteration, even when no bit is set */
	for (i = 0; i < (1 << order); i++, page_ext = page_ext_next(page_ext)) {
		if (!test_bit(PAGE_EXT_GET, &page_ext->flags) &&
		    !test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED,
			      &page_ext->flags))
			continue;

		page_pinner = get_page_pinner(page_ext);
		if (free) {
			/* record page free call path */
			__page_pinner_migration_failed(page);
			atomic_set(&page_pinner->count, 0);
			__clear_bit(PAGE_EXT_PINNER_MIGRATION_FAILED,
				    &page_ext->flags);
		} else {
			check_longterm_pin(page_pinner, page);
		}
		clear_bit(PAGE_EXT_GET, &page_ext->flags);
	}
}
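
/* Stamp every sub-page's page_ext with the pin stack trace and timestamp. */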
static inline void __set_page_pinner_handle(struct page *page,
		struct page_ext *page_ext, depot_stack_handle_t handle,
		unsigned int order)
{
	struct page_pinner *page_pinner;
	int i;
	s64 usec = ktime_to_us(ktime_get_boottime());

	for (i = 0; i < (1 << order); i++) {
		page_pinner = get_page_pinner(page_ext);
		page_pinner->handle = handle;
		page_pinner->ts_usec = usec;
		set_bit(PAGE_EXT_GET, &page_ext->flags);
		atomic_inc(&page_pinner->count);
		page_ext = page_ext_next(page_ext);
	}
}

noinline void __set_page_pinner(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
	__set_page_pinner_handle(page, page_ext, handle, order);
}
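
/*
 * Format one captured record for userspace: a header line (elapsed hold
 * time for long-term records, pin timestamp otherwise), the captured
 * page state, and the saved stack trace.
 */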
static ssize_t
print_page_pinner(bool longterm, char __user *buf, size_t count,
		  struct captured_pinner *record)
{
	int ret;
	unsigned long *entries;
	unsigned int nr_entries;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (longterm) {
		ret = snprintf(kbuf, count, "Page pinned for %lld us\n",
			       record->elapsed);
	} else {
		s64 ts_usec = record->ts_usec;
		unsigned long rem_usec = do_div(ts_usec, 1000000);

		ret = snprintf(kbuf, count,
			       "Page pinned ts [%5lu.%06lu]\n",
			       (unsigned long)ts_usec, rem_usec);
	}

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	ret += snprintf(kbuf + ret, count - ret,
			"PFN 0x%lx Block %lu count %d mapcount %d mapping %pS Flags %#lx(%pGp)\n",
			record->pfn,
			record->pfn >> pageblock_order,
			record->count, record->mapcount,
			record->mapping,
			record->flags, &record->flags);
	if (ret >= count)
		goto err;

	nr_entries = stack_depot_fetch(record->handle, &entries);
	ret += stack_trace_snprint(kbuf + ret, count - ret, entries,
				   nr_entries, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
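
/* Dump a single page's pinner state to the kernel log. */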
void __dump_page_pinner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_pinner *page_pinner;
	depot_stack_handle_t handle;
	unsigned long *entries;
	unsigned int nr_entries;
	int pageblock_mt;
	unsigned long pfn;
	int count;
	unsigned long rem_usec;
	s64 ts_usec;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_pinner = get_page_pinner(page_ext);
	count = atomic_read(&page_pinner->count);
	if (!count) {
		pr_alert("page_pinner info is not present (never set?)\n");
		return;
	}

	pfn = page_to_pfn(page);
	ts_usec = page_pinner->ts_usec;
	rem_usec = do_div(ts_usec, 1000000);
	pr_alert("page last pinned [%5lu.%06lu] count %d\n",
		 (unsigned long)ts_usec, rem_usec, count);

	pageblock_mt = get_pageblock_migratetype(page);
	pr_alert("PFN %lu Block %lu type %s Flags %#lx(%pGp)\n",
		 pfn,
		 pfn >> pageblock_order,
		 migratetype_names[pageblock_mt],
		 page->flags, &page->flags);

	handle = READ_ONCE(page_pinner->handle);
	if (!handle) {
		pr_alert("page_pinner allocation stack trace missing\n");
	} else {
		nr_entries = stack_depot_fetch(handle, &entries);
		stack_trace_print(entries, nr_entries, 0);
	}
}
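
/*
 * Record the current stack and timestamp for a page that was marked as
 * having caused a migration failure.
 */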
void __page_pinner_migration_failed(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct captured_pinner record;
	unsigned long flags;
	unsigned int idx;

	if (unlikely(!page_ext))
		return;

	if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags))
		return;

	record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
	record.ts_usec = ktime_to_us(ktime_get_boottime());
	capture_page_state(page, &record);

	spin_lock_irqsave(&acf_pinner.lock, flags);
	idx = acf_pinner.index++;
	acf_pinner.index %= LONGTERM_PIN_BUCKETS;
	acf_pinner.pinner[idx] = record;
	spin_unlock_irqrestore(&acf_pinner.lock, flags);
}
EXPORT_SYMBOL(__page_pinner_migration_failed);
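
/*
 * Mark every page that failed to migrate so that its subsequent puts
 * and frees are captured in the alloc_contig-failed ring buffer.
 */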
void __page_pinner_mark_migration_failed_pages(struct list_head *page_list)
{
	struct page *page;
	struct page_ext *page_ext;

	list_for_each_entry(page, page_list, lru) {
		/* The page will be freed by putback_movable_pages soon */
		if (page_count(page) == 1)
			continue;

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		__set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags);
		__page_pinner_migration_failed(page);
	}
}
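
/*
 * debugfs read: each read() returns one formatted record, newest first;
 * *ppos counts how far back from the most recent slot we have walked.
 */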
static ssize_t
read_longterm_page_pinner(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	loff_t i, idx;
	struct captured_pinner record;
	unsigned long flags;

	if (!static_branch_unlikely(&page_pinner_inited))
		return -EINVAL;

	if (*ppos >= LONGTERM_PIN_BUCKETS)
		return 0;

	i = *ppos;
	*ppos = i + 1;

	/*
	 * Read the records in reverse order: the newest record first,
	 * followed by older ones.  Read the index under the lock, too.
	 */
	spin_lock_irqsave(&lt_pinner.lock, flags);
	idx = (lt_pinner.index - 1 - i + LONGTERM_PIN_BUCKETS) %
	      LONGTERM_PIN_BUCKETS;
	record = lt_pinner.pinner[idx];
	spin_unlock_irqrestore(&lt_pinner.lock, flags);
	if (!record.handle)
		return 0;

	return print_page_pinner(true, buf, count, &record);
}

static const struct file_operations proc_longterm_pinner_operations = {
	.read = read_longterm_page_pinner,
};
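
/* Same read protocol as longterm_pinner, over the alloc_contig-failed ring. */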
static ssize_t read_alloc_contig_failed(struct file *file, char __user *buf,
					size_t count, loff_t *ppos)
{
	loff_t i, idx;
	struct captured_pinner record;
	unsigned long flags;

	if (!static_branch_unlikely(&failure_tracking))
		return -EINVAL;

	if (*ppos >= LONGTERM_PIN_BUCKETS)
		return 0;

	i = *ppos;
	*ppos = i + 1;

	/*
	 * Read the records in reverse order: the newest record first,
	 * followed by older ones.  Read the index under the lock, too.
	 */
	spin_lock_irqsave(&acf_pinner.lock, flags);
	idx = (acf_pinner.index - 1 - i + LONGTERM_PIN_BUCKETS) %
	      LONGTERM_PIN_BUCKETS;
	record = acf_pinner.pinner[idx];
	spin_unlock_irqrestore(&acf_pinner.lock, flags);
	if (!record.handle)
		return 0;

	return print_page_pinner(false, buf, count, &record);
}

static const struct file_operations proc_alloc_contig_failed_operations = {
	.read = read_alloc_contig_failed,
};
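
/* Setting a new threshold discards all previously collected records. */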
static int pp_threshold_set(void *data, unsigned long long val)
{
	unsigned long flags;

	threshold_usec = (s64)val;

	spin_lock_irqsave(&lt_pinner.lock, flags);
	memset(lt_pinner.pinner, 0,
	       sizeof(struct captured_pinner) * LONGTERM_PIN_BUCKETS);
	lt_pinner.index = 0;
	spin_unlock_irqrestore(&lt_pinner.lock, flags);
	return 0;
}

static int pp_threshold_get(void *data, unsigned long long *val)
{
	*val = (unsigned long long)threshold_usec;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(pp_threshold_fops, pp_threshold_get,
			 pp_threshold_set, "%lld\n");
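
/* Runtime switch for recording migration/alloc_contig failures. */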
static int failure_tracking_set(void *data, u64 val)
{
	bool on;

	on = (bool)val;
	if (on)
		static_branch_enable(&failure_tracking);
	else
		static_branch_disable(&failure_tracking);
	return 0;
}

static int failure_tracking_get(void *data, u64 *val)
{
	*val = static_branch_unlikely(&failure_tracking);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(failure_tracking_fops,
			 failure_tracking_get,
			 failure_tracking_set, "%llu\n");
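
/* Create the debugfs directory and its report/control files. */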
static int __init page_pinner_init(void)
{
	struct dentry *pp_debugfs_root;

	if (!static_branch_unlikely(&page_pinner_inited))
		return 0;

	pr_info("page_pinner enabled\n");

	pp_debugfs_root = debugfs_create_dir("page_pinner", NULL);

	debugfs_create_file("longterm_pinner", 0444, pp_debugfs_root, NULL,
			    &proc_longterm_pinner_operations);
	debugfs_create_file("threshold", 0644, pp_debugfs_root, NULL,
			    &pp_threshold_fops);
	debugfs_create_file("alloc_contig_failed", 0444,
			    pp_debugfs_root, NULL,
			    &proc_alloc_contig_failed_operations);
	debugfs_create_file("failure_tracking", 0644,
			    pp_debugfs_root, NULL,
			    &failure_tracking_fops);
	return 0;
}
late_initcall(page_pinner_init)
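
/*
 * Example usage, a sketch assuming debugfs is mounted at
 * /sys/kernel/debug and the kernel was booted with "page_pinner" on the
 * command line:
 *
 *   echo 500000 > /sys/kernel/debug/page_pinner/threshold
 *   cat /sys/kernel/debug/page_pinner/longterm_pinner
 *   echo 1 > /sys/kernel/debug/page_pinner/failure_tracking
 *   cat /sys/kernel/debug/page_pinner/alloc_contig_failed
 */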