// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/page_reporting.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include "page_reporting.h"
#include "internal.h"

#define PAGE_REPORTING_DELAY	(2 * HZ)
static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly;

enum {
	PAGE_REPORTING_IDLE = 0,
	PAGE_REPORTING_REQUESTED,
	PAGE_REPORTING_ACTIVE
};
/* request page reporting */
static void
__page_reporting_request(struct page_reporting_dev_info *prdev)
{
	unsigned int state;

	/* Check to see if we are in desired state */
	state = atomic_read(&prdev->state);
	if (state == PAGE_REPORTING_REQUESTED)
		return;

	/*
	 * If reporting is already active there is nothing we need to do.
	 * Test against 0 as that represents PAGE_REPORTING_IDLE.
	 */
	state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED);
	if (state != PAGE_REPORTING_IDLE)
		return;

	/*
	 * Delay the start of work to allow a sizable queue to build. For
	 * now we are limiting this to running no more than once every
	 * couple of seconds.
	 */
	schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}
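
/*
 * State transitions, for reference (all three states are defined in the
 * enum above):
 *
 *	IDLE      -> REQUESTED	__page_reporting_request() arms the
 *				delayed work when a page is freed
 *	REQUESTED -> ACTIVE	page_reporting_process() begins a pass
 *	ACTIVE    -> IDLE	the pass completes with no new requests
 *	ACTIVE    -> REQUESTED	a page is freed mid-pass; the worker
 *				reschedules itself at the end of the pass
 */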
/* notify prdev of free page reporting request */
void __page_reporting_notify(void)
{
	struct page_reporting_dev_info *prdev;

	/*
	 * We use RCU to protect the pr_dev_info pointer. In almost all
	 * cases this should be present, however in the unlikely case of
	 * a shutdown this will be NULL and we should exit.
	 */
	rcu_read_lock();
	prdev = rcu_dereference(pr_dev_info);
	if (likely(prdev))
		__page_reporting_request(prdev);
	rcu_read_unlock();
}
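
/*
 * Sketch (an assumption based on the companion header, not shown in this
 * file): allocator-side callers do not invoke __page_reporting_notify()
 * directly. A static-key-gated inline helper along these lines filters
 * out small pages on the free path first:
 *
 *	static inline void page_reporting_notify_free(unsigned int order)
 *	{
 *		// skip cheaply unless a reporting device is registered
 *		if (!static_branch_unlikely(&page_reporting_enabled))
 *			return;
 *
 *		// only pages of at least the minimum order are reported
 *		if (order < PAGE_REPORTING_MIN_ORDER)
 *			return;
 *
 *		__page_reporting_notify();
 *	}
 */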
static void
page_reporting_drain(struct page_reporting_dev_info *prdev,
		     struct scatterlist *sgl, unsigned int nents, bool reported)
{
	struct scatterlist *sg = sgl;

	/*
	 * Drain the now reported pages back into their respective
	 * free lists/areas. We assume at least one page is populated.
	 */
	do {
		struct page *page = sg_page(sg);
		int mt = get_pageblock_migratetype(page);
		unsigned int order = get_order(sg->length);

		__putback_isolated_page(page, order, mt);

		/* If the pages were not reported due to error skip flagging */
		if (!reported)
			continue;

		/*
		 * If page was not comingled with another page we can
		 * consider the result to be "reported" since the page
		 * hasn't been modified, otherwise we will need to
		 * report on the new larger page when we make our way
		 * up to that higher order.
		 */
		if (PageBuddy(page) && buddy_order(page) == order)
			__SetPageReported(page);
	} while ((sg = sg_next(sg)));

	/* reinitialize scatterlist now that it is empty */
	sg_init_table(sgl, nents);
}
/*
 * The page reporting cycle consists of 4 stages: fill, report, drain, and
 * idle. We will cycle through the first 3 stages until we cannot obtain a
 * full scatterlist of pages; at that point we switch to idle.
 */
static int
page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
		     unsigned int order, unsigned int mt,
		     struct scatterlist *sgl, unsigned int *offset)
{
	struct free_area *area = &zone->free_area[order];
	struct list_head *list = &area->free_list[mt];
	unsigned int page_len = PAGE_SIZE << order;
	struct page *page, *next;
	long budget;
	int err = 0;

	/*
	 * Perform early check, if free area is empty there is
	 * nothing to process so we can skip this free_list.
	 */
	if (list_empty(list))
		return err;

	spin_lock_irq(&zone->lock);

	/*
	 * Limit how many calls we will be making to the page reporting
	 * device for this list. By doing this we avoid processing any
	 * given list for too long.
	 *
	 * The current value used allows us enough calls to process over a
	 * sixteenth of the current list plus one additional call to handle
	 * any pages that may have already been present from the previous
	 * list processed. This should result in us reporting all pages on
	 * an idle system in about 30 seconds.
	 *
	 * The division here should be cheap since PAGE_REPORTING_CAPACITY
	 * should always be a power of 2.
	 */
	budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16);

	/* loop through free list adding unreported pages to sg list */
	list_for_each_entry_safe(page, next, list, lru) {
		/* We are going to skip over the reported pages. */
		if (PageReported(page))
			continue;

		/*
		 * If we fully consumed our budget then update our
		 * state to indicate that we are requesting additional
		 * processing and exit this list.
		 */
		if (budget < 0) {
			atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED);
			next = page;
			break;
		}

		/* Attempt to pull page from list and place in scatterlist */
		if (*offset) {
			if (!__isolate_free_page(page, order)) {
				next = page;
				break;
			}

			/* Add page to scatter list */
			--(*offset);
			sg_set_page(&sgl[*offset], page, page_len, 0);

			continue;
		}

		/*
		 * Make the first non-reported page in the free list
		 * the new head of the free list before we release the
		 * zone lock.
		 */
		if (!list_is_first(&page->lru, list))
			list_rotate_to_front(&page->lru, list);

		/* release lock before waiting on report processing */
		spin_unlock_irq(&zone->lock);

		/* begin processing pages in local list */
		err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY);

		/* reset offset since the full list was reported */
		*offset = PAGE_REPORTING_CAPACITY;

		/* update budget to reflect call to report function */
		budget--;

		/* reacquire zone lock and resume processing */
		spin_lock_irq(&zone->lock);

		/* flush reported pages from the sg list */
		page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err);

		/*
		 * Reset next to first entry, the old next isn't valid
		 * since we dropped the lock to report the pages
		 */
		next = list_first_entry(list, struct page, lru);

		/* exit on error */
		if (err)
			break;
	}

	/* Rotate any leftover pages to the head of the freelist */
	if (&next->lru != list && !list_is_first(&next->lru, list))
		list_rotate_to_front(&next->lru, list);

	spin_unlock_irq(&zone->lock);

	return err;
}
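
/*
 * Worked example of the budget above (illustrative; assumes
 * PAGE_REPORTING_CAPACITY is 32, its value in the companion header).
 * For a free list holding nr_free = 10000 pages of a given order:
 *
 *	budget = DIV_ROUND_UP(10000, 32 * 16)
 *	       = DIV_ROUND_UP(10000, 512) = 20
 *
 * so at most 20 report calls, covering 20 * 32 = 640 pages (a sixteenth
 * of the list plus the one extra call), are made before the cycle
 * re-arms itself and moves on.
 */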
static int
page_reporting_process_zone(struct page_reporting_dev_info *prdev,
			    struct scatterlist *sgl, struct zone *zone)
{
	unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;
	unsigned long watermark;
	int err = 0;

	/* Generate minimum watermark to be able to guarantee progress */
	watermark = low_wmark_pages(zone) +
		    (PAGE_REPORTING_CAPACITY << PAGE_REPORTING_MIN_ORDER);

	/*
	 * Cancel request if insufficient free memory or if we failed
	 * to allocate page reporting statistics for the zone.
	 */
	if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
		return err;

	/* Process each free list starting from lowest order/mt */
	for (order = PAGE_REPORTING_MIN_ORDER; order < MAX_ORDER; order++) {
		for (mt = 0; mt < MIGRATE_TYPES; mt++) {
			/* We do not pull pages from the isolate free list */
			if (is_migrate_isolate(mt))
				continue;

			err = page_reporting_cycle(prdev, zone, order, mt,
						   sgl, &offset);
			if (err)
				return err;
		}
	}

	/* report the leftover pages before going idle */
	leftover = PAGE_REPORTING_CAPACITY - offset;
	if (leftover) {
		sgl = &sgl[offset];
		err = prdev->report(prdev, sgl, leftover);

		/* flush any remaining pages out from the last report */
		spin_lock_irq(&zone->lock);
		page_reporting_drain(prdev, sgl, leftover, !err);
		spin_unlock_irq(&zone->lock);
	}

	return err;
}
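
/*
 * Worked example of the watermark above (illustrative; assumes 4 KiB
 * pages, PAGE_REPORTING_CAPACITY of 32, and PAGE_REPORTING_MIN_ORDER
 * equal to a pageblock order of 9, as on a typical x86-64 config):
 *
 *	watermark = low_wmark_pages(zone) + (32 << 9)
 *		  = low_wmark_pages(zone) + 16384 pages (64 MiB)
 *
 * i.e. a zone must sit comfortably above its low watermark before we
 * risk temporarily isolating a full scatterlist worth of pages from it.
 */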
static void page_reporting_process(struct work_struct *work)
{
	struct delayed_work *d_work = to_delayed_work(work);
	struct page_reporting_dev_info *prdev =
		container_of(d_work, struct page_reporting_dev_info, work);
	int err = 0, state = PAGE_REPORTING_ACTIVE;
	struct scatterlist *sgl;
	struct zone *zone;

	/*
	 * Change the state to "Active" so that we can track whether anyone
	 * requests page reporting while our pass is in progress. If the
	 * state is not altered by the end of the pass we will switch to
	 * idle and quit scheduling reporting runs.
	 */
	atomic_set(&prdev->state, state);

	/* allocate scatterlist to store pages being reported on */
	sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		goto err_out;

	sg_init_table(sgl, PAGE_REPORTING_CAPACITY);

	for_each_zone(zone) {
		err = page_reporting_process_zone(prdev, sgl, zone);
		if (err)
			break;
	}

	kfree(sgl);
err_out:
	/*
	 * If the state has reverted back to requested then there may be
	 * additional pages to be processed. We will defer for 2s to allow
	 * more pages to accumulate.
	 */
	state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE);
	if (state == PAGE_REPORTING_REQUESTED)
		schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY);
}
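
/*
 * Note on the final atomic_cmpxchg() above: it resolves a race with
 * __page_reporting_request(). If the state is still ACTIVE, the swap to
 * IDLE succeeds and the worker stops rescheduling itself. If a page was
 * freed mid-pass, the state will have moved to REQUESTED, the swap fails,
 * and the returned value triggers another delayed run.
 */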
static DEFINE_MUTEX(page_reporting_mutex);
DEFINE_STATIC_KEY_FALSE(page_reporting_enabled);

int page_reporting_register(struct page_reporting_dev_info *prdev)
{
	int err = 0;

	mutex_lock(&page_reporting_mutex);

	/* nothing to do if already in use */
	if (rcu_access_pointer(pr_dev_info)) {
		err = -EBUSY;
		goto err_out;
	}

	/* initialize state and work structures */
	atomic_set(&prdev->state, PAGE_REPORTING_IDLE);
	INIT_DELAYED_WORK(&prdev->work, &page_reporting_process);

	/* Begin initial flush of zones */
	__page_reporting_request(prdev);

	/* Assign device to allow notifications */
	rcu_assign_pointer(pr_dev_info, prdev);

	/* enable page reporting notification */
	if (!static_key_enabled(&page_reporting_enabled)) {
		static_branch_enable(&page_reporting_enabled);
		pr_info("Free page reporting enabled\n");
	}
err_out:
	mutex_unlock(&page_reporting_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(page_reporting_register);
void page_reporting_unregister(struct page_reporting_dev_info *prdev)
{
	mutex_lock(&page_reporting_mutex);

	if (rcu_access_pointer(pr_dev_info) == prdev) {
		/* Disable page reporting notification */
		RCU_INIT_POINTER(pr_dev_info, NULL);
		synchronize_rcu();

		/* Flush any existing work, and lock it out */
		cancel_delayed_work_sync(&prdev->work);
	}

	mutex_unlock(&page_reporting_mutex);
}
EXPORT_SYMBOL_GPL(page_reporting_unregister);
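
/*
 * Usage sketch (hypothetical backend; the names below are illustrative
 * and not part of this file). A driver supplies a report() callback that
 * hands the scatterlist of isolated free pages to its hypervisor,
 * returning 0 on success or a negative errno to abort the pass:
 *
 *	static int my_report(struct page_reporting_dev_info *prdev,
 *			     struct scatterlist *sgl, unsigned int nents)
 *	{
 *		// tell the host these pages are free, e.g. over virtio
 *		return 0;
 *	}
 *
 *	static struct page_reporting_dev_info my_dev = {
 *		.report = my_report,
 *	};
 *
 *	// probe: start reporting; only one device may register at a time
 *	err = page_reporting_register(&my_dev);
 *
 *	// remove: quiesce notifications and flush outstanding work
 *	page_reporting_unregister(&my_dev);
 */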