// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static void propagate_protected_usage(struct page_counter *c,
				      unsigned long usage)
{
	unsigned long protected, old_protected;
	unsigned long low, min;
	long delta;

	if (!c->parent)
		return;

	min = READ_ONCE(c->min);
	if (min || atomic_long_read(&c->min_usage)) {
		protected = min(usage, min);
		old_protected = atomic_long_xchg(&c->min_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_min_usage);
	}

	low = READ_ONCE(c->low);
	if (low || atomic_long_read(&c->low_usage)) {
		protected = min(usage, low);
		old_protected = atomic_long_xchg(&c->low_usage, protected);
		delta = protected - old_protected;
		if (delta)
			atomic_long_add(delta, &c->parent->children_low_usage);
	}
}

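/*
 * Illustrative walk-through (editorial note, not part of the original file):
 * suppose a counter has min == 100 pages and its usage grows from 80 to 120
 * pages. The first call records min(80, 100) == 80 in min_usage; the second
 * records min(120, 100) == 100, so delta == 20 is added to the parent's
 * children_min_usage. If usage later drops back to 80, delta == -20 removes
 * that protection again. The low/low_usage pair works the same way.
 */
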
/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
	long new;

	new = atomic_long_sub_return(nr_pages, &counter->usage);
	propagate_protected_usage(counter, new);
	/* More uncharges than charges? */
	WARN_ON_ONCE(new < 0);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		new = atomic_long_add_return(nr_pages, &c->usage);
		propagate_protected_usage(c, new);
		/*
		 * This is indeed racy, but we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
}

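/*
 * Usage sketch (editorial illustration, not part of this file): a caller
 * that must not fail, e.g. when accounting memory that is already committed,
 * charges unconditionally and pairs it with an uncharge on release. The
 * example_* names are hypothetical.
 */
#if 0	/* usage sketch only */
static void example_account(struct page_counter *counter, unsigned long nr_pages)
{
	/* Bumps usage on @counter and every ancestor, ignoring limits. */
	page_counter_charge(counter, nr_pages);
}

static void example_unaccount(struct page_counter *counter, unsigned long nr_pages)
{
	/* Must mirror the charge exactly, or usage drifts over time. */
	page_counter_uncharge(counter, nr_pages);
}
#endif
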
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;
		/*
		 * Charge speculatively to avoid an expensive CAS. If
		 * a bigger charge fails, it might falsely lock out a
		 * racing smaller charge and send it into reclaim
		 * early, but the error is limited to the difference
		 * between the two sizes, which is less than 2M/4M in
		 * case of a THP locking out a regular page charge.
		 *
		 * The atomic_long_add_return() implies a full memory
		 * barrier between incrementing the count and reading
		 * the limit. When racing with page_counter_set_max(),
		 * we either see the new limit or the setter sees the
		 * counter has changed and retries.
		 */
		new = atomic_long_add_return(nr_pages, &c->usage);
		if (new > c->max) {
			atomic_long_sub(nr_pages, &c->usage);
			propagate_protected_usage(c, new);
			/*
			 * This is racy, but we can live with some
			 * inaccuracy in the failcnt which is only used
			 * to report stats.
			 */
			data_race(c->failcnt++);
			*fail = c;
			goto failed;
		}
		propagate_protected_usage(c, new);
		/*
		 * Just like with failcnt, we can live with some
		 * inaccuracy in the watermark.
		 */
		if (new > READ_ONCE(c->watermark))
			WRITE_ONCE(c->watermark, new);
	}
	return true;

failed:
	for (c = counter; c != *fail; c = c->parent)
		page_counter_cancel(c, nr_pages);

	return false;
}

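/*
 * Usage sketch (editorial illustration, not part of this file): a
 * limit-aware caller tries to charge and, on failure, can inspect @fail to
 * see which counter in the hierarchy is full before giving up. The
 * example_* name is hypothetical.
 */
#if 0	/* usage sketch only */
static int example_try_account(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *fail;

	if (page_counter_try_charge(counter, nr_pages, &fail))
		return 0;
	/* @fail is the first counter (self or ancestor) over its limit. */
	return -ENOMEM;
}
#endif
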
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent)
		page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
	for (;;) {
		unsigned long old;
		long usage;

		/*
		 * Update the limit while making sure that it's not
		 * below the concurrently-changing counter value.
		 *
		 * The xchg implies two full memory barriers before
		 * and after, so the read-swap-read is ordered and
		 * ensures coherency with page_counter_try_charge():
		 * that function modifies the count before checking
		 * the limit, so if it sees the old limit, we see the
		 * modified counter and retry.
		 */
		usage = atomic_long_read(&counter->usage);

		if (usage > nr_pages)
			return -EBUSY;

		old = xchg(&counter->max, nr_pages);

		if (atomic_long_read(&counter->usage) <= usage)
			return 0;

		counter->max = old;
		cond_resched();
	}
}

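/*
 * Usage sketch (editorial illustration, not part of this file): applying a
 * new limit from a control-file write, where a limit below current usage is
 * reported back as -EBUSY per the kernel-doc above. The example_* name is
 * hypothetical.
 */
#if 0	/* usage sketch only */
static int example_write_limit(struct page_counter *counter, unsigned long nr_pages)
{
	/* Caller must serialize against other limit updates on @counter. */
	return page_counter_set_max(counter, nr_pages);
}
#endif
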
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->min, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
	struct page_counter *c;

	WRITE_ONCE(counter->low, nr_pages);

	for (c = counter; c; c = c->parent)
		propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages)
{
	char *end;
	u64 bytes;

	if (!strcmp(buf, max)) {
		*nr_pages = PAGE_COUNTER_MAX;
		return 0;
	}

	bytes = memparse(buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

	return 0;
}
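
/*
 * Usage sketch (editorial illustration, not part of this file): parsing a
 * limit string from userspace, where the literal "max" selects the largest
 * possible limit. The example_* name is hypothetical.
 */
#if 0	/* usage sketch only */
static int example_parse_limit(const char *buf, unsigned long *nr_pages)
{
	/*
	 * "100M", "1G", etc. are converted from bytes to pages;
	 * "max" yields PAGE_COUNTER_MAX.
	 */
	return page_counter_memparse(buf, "max", nr_pages);
}
#endif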