hugetlb_cgroup.c

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
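
/*
 * A cftype's ->private packs the hstate index into the upper bits and the
 * RES_* attribute into the low 16 bits, so one read/write handler can serve
 * every hstate and resource file.
 */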
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

#define hugetlb_cgroup_from_counter(counter, idx)			\
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
			return true;
	}
	return false;
}
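
/*
 * Set up the fault and reservation page counters for every hstate, parenting
 * them under @parent_h_cgroup (NULL for the root cgroup) and capping each at
 * PAGE_COUNTER_MAX rounded down to a whole number of huge pages.
 */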
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}
/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or test whether the page is active here. This
 * function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}
/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}
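
/*
 * Record a hugetlb memory event: bump the local counter and notify this
 * cgroup's events.local file, then walk up the hierarchy bumping the
 * hierarchical counters and notifying each ancestor's events file,
 * stopping at the root.
 */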
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}
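
/*
 * Charge @nr_pages of hstate @idx against the current task's hugetlb cgroup.
 * Returns 0 on success with *ptr pointing at the charged cgroup, or -ENOMEM
 * after recording a HUGETLB_MAX event if the counter limit would be exceeded.
 * Reservation charges (@rsvd) keep their css reference because reservations
 * are not reparented.
 */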
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}
/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct page *page, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(page, h_cg, rsvd);
	return;
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}
/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					   struct page *page, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(page, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);

	return;
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
				       struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}
void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages,
					 bool region_del)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
		/*
		 * Only do css_put(rg->css) when we delete the entire region
		 * because one file_region must hold exactly one css reference.
		 */
		if (region_del)
			css_put(rg->css);
	}
}
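
/*
 * Resource identifiers packed into cftype->private via MEMFILE_PRIVATE().
 * Each resource comes in a fault (page usage) and a reservation (RSVD)
 * flavour.
 */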
enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}
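
/*
 * Writes to the limit files parse the value ("max" or "-1" meaning no
 * limit), round it down to a whole number of huge pages and apply it
 * with page_counter_set_max() under hugetlb_limit_mutex.
 */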
static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}
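
/*
 * Register the cgroup v2 interface files for hstate @idx: <size>.max,
 * <size>.rsvd.max, <size>.current, <size>.rsvd.current, <size>.events and
 * <size>.events.local (e.g. hugetlb.2MB.max for a 2MB hstate), none of
 * which appear on the root cgroup.
 */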
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[6];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[8];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}
static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}
/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);
	set_hugetlb_cgroup_rsvd(oldhpage, NULL);

	/* Move the h_cg details over to the new page */
	set_hugetlb_cgroup(newhpage, h_cg);
	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};