// SPDX-License-Identifier: GPL-2.0-only
/*
 * Process number limiting controller for cgroups.
 *
 * Used to allow a cgroup hierarchy to stop any new processes from fork()ing
 * after a certain limit is reached.
 *
 * Since it is trivial to hit the task limit without hitting any kmemcg limits
 * in place, PIDs are a fundamental resource. As such, PID exhaustion must be
 * preventable in the scope of a cgroup hierarchy by allowing resource limiting
 * of the number of tasks in a cgroup.
 *
 * In order to use the `pids` controller, set the maximum number of tasks in
 * pids.max (this is not available in the root cgroup for obvious reasons). The
 * number of processes currently in the cgroup is given by pids.current.
 * Organisational operations are not blocked by cgroup policies, so it is
 * possible to have pids.current > pids.max. However, it is not possible to
 * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
 * would cause a cgroup policy to be violated.
 *
 * To set a cgroup to have no limit, set pids.max to "max". This is the default
 * for all new cgroups (N.B. that PID limits are hierarchical, so the most
 * stringent limit in the hierarchy is followed).
 *
 * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
 * a superset of parent/child/pids.current.
 *
 * Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com>
 */

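/*
 * A minimal usage sketch of the above (assuming a cgroup v2 hierarchy mounted
 * at /sys/fs/cgroup; the cgroup name is illustrative, and enabling the
 * controller via cgroup.subtree_control in the parent is elided):
 *
 *      mkdir /sys/fs/cgroup/mygroup
 *      echo 10 > /sys/fs/cgroup/mygroup/pids.max
 *      echo $$ > /sys/fs/cgroup/mygroup/cgroup.procs
 *
 * Once 10 tasks are charged to mygroup (or any of its descendants), further
 * fork()s into it fail with -EAGAIN and the "max" counter in pids.events is
 * incremented.
 */
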
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

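/*
 * %PIDS_MAX is one greater than the largest valid pid, so it can never be
 * reached by a real charge; it doubles as the internal representation of the
 * "max" (no limit) setting.
 */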
#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
#define PIDS_MAX_STR "max"

struct pids_cgroup {
        struct cgroup_subsys_state      css;

        /*
         * Use 64-bit types so that we can safely represent "max" as
         * %PIDS_MAX = (%PID_MAX_LIMIT + 1).
         */
        atomic64_t                      counter;
        atomic64_t                      limit;

        /* Handle for "pids.events" */
        struct cgroup_file              events_file;

        /* Number of times fork failed because limit was hit. */
        atomic64_t                      events_limit;
};

static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
{
        return container_of(css, struct pids_cgroup, css);
}

static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
{
        return css_pids(pids->css.parent);
}

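/*
 * Allocate per-cgroup state for a new cgroup. A fresh cgroup starts with a
 * zero charge and no limit ("max", i.e. %PIDS_MAX).
 */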
static struct cgroup_subsys_state *
pids_css_alloc(struct cgroup_subsys_state *parent)
{
        struct pids_cgroup *pids;

        pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
        if (!pids)
                return ERR_PTR(-ENOMEM);

        atomic64_set(&pids->counter, 0);
        atomic64_set(&pids->limit, PIDS_MAX);
        atomic64_set(&pids->events_limit, 0);
        return &pids->css;
}

static void pids_css_free(struct cgroup_subsys_state *css)
{
        kfree(css_pids(css));
}

/**
 * pids_cancel - uncharge the local pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to cancel
 *
 * This function will WARN if the pid count goes under 0, because such a case is
 * a bug in the pids controller proper.
 */
static void pids_cancel(struct pids_cgroup *pids, int num)
{
        /*
         * A negative count (or overflow for that matter) is invalid,
         * and indicates a bug in the `pids` controller proper.
         */
        WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
}

/**
 * pids_uncharge - hierarchically uncharge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to uncharge
 */
static void pids_uncharge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; parent_pids(p); p = parent_pids(p))
                pids_cancel(p, num);
}

/**
 * pids_charge - hierarchically charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function does *not* follow the pid limit set. It cannot fail and the new
 * pid count may exceed the limit. This is used for charging tasks on attach
 * (organisational operations must not fail) and for reverting failed attaches,
 * where there is no other way out than violating the limit.
 */
static void pids_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p;

        for (p = pids; parent_pids(p); p = parent_pids(p))
                atomic64_add(num, &p->counter);
}

/**
 * pids_try_charge - hierarchically try to charge the pid count
 * @pids: the pid cgroup state
 * @num: the number of pids to charge
 *
 * This function follows the set limit. It will fail if the charge would cause
 * the new value to exceed the hierarchical limit. Returns 0 if the charge
 * succeeded, otherwise -EAGAIN.
 */
static int pids_try_charge(struct pids_cgroup *pids, int num)
{
        struct pids_cgroup *p, *q;

        for (p = pids; parent_pids(p); p = parent_pids(p)) {
                int64_t new = atomic64_add_return(num, &p->counter);
                int64_t limit = atomic64_read(&p->limit);

                /*
                 * Since new is capped to the maximum number of pid_t, if
                 * p->limit is %PIDS_MAX then we know that this test will never
                 * fail.
                 */
                if (new > limit)
                        goto revert;
        }

        return 0;

revert:
        for (q = pids; q != p; q = parent_pids(q))
                pids_cancel(q, num);
        pids_cancel(p, num);

        return -EAGAIN;
}

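/*
 * Illustration: with a hierarchy a/b/c, pids_try_charge(c, 1) increments the
 * counters of c, b and a (the root is never charged), reverts any partial
 * charges and returns -EAGAIN if the new value would exceed a limit anywhere
 * along the way.
 *
 * Attaching, by contrast, uses pids_charge() rather than pids_try_charge():
 * organisational operations must not fail, so the destination cgroup may be
 * pushed over its limit (see the comment at the top of this file).
 */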
static int pids_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;

        cgroup_taskset_for_each(task, dst_css, tset) {
                struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                /*
                 * No need to pin @old_css between here and cancel_attach()
                 * because cgroup core protects it from being freed before
                 * the migration completes or fails.
                 */
                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(pids, 1);
                pids_uncharge(old_pids, 1);
        }

        return 0;
}

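/*
 * Called if the migration fails after pids_can_attach() has charged the
 * destination: move the charge back to the old cgroup.
 */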
static void pids_cancel_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;

        cgroup_taskset_for_each(task, dst_css, tset) {
                struct pids_cgroup *pids = css_pids(dst_css);
                struct cgroup_subsys_state *old_css;
                struct pids_cgroup *old_pids;

                old_css = task_css(task, pids_cgrp_id);
                old_pids = css_pids(old_css);

                pids_charge(old_pids, 1);
                pids_uncharge(pids, 1);
        }
}

/*
 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
 * on cgroup_threadgroup_change_begin() held by copy_process().
 */
static int pids_can_fork(struct task_struct *task, struct css_set *cset)
{
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;
        int err;

        if (cset)
                css = cset->subsys[pids_cgrp_id];
        else
                css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
        err = pids_try_charge(pids, 1);
        if (err) {
                /* Only log the first time events_limit is incremented. */
                if (atomic64_inc_return(&pids->events_limit) == 1) {
                        pr_info("cgroup: fork rejected by pids controller in ");
                        pr_cont_cgroup_path(css->cgroup);
                        pr_cont("\n");
                }
                cgroup_file_notify(&pids->events_file);
        }
        return err;
}

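/*
 * Undo the charge taken in pids_can_fork() when fork() fails after the
 * charge point.
 */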
static void pids_cancel_fork(struct task_struct *task, struct css_set *cset)
{
        struct cgroup_subsys_state *css;
        struct pids_cgroup *pids;

        if (cset)
                css = cset->subsys[pids_cgrp_id];
        else
                css = task_css_check(current, pids_cgrp_id, true);
        pids = css_pids(css);
        pids_uncharge(pids, 1);
}

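/* Uncharge the pid count when a task exits and its PID is released. */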
static void pids_release(struct task_struct *task)
{
        struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));

        pids_uncharge(pids, 1);
}

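/*
 * Handler for writes to pids.max: accepts either the literal string "max"
 * (no limit) or an integer in [0, PIDS_MAX).
 */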
static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
                              size_t nbytes, loff_t off)
{
        struct cgroup_subsys_state *css = of_css(of);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit;
        int err;

        buf = strstrip(buf);
        if (!strcmp(buf, PIDS_MAX_STR)) {
                limit = PIDS_MAX;
                goto set_limit;
        }

        err = kstrtoll(buf, 0, &limit);
        if (err)
                return err;

        if (limit < 0 || limit >= PIDS_MAX)
                return -EINVAL;

set_limit:
        /*
         * Limit updates don't need to be mutex'd, since it isn't
         * critical that any racing fork()s follow the new limit.
         */
        atomic64_set(&pids->limit, limit);
        return nbytes;
}

static int pids_max_show(struct seq_file *sf, void *v)
{
        struct cgroup_subsys_state *css = seq_css(sf);
        struct pids_cgroup *pids = css_pids(css);
        int64_t limit = atomic64_read(&pids->limit);

        if (limit >= PIDS_MAX)
                seq_printf(sf, "%s\n", PIDS_MAX_STR);
        else
                seq_printf(sf, "%lld\n", limit);

        return 0;
}

static s64 pids_current_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
{
        struct pids_cgroup *pids = css_pids(css);

        return atomic64_read(&pids->counter);
}

static int pids_events_show(struct seq_file *sf, void *v)
{
        struct pids_cgroup *pids = css_pids(seq_css(sf));

        seq_printf(sf, "max %lld\n", (s64)atomic64_read(&pids->events_limit));
        return 0;
}

static struct cftype pids_files[] = {
        {
                .name = "max",
                .write = pids_max_write,
                .seq_show = pids_max_show,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        {
                .name = "current",
                .read_s64 = pids_current_read,
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        {
                .name = "events",
                .seq_show = pids_events_show,
                .file_offset = offsetof(struct pids_cgroup, events_file),
                .flags = CFTYPE_NOT_ON_ROOT,
        },
        { }     /* terminate */
};

struct cgroup_subsys pids_cgrp_subsys = {
        .css_alloc      = pids_css_alloc,
        .css_free       = pids_css_free,
        .can_attach     = pids_can_attach,
        .cancel_attach  = pids_cancel_attach,
        .can_fork       = pids_can_fork,
        .cancel_fork    = pids_cancel_fork,
        .release        = pids_release,
        .legacy_cftypes = pids_files,
        .dfl_cftypes    = pids_files,
        .threaded       = true,
};