debug.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"
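
/*
 * The debug controller keeps no state of its own: css_alloc and css_free
 * just allocate and free a bare cgroup_subsys_state.
 */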
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}
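
/*
 * current_css_set_read - dump the current task's css_set.
 *
 * Prints the css_set pointer and its refcount, flags any references in
 * excess of the task count, and lists the csses the css_set holds.
 */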
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
			   css, css->id);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}
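
/*
 * current_css_set_refcount_read - return the refcount of the current
 * task's css_set.
 */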
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = refcount_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}
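
/*
 * current_css_set_cg_links_read - for each hierarchy, print the cgroup
 * the current task's css_set is linked to.
 */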
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = task_css_set(current);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}
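
/*
 * Cap on the number of tasks printed per css_set by cgroup_css_links_read();
 * any overflow is summarized as "... (N)".
 */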
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;

	spin_lock_irq(&css_set_lock);

	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;
		int refcnt = refcount_read(&cset->refcount);

		/*
		 * Print out the proc_cset and threaded_cset relationship
		 * and highlight difference between refcount and task_count.
		 */
		seq_printf(seq, "css_set %pK", cset);
		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
			threaded_csets++;
			seq_printf(seq, "=>%pK", cset->dom_cset);
		}
		if (!list_empty(&cset->threaded_csets)) {
			struct css_set *tcset;
			int idx = 0;

			list_for_each_entry(tcset, &cset->threaded_csets,
					    threaded_csets_node) {
				seq_puts(seq, idx ? "," : "<=");
				seq_printf(seq, "%pK", tcset);
				idx++;
			}
		} else {
			seq_printf(seq, " %d", refcnt);
			if (refcnt - cset->nr_tasks > 0) {
				int extra = refcnt - cset->nr_tasks;

				seq_printf(seq, " +%d", extra);
				/*
				 * Take out the one additional reference in
				 * init_css_set.
				 */
				if (cset == &init_css_set)
					extra--;
				extra_refs += extra;
			}
		}
		seq_puts(seq, "\n");

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
				seq_printf(seq, " task %d\n",
					   task_pid_vnr(task));
		}
		/* show # of overflowed tasks */
		if (count > MAX_TASKS_SHOWN_PER_CSS)
			seq_printf(seq, " ... (%d)\n",
				   count - MAX_TASKS_SHOWN_PER_CSS);

		if (cset->dead) {
			seq_puts(seq, " [dead]\n");
			dead_cnt++;
		}

		WARN_ON(count != cset->nr_tasks);
	}
	spin_unlock_irq(&css_set_lock);

	if (!dead_cnt && !extra_refs && !threaded_csets)
		return 0;

	seq_puts(seq, "\n");
	if (threaded_csets)
		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
	if (extra_refs)
		seq_printf(seq, "extra references = %d\n", extra_refs);
	if (dead_cnt)
		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
	return 0;
}
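
/*
 * cgroup_subsys_states_read - list every css attached to this cgroup
 * along with its id, parent id and online count.
 */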
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	char pbuf[16];
	int i;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	for_each_subsys(ss, i) {
		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
		if (!css)
			continue;

		pbuf[0] = '\0';

		/* Show the parent CSS if applicable */
		if (css->parent)
			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
				 css->parent->id);

		seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,
			   css, css->id,
			   atomic_read(&css->online_cnt), pbuf);
	}

	cgroup_kn_unlock(of->kn);
	return 0;
}
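
/*
 * cgroup_masks_read_one - print the names of the subsystems set in @mask
 * as a comma separated list under the label @name.
 */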
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
				  u16 mask)
{
	struct cgroup_subsys *ss;
	int ssid;
	bool first = true;

	seq_printf(seq, "%-17s: ", name);
	for_each_subsys(ss, ssid) {
		if (!(mask & (1 << ssid)))
			continue;
		if (!first)
			seq_puts(seq, ", ");
		seq_puts(seq, ss->name);
		first = false;
	}
	seq_putc(seq, '\n');
}
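
/*
 * cgroup_masks_read - dump the cgroup's subtree_control and
 * subtree_ss_mask controller masks.
 */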
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct cgroup *cgrp;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

	cgroup_kn_unlock(of->kn);
	return 0;
}
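
/*
 * releasable_read - return 1 when the cgroup is not populated and has no
 * online children.
 */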
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}
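
/* Debug interface files exposed on cgroup v1 (legacy) hierarchies. */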
static struct cftype debug_legacy_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},
	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},
	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},
	{ }	/* terminate */
};
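
/*
 * Debug interface files exposed on the default (v2) hierarchy.  Same
 * handlers as the legacy files, with shorter names and no "releasable".
 */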
static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},
	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},
	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},
	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},
	{ }	/* terminate */
};
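
/*
 * Only the legacy files are registered unconditionally; the v2 files are
 * wired up by enable_debug_cgroup() below.
 */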
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_legacy_files,
};

/*
 * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
 * parameter.
 */
void __init enable_debug_cgroup(void)
{
	debug_cgrp_subsys.dfl_cftypes = debug_files;
	debug_cgrp_subsys.implicit_on_dfl = true;
	debug_cgrp_subsys.threaded = true;
}