rstat.c

// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
        return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 * rstat_cpu->updated_children list.  See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
        struct cgroup *parent;
        unsigned long flags;

        /* nothing to do for root */
        if (!cgroup_parent(cgrp))
                return;

        /*
         * Speculative already-on-list test. This may race leading to
         * temporary inaccuracies, which is fine.
         *
         * Because @parent's updated_children is terminated with @parent
         * instead of NULL, we can tell whether @cgrp is on the list by
         * testing the next pointer for NULL.
         */
        if (cgroup_rstat_cpu(cgrp, cpu)->updated_next)
                return;

        raw_spin_lock_irqsave(cpu_lock, flags);

        /* put @cgrp and all ancestors on the corresponding updated lists */
        for (parent = cgroup_parent(cgrp); parent;
             cgrp = parent, parent = cgroup_parent(cgrp)) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
                struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);

                /*
                 * Both additions and removals are bottom-up.  If a cgroup
                 * is already in the tree, all ancestors are.
                 */
                if (rstatc->updated_next)
                        break;

                rstatc->updated_next = prstatc->updated_children;
                prstatc->updated_children = cgrp;
        }

        raw_spin_unlock_irqrestore(cpu_lock, flags);
}
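
/*
 * Illustrative example (not part of the original source): a controller that
 * keeps a hypothetical per-cpu counter would bump it and then mark the cgroup
 * as having pending per-cpu state, e.g.:
 *
 *	struct my_pcpu_stat *ps = get_cpu_ptr(cgrp->my_pcpu_stat);
 *
 *	ps->nr_events++;
 *	cgroup_rstat_updated(cgrp, smp_processor_id());
 *	put_cpu_ptr(cgrp->my_pcpu_stat);
 *
 * "my_pcpu_stat" and "nr_events" are made-up names used only to show the
 * calling pattern; see cgroup_base_stat_cputime_account_begin/end() below
 * for the in-file user.
 */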

/**
 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated rstat_cpu tree on @cpu from @root.  A %NULL @pos starts
 * the traversal and a %NULL return indicates the end.  During traversal,
 * each returned cgroup is unlinked from the tree.  Must be called with the
 * matching cgroup_rstat_cpu_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards.
 */
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
                                                   struct cgroup *root, int cpu)
{
        struct cgroup_rstat_cpu *rstatc;

        if (pos == root)
                return NULL;

        /*
         * We're going to walk down to the first leaf and visit/remove it.
         * We can pick any unvisited node as the starting point.
         */
        if (!pos)
                pos = root;
        else
                pos = cgroup_parent(pos);

        /* walk down to the first leaf */
        while (true) {
                rstatc = cgroup_rstat_cpu(pos, cpu);
                if (rstatc->updated_children == pos)
                        break;
                pos = rstatc->updated_children;
        }

        /*
         * Unlink @pos from the tree.  As the updated_children list is
         * singly linked, we have to walk it to find the removal point.
         * However, due to the way we traverse, @pos will be the first
         * child in most cases.  The only exception is @root.
         */
        if (rstatc->updated_next) {
                struct cgroup *parent = cgroup_parent(pos);
                struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
                struct cgroup_rstat_cpu *nrstatc;
                struct cgroup **nextp;

                nextp = &prstatc->updated_children;
                while (true) {
                        nrstatc = cgroup_rstat_cpu(*nextp, cpu);
                        if (*nextp == pos)
                                break;

                        WARN_ON_ONCE(*nextp == parent);
                        nextp = &nrstatc->updated_next;
                }

                *nextp = rstatc->updated_next;
                rstatc->updated_next = NULL;

                return pos;
        }

        /* only happens for @root */
        return NULL;
}
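
/*
 * Illustrative trace (not part of the original source): for a subtree
 * R -> A -> B where B has been updated on @cpu (cgroup_rstat_updated() also
 * links A and R, assuming R itself has a parent), successive calls with
 * @root == R return B, then A, then R, and a final call returns %NULL.
 * Descendants are always popped before their ancestors, which is what lets
 * cgroup_rstat_flush_locked() below propagate per-cpu deltas upwards in a
 * single pass.
 */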

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
        __releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
        int cpu;

        lockdep_assert_held(&cgroup_rstat_lock);

        for_each_possible_cpu(cpu) {
                raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
                                                       cpu);
                struct cgroup *pos = NULL;

                raw_spin_lock(cpu_lock);
                while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
                        struct cgroup_subsys_state *css;

                        cgroup_base_stat_flush(pos, cpu);

                        rcu_read_lock();
                        list_for_each_entry_rcu(css, &pos->rstat_css_list,
                                                rstat_css_node)
                                css->ss->css_rstat_flush(css, cpu);
                        rcu_read_unlock();
                }
                raw_spin_unlock(cpu_lock);

                /* if @may_sleep, play nice and yield if necessary */
                if (may_sleep && (need_resched() ||
                                  spin_needbreak(&cgroup_rstat_lock))) {
                        spin_unlock_irq(&cgroup_rstat_lock);
                        if (!cond_resched())
                                cpu_relax();
                        spin_lock_irq(&cgroup_rstat_lock);
                }
        }
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards.  After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
{
        might_sleep();

        spin_lock_irq(&cgroup_rstat_lock);
        cgroup_rstat_flush_locked(cgrp, true);
        spin_unlock_irq(&cgroup_rstat_lock);
}
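
/*
 * Illustrative usage (not part of the original source): a controller that
 * aggregates per-cpu counters through its css_rstat_flush() callback would
 * typically flush before reporting, e.g. in a hypothetical .seq_show handler:
 *
 *	cgroup_rstat_flush(css->cgroup);
 *	seq_printf(seq, "events %llu\n", my_css(css)->nr_events);
 *
 * "my_css" and "nr_events" are made-up names used only to show the calling
 * pattern.
 */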

/**
 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
 * @cgrp: target cgroup
 *
 * This function can be called from any context.
 */
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
{
        unsigned long flags;

        spin_lock_irqsave(&cgroup_rstat_lock, flags);
        cgroup_rstat_flush_locked(cgrp, false);
        spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
}
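
/*
 * Note on the two flush variants above: cgroup_rstat_flush() may sleep and
 * periodically yields cgroup_rstat_lock, while the irqsafe variant flushes
 * with @may_sleep false and IRQs disabled for the whole walk.  The latter is
 * the one to use from atomic contexts, at the cost of longer IRQs-off
 * sections.
 */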

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
        __acquires(&cgroup_rstat_lock)
{
        might_sleep();
        spin_lock_irq(&cgroup_rstat_lock);
        cgroup_rstat_flush_locked(cgrp, true);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 */
void cgroup_rstat_flush_release(void)
        __releases(&cgroup_rstat_lock)
{
        spin_unlock_irq(&cgroup_rstat_lock);
}
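
/*
 * Illustrative usage of the hold/release pair (see
 * cgroup_base_stat_cputime_show() below for the in-file user): flush once,
 * read several fields consistently under the lock, then release:
 *
 *	cgroup_rstat_flush_hold(cgrp);
 *	usage = cgrp->bstat.cputime.sum_exec_runtime;
 *	...
 *	cgroup_rstat_flush_release();
 */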

int cgroup_rstat_init(struct cgroup *cgrp)
{
        int cpu;

        /* the root cgrp has rstat_cpu preallocated */
        if (!cgrp->rstat_cpu) {
                cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
                if (!cgrp->rstat_cpu)
                        return -ENOMEM;
        }

        /* ->updated_children list is self terminated */
        for_each_possible_cpu(cpu) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

                rstatc->updated_children = cgrp;
                u64_stats_init(&rstatc->bsync);
        }

        return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
        int cpu;

        cgroup_rstat_flush(cgrp);

        /* sanity check */
        for_each_possible_cpu(cpu) {
                struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);

                if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
                    WARN_ON_ONCE(rstatc->updated_next))
                        return;
        }

        free_percpu(cgrp->rstat_cpu);
        cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));

        BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
                                 struct cgroup_base_stat *src_bstat)
{
        dst_bstat->cputime.utime += src_bstat->cputime.utime;
        dst_bstat->cputime.stime += src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
                                 struct cgroup_base_stat *src_bstat)
{
        dst_bstat->cputime.utime -= src_bstat->cputime.utime;
        dst_bstat->cputime.stime -= src_bstat->cputime.stime;
        dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
        struct cgroup *parent = cgroup_parent(cgrp);
        struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
        struct cgroup_base_stat cur, delta;
        unsigned seq;

        /* fetch the current per-cpu values */
        do {
                seq = __u64_stats_fetch_begin(&rstatc->bsync);
                cur.cputime = rstatc->bstat.cputime;
        } while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

        /* propagate percpu delta to global */
        delta = cur;
        cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
        cgroup_base_stat_add(&cgrp->bstat, &delta);
        cgroup_base_stat_add(&rstatc->last_bstat, &delta);

        /* propagate global delta to parent */
        if (parent) {
                delta = cgrp->bstat;
                cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
                cgroup_base_stat_add(&parent->bstat, &delta);
                cgroup_base_stat_add(&cgrp->last_bstat, &delta);
        }
}
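
/*
 * Worked example of the delta bookkeeping above (illustrative numbers):
 * suppose the per-cpu sum_exec_runtime has grown from 100 to 150 since the
 * previous flush, so rstatc->last_bstat holds 100 and the snapshot "cur"
 * reads 150.  The first step adds the delta of 50 to cgrp->bstat and
 * advances rstatc->last_bstat to 150.  The second step compares cgrp->bstat
 * against cgrp->last_bstat and forwards only the new 50 to the parent, so
 * ancestors never double count amounts propagated by earlier flushes.
 */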

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
{
        struct cgroup_rstat_cpu *rstatc;

        rstatc = get_cpu_ptr(cgrp->rstat_cpu);
        u64_stats_update_begin(&rstatc->bsync);
        return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
                                                 struct cgroup_rstat_cpu *rstatc)
{
        u64_stats_update_end(&rstatc->bsync);
        cgroup_rstat_updated(cgrp, smp_processor_id());
        put_cpu_ptr(rstatc);
}

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
        struct cgroup_rstat_cpu *rstatc;

        rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
        rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
        cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec)
{
        struct cgroup_rstat_cpu *rstatc;

        rstatc = cgroup_base_stat_cputime_account_begin(cgrp);

        switch (index) {
        case CPUTIME_USER:
        case CPUTIME_NICE:
                rstatc->bstat.cputime.utime += delta_exec;
                break;
        case CPUTIME_SYSTEM:
        case CPUTIME_IRQ:
        case CPUTIME_SOFTIRQ:
                rstatc->bstat.cputime.stime += delta_exec;
                break;
        default:
                break;
        }

        cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
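
/*
 * Note on the accounting entry points above: the scheduler-side wrappers in
 * include/linux/cgroup.h (cgroup_account_cputime() and
 * cgroup_account_cputime_field()) resolve the charged task to its cgroup and
 * call the two functions above whenever CPU time is charged, which is what
 * keeps the per-cpu bstat counters current between flushes.
 */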

/*
 * compute the cputime for the root cgroup by getting the per cpu data
 * at a global level, then categorizing the fields in a manner consistent
 * with how it is done by __cgroup_account_cputime_field for each bit of
 * cpu time attributed to a cgroup.
 */
static void root_cgroup_cputime(struct task_cputime *cputime)
{
        int i;

        cputime->stime = 0;
        cputime->utime = 0;
        cputime->sum_exec_runtime = 0;
        for_each_possible_cpu(i) {
                struct kernel_cpustat kcpustat;
                u64 *cpustat = kcpustat.cpustat;
                u64 user = 0;
                u64 sys = 0;

                kcpustat_cpu_fetch(&kcpustat, i);

                user += cpustat[CPUTIME_USER];
                user += cpustat[CPUTIME_NICE];
                cputime->utime += user;

                sys += cpustat[CPUTIME_SYSTEM];
                sys += cpustat[CPUTIME_IRQ];
                sys += cpustat[CPUTIME_SOFTIRQ];
                cputime->stime += sys;

                cputime->sum_exec_runtime += user;
                cputime->sum_exec_runtime += sys;
                cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
        }
}

void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;
        u64 usage, utime, stime;
        struct task_cputime cputime;

        if (cgroup_parent(cgrp)) {
                cgroup_rstat_flush_hold(cgrp);
                usage = cgrp->bstat.cputime.sum_exec_runtime;
                cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
                               &utime, &stime);
                cgroup_rstat_flush_release();
        } else {
                root_cgroup_cputime(&cputime);
                usage = cputime.sum_exec_runtime;
                utime = cputime.utime;
                stime = cputime.stime;
        }

        do_div(usage, NSEC_PER_USEC);
        do_div(utime, NSEC_PER_USEC);
        do_div(stime, NSEC_PER_USEC);

        seq_printf(seq, "usage_usec %llu\n"
                   "user_usec %llu\n"
                   "system_usec %llu\n",
                   usage, utime, stime);
}
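
/*
 * The helper above produces the base portion of the cpu.stat interface
 * file; a read therefore yields output of the form (values are
 * illustrative):
 *
 *	usage_usec 153000
 *	user_usec 101000
 *	system_usec 52000
 */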