/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
#ifndef __GENKSYMS__
#include <linux/blk-mq.h>
#endif

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state css;
	spinlock_t lock;
	refcount_t online_pin;

	struct radix_tree_root blkg_tree;
	struct blkcg_gq __rcu *blkg_hint;
	struct hlist_head blkg_list;

	struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];

	struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
#endif
};

struct blkg_iostat {
	u64 bytes[BLKG_IOSTAT_NR];
	u64 ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync sync;
	struct blkg_iostat cur;
	struct blkg_iostat last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods. A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq *blkg;
	int plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods. A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg *blkcg;
	int plid;
};
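
/*
 * Illustrative sketch only (hypothetical policy, not part of this header):
 * as described above, a policy gets private per-blkg state by embedding
 * blkg_policy_data at the start of its own structure and converting back
 * with container_of().  The "example_*" names are placeholders.
 *
 *	struct example_blkg_data {
 *		struct blkg_policy_data pd;	// first, per the convention above
 *		u64 bytes_dispatched;
 *	};
 *
 *	static inline struct example_blkg_data *
 *	pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_blkg_data, pd) : NULL;
 *	}
 */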

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue *q;
	struct list_head q_node;
	struct hlist_node blkcg_node;
	struct blkcg *blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq *parent;

	/* reference count */
	struct percpu_ref refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool online;

	struct blkg_iostat_set __percpu *iostat_cpu;
	struct blkg_iostat_set iostat;

	struct blkg_policy_data *pd[BLKCG_MAX_POLS];

	spinlock_t async_bio_lock;
	struct bio_list async_bios;
	struct work_struct async_bio_work;

	atomic_t use_delay;
	atomic64_t delay_nsec;
	atomic64_t delay_start;
	u64 last_delay;
	int last_use;

	struct rcu_head rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int plid;
	/* cgroup files for the policy */
	struct cftype *dfl_cftypes;
	struct cftype *legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
	blkcg_pol_init_cpd_fn *cpd_init_fn;
	blkcg_pol_free_cpd_fn *cpd_free_fn;
	blkcg_pol_bind_cpd_fn *cpd_bind_fn;

	blkcg_pol_alloc_pd_fn *pd_alloc_fn;
	blkcg_pol_init_pd_fn *pd_init_fn;
	blkcg_pol_online_pd_fn *pd_online_fn;
	blkcg_pol_offline_pd_fn *pd_offline_fn;
	blkcg_pol_free_pd_fn *pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn *pd_stat_fn;
};
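
/*
 * Illustrative sketch only (hypothetical policy, not part of this header):
 * a minimal blkcg_policy wired up through blkcg_policy_register(), with
 * placeholder "example_*" callbacks and cftype arrays.  ->plid is filled in
 * by the core at registration time.
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.dfl_cftypes	= example_dfl_files,
 *		.legacy_cftypes	= example_legacy_files,
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	// typically from the policy's module init/exit:
 *	// blkcg_policy_register(&blkcg_policy_example);
 *	// blkcg_policy_unregister(&blkcg_policy_example);
 */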

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	char *body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use. The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio. This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it. However, the latter potentially gets
 * it from task_css(). This can race against task migration and the cgroup
 * dying. It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}
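
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * bio_blkcg() returns %NULL for a bio that has not been associated with a
 * blkg yet, so callers that cannot guarantee prior association must check.
 *
 *	struct blkcg *blkcg = bio_blkcg(bio);
 *
 *	if (!blkcg)
 *		return;		// not associated, nothing to account
 */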

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg. The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio. Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
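
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * a typical lookup runs under the RCU read lock, followed by blkg_tryget()
 * if the blkg needs to outlive the RCU critical section.  use_the_blkg()
 * is a placeholder.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		use_the_blkg(blkg);
 *		blkg_put(blkg);
 *	}
 */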

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online. This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}
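
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * pin/unpin are used in matched pairs around the lifetime of an object,
 * such as a cgwb, that must not outlive the online state of its blkcg.
 *
 *	blkcg_pin_online(blkcg);
 *	... object relying on @blkcg staying online is in use ...
 *	blkcg_unpin_online(blkcg);
 */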

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg. We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
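
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * blkg_get() takes an additional reference on a blkg the caller already
 * holds, e.g. to stash it in a longer-lived structure (@ctx below is a
 * placeholder); every reference is dropped with blkg_put().
 *
 *	blkg_get(bio->bi_blkg);
 *	ctx->blkg = bio->bi_blkg;
 *	...
 *	blkg_put(ctx->blkg);
 *	ctx->blkg = NULL;
 */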

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
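
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * walking a blkg subtree under the RCU read lock.  p_blkg and
 * handle_one_blkg() are placeholders.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		handle_one_blkg(blkg);
 *	rcu_read_unlock();
 */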

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay. If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble. We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
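
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * blkcg_set_delay() and blkcg_clear_delay() are used as a pair and must not
 * be mixed with the blkcg_[un]use_delay() counting interface on the same
 * blkg.
 *
 *	blkcg_set_delay(blkg, NSEC_PER_MSEC);
 *	... issue throttled IO ...
 *	blkcg_clear_delay(blkg);
 */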

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match. The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }
static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */