blk-iolatency.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window. This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to. Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth. This means that we only care about our latency targets at the
 * peer level. Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *                  /          \
 *         fast (target=5ms)   slow (target=10ms)
 *          /     \                  /       \
 *        a        b          normal(15ms)  unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms. If it does then we will throttle the "slow"
 * group. In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies. We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling. As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight. This starts at (u64)-1 down
 * to 1. If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling. This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP. If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more, then we induce a latency at userspace return. We accumulate the
 * total amount of time we need to be punished by doing
 *
 *   total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
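 *
 * As a rough worked example (illustrative numbers, derived from the two
 * formulas above): with min_lat_nsec = 5ms, three root-issued io's completing
 * in 1ms, 2ms and 4ms accumulate total_time += 4ms + 3ms + 1ms = 8ms, and the
 * offending task is then delayed at userspace return by
 * min(8ms, NSEC_PER_SEC) = 8ms.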
 *
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}
struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        bool ssd;
        struct child_latency_info child_lat;
};
#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC

/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average. The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg. The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately. Note, windows only elapse with IO activity. Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80) - 80 samples
        2014, // exp(1/60) - 60 samples
};
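
/*
 * A quick sanity example of the bucketing above (derived from the constants,
 * not additional behaviour): BLKIOLATENCY_EXP_BUCKET_SIZE is 1s / 4 = 250ms,
 * so the default 100ms window maps to exp_idx 0 (factor 2045, ~600 windows of
 * history), while a full 1s window maps to exp_idx 4 (factor 2014, ~60
 * windows). The index is computed in iolat_update_total_lat_avg() below.
 */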
static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);

        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);

                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}
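
/*
 * In other words (worked example, not extra behaviour): on an ssd a window
 * with 200 total io's is "ok" as long as fewer than 20 of them (10%) missed
 * min_lat_nsec, while on rotational storage we simply compare the window's
 * mean latency against min_lat_nsec.
 */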
static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;

        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root. If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
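
/*
 * Worked example (illustrative): with nr_requests == 128, a scale-up step is
 * 128 >> 4 = 8 while a scale-down step is 128 >> 2 = 32, so throttling kicks
 * in quickly but is released in smaller increments; for tiny queues both
 * shifts bottom out at the 1UL floor.
 */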
/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours to
                 * dig out of it. Just enough that we don't throttle/unthrottle
                 * with jagged workloads but can still unthrottle once pressure
                 * has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}
/*
 * Change the queue depth of the iolatency_grp. We add 1/16th of the queue
 * depth at a time when scaling up, and halve it when scaling down, so we
 * don't get wild swings and hopefully dial in to fairer distribution of the
 * overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}
/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did 5%
                 * or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                           (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}
static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate 'now' the same way the bio issue time was truncated so the
         * two timestamps are comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroup's latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
             now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}
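
/*
 * Completion side of the throttling above: walk from the bio's blkg up to the
 * root, drop the in-flight count taken at throttle time, record the completion
 * latency at each configured level, and, once the current window has elapsed,
 * swap in a new window start and run iolatency_check_latencies().
 */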
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;
        int inflight = 0;

        blkg = bio->bi_blkg;
        if (!blkg || !bio_flagged(bio, BIO_TRACKED))
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        if (!enabled)
                return;

        now = ktime_to_ns(ktime_get());
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                inflight = atomic_dec_return(&rqw->inflight);
                WARN_ON_ONCE(inflight < 0);
                /*
                 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
                 * submitted, so do not account for it.
                 */
                if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                        iolatency_record_time(iolat, &bio->bi_issue, now,
                                              issue_as_root);
                        window_start = atomic64_read(&iolat->window_start);
                        if (now > window_start &&
                            (now - window_start) >= iolat->cur_win_nsec) {
                                if (atomic64_cmpxchg(&iolat->window_start,
                                                     window_start, now) == window_start)
                                        iolatency_check_latencies(iolat, now);
                        }
                }
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}
static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};
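
/*
 * Periodic (1 second) timer armed from the throttle path: for any group that
 * has been scaled down, nudge its scale_cookie back toward
 * DEFAULT_SCALE_COOKIE if no scale_grp is recorded, and clear a scale_grp that
 * hasn't generated a scale event for 5 seconds so an idle group can't keep its
 * siblings throttled.
 */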
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and carry
                 * on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}
int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_LATENCY;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;

        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

        return 0;
}
/*
 * Return 1 if this enables iolatency for the blkg, -1 if it disables it,
 * and 0 otherwise.
 */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                return 1;
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
                return -1;
        }
        return 0;
}
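
/*
 * For reference (worked example, derived from the clamps above): a 2000us
 * target gives cur_win_nsec = clamp(2ms << 4 = 32ms, 100ms, 1s) = 100ms,
 * while a 100ms target gives clamp(1.6s, 100ms, 1s) = 1s.
 */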
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;
        int enable = 0;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
                if (!blk_get_queue(blkg->q)) {
                        ret = -ENODEV;
                        goto out;
                }

                blkg_get(blkg);
        }

        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }

        ret = 0;
out:
        blkg_conf_finish(&ctx);
        if (ret == 0 && enable) {
                struct iolatency_grp *tmp = blkg_to_lat(blkg);
                struct blk_iolatency *blkiolat = tmp->blkiolat;

                blk_mq_freeze_queue(blkg->q);

                if (enable == 1)
                        atomic_inc(&blkiolat->enabled);
                else if (enable == -1)
                        atomic_dec(&blkiolat->enabled);
                else
                        WARN_ON_ONCE(1);

                blk_mq_unfreeze_queue(blkg->q);

                blkg_put(blkg);
                blk_put_queue(blkg->q);
        }
        return ret ?: nbytes;
}
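
/*
 * From userspace this is driven through the cgroup2 "io.latency" file; for
 * example (device numbers illustrative only), assuming 8:16 is the disk:
 *
 *   echo "8:16 target=2000" > io.latency   # 2ms target, value in usec
 *   echo "8:16 target=max"  > io.latency   # clear the target
 *
 * The MAJ:MIN prefix is consumed by blkg_conf_prep() and the remainder is
 * parsed by the key=value loop above.
 */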
static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}
static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
                                 size_t size)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
                                 (unsigned long long)stat.ps.missed,
                                 (unsigned long long)stat.ps.total);
        return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
                         (unsigned long long)stat.ps.missed,
                         (unsigned long long)stat.ps.total,
                         iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
                                size_t size)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (!blkcg_debug_stats)
                return 0;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, buf, size);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
                                 avg_lat, cur_win);
        return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
                         iolat->rq_depth.max_depth, avg_lat, cur_win);
}
static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
                                                   struct request_queue *q,
                                                   struct blkcg *blkcg)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                          __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blkg->q->nr_requests;
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}
static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        int ret;

        ret = iolatency_set_min_lat_nsec(blkg, 0);
        if (ret == 1)
                atomic_inc(&blkiolat->enabled);
        if (ret == -1)
                atomic_dec(&blkiolat->enabled);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);

        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes = iolatency_files,
        .pd_alloc_fn = iolatency_pd_alloc,
        .pd_init_fn = iolatency_pd_init,
        .pd_offline_fn = iolatency_pd_offline,
        .pd_free_fn = iolatency_pd_free,
        .pd_stat_fn = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);