blk-wbt.c

// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}

enum {
        /*
         * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
         * from here depending on device stats
         */
        RWB_DEF_DEPTH = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats, if we don't meet this minimum
         */
        RWB_MIN_WRITE_SAMPLES = 3,

        /*
         * If we have this number of consecutive windows with not enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP = 5,
};
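
/*
 * Illustrative sketch only (hypothetical helper, not used by the code in
 * this file): the effective monitoring window for a given positive scaling
 * step, evaluated the same way rwb_arm_timer() does below. With the 100ms
 * default window this yields 100ms at step 0, ~72ms at step 1 and 50ms at
 * step 3, i.e. the "100 / sqrt(scaling step + 1)" shrink described in the
 * header comment above.
 */
static inline u64 wbt_example_window_nsec(int scale_step)
{
        if (scale_step <= 0)
                return RWB_WINDOW_NSEC;

        /* Fixed-point form of win / sqrt(step + 1), see rwb_arm_timer(). */
        return div_u64((u64)RWB_WINDOW_NSEC << 4,
                       int_sqrt((scale_step + 1) << 8));
}
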
static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}
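
/*
 * Illustrative sketch (hypothetical helper, not used below): the same
 * "recent dirty wait" test against explicit timestamps, to make the jiffies
 * arithmetic concrete. Assuming HZ == 1000, if balance_dirty_pages() last
 * stamped dirty_sleep at jiffies == 5000, this returns true until the clock
 * reaches 6000, i.e. for roughly one second after the throttled sleep.
 */
static inline bool wbt_example_recent_wait(unsigned long dirty_sleep,
                                           unsigned long now)
{
        return time_before(now, dirty_sleep + HZ);
}
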
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * wbt got disabled with IO in flight. Wake up any potential
         * waiters, we don't have to do more than that.
         */
        if (unlikely(!rwb_enabled(rwb))) {
                rwb_wake_all(rwb);
                return;
        }

        /*
         * For discards, our limit is always the background. For writes, if
         * the device does write back caching, drop further down before we
         * wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}
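
/*
 * Worked example for the wakeup logic above (illustrative, values assumed):
 * with wb_normal == 8 and wb_background == 4 on a device without a volatile
 * write cache, a completing write uses limit == wb_normal, and sleepers are
 * woken once inflight has dropped below 8 and the headroom (limit - inflight)
 * is at least wb_background / 2 == 2, or once the queue drains completely.
 * With a write-back cache and no recent dirty-page wait the limit drops to 0,
 * so sleepers are only woken when nothing is in flight. The hypothetical
 * helper below mirrors just the wake decision.
 */
static inline bool wbt_example_should_wake(int inflight, int limit,
                                           unsigned int wb_background)
{
        if (inflight && inflight >= limit)
                return false;

        return !inflight || (limit - inflight) >= (int)(wb_background / 2);
}
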
static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (rwb->sync_cookie == rq) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(rq))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }

        wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}
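
/*
 * Example (illustrative): a window containing 1 read and 2 write samples is
 * discarded as inconclusive, while 1 read and 3 writes (RWB_MIN_WRITE_SAMPLES)
 * is enough to attribute the observed read latency to write pressure.
 */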
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * If the stats aren't valid, we don't have a meaningful
         * read/write mix to work with.
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}
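
/*
 * Illustrative sketch (hypothetical helper, not used below): the core
 * decision latency_exceeded() makes once it has a valid sample mix. With the
 * non-rotational default of min_lat_nsec == 2ms, a window whose fastest read
 * completion took 3ms classifies as LAT_EXCEEDED, while 1ms is LAT_OK.
 */
static inline int wbt_example_classify_reads(u64 min_read_lat_nsec,
                                             u64 min_lat_nsec)
{
        return min_read_lat_nsec > min_lat_nsec ? LAT_EXCEEDED : LAT_OK;
}
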
static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                       rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}
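
/*
 * Worked example for calc_wb_limits() (illustrative values): with
 * max_depth == 16 the normal limit becomes (16 + 1) / 2 == 8 and the
 * background limit (16 + 3) / 4 == 4; with max_depth == 2 the limits
 * collapse to 2 and 1; a min_lat_nsec of 0 disables throttling entirely
 * by zeroing both limits.
 */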
static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
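
/*
 * The shifted form above is plain fixed-point arithmetic: since
 * int_sqrt((step + 1) << 8) == 16 * sqrt(step + 1) (rounded down),
 *
 *   (win_nsec << 4) / int_sqrt((step + 1) << 8)
 *     == (16 * win_nsec) / (16 * sqrt(step + 1))
 *     == win_nsec / sqrt(step + 1)
 *
 * which is exactly the "100 / sqrt(scaling step + 1)" window shrink from
 * the header comment, evaluated with integer math.
 */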
static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
                        inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, and don't have a valid
                 * read/write sample, but we do have writes going on.
                 * Allow the step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth, and we
                 * currently don't have a valid read/write sample. For that
                 * case, slowly return to center state (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm timer, if we have IO in flight
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}
static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return 0;
        return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;
        RQWB(rqos)->min_lat_nsec = val;
        RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        wbt_update_limits(RQWB(rqos));
}

static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
               time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
        unsigned int limit;

        /*
         * If we got disabled, just return UINT_MAX. This ensures that
         * we'll properly inc a new IO, and dec+wakeup at the end.
         */
        if (!rwb_enabled(rwb))
                return UINT_MAX;

        if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we completed unrelated IO,
                 * limit us to half the depth for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}
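
/*
 * Example of the resulting limits (illustrative, assuming max_depth == 16,
 * wb_normal == 8 and wb_background == 4): a discard gets the background
 * limit of 4; a REQ_SYNC write, a write issued by kswapd, or any write
 * shortly after a dirty-page wait gets the full depth of 16; a
 * REQ_BACKGROUND write, or one issued within ~100ms of unrelated IO,
 * gets 4; everything else gets the normal limit of 8.
 */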
struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       unsigned long rw)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .rw = rw,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                fallthrough;
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}
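
/*
 * Classification examples for wbt_should_throttle() (illustrative): an
 * O_DIRECT write, which carries REQ_SYNC | REQ_IDLE, is left alone; any
 * other REQ_OP_WRITE (buffered writeback in particular) and any discard
 * are candidates for throttling; reads and all other operations fall
 * through to "no".
 */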
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(rwb, bio)) {
                if (current_is_kswapd())
                        flags |= WBT_KSWAPD;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }

        return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
        __wbt_done(rqos, flags);
}

/*
 * Throttle an incoming bio, if it is one we should account for. May sleep
 * if we have exceeded the writeback limits.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track sync issue, in case it takes a long time to complete.
         * Allows us to react more quickly if that happens. Note that this
         * is just a hint. The request can go away when it completes, so
         * it's important we never dereference it. We only use the address
         * to compare with, which is why we store the sync_issue time
         * locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}
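
/*
 * Lifecycle of the sync cookie (summary of the code above and below): the
 * first in-flight read is remembered in sync_cookie/sync_issue at issue
 * time; latency_exceeded() compares "now - sync_issue" against the current
 * window; the cookie is cleared again when that request is requeued or
 * completes.
 */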
static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (rqos)
                RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        /* Throttling already enabled? */
        if (rqos) {
                if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                return;
        }

        /* Queue not registered? Maybe shutting down... */
        if (!blk_queue_registered(q))
                return;

        if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
                wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
        const int op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
        RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
        wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct request_queue *q = rqos->q;

        blk_stat_remove_callback(q, rwb->cb);
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        struct rq_wb *rwb;

        if (!rqos)
                return;
        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->enable_state = WBT_STATE_OFF_DEFAULT;
        }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%lu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .queue_depth_changed = wbt_queue_depth_changed,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
        struct rq_wb *rwb;
        int i;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->rqos.id = RQ_QOS_WBT;
        rwb->rqos.ops = &wbt_rqos_ops;
        rwb->rqos.q = q;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = 1;
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
        wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        rq_qos_add(q, &rwb->rqos);
        blk_stat_add_callback(q, rwb->cb);

        rwb->min_lat_nsec = wbt_default_latency_nsec(q);

        wbt_queue_depth_changed(&rwb->rqos);
        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

        return 0;
}