mq-deadline-main.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
  4. * for the blk-mq scheduling framework
  5. *
  6. * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/fs.h>
  10. #include <linux/blkdev.h>
  11. #include <linux/blk-mq.h>
  12. #include <linux/elevator.h>
  13. #include <linux/bio.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/init.h>
  17. #include <linux/compiler.h>
  18. #include <linux/rbtree.h>
  19. #include <linux/sbitmap.h>
  20. #include "blk.h"
  21. #include "blk-mq.h"
  22. #include "blk-mq-debugfs.h"
  23. #include "blk-mq-tag.h"
  24. #include "blk-mq-sched.h"
  25. #include "mq-deadline-cgroup.h"
  26. /*
  27. * See Documentation/block/deadline-iosched.rst
  28. */
  29. static const int read_expire = HZ / 2; /* max time before a read is submitted. */
  30. static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
  31. /*
  32. * Time after which to dispatch lower priority requests even if higher
  33. * priority requests are pending.
  34. */
  35. static const int aging_expire = 10 * HZ;
  36. static const int writes_starved = 2; /* max times reads can starve a write */
  37. static const int fifo_batch = 16; /* # of sequential requests treated as one
  38. by the above parameters. For throughput. */
  39. enum dd_data_dir {
  40. DD_READ = READ,
  41. DD_WRITE = WRITE,
  42. };
  43. enum { DD_DIR_COUNT = 2 };
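/* Scheduler priority levels: lower values are dispatched first (RT before BE before IDLE). */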
  44. enum dd_prio {
  45. DD_RT_PRIO = 0,
  46. DD_BE_PRIO = 1,
  47. DD_IDLE_PRIO = 2,
  48. DD_PRIO_MAX = 2,
  49. };
  50. enum { DD_PRIO_COUNT = 3 };
  51. /* I/O statistics for all I/O priorities (enum dd_prio). */
  52. struct io_stats {
  53. struct io_stats_per_prio stats[DD_PRIO_COUNT];
  54. };
  55. /*
  56. * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
  57. * present on both sort_list[] and fifo_list[].
  58. */
  59. struct dd_per_prio {
  60. struct list_head dispatch;
  61. struct rb_root sort_list[DD_DIR_COUNT];
  62. struct list_head fifo_list[DD_DIR_COUNT];
  63. /* Next request in sector-sorted order. Read, write or both are NULL. */
  64. struct request *next_rq[DD_DIR_COUNT];
  65. };
  66. struct deadline_data {
  67. /*
  68. * run time data
  69. */
  70. /* Request queue that owns this data structure. */
  71. struct request_queue *queue;
  72. struct dd_per_prio per_prio[DD_PRIO_COUNT];
  73. /* Data direction of latest dispatched request. */
  74. enum dd_data_dir last_dir;
  75. unsigned int batching; /* number of sequential requests made */
  76. unsigned int starved; /* times reads have starved writes */
  77. struct io_stats __percpu *stats;
  78. /*
  79. * settings that change how the i/o scheduler behaves
  80. */
  81. int fifo_expire[DD_DIR_COUNT];
  82. int fifo_batch;
  83. int writes_starved;
  84. int front_merges;
  85. u32 async_depth;
  86. int aging_expire;
  87. spinlock_t lock;
  88. spinlock_t zone_lock;
  89. };
  90. /* Count one event of type 'event_type' and with I/O priority 'prio' */
  91. #define dd_count(dd, event_type, prio) do { \
  92. struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \
  93. \
  94. BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
  95. BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
  96. local_inc(&io_stats->stats[(prio)].event_type); \
  97. put_cpu_ptr(io_stats); \
  98. } while (0)
  99. /*
  100. * Returns the total number of dd_count(dd, event_type, prio) calls across all
  101. * CPUs. No locking or barriers since it is fine if the returned sum is slightly
  102. * outdated.
  103. */
  104. #define dd_sum(dd, event_type, prio) ({ \
  105. unsigned int cpu; \
  106. u32 sum = 0; \
  107. \
  108. BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \
  109. BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \
  110. for_each_present_cpu(cpu) \
  111. sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \
  112. stats[(prio)].event_type); \
  113. sum; \
  114. })
  115. /* Maps an I/O priority class to a deadline scheduler priority. */
  116. static const enum dd_prio ioprio_class_to_prio[] = {
  117. [IOPRIO_CLASS_NONE] = DD_BE_PRIO,
  118. [IOPRIO_CLASS_RT] = DD_RT_PRIO,
  119. [IOPRIO_CLASS_BE] = DD_BE_PRIO,
  120. [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO,
  121. };
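/* Return the sector-sorted rbtree that holds requests with the same data direction as @rq. */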
  122. static inline struct rb_root *
  123. deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
  124. {
  125. return &per_prio->sort_list[rq_data_dir(rq)];
  126. }
  127. /*
  128. * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
  129. * request.
  130. */
  131. static u8 dd_rq_ioclass(struct request *rq)
  132. {
  133. return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
  134. }
  135. /*
  136. * get the request after `rq' in sector-sorted order
  137. */
  138. static inline struct request *
  139. deadline_latter_request(struct request *rq)
  140. {
  141. struct rb_node *node = rb_next(&rq->rb_node);
  142. if (node)
  143. return rb_entry_rq(node);
  144. return NULL;
  145. }
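/* Insert @rq into the sector-sorted rbtree for its data direction. */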
  146. static void
  147. deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
  148. {
  149. struct rb_root *root = deadline_rb_root(per_prio, rq);
  150. elv_rb_add(root, rq);
  151. }
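/* Remove @rq from the rbtree and advance the cached next_rq pointer if it points at @rq. */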
  152. static inline void
  153. deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
  154. {
  155. const enum dd_data_dir data_dir = rq_data_dir(rq);
  156. if (per_prio->next_rq[data_dir] == rq)
  157. per_prio->next_rq[data_dir] = deadline_latter_request(rq);
  158. elv_rb_del(deadline_rb_root(per_prio, rq), rq);
  159. }
  160. /*
  161. * remove rq from rbtree and fifo.
  162. */
  163. static void deadline_remove_request(struct request_queue *q,
  164. struct dd_per_prio *per_prio,
  165. struct request *rq)
  166. {
  167. list_del_init(&rq->queuelist);
  168. /*
  169. * We might not be on the rbtree, if we are doing an insert merge
  170. */
  171. if (!RB_EMPTY_NODE(&rq->rb_node))
  172. deadline_del_rq_rb(per_prio, rq);
  173. elv_rqhash_del(q, rq);
  174. if (q->last_merge == rq)
  175. q->last_merge = NULL;
  176. }
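/*
 * Callback invoked after a bio has been merged into @req. A front merge changes
 * the start sector of @req, so it must be repositioned in the sort rbtree.
 */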
  177. static void dd_request_merged(struct request_queue *q, struct request *req,
  178. enum elv_merge type)
  179. {
  180. struct deadline_data *dd = q->elevator->elevator_data;
  181. const u8 ioprio_class = dd_rq_ioclass(req);
  182. const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  183. struct dd_per_prio *per_prio = &dd->per_prio[prio];
  184. /*
  185. * if the merge was a front merge, we need to reposition request
  186. */
  187. if (type == ELEVATOR_FRONT_MERGE) {
  188. elv_rb_del(deadline_rb_root(per_prio, req), req);
  189. deadline_add_rq_rb(per_prio, req);
  190. }
  191. }
  192. /*
  193. * Callback function that is invoked after @next has been merged into @req.
  194. */
  195. static void dd_merged_requests(struct request_queue *q, struct request *req,
  196. struct request *next)
  197. {
  198. struct deadline_data *dd = q->elevator->elevator_data;
  199. const u8 ioprio_class = dd_rq_ioclass(next);
  200. const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  201. struct dd_blkcg *blkcg = next->elv.priv[0];
  202. dd_count(dd, merged, prio);
  203. ddcg_count(blkcg, merged, ioprio_class);
  204. /*
  205. * if next expires before rq, assign its expire time to rq
  206. * and move into next position (next will be deleted) in fifo
  207. */
  208. if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
  209. if (time_before((unsigned long)next->fifo_time,
  210. (unsigned long)req->fifo_time)) {
  211. list_move(&req->queuelist, &next->queuelist);
  212. req->fifo_time = next->fifo_time;
  213. }
  214. }
  215. /*
  216. * kill knowledge of next, this one is a goner
  217. */
  218. deadline_remove_request(q, &dd->per_prio[prio], next);
  219. }
  220. /*
  221. * move an entry to dispatch queue
  222. */
  223. static void
  224. deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  225. struct request *rq)
  226. {
  227. const enum dd_data_dir data_dir = rq_data_dir(rq);
  228. per_prio->next_rq[data_dir] = deadline_latter_request(rq);
  229. /*
  230. * take it off the sort and fifo list
  231. */
  232. deadline_remove_request(rq->q, per_prio, rq);
  233. }
  234. /* Number of requests queued for a given priority level. */
  235. static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
  236. {
  237. return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
  238. }
  239. /*
  240. * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  241. * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
  242. */
  243. static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
  244. enum dd_data_dir data_dir)
  245. {
  246. struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
  247. /*
  248. * rq is expired!
  249. */
  250. if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
  251. return 1;
  252. return 0;
  253. }
  254. /*
  255. * For the specified data direction, return the next request to
  256. * dispatch using arrival ordered lists.
  257. */
  258. static struct request *
  259. deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  260. enum dd_data_dir data_dir)
  261. {
  262. struct request *rq;
  263. unsigned long flags;
  264. if (list_empty(&per_prio->fifo_list[data_dir]))
  265. return NULL;
  266. rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
  267. if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
  268. return rq;
  269. /*
  270. * Look for a write request that can be dispatched, that is one with
  271. * an unlocked target zone.
  272. */
  273. spin_lock_irqsave(&dd->zone_lock, flags);
  274. list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
  275. if (blk_req_can_dispatch_to_zone(rq))
  276. goto out;
  277. }
  278. rq = NULL;
  279. out:
  280. spin_unlock_irqrestore(&dd->zone_lock, flags);
  281. return rq;
  282. }
  283. /*
  284. * For the specified data direction, return the next request to
  285. * dispatch using sector position sorted lists.
  286. */
  287. static struct request *
  288. deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
  289. enum dd_data_dir data_dir)
  290. {
  291. struct request *rq;
  292. unsigned long flags;
  293. rq = per_prio->next_rq[data_dir];
  294. if (!rq)
  295. return NULL;
  296. if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
  297. return rq;
  298. /*
  299. * Look for a write request that can be dispatched, that is one with
  300. * an unlocked target zone.
  301. */
  302. spin_lock_irqsave(&dd->zone_lock, flags);
  303. while (rq) {
  304. if (blk_req_can_dispatch_to_zone(rq))
  305. break;
  306. rq = deadline_latter_request(rq);
  307. }
  308. spin_unlock_irqrestore(&dd->zone_lock, flags);
  309. return rq;
  310. }
  311. /*
  312. * __dd_dispatch_request() selects the best request according to
  313. * read/write expire, fifo_batch, etc and with a start time <= @latest_start_ns.
  314. */
  315. static struct request *__dd_dispatch_request(struct deadline_data *dd,
  316. struct dd_per_prio *per_prio,
  317. u64 latest_start_ns)
  318. {
  319. struct request *rq, *next_rq;
  320. enum dd_data_dir data_dir;
  321. struct dd_blkcg *blkcg;
  322. enum dd_prio prio;
  323. u8 ioprio_class;
  324. lockdep_assert_held(&dd->lock);
  325. if (!list_empty(&per_prio->dispatch)) {
  326. rq = list_first_entry(&per_prio->dispatch, struct request,
  327. queuelist);
  328. if (rq->start_time_ns > latest_start_ns)
  329. return NULL;
  330. list_del_init(&rq->queuelist);
  331. goto done;
  332. }
  333. /*
  334. * batches are currently reads XOR writes
  335. */
  336. rq = deadline_next_request(dd, per_prio, dd->last_dir);
  337. if (rq && dd->batching < dd->fifo_batch)
  338. /* we have a next request and are still entitled to batch */
  339. goto dispatch_request;
  340. /*
  341. * at this point we are not running a batch. select the appropriate
  342. * data direction (read / write)
  343. */
  344. if (!list_empty(&per_prio->fifo_list[DD_READ])) {
  345. BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
  346. if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
  347. (dd->starved++ >= dd->writes_starved))
  348. goto dispatch_writes;
  349. data_dir = DD_READ;
  350. goto dispatch_find_request;
  351. }
  352. /*
  353. * there are either no reads or writes have been starved
  354. */
  355. if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
  356. dispatch_writes:
  357. BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
  358. dd->starved = 0;
  359. data_dir = DD_WRITE;
  360. goto dispatch_find_request;
  361. }
  362. return NULL;
  363. dispatch_find_request:
  364. /*
  365. * we are not running a batch, find best request for selected data_dir
  366. */
  367. next_rq = deadline_next_request(dd, per_prio, data_dir);
  368. if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
  369. /*
  370. * A deadline has expired, the last request was in the other
  371. * direction, or we have run out of higher-sectored requests.
  372. * Start again from the request with the earliest expiry time.
  373. */
  374. rq = deadline_fifo_request(dd, per_prio, data_dir);
  375. } else {
  376. /*
  377. * The last req was the same dir and we have a next request in
  378. * sort order. No expired requests so continue on from here.
  379. */
  380. rq = next_rq;
  381. }
  382. /*
  383. * For a zoned block device, if we only have writes queued and none of
  384. * them can be dispatched, rq will be NULL.
  385. */
  386. if (!rq)
  387. return NULL;
  388. dd->last_dir = data_dir;
  389. dd->batching = 0;
  390. dispatch_request:
  391. if (rq->start_time_ns > latest_start_ns)
  392. return NULL;
  393. /*
  394. * rq is the request selected for dispatch.
  395. */
  396. dd->batching++;
  397. deadline_move_request(dd, per_prio, rq);
  398. done:
  399. ioprio_class = dd_rq_ioclass(rq);
  400. prio = ioprio_class_to_prio[ioprio_class];
  401. dd_count(dd, dispatched, prio);
  402. blkcg = rq->elv.priv[0];
  403. ddcg_count(blkcg, dispatched, ioprio_class);
  404. /*
  405. * If the request needs its target zone locked, do it.
  406. */
  407. blk_req_zone_write_lock(rq);
  408. rq->rq_flags |= RQF_STARTED;
  409. return rq;
  410. }
  411. /*
  412. * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
  413. *
  414. * One confusing aspect here is that we get called for a specific
  415. * hardware queue, but we may return a request that is for a
  416. * different hardware queue. This is because mq-deadline has shared
  417. * state for all hardware queues, in terms of sorting, FIFOs, etc.
  418. */
  419. static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
  420. {
  421. struct deadline_data *dd = hctx->queue->elevator->elevator_data;
  422. const u64 now_ns = ktime_get_ns();
  423. struct request *rq = NULL;
  424. enum dd_prio prio;
  425. spin_lock(&dd->lock);
  426. /*
  427. * Start with dispatching requests whose deadline expired more than
  428. * aging_expire jiffies ago.
  429. */
  430. for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
  431. rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
  432. jiffies_to_nsecs(dd->aging_expire));
  433. if (rq)
  434. goto unlock;
  435. }
  436. /*
  437. * Next, dispatch requests in priority order. Ignore lower priority
  438. * requests if any higher priority requests are pending.
  439. */
  440. for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  441. rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
  442. if (rq || dd_queued(dd, prio))
  443. break;
  444. }
  445. unlock:
  446. spin_unlock(&dd->lock);
  447. return rq;
  448. }
  449. /*
  450. * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  451. * function is used by __blk_mq_get_tag().
  452. */
  453. static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
  454. {
  455. struct deadline_data *dd = data->q->elevator->elevator_data;
  456. /* Do not throttle synchronous reads. */
  457. if (op_is_sync(op) && !op_is_write(op))
  458. return;
  459. /*
  460. * Throttle asynchronous requests and writes such that these requests
  461. * do not block the allocation of synchronous requests.
  462. */
  463. data->shallow_depth = dd->async_depth;
  464. }
  465. /* Called by blk_mq_update_nr_requests(). */
  466. static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
  467. {
  468. struct request_queue *q = hctx->queue;
  469. struct deadline_data *dd = q->elevator->elevator_data;
  470. struct blk_mq_tags *tags = hctx->sched_tags;
  471. dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
  472. sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
  473. }
  474. /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
  475. static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
  476. {
  477. dd_depth_updated(hctx);
  478. return 0;
  479. }
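/* Tear down and free the elevator private data allocated by dd_init_sched(). */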
  480. static void dd_exit_sched(struct elevator_queue *e)
  481. {
  482. struct deadline_data *dd = e->elevator_data;
  483. enum dd_prio prio;
  484. dd_deactivate_policy(dd->queue);
  485. for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  486. struct dd_per_prio *per_prio = &dd->per_prio[prio];
  487. WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
  488. WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
  489. }
  490. free_percpu(dd->stats);
  491. kfree(dd);
  492. }
  493. /*
  494. * Initialize elevator private data (deadline_data) and associate with blkcg.
  495. */
  496. static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
  497. {
  498. struct deadline_data *dd;
  499. struct elevator_queue *eq;
  500. enum dd_prio prio;
  501. int ret = -ENOMEM;
  502. /*
  503. * Initialization would be very tricky if the queue is not frozen,
  504. * hence the warning statement below.
  505. */
  506. WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter));
  507. eq = elevator_alloc(q, e);
  508. if (!eq)
  509. return ret;
  510. dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
  511. if (!dd)
  512. goto put_eq;
  513. eq->elevator_data = dd;
  514. dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
  515. GFP_KERNEL | __GFP_ZERO);
  516. if (!dd->stats)
  517. goto free_dd;
  518. dd->queue = q;
  519. for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
  520. struct dd_per_prio *per_prio = &dd->per_prio[prio];
  521. INIT_LIST_HEAD(&per_prio->dispatch);
  522. INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
  523. INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
  524. per_prio->sort_list[DD_READ] = RB_ROOT;
  525. per_prio->sort_list[DD_WRITE] = RB_ROOT;
  526. }
  527. dd->fifo_expire[DD_READ] = read_expire;
  528. dd->fifo_expire[DD_WRITE] = write_expire;
  529. dd->writes_starved = writes_starved;
  530. dd->front_merges = 1;
  531. dd->last_dir = DD_WRITE;
  532. dd->fifo_batch = fifo_batch;
  533. dd->aging_expire = aging_expire;
  534. spin_lock_init(&dd->lock);
  535. spin_lock_init(&dd->zone_lock);
  536. ret = dd_activate_policy(q);
  537. if (ret)
  538. goto free_stats;
  539. ret = 0;
  540. q->elevator = eq;
  541. return 0;
  542. free_stats:
  543. free_percpu(dd->stats);
  544. free_dd:
  545. kfree(dd);
  546. put_eq:
  547. kobject_put(&eq->kobj);
  548. return ret;
  549. }
  550. /*
  551. * Try to merge @bio into an existing request. If @bio has been merged into
  552. * an existing request, store the pointer to that request into *@rq.
  553. */
  554. static int dd_request_merge(struct request_queue *q, struct request **rq,
  555. struct bio *bio)
  556. {
  557. struct deadline_data *dd = q->elevator->elevator_data;
  558. const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
  559. const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  560. struct dd_per_prio *per_prio = &dd->per_prio[prio];
  561. sector_t sector = bio_end_sector(bio);
  562. struct request *__rq;
  563. if (!dd->front_merges)
  564. return ELEVATOR_NO_MERGE;
  565. __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
  566. if (__rq) {
  567. BUG_ON(sector != blk_rq_pos(__rq));
  568. if (elv_bio_merge_ok(__rq, bio)) {
  569. *rq = __rq;
  570. if (blk_discard_mergable(__rq))
  571. return ELEVATOR_DISCARD_MERGE;
  572. return ELEVATOR_FRONT_MERGE;
  573. }
  574. }
  575. return ELEVATOR_NO_MERGE;
  576. }
  577. /*
  578. * Attempt to merge a bio into an existing request. This function is called
  579. * before @bio is associated with a request.
  580. */
  581. static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
  582. unsigned int nr_segs)
  583. {
  584. struct deadline_data *dd = q->elevator->elevator_data;
  585. struct request *free = NULL;
  586. bool ret;
  587. spin_lock(&dd->lock);
  588. ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  589. spin_unlock(&dd->lock);
  590. if (free)
  591. blk_mq_free_request(free);
  592. return ret;
  593. }
  594. /*
  595. * add rq to rbtree and fifo
  596. */
  597. static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  598. bool at_head)
  599. {
  600. struct request_queue *q = hctx->queue;
  601. struct deadline_data *dd = q->elevator->elevator_data;
  602. const enum dd_data_dir data_dir = rq_data_dir(rq);
  603. u16 ioprio = req_get_ioprio(rq);
  604. u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
  605. struct dd_per_prio *per_prio;
  606. enum dd_prio prio;
  607. struct dd_blkcg *blkcg;
  608. lockdep_assert_held(&dd->lock);
  609. /*
  610. * This may be a requeue of a write request that has locked its
  611. * target zone. If it is the case, this releases the zone lock.
  612. */
  613. blk_req_zone_write_unlock(rq);
  614. /*
  615. * If a block cgroup has been associated with the submitter and if an
  616. * I/O priority has been set in the associated block cgroup, use the
  617. * lowest of the cgroup priority and the request priority for the
  618. * request. If no priority has been set in the request, use the cgroup
  619. * priority.
  620. */
  621. prio = ioprio_class_to_prio[ioprio_class];
  622. dd_count(dd, inserted, prio);
  623. blkcg = dd_blkcg_from_bio(rq->bio);
  624. ddcg_count(blkcg, inserted, ioprio_class);
  625. rq->elv.priv[0] = blkcg;
  626. if (blk_mq_sched_try_insert_merge(q, rq))
  627. return;
  628. blk_mq_sched_request_inserted(rq);
  629. per_prio = &dd->per_prio[prio];
  630. if (at_head) {
  631. list_add(&rq->queuelist, &per_prio->dispatch);
  632. } else {
  633. deadline_add_rq_rb(per_prio, rq);
  634. if (rq_mergeable(rq)) {
  635. elv_rqhash_add(q, rq);
  636. if (!q->last_merge)
  637. q->last_merge = rq;
  638. }
  639. /*
  640. * set expire time and add to fifo list
  641. */
  642. rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
  643. list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
  644. }
  645. }
  646. /*
  647. * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
  648. */
  649. static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
  650. struct list_head *list, bool at_head)
  651. {
  652. struct request_queue *q = hctx->queue;
  653. struct deadline_data *dd = q->elevator->elevator_data;
  654. spin_lock(&dd->lock);
  655. while (!list_empty(list)) {
  656. struct request *rq;
  657. rq = list_first_entry(list, struct request, queuelist);
  658. list_del_init(&rq->queuelist);
  659. dd_insert_request(hctx, rq, at_head);
  660. }
  661. spin_unlock(&dd->lock);
  662. }
  663. /* Callback from inside blk_mq_rq_ctx_init(). */
  664. static void dd_prepare_request(struct request *rq)
  665. {
  666. rq->elv.priv[0] = NULL;
  667. }
  668. /*
  669. * Callback from inside blk_mq_free_request().
  670. *
  671. * For zoned block devices, write unlock the target zone of
  672. * completed write requests. Do this while holding the zone lock
  673. * spinlock so that the zone is never unlocked while deadline_fifo_request()
  674. * or deadline_next_request() are executing. This function is called for
  675. * all requests, whether or not these requests complete successfully.
  676. *
  677. * For a zoned block device, __dd_dispatch_request() may have stopped
  678. * dispatching requests if all the queued requests are write requests directed
  679. * at zones that are already locked due to on-going write requests. To ensure
  680. * write request dispatch progress in this case, mark the queue as needing a
  681. * restart to ensure that the queue is run again after completion of the
  682. * request and zones being unlocked.
  683. */
  684. static void dd_finish_request(struct request *rq)
  685. {
  686. struct request_queue *q = rq->q;
  687. struct deadline_data *dd = q->elevator->elevator_data;
  688. struct dd_blkcg *blkcg = rq->elv.priv[0];
  689. const u8 ioprio_class = dd_rq_ioclass(rq);
  690. const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
  691. struct dd_per_prio *per_prio = &dd->per_prio[prio];
  692. dd_count(dd, completed, prio);
  693. ddcg_count(blkcg, completed, ioprio_class);
  694. if (blk_queue_is_zoned(q)) {
  695. unsigned long flags;
  696. spin_lock_irqsave(&dd->zone_lock, flags);
  697. blk_req_zone_write_unlock(rq);
  698. if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
  699. blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
  700. spin_unlock_irqrestore(&dd->zone_lock, flags);
  701. }
  702. }
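/* Report whether any requests are pending at the given priority level. */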
  703. static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
  704. {
  705. return !list_empty_careful(&per_prio->dispatch) ||
  706. !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
  707. !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
  708. }
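/* Elevator has_work callback: true if any priority level has pending requests. */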
  709. static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
  710. {
  711. struct deadline_data *dd = hctx->queue->elevator->elevator_data;
  712. enum dd_prio prio;
  713. for (prio = 0; prio <= DD_PRIO_MAX; prio++)
  714. if (dd_has_work_for_prio(&dd->per_prio[prio]))
  715. return true;
  716. return false;
  717. }
  718. /*
  719. * sysfs parts below
  720. */
  721. #define SHOW_INT(__FUNC, __VAR) \
  722. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  723. { \
  724. struct deadline_data *dd = e->elevator_data; \
  725. \
  726. return sysfs_emit(page, "%d\n", __VAR); \
  727. }
  728. #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
  729. SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
  730. SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
  731. SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
  732. SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
  733. SHOW_INT(deadline_front_merges_show, dd->front_merges);
  734. SHOW_INT(deadline_async_depth_show, dd->async_depth);
  735. SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
  736. #undef SHOW_INT
  737. #undef SHOW_JIFFIES
  738. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  739. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  740. { \
  741. struct deadline_data *dd = e->elevator_data; \
  742. int __data, __ret; \
  743. \
  744. __ret = kstrtoint(page, 0, &__data); \
  745. if (__ret < 0) \
  746. return __ret; \
  747. if (__data < (MIN)) \
  748. __data = (MIN); \
  749. else if (__data > (MAX)) \
  750. __data = (MAX); \
  751. *(__PTR) = __CONV(__data); \
  752. return count; \
  753. }
  754. #define STORE_INT(__FUNC, __PTR, MIN, MAX) \
  755. STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
  756. #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \
  757. STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
  758. STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
  759. STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
  760. STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
  761. STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
  762. STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
  763. STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
  764. STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
  765. #undef STORE_FUNCTION
  766. #undef STORE_INT
  767. #undef STORE_JIFFIES
  768. #define DD_ATTR(name) \
  769. __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
  770. static struct elv_fs_entry deadline_attrs[] = {
  771. DD_ATTR(read_expire),
  772. DD_ATTR(write_expire),
  773. DD_ATTR(writes_starved),
  774. DD_ATTR(front_merges),
  775. DD_ATTR(async_depth),
  776. DD_ATTR(fifo_batch),
  777. DD_ATTR(aging_expire),
  778. __ATTR_NULL
  779. };
  780. #ifdef CONFIG_BLK_DEBUG_FS
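/* debugfs hooks for inspecting the per-priority FIFO lists and next_rq pointers. */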
  781. #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \
  782. static void *deadline_##name##_fifo_start(struct seq_file *m, \
  783. loff_t *pos) \
  784. __acquires(&dd->lock) \
  785. { \
  786. struct request_queue *q = m->private; \
  787. struct deadline_data *dd = q->elevator->elevator_data; \
  788. struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
  789. \
  790. spin_lock(&dd->lock); \
  791. return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \
  792. } \
  793. \
  794. static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \
  795. loff_t *pos) \
  796. { \
  797. struct request_queue *q = m->private; \
  798. struct deadline_data *dd = q->elevator->elevator_data; \
  799. struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
  800. \
  801. return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \
  802. } \
  803. \
  804. static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \
  805. __releases(&dd->lock) \
  806. { \
  807. struct request_queue *q = m->private; \
  808. struct deadline_data *dd = q->elevator->elevator_data; \
  809. \
  810. spin_unlock(&dd->lock); \
  811. } \
  812. \
  813. static const struct seq_operations deadline_##name##_fifo_seq_ops = { \
  814. .start = deadline_##name##_fifo_start, \
  815. .next = deadline_##name##_fifo_next, \
  816. .stop = deadline_##name##_fifo_stop, \
  817. .show = blk_mq_debugfs_rq_show, \
  818. }; \
  819. \
  820. static int deadline_##name##_next_rq_show(void *data, \
  821. struct seq_file *m) \
  822. { \
  823. struct request_queue *q = data; \
  824. struct deadline_data *dd = q->elevator->elevator_data; \
  825. struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
  826. struct request *rq = per_prio->next_rq[data_dir]; \
  827. \
  828. if (rq) \
  829. __blk_mq_debugfs_rq_show(m, rq); \
  830. return 0; \
  831. }
  832. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
  833. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
  834. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
  835. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
  836. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
  837. DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
  838. #undef DEADLINE_DEBUGFS_DDIR_ATTRS
  839. static int deadline_batching_show(void *data, struct seq_file *m)
  840. {
  841. struct request_queue *q = data;
  842. struct deadline_data *dd = q->elevator->elevator_data;
  843. seq_printf(m, "%u\n", dd->batching);
  844. return 0;
  845. }
  846. static int deadline_starved_show(void *data, struct seq_file *m)
  847. {
  848. struct request_queue *q = data;
  849. struct deadline_data *dd = q->elevator->elevator_data;
  850. seq_printf(m, "%u\n", dd->starved);
  851. return 0;
  852. }
  853. static int dd_async_depth_show(void *data, struct seq_file *m)
  854. {
  855. struct request_queue *q = data;
  856. struct deadline_data *dd = q->elevator->elevator_data;
  857. seq_printf(m, "%u\n", dd->async_depth);
  858. return 0;
  859. }
  860. static int dd_queued_show(void *data, struct seq_file *m)
  861. {
  862. struct request_queue *q = data;
  863. struct deadline_data *dd = q->elevator->elevator_data;
  864. seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
  865. dd_queued(dd, DD_BE_PRIO),
  866. dd_queued(dd, DD_IDLE_PRIO));
  867. return 0;
  868. }
  869. /* Number of requests owned by the block driver for a given priority. */
  870. static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
  871. {
  872. return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
  873. - dd_sum(dd, completed, prio);
  874. }
  875. static int dd_owned_by_driver_show(void *data, struct seq_file *m)
  876. {
  877. struct request_queue *q = data;
  878. struct deadline_data *dd = q->elevator->elevator_data;
  879. seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
  880. dd_owned_by_driver(dd, DD_BE_PRIO),
  881. dd_owned_by_driver(dd, DD_IDLE_PRIO));
  882. return 0;
  883. }
  884. #define DEADLINE_DISPATCH_ATTR(prio) \
  885. static void *deadline_dispatch##prio##_start(struct seq_file *m, \
  886. loff_t *pos) \
  887. __acquires(&dd->lock) \
  888. { \
  889. struct request_queue *q = m->private; \
  890. struct deadline_data *dd = q->elevator->elevator_data; \
  891. struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
  892. \
  893. spin_lock(&dd->lock); \
  894. return seq_list_start(&per_prio->dispatch, *pos); \
  895. } \
  896. \
  897. static void *deadline_dispatch##prio##_next(struct seq_file *m, \
  898. void *v, loff_t *pos) \
  899. { \
  900. struct request_queue *q = m->private; \
  901. struct deadline_data *dd = q->elevator->elevator_data; \
  902. struct dd_per_prio *per_prio = &dd->per_prio[prio]; \
  903. \
  904. return seq_list_next(v, &per_prio->dispatch, pos); \
  905. } \
  906. \
  907. static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
  908. __releases(&dd->lock) \
  909. { \
  910. struct request_queue *q = m->private; \
  911. struct deadline_data *dd = q->elevator->elevator_data; \
  912. \
  913. spin_unlock(&dd->lock); \
  914. } \
  915. \
  916. static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
  917. .start = deadline_dispatch##prio##_start, \
  918. .next = deadline_dispatch##prio##_next, \
  919. .stop = deadline_dispatch##prio##_stop, \
  920. .show = blk_mq_debugfs_rq_show, \
  921. }
  922. DEADLINE_DISPATCH_ATTR(0);
  923. DEADLINE_DISPATCH_ATTR(1);
  924. DEADLINE_DISPATCH_ATTR(2);
  925. #undef DEADLINE_DISPATCH_ATTR
  926. #define DEADLINE_QUEUE_DDIR_ATTRS(name) \
  927. {#name "_fifo_list", 0400, \
  928. .seq_ops = &deadline_##name##_fifo_seq_ops}
  929. #define DEADLINE_NEXT_RQ_ATTR(name) \
  930. {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
  931. static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
  932. DEADLINE_QUEUE_DDIR_ATTRS(read0),
  933. DEADLINE_QUEUE_DDIR_ATTRS(write0),
  934. DEADLINE_QUEUE_DDIR_ATTRS(read1),
  935. DEADLINE_QUEUE_DDIR_ATTRS(write1),
  936. DEADLINE_QUEUE_DDIR_ATTRS(read2),
  937. DEADLINE_QUEUE_DDIR_ATTRS(write2),
  938. DEADLINE_NEXT_RQ_ATTR(read0),
  939. DEADLINE_NEXT_RQ_ATTR(write0),
  940. DEADLINE_NEXT_RQ_ATTR(read1),
  941. DEADLINE_NEXT_RQ_ATTR(write1),
  942. DEADLINE_NEXT_RQ_ATTR(read2),
  943. DEADLINE_NEXT_RQ_ATTR(write2),
  944. {"batching", 0400, deadline_batching_show},
  945. {"starved", 0400, deadline_starved_show},
  946. {"async_depth", 0400, dd_async_depth_show},
  947. {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
  948. {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
  949. {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
  950. {"owned_by_driver", 0400, dd_owned_by_driver_show},
  951. {"queued", 0400, dd_queued_show},
  952. {},
  953. };
  954. #undef DEADLINE_QUEUE_DDIR_ATTRS
  955. #endif
  956. static struct elevator_type mq_deadline = {
  957. .ops = {
  958. .depth_updated = dd_depth_updated,
  959. .limit_depth = dd_limit_depth,
  960. .insert_requests = dd_insert_requests,
  961. .dispatch_request = dd_dispatch_request,
  962. .prepare_request = dd_prepare_request,
  963. .finish_request = dd_finish_request,
  964. .next_request = elv_rb_latter_request,
  965. .former_request = elv_rb_former_request,
  966. .bio_merge = dd_bio_merge,
  967. .request_merge = dd_request_merge,
  968. .requests_merged = dd_merged_requests,
  969. .request_merged = dd_request_merged,
  970. .has_work = dd_has_work,
  971. .init_sched = dd_init_sched,
  972. .exit_sched = dd_exit_sched,
  973. .init_hctx = dd_init_hctx,
  974. },
  975. #ifdef CONFIG_BLK_DEBUG_FS
  976. .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
  977. #endif
  978. .elevator_attrs = deadline_attrs,
  979. .elevator_name = "mq-deadline",
  980. .elevator_alias = "deadline",
  981. .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
  982. .elevator_owner = THIS_MODULE,
  983. };
  984. MODULE_ALIAS("mq-deadline-iosched");
  985. static int __init deadline_init(void)
  986. {
  987. int ret;
  988. ret = elv_register(&mq_deadline);
  989. if (ret)
  990. goto out;
  991. ret = dd_blkcg_init();
  992. if (ret)
  993. goto unreg;
  994. out:
  995. return ret;
  996. unreg:
  997. elv_unregister(&mq_deadline);
  998. goto out;
  999. }
  1000. static void __exit deadline_exit(void)
  1001. {
  1002. dd_blkcg_exit();
  1003. elv_unregister(&mq_deadline);
  1004. }
  1005. module_init(deadline_init);
  1006. module_exit(deadline_exit);
  1007. MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
  1008. MODULE_LICENSE("GPL");
  1009. MODULE_DESCRIPTION("MQ deadline IO scheduler");