blk-mq-sched.c

// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

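/*
 * Associate the submitting task's io_context (and its per-queue icq) with
 * @rq, so that ioc-based schedulers such as BFQ can attribute the request
 * to the issuing process.
 */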
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the
	 * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
	 * Its pair is the barrier in blk_mq_dispatch_rq_list(). Without it,
	 * the dispatch code might not see SCHED_RESTART while a request newly
	 * added to hctx->dispatch is missed by the check in
	 * blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

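/*
 * list_sort() comparator: order requests by their hardware queue so that
 * requests sharing an hctx become adjacent and can be dispatched in one
 * batch.
 */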
static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

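/*
 * Cut the leading run of requests that share the first entry's hctx off of
 * @rq_list and dispatch it as a single batch. Returns the result of
 * blk_mq_dispatch_rq_list() for that batch.
 */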
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch them in batches of requests from the same
		 * hctx.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

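/*
 * Keep pulling requests from the scheduler while progress is being made,
 * but bail out after roughly a second (or when rescheduling is needed) and
 * let an async queue run continue the work, so one context doesn't hog the
 * CPU.
 */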
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

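/* Return the software queue that follows @ctx on @hctx, wrapping around. */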
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

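/*
 * Dispatch leftovers on hctx->dispatch first, then feed the hardware queue
 * from the I/O scheduler or the software queues. The -EAGAIN/0/1 result of
 * the dispatch helpers is passed back to the caller.
 */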
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

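/*
 * Try to merge @bio into an already pending request, either through the
 * elevator's ->bio_merge() hook or, for the "none" case, against requests
 * sitting in the per-CPU software queue.
 */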
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * the device may be in a state where it cannot handle FS requests,
	 * so BLK_STS_RESOURCE keeps being returned and the FS request ends up
	 * on hctx->dispatch, yet a passthrough request may be exactly what is
	 * needed to fix that state. If the passthrough request were added to
	 * the scheduler queue, it would never get a chance to be dispatched,
	 * since requests in hctx->dispatch are prioritized.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

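/*
 * Insert a single request: flush and passthrough requests bypass the
 * scheduler and go straight to hctx->dispatch, everything else is handed
 * to the elevator's ->insert_requests() or to the software queue. The hw
 * queue is optionally run afterwards.
 */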
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly, a normal IO request is inserted into the scheduler
		 * queue or a sw queue, while a flush request is added to the
		 * dispatch queue (hctx->dispatch) directly. Since there is at
		 * most one in-flight flush request per hw queue, it does not
		 * matter whether the flush request goes to the tail or the
		 * front of the dispatch queue.
		 *
		 * Secondly, with NCQ a flush request is a non-NCQ command and
		 * queueing it fails while any normal IO request (NCQ command)
		 * is in flight. Adding the flush rq to the front of
		 * hctx->dispatch makes it easier to add extra latency to the
		 * flush rq because of S_SCHED_RESTART, compared with adding it
		 * to the tail of the dispatch queue; that increases the chance
		 * of flush merging, so fewer flush requests are issued to the
		 * controller. About 10% of the time is observed to be saved in
		 * blktests block/004 on a disk attached via AHCI/NCQ when
		 * adding the flush rq to the front of hctx->dispatch.
		 *
		 * So simply queue the flush rq to the front of hctx->dispatch,
		 * which benefits flush-intensive workloads on NCQ hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent the
	 * queue from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't
		 * busy in case of the 'none' scheduler; this may save
		 * us one extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

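/*
 * Scheduler ("sched") tags are a separate, per-hctx tag map sized to
 * q->nr_requests; a request only gets a driver tag once it is actually
 * dispatched to the hardware. The helpers below allocate and free those
 * sched tag maps.
 */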
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags, flags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	/* Clear HCTX_SHARED so tags are init'ed */
	unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		/* Clear HCTX_SHARED so tags are freed */
		unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, flags);
			hctx->sched_tags = NULL;
		}
	}
}

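/*
 * Switch @q to the I/O scheduler described by @e: size and allocate the
 * per-hctx sched tags, run the elevator's init_sched()/init_hctx() hooks
 * and register the debugfs attributes. A NULL @e selects "none".
 */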
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * Called in either blk_cleanup_queue() or elevator_switch(); the tagset
 * is required for freeing requests.
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}