blk-mq-tag.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even if
 * the first allocation attempt fails, the other shared-tag users will have
 * already reserved budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                struct request_queue *q = hctx->queue;
                struct blk_mq_tag_set *set = q->tag_set;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
                    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        atomic_inc(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
                    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        atomic_inc(&hctx->tags->active_queues);
        }

        return true;
}

/*
 * Wake up all waiters that may be sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        struct request_queue *q = hctx->queue;
        struct blk_mq_tag_set *set = q->tag_set;

        if (blk_mq_is_sbitmap_shared(hctx->flags)) {
                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
                atomic_dec(&set->active_queues_shared_sbitmap);
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
                atomic_dec(&tags->active_queues);
        }

        blk_mq_tag_wakeup_all(tags, false);
}
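
/*
 * Attempt a single tag allocation from @bt without blocking. Unless an I/O
 * scheduler is attached or a reserved tag was requested, honour the fair
 * sharing limit enforced by hctx_may_queue(). A shallow allocation depth is
 * used when data->shallow_depth is set.
 */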
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
            !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;

        if (data->shallow_depth)
                return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}
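
/**
 * blk_mq_get_tag - allocate a tag for a new request
 * @data:	Allocation context (queue, hctx, flags, shallow depth).
 *
 * Try to allocate a tag from the bitmap selected by @data->flags. If none is
 * available and BLK_MQ_REQ_NOWAIT is not set, kick the hardware queue and
 * sleep on the sbitmap waitqueue until a tag is freed, re-mapping the hctx
 * after each wakeup in case the submitting task migrated.
 *
 * Return: an allocated tag number, or BLK_MQ_NO_TAG on failure.
 */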
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
                bt = tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != BLK_MQ_NO_TAG)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_NO_TAG;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue, kick any
                 * pending IO submits before going to sleep waiting for
                 * some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                              data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = tags->breserved_tags;
                else
                        bt = tags->bitmap_tags;

                /*
                 * If destination hw queue is changed, fake wake up on
                 * previous queue for compensating the wake up miss, so
                 * other allocations on previous queue won't be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        /*
         * Give up this allocation if the hctx is inactive. The caller will
         * retry on an active hctx.
         */
        if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
                blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
                return BLK_MQ_NO_TAG;
        }
        return tag + tag_offset;
}
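
/*
 * Release @tag back to @tags, clearing the corresponding bit in either the
 * reserved or the normal bitmap, taking the reserved-tag offset into account.
 */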
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                BUG_ON(tag >= tags->nr_reserved_tags);
                sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
        }
}

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        busy_iter_fn *fn;
        void *data;
        bool reserved;
};
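
/*
 * Look up the request currently assigned to tag @bitnr under tags->lock and
 * take a reference on it. Returns NULL if the slot is empty, the tag no
 * longer matches, or the request is already on its way to being freed. The
 * caller must drop the reference with blk_mq_put_rq_ref().
 */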
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                                               unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}
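
/*
 * Callback for sbitmap_for_each_set() in bt_for_each(): map the bit back to a
 * tag number, look up the owning request and, if it belongs to the hardware
 * queue being iterated, hand it to the caller's busy_iter_fn.
 */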
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct blk_mq_tags *tags = hctx->tags;
        bool reserved = iter_data->reserved;
        struct request *rq;
        bool ret = true;

        if (!reserved)
                bitnr += tags->nr_reserved_tags;
        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == hctx->queue && rq->mq_hctx == hctx)
                ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
        blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
                        busy_iter_fn *fn, void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)
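
/*
 * Callback for sbitmap_for_each_set() in bt_tags_for_each(): fetch the
 * request for the bit either from tags->static_rqs[] or via a
 * reference-counted lookup of tags->rqs[], filter on "started" when
 * BT_TAG_ITER_STARTED is set, and pass it to the caller's busy_tag_iter_fn.
 */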
static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
        struct request *rq;
        bool ret = true;
        bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

        if (!reserved)
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (iter_static_rqs)
                rq = tags->static_rqs[bitnr];
        else
                rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
            blk_mq_request_started(rq))
                ret = iter_data->fn(rq, iter_data->data, reserved);
        if (!iter_static_rqs)
                blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .flags = flags,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
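
/*
 * Iterate the reserved bitmap (if any) followed by the normal bitmap of
 * @tags, applying @fn with @priv under the given BT_TAG_ITER_* @flags.
 */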
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
                                  busy_tag_iter_fn *fn, void *priv,
                                  unsigned int flags)
{
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
        bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                         void *priv)
{
        __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                             busy_tag_iter_fn *fn, void *priv)
{
        int i;

        for (i = 0; i < tagset->nr_hw_queues; i++) {
                if (tagset->tags && tagset->tags[i])
                        __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
                                              BT_TAG_ITER_STARTED);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
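
/*
 * busy_tag_iter_fn that bumps the counter passed via @data for every request
 * whose completion has already been marked (blk_mq_request_completed()).
 */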
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
                                              void *data, bool reserved)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until all completed requests
 *	have finished running their completion function
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues have been shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                                void *priv)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;

                /*
                 * If no software queues are currently mapped to this
                 * hardware queue, there's nothing to check
                 */
                if (!blk_mq_hw_queue_mapped(hctx))
                        continue;

                if (tags->nr_reserved_tags)
                        bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
        }
        blk_queue_exit(q);
}
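
/*
 * Thin wrapper around sbitmap_queue_init_node(): use the default per-word
 * shift (-1) and allocate with GFP_KERNEL on the given NUMA node.
 */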
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}
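
/*
 * Allocate the per-tags normal and reserved sbitmaps and point the
 * bitmap_tags/breserved_tags pointers at them. Used when the tag map is not
 * shared across hardware queues.
 */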
static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                   int node, int alloc_policy)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
                return -ENOMEM;
        if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
                     round_robin, node))
                goto free_bitmap_tags;

        tags->bitmap_tags = &tags->__bitmap_tags;
        tags->breserved_tags = &tags->__breserved_tags;

        return 0;
free_bitmap_tags:
        sbitmap_queue_free(&tags->__bitmap_tags);
        return -ENOMEM;
}
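
/*
 * Set up the tag-set wide shared sbitmaps (BLK_MQ_F_TAG_HCTX_SHARED) and make
 * every hardware queue's tags point at them instead of per-hctx bitmaps.
 */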
int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
{
        unsigned int depth = set->queue_depth - set->reserved_tags;
        int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
        int i, node = set->numa_node;

        if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
                return -ENOMEM;
        if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
                     round_robin, node))
                goto free_bitmap_tags;

        for (i = 0; i < set->nr_hw_queues; i++) {
                struct blk_mq_tags *tags = set->tags[i];

                tags->bitmap_tags = &set->__bitmap_tags;
                tags->breserved_tags = &set->__breserved_tags;
        }

        return 0;
free_bitmap_tags:
        sbitmap_queue_free(&set->__bitmap_tags);
        return -ENOMEM;
}

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
        sbitmap_queue_free(&set->__bitmap_tags);
        sbitmap_queue_free(&set->__breserved_tags);
}
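
/*
 * Allocate and initialize a blk_mq_tags structure for one hardware queue.
 * When the tag set uses a shared sbitmap (BLK_MQ_F_TAG_HCTX_SHARED) the
 * bitmaps are attached later by blk_mq_init_shared_sbitmap(); otherwise
 * per-hctx bitmaps are allocated here.
 */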
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, unsigned int flags)
{
        int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        spin_lock_init(&tags->lock);

        if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
                return tags;

        if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
                kfree(tags);
                return NULL;
        }
        return tags;
}
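
/*
 * Free a blk_mq_tags structure. Per-hctx bitmaps are freed here; shared
 * sbitmaps belong to the tag set and are torn down by
 * blk_mq_exit_shared_sbitmap().
 */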
void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
        if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
                sbitmap_queue_free(tags->bitmap_tags);
                sbitmap_queue_free(tags->breserved_tags);
        }
        kfree(tags);
}
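
/**
 * blk_mq_tag_update_depth - update the queue depth of a tag map
 * @hctx:	Hardware queue whose tag map is being resized.
 * @tagsptr:	Tag map to resize; may be replaced with a newly allocated one.
 * @tdepth:	New number of tags, including reserved tags.
 * @can_grow:	True if growing beyond the original depth is allowed
 *		(only scheduler tag maps can grow).
 *
 * Shrinking only resizes the normal bitmap; growing allocates a new tag map
 * and request pool and frees the old ones.
 *
 * Return: 0 on success, -EINVAL or -ENOMEM on failure.
 */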
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                /* Only sched tags can grow, so clear HCTX_SHARED flag */
                unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
                struct blk_mq_tags *new;
                bool ret;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit, set it high enough that
                 * no valid use cases should require more.
                 */
                if (tdepth > 16 * BLKDEV_MAX_RQ)
                        return -EINVAL;

                new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
                                          tags->nr_reserved_tags, flags);
                if (!new)
                        return -ENOMEM;
                ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
                if (ret) {
                        blk_mq_free_rq_map(new, flags);
                        return -ENOMEM;
                }

                blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
                blk_mq_free_rq_map(*tagsptr, flags);
                *tagsptr = new;
        } else {
                /*
                 * Don't need (or can't) update reserved tags here, they
                 * remain static and should never need resizing.
                 */
                sbitmap_queue_resize(tags->bitmap_tags,
                                     tdepth - tags->nr_reserved_tags);
        }

        return 0;
}
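
/*
 * Resize the tag-set wide shared bitmap to a new total depth of @size tags;
 * reserved tags are excluded from the underlying sbitmap.
 */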
void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
        sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
               (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);