blk-merge.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#ifndef __GENKSYMS__
#include <linux/blk-cgroup.h>
#endif

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
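
/*
 * Example: with a virt_boundary_mask of 4K - 1, two bios whose adjacent bvecs
 * are not physically contiguous can still be merged as long as the earlier
 * bvec ends on a 4K boundary and the later one starts at a 4K-aligned offset
 * within its page; otherwise __bvec_gap_to_prev() reports a gap and the merge
 * is refused.
 */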
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
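
/*
 * Worked example: with a discard granularity of 8 sectors, discard_alignment
 * 0, a bio starting at sector 3 and max_discard_sectors 1024, split_sectors
 * starts at 1024, tmp = (3 + 1024 - 0) % 8 = 3, so split_sectors becomes 1021
 * and the remainder bio starts at sector 1024, i.e. on a granularity boundary.
 */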
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}
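
/*
 * Worked example: with a 4K physical block size (pbs = 8 sectors), a bio
 * starting at sector 11 (start_offset = 3) and blk_max_size_offset() returning
 * 2560 sectors, max_sectors = (2560 + 3) & ~7 = 2560, so 2557 is returned and
 * the split ends at sector 11 + 2557 = 2568, a physical block boundary.
 */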
static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * overflow may be triggered in case of zero page physical address
	 * on 32bit arch, use queue's max segment size when that happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}
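
/*
 * E.g. with a 64K segment boundary (mask = 0xffff) and a bvec whose physical
 * start lands 0x1000 bytes into that 64K window, at most 0xf000 bytes
 * (mask - offset + 1) fit in one segment, further capped by the queue's max
 * segment size. With mask = ~0UL and offset 0 the addition wraps to 0, which
 * is why min_not_zero() is used here.
 */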
/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}
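
/*
 * For instance, a single 256K multi-page bvec on a queue with a 64K max
 * segment size is accounted as four 64K segments here: *nsegs grows by 4,
 * *sectors by 512, and false is returned as long as @max_segs and
 * @max_sectors are not exceeded, so the caller does not need to split.
 */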
/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
 * the responsibility of the caller to ensure that
 * @bio->bi_disk->queue->bio_split is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_disk->queue;
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		/*
		 * All drivers must accept single-segment bios that are <=
		 * PAGE_SIZE. This is a quick and dirty check that relies on
		 * the fact that bi_io_vec[0] is always valid if a bio has data.
		 * The check might lead to occasional false negatives when bios
		 * are cloned, but compared to the performance impact of cloned
		 * bios themselves the loop below doesn't matter anyway.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;

		blk_throtl_charge_bio_split(*bio);
	}
}
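
/*
 * E.g. a 1 MiB bio on a queue limited to 128K per request comes out of the
 * first call as a 128K bio in *bio plus an 896K remainder that has been
 * chained and handed to submit_bio_noacct(); the remainder is split again
 * when it re-enters the submission path (typically blk_mq_submit_bio()).
 */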
/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
 * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
 * after processing of the split bio has finished.
 */
void blk_queue_split(struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
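
/*
 * For example, a 128K bvec with bv_offset 0 on a queue whose max segment size
 * is 64K is emitted as two scatterlist entries of 64K each; the page/offset
 * adjustment above only exists so that each entry's offset stays below
 * PAGE_SIZE for drivers that cannot handle larger offsets.
 */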
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios;
			 * bvecs within one bio have already been merged when
			 * its pages were added.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
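
/*
 * Most drivers go through the blk_rq_map_sg() wrapper, which supplies a local
 * last_sg pointer. A rough sketch of driver usage (sglist being the driver's
 * own, suitably sized table):
 *
 *	sg_init_table(sglist, blk_rq_nr_phys_segments(rq));
 *	nsegs = blk_rq_map_sg(q, rq, sglist);
 *
 * nsegs is then the number of entries to hand to the DMA mapping layer.
 */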
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();

		hd_struct_put(req->part);
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(q, next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}
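
/*
 * Example: a request covering sectors 0..127 and a request covering sectors
 * 128..255 of the same disk, with the same op, direction, write hint and
 * ioprio, pass blk_try_req_merge() as ELEVATOR_BACK_MERGE; after the bio lists
 * are spliced the surviving request spans sectors 0..255 and the second
 * request is returned to the caller to be freed.
 */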
static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
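
/*
 * E.g. for a request at sectors 100..107 (8 sectors): a bio starting at
 * sector 108 yields ELEVATOR_BACK_MERGE (100 + 8 == 108), while an 8-sector
 * bio starting at sector 92 yields ELEVATOR_FRONT_MERGE (100 - 8 == 92);
 * anything else is ELEVATOR_NO_MERGE.
 */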
static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list. Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside queue lock. Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		if (rq->q == q && same_queue_rq) {
			/*
			 * Only the blk-mq multiple hardware queues case checks
			 * for an rq on the same queue; there should be only
			 * one such rq in a queue.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q)
			continue;

		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
		    BIO_MERGE_OK)
			return true;
	}

	return false;
}
/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);