  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
  4. */
  5. #include <linux/mm.h>
  6. #include <linux/swap.h>
  7. #include <linux/bio.h>
  8. #include <linux/blkdev.h>
  9. #include <linux/uio.h>
  10. #include <linux/iocontext.h>
  11. #include <linux/slab.h>
  12. #include <linux/init.h>
  13. #include <linux/kernel.h>
  14. #include <linux/export.h>
  15. #include <linux/mempool.h>
  16. #include <linux/workqueue.h>
  17. #include <linux/cgroup.h>
  18. #include <linux/blk-cgroup.h>
  19. #include <linux/highmem.h>
  20. #include <linux/sched/sysctl.h>
  21. #include <linux/blk-crypto.h>
  22. #include <trace/events/block.h>
  23. #include "blk.h"
  24. #include "blk-rq-qos.h"
  25. /*
  26. * Test patch to inline a certain number of bi_io_vec's inside the bio
  27. * itself, to shrink a bio data allocation from two mempool calls to one
  28. */
  29. #define BIO_INLINE_VECS 4
  30. /*
  31. * if you change this list, also change bvec_alloc or things will
  32. * break badly! cannot be bigger than what you can fit into an
  33. * unsigned short
  34. */
  35. #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  36. static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  37. BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  38. };
  39. #undef BV
  40. /*
  41. * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  42. * IO code that does not need private memory pools.
  43. */
  44. struct bio_set fs_bio_set;
  45. EXPORT_SYMBOL(fs_bio_set);
  46. /*
  47. * Our slab pool management
  48. */
  49. struct bio_slab {
  50. struct kmem_cache *slab;
  51. unsigned int slab_ref;
  52. unsigned int slab_size;
  53. char name[8];
  54. };
  55. static DEFINE_MUTEX(bio_slab_lock);
  56. static struct bio_slab *bio_slabs;
  57. static unsigned int bio_slab_nr, bio_slab_max;
  58. static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  59. {
  60. unsigned int sz = sizeof(struct bio) + extra_size;
  61. struct kmem_cache *slab = NULL;
  62. struct bio_slab *bslab, *new_bio_slabs;
  63. unsigned int new_bio_slab_max;
  64. unsigned int i, entry = -1;
  65. mutex_lock(&bio_slab_lock);
  66. i = 0;
  67. while (i < bio_slab_nr) {
  68. bslab = &bio_slabs[i];
  69. if (!bslab->slab && entry == -1)
  70. entry = i;
  71. else if (bslab->slab_size == sz) {
  72. slab = bslab->slab;
  73. bslab->slab_ref++;
  74. break;
  75. }
  76. i++;
  77. }
  78. if (slab)
  79. goto out_unlock;
  80. if (bio_slab_nr == bio_slab_max && entry == -1) {
  81. new_bio_slab_max = bio_slab_max << 1;
  82. new_bio_slabs = krealloc(bio_slabs,
  83. new_bio_slab_max * sizeof(struct bio_slab),
  84. GFP_KERNEL);
  85. if (!new_bio_slabs)
  86. goto out_unlock;
  87. bio_slab_max = new_bio_slab_max;
  88. bio_slabs = new_bio_slabs;
  89. }
  90. if (entry == -1)
  91. entry = bio_slab_nr++;
  92. bslab = &bio_slabs[entry];
  93. snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
  94. slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
  95. SLAB_HWCACHE_ALIGN, NULL);
  96. if (!slab)
  97. goto out_unlock;
  98. bslab->slab = slab;
  99. bslab->slab_ref = 1;
  100. bslab->slab_size = sz;
  101. out_unlock:
  102. mutex_unlock(&bio_slab_lock);
  103. return slab;
  104. }
  105. static void bio_put_slab(struct bio_set *bs)
  106. {
  107. struct bio_slab *bslab = NULL;
  108. unsigned int i;
  109. mutex_lock(&bio_slab_lock);
  110. for (i = 0; i < bio_slab_nr; i++) {
  111. if (bs->bio_slab == bio_slabs[i].slab) {
  112. bslab = &bio_slabs[i];
  113. break;
  114. }
  115. }
  116. if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
  117. goto out;
  118. WARN_ON(!bslab->slab_ref);
  119. if (--bslab->slab_ref)
  120. goto out;
  121. kmem_cache_destroy(bslab->slab);
  122. bslab->slab = NULL;
  123. out:
  124. mutex_unlock(&bio_slab_lock);
  125. }
  126. unsigned int bvec_nr_vecs(unsigned short idx)
  127. {
  128. return bvec_slabs[--idx].nr_vecs;
  129. }
  130. void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
  131. {
  132. if (!idx)
  133. return;
  134. idx--;
  135. BIO_BUG_ON(idx >= BVEC_POOL_NR);
  136. if (idx == BVEC_POOL_MAX) {
  137. mempool_free(bv, pool);
  138. } else {
  139. struct biovec_slab *bvs = bvec_slabs + idx;
  140. kmem_cache_free(bvs->slab, bv);
  141. }
  142. }
  143. struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
  144. mempool_t *pool)
  145. {
  146. struct bio_vec *bvl;
  147. /*
  148. * see comment near bvec_array define!
  149. */
  150. switch (nr) {
  151. case 1:
  152. *idx = 0;
  153. break;
  154. case 2 ... 4:
  155. *idx = 1;
  156. break;
  157. case 5 ... 16:
  158. *idx = 2;
  159. break;
  160. case 17 ... 64:
  161. *idx = 3;
  162. break;
  163. case 65 ... 128:
  164. *idx = 4;
  165. break;
  166. case 129 ... BIO_MAX_PAGES:
  167. *idx = 5;
  168. break;
  169. default:
  170. return NULL;
  171. }
  172. /*
  173. * idx now points to the pool we want to allocate from. only the
  174. * 1-vec entry pool is mempool backed.
  175. */
  176. if (*idx == BVEC_POOL_MAX) {
  177. fallback:
  178. bvl = mempool_alloc(pool, gfp_mask);
  179. } else {
  180. struct biovec_slab *bvs = bvec_slabs + *idx;
  181. gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
  182. /*
  183. * Make this allocation restricted and don't dump info on
  184. * allocation failures, since we'll fallback to the mempool
  185. * in case of failure.
  186. */
  187. __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
  188. /*
  189. * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
  190. * is set, retry with the 1-entry mempool
  191. */
  192. bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
  193. if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
  194. *idx = BVEC_POOL_MAX;
  195. goto fallback;
  196. }
  197. }
  198. (*idx)++;
  199. return bvl;
  200. }
  201. void bio_uninit(struct bio *bio)
  202. {
  203. #ifdef CONFIG_BLK_CGROUP
  204. if (bio->bi_blkg) {
  205. blkg_put(bio->bi_blkg);
  206. bio->bi_blkg = NULL;
  207. }
  208. #endif
  209. if (bio_integrity(bio))
  210. bio_integrity_free(bio);
  211. bio_crypt_free_ctx(bio);
  212. }
  213. EXPORT_SYMBOL(bio_uninit);
  214. static void bio_free(struct bio *bio)
  215. {
  216. struct bio_set *bs = bio->bi_pool;
  217. void *p;
  218. bio_uninit(bio);
  219. if (bs) {
  220. bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
  221. /*
  222. * If we have front padding, adjust the bio pointer before freeing
  223. */
  224. p = bio;
  225. p -= bs->front_pad;
  226. mempool_free(p, &bs->bio_pool);
  227. } else {
  228. /* Bio was allocated by bio_kmalloc() */
  229. kfree(bio);
  230. }
  231. }
  232. /*
  233. * Users of this function have their own bio allocation. Subsequently,
  234. * they must remember to pair any call to bio_init() with bio_uninit()
  235. * when IO has completed, or when the bio is released.
  236. */
  237. void bio_init(struct bio *bio, struct bio_vec *table,
  238. unsigned short max_vecs)
  239. {
  240. memset(bio, 0, sizeof(*bio));
  241. atomic_set(&bio->__bi_remaining, 1);
  242. atomic_set(&bio->__bi_cnt, 1);
  243. bio->bi_io_vec = table;
  244. bio->bi_max_vecs = max_vecs;
  245. }
  246. EXPORT_SYMBOL(bio_init);
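/*
 * Illustrative sketch (not part of this file; struct my_request and the
 * helper names are placeholders): a driver that embeds a bio in its own
 * structure is expected to pair bio_init() with bio_uninit(), which drops
 * any cgroup, integrity and crypto state attached to the bio.
 *
 *	struct my_request {
 *		struct bio_vec	inline_vecs[4];
 *		struct bio	bio;
 *	};
 *
 *	static void my_setup(struct my_request *rq)
 *	{
 *		bio_init(&rq->bio, rq->inline_vecs, ARRAY_SIZE(rq->inline_vecs));
 *	}
 *
 *	static void my_teardown(struct my_request *rq)
 *	{
 *		bio_uninit(&rq->bio);
 *	}
 */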
  247. /**
  248. * bio_reset - reinitialize a bio
  249. * @bio: bio to reset
  250. *
  251. * Description:
  252. * After calling bio_reset(), @bio will be in the same state as a freshly
  253. * allocated bio returned by bio_alloc_bioset() - the only fields that are
  254. * preserved are the ones that are initialized by bio_alloc_bioset(). See
  255. * comment in struct bio.
  256. */
  257. void bio_reset(struct bio *bio)
  258. {
  259. unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
  260. bio_uninit(bio);
  261. memset(bio, 0, BIO_RESET_BYTES);
  262. bio->bi_flags = flags;
  263. atomic_set(&bio->__bi_remaining, 1);
  264. }
  265. EXPORT_SYMBOL(bio_reset);
  266. static struct bio *__bio_chain_endio(struct bio *bio)
  267. {
  268. struct bio *parent = bio->bi_private;
  269. if (bio->bi_status && !parent->bi_status)
  270. parent->bi_status = bio->bi_status;
  271. bio_put(bio);
  272. return parent;
  273. }
  274. static void bio_chain_endio(struct bio *bio)
  275. {
  276. bio_endio(__bio_chain_endio(bio));
  277. }
  278. /**
  279. * bio_chain - chain bio completions
  280. * @bio: the target bio
  281. * @parent: the parent bio of @bio
  282. *
  283. * The caller won't have a bi_end_io called when @bio completes - instead,
  284. * @parent's bi_end_io won't be called until both @parent and @bio have
  285. * completed; the chained bio will also be freed when it completes.
  286. *
  287. * The caller must not set bi_private or bi_end_io in @bio.
  288. */
  289. void bio_chain(struct bio *bio, struct bio *parent)
  290. {
  291. BUG_ON(bio->bi_private || bio->bi_end_io);
  292. bio->bi_private = parent;
  293. bio->bi_end_io = bio_chain_endio;
  294. bio_inc_remaining(parent);
  295. }
  296. EXPORT_SYMBOL(bio_chain);
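/*
 * Illustrative sketch (assumed caller, not taken from this file; nr_vecs and
 * the setup steps are placeholders): a driver that issues an extra bio as
 * part of handling @parent can chain it, so the parent's completion only runs
 * once both bios are done.
 *
 *	struct bio *child = bio_alloc(GFP_NOIO, nr_vecs);
 *
 *	... set up the child's device, opcode and pages ...
 *	bio_chain(child, parent);
 *	submit_bio(child);
 *	submit_bio(parent);
 */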
  297. static void bio_alloc_rescue(struct work_struct *work)
  298. {
  299. struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
  300. struct bio *bio;
  301. while (1) {
  302. spin_lock(&bs->rescue_lock);
  303. bio = bio_list_pop(&bs->rescue_list);
  304. spin_unlock(&bs->rescue_lock);
  305. if (!bio)
  306. break;
  307. submit_bio_noacct(bio);
  308. }
  309. }
  310. static void punt_bios_to_rescuer(struct bio_set *bs)
  311. {
  312. struct bio_list punt, nopunt;
  313. struct bio *bio;
  314. if (WARN_ON_ONCE(!bs->rescue_workqueue))
  315. return;
  316. /*
  317. * In order to guarantee forward progress we must punt only bios that
  318. * were allocated from this bio_set; otherwise, if there was a bio on
  319. * there for a stacking driver higher up in the stack, processing it
  320. * could require allocating bios from this bio_set, and doing that from
  321. * our own rescuer would be bad.
  322. *
  323. * Since bio lists are singly linked, pop them all instead of trying to
  324. * remove from the middle of the list:
  325. */
  326. bio_list_init(&punt);
  327. bio_list_init(&nopunt);
  328. while ((bio = bio_list_pop(&current->bio_list[0])))
  329. bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  330. current->bio_list[0] = nopunt;
  331. bio_list_init(&nopunt);
  332. while ((bio = bio_list_pop(&current->bio_list[1])))
  333. bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
  334. current->bio_list[1] = nopunt;
  335. spin_lock(&bs->rescue_lock);
  336. bio_list_merge(&bs->rescue_list, &punt);
  337. spin_unlock(&bs->rescue_lock);
  338. queue_work(bs->rescue_workqueue, &bs->rescue_work);
  339. }
  340. /**
  341. * bio_alloc_bioset - allocate a bio for I/O
  342. * @gfp_mask: the GFP_* mask given to the slab allocator
  343. * @nr_iovecs: number of iovecs to pre-allocate
  344. * @bs: the bio_set to allocate from.
  345. *
  346. * Description:
  347. * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
  348. * backed by the @bs's mempool.
  349. *
  350. * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
  351. * always be able to allocate a bio. This is due to the mempool guarantees.
  352. * To make this work, callers must never allocate more than 1 bio at a time
  353. * from this pool. Callers that need to allocate more than 1 bio must always
  354. * submit the previously allocated bio for IO before attempting to allocate
  355. * a new one. Failure to do so can cause deadlocks under memory pressure.
  356. *
  357. * Note that when running under submit_bio_noacct() (i.e. any block
  358. * driver), bios are not submitted until after you return - see the code in
  359. * submit_bio_noacct() that converts recursion into iteration, to prevent
  360. * stack overflows.
  361. *
  362. * This would normally mean allocating multiple bios under
  363. * submit_bio_noacct() would be susceptible to deadlocks, but we have
  364. * deadlock avoidance code that resubmits any blocked bios from a rescuer
  365. * thread.
  366. *
  367. * However, we do not guarantee forward progress for allocations from other
  368. * mempools. Doing multiple allocations from the same mempool under
  369. * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
  370. * for per bio allocations.
  371. *
  372. * RETURNS:
  373. * Pointer to new bio on success, NULL on failure.
  374. */
  375. struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
  376. struct bio_set *bs)
  377. {
  378. gfp_t saved_gfp = gfp_mask;
  379. unsigned front_pad;
  380. unsigned inline_vecs;
  381. struct bio_vec *bvl = NULL;
  382. struct bio *bio;
  383. void *p;
  384. if (!bs) {
  385. if (nr_iovecs > UIO_MAXIOV)
  386. return NULL;
  387. p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
  388. front_pad = 0;
  389. inline_vecs = nr_iovecs;
  390. } else {
  391. /* should not use nobvec bioset for nr_iovecs > 0 */
  392. if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
  393. nr_iovecs > 0))
  394. return NULL;
  395. /*
  396. * submit_bio_noacct() converts recursion to iteration; this
  397. * means if we're running beneath it, any bios we allocate and
  398. * submit will not be submitted (and thus freed) until after we
  399. * return.
  400. *
  401. * This exposes us to a potential deadlock if we allocate
  402. * multiple bios from the same bio_set() while running
  403. * underneath submit_bio_noacct(). If we were to allocate
  404. * multiple bios (say a stacking block driver that was splitting
  405. * bios), we would deadlock if we exhausted the mempool's
  406. * reserve.
  407. *
  408. * We solve this, and guarantee forward progress, with a rescuer
  409. * workqueue per bio_set. If we go to allocate and there are
  410. * bios on current->bio_list, we first try the allocation
  411. * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
  412. * bios we would be blocking to the rescuer workqueue before
  413. * we retry with the original gfp_flags.
  414. */
  415. if (current->bio_list &&
  416. (!bio_list_empty(&current->bio_list[0]) ||
  417. !bio_list_empty(&current->bio_list[1])) &&
  418. bs->rescue_workqueue)
  419. gfp_mask &= ~__GFP_DIRECT_RECLAIM;
  420. p = mempool_alloc(&bs->bio_pool, gfp_mask);
  421. if (!p && gfp_mask != saved_gfp) {
  422. punt_bios_to_rescuer(bs);
  423. gfp_mask = saved_gfp;
  424. p = mempool_alloc(&bs->bio_pool, gfp_mask);
  425. }
  426. front_pad = bs->front_pad;
  427. inline_vecs = BIO_INLINE_VECS;
  428. }
  429. if (unlikely(!p))
  430. return NULL;
  431. bio = p + front_pad;
  432. bio_init(bio, NULL, 0);
  433. if (nr_iovecs > inline_vecs) {
  434. unsigned long idx = 0;
  435. bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  436. if (!bvl && gfp_mask != saved_gfp) {
  437. punt_bios_to_rescuer(bs);
  438. gfp_mask = saved_gfp;
  439. bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
  440. }
  441. if (unlikely(!bvl))
  442. goto err_free;
  443. bio->bi_flags |= idx << BVEC_POOL_OFFSET;
  444. } else if (nr_iovecs) {
  445. bvl = bio->bi_inline_vecs;
  446. }
  447. bio->bi_pool = bs;
  448. bio->bi_max_vecs = nr_iovecs;
  449. bio->bi_io_vec = bvl;
  450. return bio;
  451. err_free:
  452. mempool_free(p, &bs->bio_pool);
  453. return NULL;
  454. }
  455. EXPORT_SYMBOL(bio_alloc_bioset);
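/*
 * Illustrative sketch (assumed caller; the more_work condition is a
 * placeholder): with __GFP_DIRECT_RECLAIM set the mempool-backed allocation
 * cannot fail, but as documented above each bio must be submitted before the
 * next one is allocated from the same bio_set.
 *
 *	while (more_work) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
 *						   &fs_bio_set);
 *
 *		... fill in the bio ...
 *		submit_bio(bio);
 *	}
 */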
  456. void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
  457. {
  458. unsigned long flags;
  459. struct bio_vec bv;
  460. struct bvec_iter iter;
  461. __bio_for_each_segment(bv, bio, iter, start) {
  462. char *data = bvec_kmap_irq(&bv, &flags);
  463. memset(data, 0, bv.bv_len);
  464. flush_dcache_page(bv.bv_page);
  465. bvec_kunmap_irq(data, &flags);
  466. }
  467. }
  468. EXPORT_SYMBOL(zero_fill_bio_iter);
  469. /**
  470. * bio_truncate - truncate the bio to @new_size
  471. * @bio: the bio to be truncated
  472. * @new_size: new size for truncating the bio
  473. *
  474. * Description:
  475. * Truncate the bio to the new size @new_size. If bio_op(bio) is
  476. * REQ_OP_READ, zero the truncated part. This function should only
  477. * be used for handling corner cases, such as bio eod.
  478. */
  479. void bio_truncate(struct bio *bio, unsigned new_size)
  480. {
  481. struct bio_vec bv;
  482. struct bvec_iter iter;
  483. unsigned int done = 0;
  484. bool truncated = false;
  485. if (new_size >= bio->bi_iter.bi_size)
  486. return;
  487. if (bio_op(bio) != REQ_OP_READ)
  488. goto exit;
  489. bio_for_each_segment(bv, bio, iter) {
  490. if (done + bv.bv_len > new_size) {
  491. unsigned offset;
  492. if (!truncated)
  493. offset = new_size - done;
  494. else
  495. offset = 0;
  496. zero_user(bv.bv_page, bv.bv_offset + offset,
  497. bv.bv_len - offset);
  498. truncated = true;
  499. }
  500. done += bv.bv_len;
  501. }
  502. exit:
  503. /*
  504. * Don't touch bvec table here and make it really immutable, since
  505. * fs bio user has to retrieve all pages via bio_for_each_segment_all
  506. * in its .end_bio() callback.
  507. *
  508. * It is enough to truncate bio by updating .bi_size since we can make
  509. * correct bvec with the updated .bi_size for drivers.
  510. */
  511. bio->bi_iter.bi_size = new_size;
  512. }
  513. /**
  514. * guard_bio_eod - truncate a BIO to fit the block device
  515. * @bio: bio to truncate
  516. *
  517. * This allows us to do IO even on the odd last sectors of a device, even if the
  518. * block size is some multiple of the physical sector size.
  519. *
  520. * We'll just truncate the bio to the size of the device, and clear the end of
  521. * the buffer head manually. Truly out-of-range accesses will turn into actual
  522. * I/O errors, this only handles the "we need to be able to do I/O at the final
  523. * sector" case.
  524. */
  525. void guard_bio_eod(struct bio *bio)
  526. {
  527. sector_t maxsector;
  528. struct hd_struct *part;
  529. rcu_read_lock();
  530. part = __disk_get_part(bio->bi_disk, bio->bi_partno);
  531. if (part)
  532. maxsector = part_nr_sects_read(part);
  533. else
  534. maxsector = get_capacity(bio->bi_disk);
  535. rcu_read_unlock();
  536. if (!maxsector)
  537. return;
  538. /*
  539. * If the *whole* IO is past the end of the device,
  540. * let it through, and the IO layer will turn it into
  541. * an EIO.
  542. */
  543. if (unlikely(bio->bi_iter.bi_sector >= maxsector))
  544. return;
  545. maxsector -= bio->bi_iter.bi_sector;
  546. if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
  547. return;
  548. bio_truncate(bio, maxsector << 9);
  549. }
  550. /**
  551. * bio_put - release a reference to a bio
  552. * @bio: bio to release reference to
  553. *
  554. * Description:
  555. * Put a reference to a &struct bio, either one you have gotten with
  556. * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
  557. **/
  558. void bio_put(struct bio *bio)
  559. {
  560. if (!bio_flagged(bio, BIO_REFFED))
  561. bio_free(bio);
  562. else {
  563. BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
  564. /*
  565. * last put frees it
  566. */
  567. if (atomic_dec_and_test(&bio->__bi_cnt))
  568. bio_free(bio);
  569. }
  570. }
  571. EXPORT_SYMBOL(bio_put);
  572. /**
  573. * __bio_clone_fast - clone a bio that shares the original bio's biovec
  574. * @bio: destination bio
  575. * @bio_src: bio to clone
  576. *
  577. * Clone @bio_src into @bio. The caller will own @bio, but not
  578. * the actual data it points to, since the biovec is shared
  579. * with @bio_src.
  580. *
  581. * Caller must ensure that @bio_src is not freed before @bio.
  582. */
  583. void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
  584. {
  585. BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
  586. /*
  587. * most users will be overriding ->bi_disk with a new target,
  588. * so we don't set nor calculate new physical/hw segment counts here
  589. */
  590. bio->bi_disk = bio_src->bi_disk;
  591. bio->bi_partno = bio_src->bi_partno;
  592. bio_set_flag(bio, BIO_CLONED);
  593. if (bio_flagged(bio_src, BIO_THROTTLED))
  594. bio_set_flag(bio, BIO_THROTTLED);
  595. bio->bi_opf = bio_src->bi_opf;
  596. bio->bi_ioprio = bio_src->bi_ioprio;
  597. bio->bi_write_hint = bio_src->bi_write_hint;
  598. bio->bi_iter = bio_src->bi_iter;
  599. bio->bi_io_vec = bio_src->bi_io_vec;
  600. bio_clone_blkg_association(bio, bio_src);
  601. blkcg_bio_issue_init(bio);
  602. }
  603. EXPORT_SYMBOL(__bio_clone_fast);
  604. /**
  605. * bio_clone_fast - clone a bio that shares the original bio's biovec
  606. * @bio: bio to clone
  607. * @gfp_mask: allocation priority
  608. * @bs: bio_set to allocate from
  609. *
  610. * Like __bio_clone_fast, only also allocates the returned bio
  611. */
  612. struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
  613. {
  614. struct bio *b;
  615. b = bio_alloc_bioset(gfp_mask, 0, bs);
  616. if (!b)
  617. return NULL;
  618. __bio_clone_fast(b, bio);
  619. if (bio_crypt_clone(b, bio, gfp_mask) < 0)
  620. goto err_put;
  621. if (bio_integrity(bio) &&
  622. bio_integrity_clone(b, bio, gfp_mask) < 0)
  623. goto err_put;
  624. return b;
  625. err_put:
  626. bio_put(b);
  627. return NULL;
  628. }
  629. EXPORT_SYMBOL(bio_clone_fast);
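/*
 * Illustrative sketch (assumed stacking-driver context; clone_end_io() and
 * lower_bdev are placeholders): clone the incoming bio, redirect the clone to
 * a lower device and keep the original around for completion handling.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &fs_bio_set);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_private = bio;
 *	clone->bi_end_io = clone_end_io;
 *	submit_bio_noacct(clone);
 */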
  630. const char *bio_devname(struct bio *bio, char *buf)
  631. {
  632. return disk_name(bio->bi_disk, bio->bi_partno, buf);
  633. }
  634. EXPORT_SYMBOL(bio_devname);
  635. static inline bool page_is_mergeable(const struct bio_vec *bv,
  636. struct page *page, unsigned int len, unsigned int off,
  637. bool *same_page)
  638. {
  639. size_t bv_end = bv->bv_offset + bv->bv_len;
  640. phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
  641. phys_addr_t page_addr = page_to_phys(page);
  642. if (vec_end_addr + 1 != page_addr + off)
  643. return false;
  644. if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
  645. return false;
  646. *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
  647. if (*same_page)
  648. return true;
  649. return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
  650. }
  651. /*
  652. * Try to merge a page into a segment, while obeying the hardware segment
  653. * size limit. This is not for normal read/write bios, but for passthrough
  654. * or Zone Append operations that we can't split.
  655. */
  656. static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
  657. struct page *page, unsigned len,
  658. unsigned offset, bool *same_page)
  659. {
  660. struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  661. unsigned long mask = queue_segment_boundary(q);
  662. phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
  663. phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
  664. if ((addr1 | mask) != (addr2 | mask))
  665. return false;
  666. if (bv->bv_len + len > queue_max_segment_size(q))
  667. return false;
  668. return __bio_try_merge_page(bio, page, len, offset, same_page);
  669. }
  670. /**
  671. * bio_add_hw_page - attempt to add a page to a bio with hw constraints
  672. * @q: the target queue
  673. * @bio: destination bio
  674. * @page: page to add
  675. * @len: vec entry length
  676. * @offset: vec entry offset
  677. * @max_sectors: maximum number of sectors that can be added
  678. * @same_page: return if the segment has been merged inside the same page
  679. *
  680. * Add a page to a bio while respecting the hardware max_sectors, max_segment
  681. * and gap limitations.
  682. */
  683. int bio_add_hw_page(struct request_queue *q, struct bio *bio,
  684. struct page *page, unsigned int len, unsigned int offset,
  685. unsigned int max_sectors, bool *same_page)
  686. {
  687. struct bio_vec *bvec;
  688. if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
  689. return 0;
  690. if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
  691. return 0;
  692. if (bio->bi_vcnt > 0) {
  693. if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
  694. return len;
  695. /*
  696. * If the queue doesn't support SG gaps and adding this segment
  697. * would create a gap, disallow it.
  698. */
  699. bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
  700. if (bvec_gap_to_prev(q, bvec, offset))
  701. return 0;
  702. }
  703. if (bio_full(bio, len))
  704. return 0;
  705. if (bio->bi_vcnt >= queue_max_segments(q))
  706. return 0;
  707. bvec = &bio->bi_io_vec[bio->bi_vcnt];
  708. bvec->bv_page = page;
  709. bvec->bv_len = len;
  710. bvec->bv_offset = offset;
  711. bio->bi_vcnt++;
  712. bio->bi_iter.bi_size += len;
  713. return len;
  714. }
  715. /**
  716. * bio_add_pc_page - attempt to add page to passthrough bio
  717. * @q: the target queue
  718. * @bio: destination bio
  719. * @page: page to add
  720. * @len: vec entry length
  721. * @offset: vec entry offset
  722. *
  723. * Attempt to add a page to the bio_vec maplist. This can fail for a
  724. * number of reasons, such as the bio being full or target block device
  725. * limitations. The target block device must allow bios up to PAGE_SIZE,
  726. * so it is always possible to add a single page to an empty bio.
  727. *
  728. * This should only be used by passthrough bios.
  729. */
  730. int bio_add_pc_page(struct request_queue *q, struct bio *bio,
  731. struct page *page, unsigned int len, unsigned int offset)
  732. {
  733. bool same_page = false;
  734. return bio_add_hw_page(q, bio, page, len, offset,
  735. queue_max_hw_sectors(q), &same_page);
  736. }
  737. EXPORT_SYMBOL(bio_add_pc_page);
  738. /**
  739. * __bio_try_merge_page - try appending data to an existing bvec.
  740. * @bio: destination bio
  741. * @page: start page to add
  742. * @len: length of the data to add
  743. * @off: offset of the data relative to @page
  744. * @same_page: return if the segment has been merged inside the same page
  745. *
  746. * Try to add the data at @page + @off to the last bvec of @bio. This is a
  747. * useful optimisation for file systems with a block size smaller than the
  748. * page size.
  749. *
  750. * Warn if (@len, @off) crosses pages when @same_page is true.
  751. *
  752. * Return %true on success or %false on failure.
  753. */
  754. bool __bio_try_merge_page(struct bio *bio, struct page *page,
  755. unsigned int len, unsigned int off, bool *same_page)
  756. {
  757. if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
  758. return false;
  759. if (bio->bi_vcnt > 0) {
  760. struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
  761. if (page_is_mergeable(bv, page, len, off, same_page)) {
  762. if (bio->bi_iter.bi_size > UINT_MAX - len) {
  763. *same_page = false;
  764. return false;
  765. }
  766. bv->bv_len += len;
  767. bio->bi_iter.bi_size += len;
  768. return true;
  769. }
  770. }
  771. return false;
  772. }
  773. EXPORT_SYMBOL_GPL(__bio_try_merge_page);
  774. /**
  775. * __bio_add_page - add page(s) to a bio in a new segment
  776. * @bio: destination bio
  777. * @page: start page to add
  778. * @len: length of the data to add, may cross pages
  779. * @off: offset of the data relative to @page, may cross pages
  780. *
  781. * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
  782. * that @bio has space for another bvec.
  783. */
  784. void __bio_add_page(struct bio *bio, struct page *page,
  785. unsigned int len, unsigned int off)
  786. {
  787. struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
  788. WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
  789. WARN_ON_ONCE(bio_full(bio, len));
  790. bv->bv_page = page;
  791. bv->bv_offset = off;
  792. bv->bv_len = len;
  793. bio->bi_iter.bi_size += len;
  794. bio->bi_vcnt++;
  795. if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
  796. bio_set_flag(bio, BIO_WORKINGSET);
  797. }
  798. EXPORT_SYMBOL_GPL(__bio_add_page);
  799. /**
  800. * bio_add_page - attempt to add page(s) to bio
  801. * @bio: destination bio
  802. * @page: start page to add
  803. * @len: vec entry length, may cross pages
  804. * @offset: vec entry offset relative to @page, may cross pages
  805. *
  806. * Attempt to add page(s) to the bio_vec maplist. This will only fail
  807. * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
  808. */
  809. int bio_add_page(struct bio *bio, struct page *page,
  810. unsigned int len, unsigned int offset)
  811. {
  812. bool same_page = false;
  813. if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  814. if (bio_full(bio, len))
  815. return 0;
  816. __bio_add_page(bio, page, len, offset);
  817. }
  818. return len;
  819. }
  820. EXPORT_SYMBOL(bio_add_page);
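/*
 * Illustrative sketch (assumed caller): bio_add_page() adds either all of
 * @len or nothing, so callers compare the return value with the requested
 * length and start a new bio when the current one is full.
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
 *		submit_bio(bio);
 *		bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *		... reinitialize device, sector and opcode, then retry ...
 *	}
 */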
  821. void bio_release_pages(struct bio *bio, bool mark_dirty)
  822. {
  823. struct bvec_iter_all iter_all;
  824. struct bio_vec *bvec;
  825. if (bio_flagged(bio, BIO_NO_PAGE_REF))
  826. return;
  827. bio_for_each_segment_all(bvec, bio, iter_all) {
  828. if (mark_dirty && !PageCompound(bvec->bv_page))
  829. set_page_dirty_lock(bvec->bv_page);
  830. put_page(bvec->bv_page);
  831. }
  832. }
  833. EXPORT_SYMBOL_GPL(bio_release_pages);
  834. static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
  835. {
  836. const struct bio_vec *bv = iter->bvec;
  837. unsigned int len;
  838. size_t size;
  839. if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
  840. return -EINVAL;
  841. len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
  842. size = bio_add_page(bio, bv->bv_page, len,
  843. bv->bv_offset + iter->iov_offset);
  844. if (unlikely(size != len))
  845. return -EINVAL;
  846. iov_iter_advance(iter, size);
  847. return 0;
  848. }
  849. static void bio_put_pages(struct page **pages, size_t size, size_t off)
  850. {
  851. size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
  852. for (i = 0; i < nr; i++)
  853. put_page(pages[i]);
  854. }
  855. #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
  856. /**
  857. * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  858. * @bio: bio to add pages to
  859. * @iter: iov iterator describing the region to be mapped
  860. *
  861. * Pins pages from *iter and appends them to @bio's bvec array. The
  862. * pages will have to be released using put_page() when done.
  863. * For multi-segment *iter, this function only adds pages from the
  864. * next non-empty segment of the iov iterator.
  865. */
  866. static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  867. {
  868. unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  869. unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  870. struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  871. struct page **pages = (struct page **)bv;
  872. bool same_page = false;
  873. ssize_t size, left;
  874. unsigned len, i;
  875. size_t offset;
  876. /*
  877. * Move page array up in the allocated memory for the bio vecs as far as
  878. * possible so that we can start filling biovecs from the beginning
  879. * without overwriting the temporary page array.
  880. */
  881. BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  882. pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
  883. size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  884. if (unlikely(size <= 0))
  885. return size ? size : -EFAULT;
  886. for (left = size, i = 0; left > 0; left -= len, i++) {
  887. struct page *page = pages[i];
  888. len = min_t(size_t, PAGE_SIZE - offset, left);
  889. if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
  890. if (same_page)
  891. put_page(page);
  892. } else {
  893. if (WARN_ON_ONCE(bio_full(bio, len))) {
  894. bio_put_pages(pages + i, left, offset);
  895. return -EINVAL;
  896. }
  897. __bio_add_page(bio, page, len, offset);
  898. }
  899. offset = 0;
  900. }
  901. iov_iter_advance(iter, size);
  902. return 0;
  903. }
  904. static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
  905. {
  906. unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
  907. unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  908. struct request_queue *q = bio->bi_disk->queue;
  909. unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
  910. struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  911. struct page **pages = (struct page **)bv;
  912. ssize_t size, left;
  913. unsigned len, i;
  914. size_t offset;
  915. int ret = 0;
  916. if (WARN_ON_ONCE(!max_append_sectors))
  917. return 0;
  918. /*
  919. * Move page array up in the allocated memory for the bio vecs as far as
  920. * possible so that we can start filling biovecs from the beginning
  921. * without overwriting the temporary page array.
  922. */
  923. BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
  924. pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
  925. size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
  926. if (unlikely(size <= 0))
  927. return size ? size : -EFAULT;
  928. for (left = size, i = 0; left > 0; left -= len, i++) {
  929. struct page *page = pages[i];
  930. bool same_page = false;
  931. len = min_t(size_t, PAGE_SIZE - offset, left);
  932. if (bio_add_hw_page(q, bio, page, len, offset,
  933. max_append_sectors, &same_page) != len) {
  934. bio_put_pages(pages + i, left, offset);
  935. ret = -EINVAL;
  936. break;
  937. }
  938. if (same_page)
  939. put_page(page);
  940. offset = 0;
  941. }
  942. iov_iter_advance(iter, size - left);
  943. return ret;
  944. }
  945. /**
  946. * bio_iov_iter_get_pages - add user or kernel pages to a bio
  947. * @bio: bio to add pages to
  948. * @iter: iov iterator describing the region to be added
  949. *
  950. * This takes either an iterator pointing to user memory, or one pointing to
  951. * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
  952. * map them into the kernel. On IO completion, the caller should put those
  953. * pages. If we're adding kernel pages, and the caller told us it's safe to
  954. * do so, we just have to add the pages to the bio directly. We don't grab an
  955. * extra reference to those pages (the user should already have that), and we
  956. * don't put the page on IO completion. The caller needs to check if the bio is
  957. * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
  958. * released.
  959. *
  960. * The function tries, but does not guarantee, to pin as many pages as
  961. * fit into the bio, or are requested in @iter, whichever is smaller. If
  962. * MM encounters an error pinning the requested pages, it stops. Error
  963. * is returned only if 0 pages could be pinned.
  964. */
  965. int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  966. {
  967. const bool is_bvec = iov_iter_is_bvec(iter);
  968. int ret;
  969. if (WARN_ON_ONCE(bio->bi_vcnt))
  970. return -EINVAL;
  971. do {
  972. if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
  973. if (WARN_ON_ONCE(is_bvec))
  974. return -EINVAL;
  975. ret = __bio_iov_append_get_pages(bio, iter);
  976. } else {
  977. if (is_bvec)
  978. ret = __bio_iov_bvec_add_pages(bio, iter);
  979. else
  980. ret = __bio_iov_iter_get_pages(bio, iter);
  981. }
  982. } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
  983. if (is_bvec)
  984. bio_set_flag(bio, BIO_NO_PAGE_REF);
  985. return bio->bi_vcnt ? 0 : ret;
  986. }
  987. EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
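/*
 * Illustrative sketch (assumed direct-I/O caller): add as many pages from the
 * iterator as fit, submit, and release the pinned pages on completion.
 * bio_release_pages() returns early for BIO_NO_PAGE_REF bios, so it is safe
 * to call unconditionally.
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 *
 * and in the completion path:
 *
 *	bio_release_pages(bio, bio_data_dir(bio) == READ);
 *	bio_put(bio);
 */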
  988. static void submit_bio_wait_endio(struct bio *bio)
  989. {
  990. complete(bio->bi_private);
  991. }
  992. /**
  993. * submit_bio_wait - submit a bio, and wait until it completes
  994. * @bio: The &struct bio which describes the I/O
  995. *
  996. * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
  997. * bio_endio() on failure.
  998. *
  999. * WARNING: Unlike the way submit_bio() is usually used, this function does not
  1000. * consume the bio reference. The caller must drop the reference
  1001. * on its own.
  1002. */
  1003. int submit_bio_wait(struct bio *bio)
  1004. {
  1005. DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
  1006. unsigned long hang_check;
  1007. bio->bi_private = &done;
  1008. bio->bi_end_io = submit_bio_wait_endio;
  1009. bio->bi_opf |= REQ_SYNC;
  1010. submit_bio(bio);
  1011. /* Prevent hang_check timer from firing at us during very long I/O */
  1012. hang_check = sysctl_hung_task_timeout_secs;
  1013. if (hang_check)
  1014. while (!wait_for_completion_io_timeout(&done,
  1015. hang_check * (HZ/2)))
  1016. ;
  1017. else
  1018. wait_for_completion_io(&done);
  1019. return blk_status_to_errno(bio->bi_status);
  1020. }
  1021. EXPORT_SYMBOL(submit_bio_wait);
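/*
 * Illustrative sketch (assumed caller; bdev, sector and page are
 * placeholders): synchronously read one page.  The bio reference is not
 * consumed, so the caller drops it afterwards.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */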
  1022. /**
  1023. * bio_advance - increment/complete a bio by some number of bytes
  1024. * @bio: bio to advance
  1025. * @bytes: number of bytes to complete
  1026. *
  1027. * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
  1028. * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
  1029. * be updated on the last bvec as well.
  1030. *
  1031. * @bio will then represent the remaining, uncompleted portion of the io.
  1032. */
  1033. void bio_advance(struct bio *bio, unsigned bytes)
  1034. {
  1035. if (bio_integrity(bio))
  1036. bio_integrity_advance(bio, bytes);
  1037. bio_crypt_advance(bio, bytes);
  1038. bio_advance_iter(bio, &bio->bi_iter, bytes);
  1039. }
  1040. EXPORT_SYMBOL(bio_advance);
  1041. void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
  1042. struct bio *src, struct bvec_iter *src_iter)
  1043. {
  1044. struct bio_vec src_bv, dst_bv;
  1045. void *src_p, *dst_p;
  1046. unsigned bytes;
  1047. while (src_iter->bi_size && dst_iter->bi_size) {
  1048. src_bv = bio_iter_iovec(src, *src_iter);
  1049. dst_bv = bio_iter_iovec(dst, *dst_iter);
  1050. bytes = min(src_bv.bv_len, dst_bv.bv_len);
  1051. src_p = kmap_atomic(src_bv.bv_page);
  1052. dst_p = kmap_atomic(dst_bv.bv_page);
  1053. memcpy(dst_p + dst_bv.bv_offset,
  1054. src_p + src_bv.bv_offset,
  1055. bytes);
  1056. kunmap_atomic(dst_p);
  1057. kunmap_atomic(src_p);
  1058. flush_dcache_page(dst_bv.bv_page);
  1059. bio_advance_iter(src, src_iter, bytes);
  1060. bio_advance_iter(dst, dst_iter, bytes);
  1061. }
  1062. }
  1063. EXPORT_SYMBOL(bio_copy_data_iter);
  1064. /**
  1065. * bio_copy_data - copy contents of data buffers from one bio to another
  1066. * @src: source bio
  1067. * @dst: destination bio
  1068. *
  1069. * Stops when it reaches the end of either @src or @dst - that is, copies
  1070. * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
  1071. */
  1072. void bio_copy_data(struct bio *dst, struct bio *src)
  1073. {
  1074. struct bvec_iter src_iter = src->bi_iter;
  1075. struct bvec_iter dst_iter = dst->bi_iter;
  1076. bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  1077. }
  1078. EXPORT_SYMBOL(bio_copy_data);
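/*
 * Illustrative sketch (assumed bounce-buffer user; orig_bio and bounce_bio
 * are placeholders): after a bounced read completes, copy the data back into
 * the originating bio before completing it.
 *
 *	bio_copy_data(orig_bio, bounce_bio);
 *	bio_free_pages(bounce_bio);
 *	bio_put(bounce_bio);
 *	bio_endio(orig_bio);
 */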
  1079. /**
  1080. * bio_list_copy_data - copy contents of data buffers from one chain of bios to
  1081. * another
  1082. * @src: source bio list
  1083. * @dst: destination bio list
  1084. *
  1085. * Stops when it reaches the end of either the @src list or @dst list - that is,
  1086. * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
  1087. * bios).
  1088. */
  1089. void bio_list_copy_data(struct bio *dst, struct bio *src)
  1090. {
  1091. struct bvec_iter src_iter = src->bi_iter;
  1092. struct bvec_iter dst_iter = dst->bi_iter;
  1093. while (1) {
  1094. if (!src_iter.bi_size) {
  1095. src = src->bi_next;
  1096. if (!src)
  1097. break;
  1098. src_iter = src->bi_iter;
  1099. }
  1100. if (!dst_iter.bi_size) {
  1101. dst = dst->bi_next;
  1102. if (!dst)
  1103. break;
  1104. dst_iter = dst->bi_iter;
  1105. }
  1106. bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
  1107. }
  1108. }
  1109. EXPORT_SYMBOL(bio_list_copy_data);
  1110. void bio_free_pages(struct bio *bio)
  1111. {
  1112. struct bio_vec *bvec;
  1113. struct bvec_iter_all iter_all;
  1114. bio_for_each_segment_all(bvec, bio, iter_all)
  1115. __free_page(bvec->bv_page);
  1116. }
  1117. EXPORT_SYMBOL(bio_free_pages);
  1118. /*
  1119. * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  1120. * for performing direct-IO in BIOs.
  1121. *
  1122. * The problem is that we cannot run set_page_dirty() from interrupt context
  1123. * because the required locks are not interrupt-safe. So what we can do is to
  1124. * mark the pages dirty _before_ performing IO. And in interrupt context,
  1125. * check that the pages are still dirty. If so, fine. If not, redirty them
  1126. * in process context.
  1127. *
  1128. * We special-case compound pages here: normally this means reads into hugetlb
  1129. * pages. The logic in here doesn't really work right for compound pages
  1130. * because the VM does not uniformly chase down the head page in all cases.
  1131. * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
  1132. * handle them at all. So we skip compound pages here at an early stage.
  1133. *
  1134. * Note that this code is very hard to test under normal circumstances because
  1135. * direct-io pins the pages with get_user_pages(). This makes
  1136. * is_page_cache_freeable return false, and the VM will not clean the pages.
  1137. * But other code (eg, flusher threads) could clean the pages if they are mapped
  1138. * pagecache.
  1139. *
  1140. * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
  1141. * deferred bio dirtying paths.
  1142. */
  1143. /*
  1144. * bio_set_pages_dirty() will mark all the bio's pages as dirty.
  1145. */
  1146. void bio_set_pages_dirty(struct bio *bio)
  1147. {
  1148. struct bio_vec *bvec;
  1149. struct bvec_iter_all iter_all;
  1150. bio_for_each_segment_all(bvec, bio, iter_all) {
  1151. if (!PageCompound(bvec->bv_page))
  1152. set_page_dirty_lock(bvec->bv_page);
  1153. }
  1154. }
  1155. /*
  1156. * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
  1157. * If they are, then fine. If, however, some pages are clean then they must
  1158. * have been written out during the direct-IO read. So we take another ref on
  1159. * the BIO and re-dirty the pages in process context.
  1160. *
  1161. * It is expected that bio_check_pages_dirty() will wholly own the BIO from
  1162. * here on. It will run one put_page() against each page and will run one
  1163. * bio_put() against the BIO.
  1164. */
  1165. static void bio_dirty_fn(struct work_struct *work);
  1166. static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
  1167. static DEFINE_SPINLOCK(bio_dirty_lock);
  1168. static struct bio *bio_dirty_list;
  1169. /*
  1170. * This runs in process context
  1171. */
  1172. static void bio_dirty_fn(struct work_struct *work)
  1173. {
  1174. struct bio *bio, *next;
  1175. spin_lock_irq(&bio_dirty_lock);
  1176. next = bio_dirty_list;
  1177. bio_dirty_list = NULL;
  1178. spin_unlock_irq(&bio_dirty_lock);
  1179. while ((bio = next) != NULL) {
  1180. next = bio->bi_private;
  1181. bio_release_pages(bio, true);
  1182. bio_put(bio);
  1183. }
  1184. }
  1185. void bio_check_pages_dirty(struct bio *bio)
  1186. {
  1187. struct bio_vec *bvec;
  1188. unsigned long flags;
  1189. struct bvec_iter_all iter_all;
  1190. bio_for_each_segment_all(bvec, bio, iter_all) {
  1191. if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
  1192. goto defer;
  1193. }
  1194. bio_release_pages(bio, false);
  1195. bio_put(bio);
  1196. return;
  1197. defer:
  1198. spin_lock_irqsave(&bio_dirty_lock, flags);
  1199. bio->bi_private = bio_dirty_list;
  1200. bio_dirty_list = bio;
  1201. spin_unlock_irqrestore(&bio_dirty_lock, flags);
  1202. schedule_work(&bio_dirty_work);
  1203. }
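/*
 * Illustrative sketch (assumed direct-I/O read path): dirty the pages before
 * submission and let the completion handler re-dirty anything the VM cleaned
 * in the meantime.  bio_check_pages_dirty() owns the bio from then on.
 *
 *	bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 * and in ->bi_end_io():
 *
 *	bio_check_pages_dirty(bio);
 */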
  1204. static inline bool bio_remaining_done(struct bio *bio)
  1205. {
  1206. /*
  1207. * If we're not chaining, then ->__bi_remaining is always 1 and
  1208. * we always end io on the first invocation.
  1209. */
  1210. if (!bio_flagged(bio, BIO_CHAIN))
  1211. return true;
  1212. BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
  1213. if (atomic_dec_and_test(&bio->__bi_remaining)) {
  1214. bio_clear_flag(bio, BIO_CHAIN);
  1215. return true;
  1216. }
  1217. return false;
  1218. }
  1219. /**
  1220. * bio_endio - end I/O on a bio
  1221. * @bio: bio
  1222. *
  1223. * Description:
  1224. * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
  1225. * way to end I/O on a bio. No one should call bi_end_io() directly on a
  1226. * bio unless they own it and thus know that it has an end_io function.
  1227. *
  1228. * bio_endio() can be called several times on a bio that has been chained
  1229. * using bio_chain(). The ->bi_end_io() function will only be called the
  1230. * last time. At this point the BLK_TA_COMPLETE tracing event will be
  1231. * generated if BIO_TRACE_COMPLETION is set.
  1232. **/
  1233. void bio_endio(struct bio *bio)
  1234. {
  1235. again:
  1236. if (!bio_remaining_done(bio))
  1237. return;
  1238. if (!bio_integrity_endio(bio))
  1239. return;
  1240. if (bio->bi_disk)
  1241. rq_qos_done_bio(bio->bi_disk->queue, bio);
  1242. /*
  1243. * Need to have a real endio function for chained bios, otherwise
  1244. * various corner cases will break (like stacking block devices that
  1245. * save/restore bi_end_io) - however, we want to avoid unbounded
  1246. * recursion and blowing the stack. Tail call optimization would
  1247. * handle this, but compiling with frame pointers also disables
  1248. * gcc's sibling call optimization.
  1249. */
  1250. if (bio->bi_end_io == bio_chain_endio) {
  1251. bio = __bio_chain_endio(bio);
  1252. goto again;
  1253. }
  1254. if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
  1255. trace_block_bio_complete(bio->bi_disk->queue, bio);
  1256. bio_clear_flag(bio, BIO_TRACE_COMPLETION);
  1257. }
  1258. blk_throtl_bio_endio(bio);
  1259. /* release cgroup info */
  1260. bio_uninit(bio);
  1261. if (bio->bi_end_io)
  1262. bio->bi_end_io(bio);
  1263. }
  1264. EXPORT_SYMBOL(bio_endio);
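/*
 * Illustrative sketch (assumed driver completion handler; struct my_ctx and
 * my_end_io() are placeholders): an owner that set ->bi_end_io recovers its
 * context from ->bi_private once bio_endio() finally calls it.
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		ctx->error = blk_status_to_errno(bio->bi_status);
 *		complete(&ctx->done);
 *		bio_put(bio);
 *	}
 */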
  1265. /**
  1266. * bio_split - split a bio
  1267. * @bio: bio to split
  1268. * @sectors: number of sectors to split from the front of @bio
  1269. * @gfp: gfp mask
  1270. * @bs: bio set to allocate from
  1271. *
  1272. * Allocates and returns a new bio which represents @sectors from the start of
  1273. * @bio, and updates @bio to represent the remaining sectors.
  1274. *
  1275. * Unless this is a discard request the newly allocated bio will point
  1276. * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
  1277. * neither @bio nor @bs are freed before the split bio.
  1278. */
  1279. struct bio *bio_split(struct bio *bio, int sectors,
  1280. gfp_t gfp, struct bio_set *bs)
  1281. {
  1282. struct bio *split;
  1283. BUG_ON(sectors <= 0);
  1284. BUG_ON(sectors >= bio_sectors(bio));
  1285. /* Zone append commands cannot be split */
  1286. if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
  1287. return NULL;
  1288. split = bio_clone_fast(bio, gfp, bs);
  1289. if (!split)
  1290. return NULL;
  1291. split->bi_iter.bi_size = sectors << 9;
  1292. if (bio_integrity(split))
  1293. bio_integrity_trim(split);
  1294. bio_advance(bio, split->bi_iter.bi_size);
  1295. if (bio_flagged(bio, BIO_TRACE_COMPLETION))
  1296. bio_set_flag(split, BIO_TRACE_COMPLETION);
  1297. return split;
  1298. }
  1299. EXPORT_SYMBOL(bio_split);
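/*
 * Illustrative sketch (assumed stacking driver; max_sectors is a
 * placeholder): split off the first chunk of an oversized bio, chain it to
 * the remainder, resubmit the remainder and keep processing only the split.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &fs_bio_set);
 *
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 */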
  1300. /**
  1301. * bio_trim - trim a bio
  1302. * @bio: bio to trim
  1303. * @offset: number of sectors to trim from the front of @bio
  1304. * @size: size we want to trim @bio to, in sectors
  1305. */
  1306. void bio_trim(struct bio *bio, int offset, int size)
  1307. {
  1308. /* 'bio' is a cloned bio which we need to trim to match
  1309. * the given offset and size.
  1310. */
  1311. size <<= 9;
  1312. if (offset == 0 && size == bio->bi_iter.bi_size)
  1313. return;
  1314. bio_advance(bio, offset << 9);
  1315. bio->bi_iter.bi_size = size;
  1316. if (bio_integrity(bio))
  1317. bio_integrity_trim(bio);
  1318. }
  1319. EXPORT_SYMBOL_GPL(bio_trim);
  1320. /*
  1321. * create memory pools for biovec's in a bio_set.
  1322. * use the global biovec slabs created for general use.
  1323. */
  1324. int biovec_init_pool(mempool_t *pool, int pool_entries)
  1325. {
  1326. struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
  1327. return mempool_init_slab_pool(pool, pool_entries, bp->slab);
  1328. }
  1329. /*
  1330. * bioset_exit - exit a bioset initialized with bioset_init()
  1331. *
  1332. * May be called on a zeroed but uninitialized bioset (i.e. allocated with
  1333. * kzalloc()).
  1334. */
  1335. void bioset_exit(struct bio_set *bs)
  1336. {
  1337. if (bs->rescue_workqueue)
  1338. destroy_workqueue(bs->rescue_workqueue);
  1339. bs->rescue_workqueue = NULL;
  1340. mempool_exit(&bs->bio_pool);
  1341. mempool_exit(&bs->bvec_pool);
  1342. bioset_integrity_free(bs);
  1343. if (bs->bio_slab)
  1344. bio_put_slab(bs);
  1345. bs->bio_slab = NULL;
  1346. }
  1347. EXPORT_SYMBOL(bioset_exit);
  1348. /**
  1349. * bioset_init - Initialize a bio_set
  1350. * @bs: pool to initialize
  1351. * @pool_size: Number of bio and bio_vecs to cache in the mempool
  1352. * @front_pad: Number of bytes to allocate in front of the returned bio
  1353. * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
  1354. * and %BIOSET_NEED_RESCUER
  1355. *
  1356. * Description:
  1357. * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
  1358. * to ask for a number of bytes to be allocated in front of the bio.
  1359. * Front pad allocation is useful for embedding the bio inside
  1360. * another structure, to avoid allocating extra data to go with the bio.
  1361. * Note that the bio must be embedded at the END of that structure always,
  1362. * or things will break badly.
  1363. * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
  1364. * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
  1365. * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
  1366. * dispatch queued requests when the mempool runs out of space.
  1367. *
  1368. */
  1369. int bioset_init(struct bio_set *bs,
  1370. unsigned int pool_size,
  1371. unsigned int front_pad,
  1372. int flags)
  1373. {
  1374. unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
  1375. bs->front_pad = front_pad;
  1376. spin_lock_init(&bs->rescue_lock);
  1377. bio_list_init(&bs->rescue_list);
  1378. INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
  1379. bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
  1380. if (!bs->bio_slab)
  1381. return -ENOMEM;
  1382. if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
  1383. goto bad;
  1384. if ((flags & BIOSET_NEED_BVECS) &&
  1385. biovec_init_pool(&bs->bvec_pool, pool_size))
  1386. goto bad;
  1387. if (!(flags & BIOSET_NEED_RESCUER))
  1388. return 0;
  1389. bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
  1390. if (!bs->rescue_workqueue)
  1391. goto bad;
  1392. return 0;
  1393. bad:
  1394. bioset_exit(bs);
  1395. return -ENOMEM;
  1396. }
  1397. EXPORT_SYMBOL(bioset_init);
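/*
 * Illustrative sketch (assumed driver; struct my_io, my_dev and my_bio_set
 * are placeholders): use front_pad so each bio allocation also carries a
 * per-I/O context in front of the struct bio.  As noted above, the bio must
 * stay at the end of the containing structure.
 *
 *	struct my_io {
 *		struct my_dev	*dev;
 *		struct bio	bio;
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * and per I/O:
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */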
  1398. /*
  1399. * Initialize and setup a new bio_set, based on the settings from
  1400. * another bio_set.
  1401. */
  1402. int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
  1403. {
  1404. int flags;
  1405. flags = 0;
  1406. if (src->bvec_pool.min_nr)
  1407. flags |= BIOSET_NEED_BVECS;
  1408. if (src->rescue_workqueue)
  1409. flags |= BIOSET_NEED_RESCUER;
  1410. return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
  1411. }
  1412. EXPORT_SYMBOL(bioset_init_from_src);
  1413. static void __init biovec_init_slabs(void)
  1414. {
  1415. int i;
  1416. for (i = 0; i < BVEC_POOL_NR; i++) {
  1417. int size;
  1418. struct biovec_slab *bvs = bvec_slabs + i;
  1419. if (bvs->nr_vecs <= BIO_INLINE_VECS) {
  1420. bvs->slab = NULL;
  1421. continue;
  1422. }
  1423. size = bvs->nr_vecs * sizeof(struct bio_vec);
  1424. bvs->slab = kmem_cache_create(bvs->name, size, 0,
  1425. SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
  1426. }
  1427. }
  1428. static int __init init_bio(void)
  1429. {
  1430. bio_slab_max = 2;
  1431. bio_slab_nr = 0;
  1432. bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
  1433. GFP_KERNEL);
  1434. BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
  1435. if (!bio_slabs)
  1436. panic("bio: can't allocate bios\n");
  1437. bio_integrity_init();
  1438. biovec_init_slabs();
  1439. if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
  1440. panic("bio: can't allocate bios\n");
  1441. if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
  1442. panic("bio: can't create integrity pool\n");
  1443. return 0;
  1444. }
  1445. subsys_initcall(init_bio);