blk-map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio, *bounce_bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(rq->q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto cleanup;

	/*
	 * We link the bounce buffer in and could have to traverse it later, so
	 * we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	struct bio *bio, *bounce_bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	/*
	 * Subtle: if we end up needing to bounce a bio, it would normally
	 * disappear when its bi_end_io is run.  However, we need the original
	 * bio for the unmap, so grab an extra reference to it
	 */
	bio_get(bio);

	bounce_bio = bio;
	ret = blk_rq_append_bio(rq, &bounce_bio);
	if (ret)
		goto out_put_orig;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bounce_bio);
	return 0;

out_put_orig:
	bio_put(bio);
out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ret;
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
static void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
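
/*
 * Worked example of the page arithmetic above (illustrative only, assuming
 * PAGE_SIZE == 4096 and PAGE_SHIFT == 12, with a hypothetical kaddr of
 * 0x1f00 and len == 8192): the data spans bytes 0x1f00..0x3eff, so
 * start == kaddr >> PAGE_SHIFT == 1 and
 * end == (0x1f00 + 0x2000 + 0xfff) >> PAGE_SHIFT == 4, giving
 * nr_pages == 3 -- an unaligned two-page buffer straddles three pages.
 */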

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(*bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
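
/*
 * Illustrative sketch, not part of blk-map.c: how a caller might attach a
 * driver-built bio to a passthrough request with blk_rq_append_bio(). The
 * example_attach_bio() wrapper is hypothetical; the point is the
 * double-pointer contract, since blk_queue_bounce() may swap in a bounce bio.
 */
#if 0
static int example_attach_bio(struct request *rq, struct bio *bio)
{
	struct bio *attach = bio;	/* may be replaced by a bounce bio */
	int ret;

	ret = blk_rq_append_bio(rq, &attach);
	if (ret)
		return ret;		/* bio didn't fit the driver limits */

	/*
	 * On success, @attach is the bio actually linked into @rq; if it
	 * differs from @bio, a bounce buffer was inserted in front of it.
	 */
	return 0;
}
#endif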

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
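
/*
 * Illustrative sketch, not part of blk-map.c: a passthrough ioctl path (in
 * the spirit of SCSI SG_IO) mapping a user iovec into a request. The
 * example_submit_uvec() helper is hypothetical and the bare
 * import_iovec()/blk_execute_rq() request handling is an assumption kept
 * deliberately simple; the essential detail is saving the original rq->bio
 * before submission so it can be handed back to blk_rq_unmap_user().
 */
#if 0
static int example_submit_uvec(struct request_queue *q, struct request *rq,
			       const struct iovec __user *uvec, int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
	struct iov_iter iter;
	struct bio *orig_bio;
	ssize_t ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* Zero-copy map when aligned, otherwise bounce via bio_copy_user_iov(). */
	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	kfree(iov);
	if (ret)
		return ret;

	orig_bio = rq->bio;		/* completion may advance rq->bio */
	blk_execute_rq(q, NULL, rq, 0);
	return blk_rq_unmap_user(orig_bio);
}
#endif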

/*
 * Map a single contiguous user buffer into @rq by wrapping it in a
 * one-segment iov_iter and handing it to blk_rq_map_user_iov().
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		if (bio->bi_private) {
			ret2 = bio_uncopy_user(mapped_bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_unmap_user(mapped_bio);
		}

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
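
/*
 * Illustrative sketch, not part of blk-map.c: the blk_rq_map_user() /
 * blk_rq_unmap_user() pairing for a single user buffer. example_rw_ubuf()
 * is hypothetical and the bare blk_execute_rq() call is an assumption; the
 * essential detail is keeping the original rq->bio for the unmap, as the
 * kernel-doc above requires.
 */
#if 0
static int example_rw_ubuf(struct request_queue *q, struct request *rq,
			   void __user *ubuf, unsigned long len)
{
	struct bio *orig_bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	orig_bio = rq->bio;		/* must be saved before submission */
	blk_execute_rq(q, NULL, rq, 0);

	/* Copies bounce pages back to user space on READ, then frees them. */
	return blk_rq_unmap_user(orig_bio);
}
#endif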

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
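
/*
 * Illustrative sketch, not part of blk-map.c: issuing a passthrough command
 * with a kernel buffer via blk_rq_map_kern(). example_send_kbuf() is
 * hypothetical and the blk_get_request()/blk_execute_rq() setup around it is
 * an assumption kept minimal; an unaligned or on-stack @kbuf is transparently
 * copied through bio_copy_kern() instead of being mapped directly.
 */
#if 0
static int example_send_kbuf(struct request_queue *q, void *kbuf,
			     unsigned int len, gfp_t gfp)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Maps @kbuf directly if aligned, otherwise bounces it into fresh pages. */
	ret = blk_rq_map_kern(q, rq, kbuf, len, gfp);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);

	blk_put_request(rq);
	return ret;
}
#endif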