extent-io.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 *
 * 2017 Marek Behun, CZ.NIC, marek.behun@nic.cz
 */

#include <linux/kernel.h>
#include <linux/bug.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "ctree.h"
#include "extent-io.h"
#include "disk-io.h"

u64 btrfs_read_extent_inline(struct btrfs_path *path,
			     struct btrfs_file_extent_item *extent, u64 offset,
			     u64 size, char *out)
{
	u32 clen, dlen, orig_size = size, res;
	const char *cbuf;
	char *dbuf;
	const int data_off = offsetof(struct btrfs_file_extent_item,
				      disk_bytenr);

	clen = btrfs_path_item_size(path) - data_off;
	cbuf = (const char *)extent + data_off;
	dlen = extent->ram_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		memcpy(out, cbuf + offset, size);
		return size;
	}

	if (dlen > orig_size) {
		dbuf = malloc(dlen);
		if (!dbuf)
			return -1ULL;
	} else {
		dbuf = out;
	}

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1 || res != dlen)
		goto err;

	if (dlen > orig_size) {
		memcpy(out, dbuf + offset, size);
		free(dbuf);
	} else if (offset) {
		memmove(out, dbuf + offset, size);
	}

	return size;

err:
	if (dlen > orig_size)
		free(dbuf);
	return -1ULL;
}
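/*
 * Usage sketch (added for illustration, not part of the original file):
 * reading the head of an inline extent into a caller buffer. 'path' and
 * 'extent' are placeholder names for the results of a prior EXTENT_DATA
 * item lookup; -1ULL signals failure, otherwise the byte count is returned.
 *
 *	char buf[256];
 *	u64 n = btrfs_read_extent_inline(path, extent, 0, sizeof(buf), buf);
 *
 *	if (n == -1ULL)
 *		return -EIO;
 */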
u64 btrfs_read_extent_reg(struct btrfs_path *path,
			  struct btrfs_file_extent_item *extent, u64 offset,
			  u64 size, char *out)
{
	u64 physical, clen, dlen, orig_size = size;
	u32 res;
	char *cbuf, *dbuf;

	clen = extent->disk_num_bytes;
	dlen = extent->num_bytes;

	if (offset > dlen)
		return -1ULL;

	if (size > dlen - offset)
		size = dlen - offset;

	/* sparse extent */
	if (extent->disk_bytenr == 0) {
		memset(out, 0, size);
		return size;
	}

	physical = btrfs_map_logical_to_physical(extent->disk_bytenr);
	if (physical == -1ULL)
		return -1ULL;

	if (extent->compression == BTRFS_COMPRESS_NONE) {
		physical += extent->offset + offset;
		if (!btrfs_devread(physical, size, out))
			return -1ULL;

		return size;
	}

	cbuf = malloc_cache_aligned(dlen > size ? clen + dlen : clen);
	if (!cbuf)
		return -1ULL;

	if (dlen > orig_size)
		dbuf = cbuf + clen;
	else
		dbuf = out;

	if (!btrfs_devread(physical, clen, cbuf))
		goto err;

	res = btrfs_decompress(extent->compression, cbuf, clen, dbuf, dlen);
	if (res == -1)
		goto err;

	if (dlen > orig_size)
		memcpy(out, dbuf + offset, size);
	else
		memmove(out, dbuf + offset, size);

	free(cbuf);
	return res;

err:
	free(cbuf);
	return -1ULL;
}
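/*
 * Usage sketch (illustrative, not from the original source): read 'size'
 * bytes at byte 'offset' within a regular extent. A sparse extent
 * (disk_bytenr == 0) comes back zero-filled; a compressed extent is read
 * whole and decompressed before the requested window is copied out.
 *
 *	u64 n = btrfs_read_extent_reg(path, extent, offset, size, out);
 *
 *	if (n == -1ULL)
 *		return -EIO;
 */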
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	tree->cache_size = 0;
}

static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}

static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);

void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}

static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}
/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IOBITS in their state field
 * are not merged.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}
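/*
 * Illustration (added for clarity): if the tree holds [0, 4095] and
 * [4096, 8191] and both carry exactly EXTENT_DIRTY, merge_state() on either
 * one collapses them into a single [0, 8191] state and frees the absorbed
 * extent_state. States whose bits differ, or which carry any EXTENT_IOBITS
 * flag, are left untouched.
 */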
/*
 * Insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}

/*
 * Split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created lower half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}
/*
 * Clear some bits on a range in the tree.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}
/*
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

/*
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}
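/*
 * Worked example (added for clarity): extent_buffer_bitmap_set(eb, 0, 6, 4)
 * sets bit indices 6-9 of the bitmap at offset 0. The loop ORs mask 0xc0
 * into byte 0 (bits 6-7, LSB-first numbering), then the tail ORs mask 0x03
 * into byte 1 (bits 8-9). extent_buffer_bitmap_clear() walks the same masks
 * but ANDs their complements.
 */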
/*
 * Clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * This search will find the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 *
	 * We need to split the extent, and clear the bit
	 * on the first half.
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}
/*
 * Set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * This search will find the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going.
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;

		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half.
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);

	state->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;

search_again:
	if (start > end)
		goto out;
	goto again;
}
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * This search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while (1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}
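/*
 * Semantics sketch (added; not in the original file): 'filled' selects
 * between all-of-range and any-overlap matching.
 *
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 1)
 *		-> 1 only if every byte of [0, 8191] is covered by states
 *		   that all have EXTENT_DIRTY set, with no gaps;
 *	test_range_bit(tree, 0, 8191, EXTENT_DIRTY, 0)
 *		-> 1 as soon as any overlapping state has the bit set.
 */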
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}
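/*
 * Usage sketch (illustrative; 'bytenr' and 'my_ptr' are placeholders): the
 * xprivate slot stores one u64 keyed by the exact start offset of a state,
 * so both calls return -ENOENT unless a state begins precisely at 'start'.
 *
 *	set_state_private(tree, bytenr, (u64)(uintptr_t)my_ptr);
 *	...
 *	u64 val;
 *
 *	if (!get_state_private(tree, bytenr, &val))
 *		my_ptr = (void *)(uintptr_t)val;
 */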
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer));
	if (!eb)
		return NULL;
	eb->data = malloc_cache_aligned(blocksize);
	if (!eb->data) {
		free(eb);
		return NULL;
	}

	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	memset_extent_buffer(eb, 0, 0, blocksize);

	return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}
static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb->data);
	free(eb);
}

static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		if (eb->flags & EXTENT_DIRTY) {
			error("dirty eb leak (aborted trans): start %llu len %u",
			      eb->start, eb->len);
		}
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}

void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, 1);
}
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			/* also release eb->data, which would otherwise leak */
			free(eb->data);
			free(eb);
			return NULL;
		}
		tree->cache_size += blocksize;
	}
	return eb;
}
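/*
 * Lifecycle sketch (added for clarity): a typical metadata read looks up or
 * creates a cached buffer, fills it from disk, and drops its reference when
 * done. A cache hit only bumps eb->refs; a partial overlap releases the
 * stale buffer before a fresh one is allocated.
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_extent_buffer(fs_info, bytenr, blocksize);
 *	if (!eb)
 *		return -ENOMEM;
 *	(read the block into eb->data, e.g. via read_extent_from_disk())
 *	free_extent_buffer(eb);
 */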
/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block read/write using the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid injecting eb->start == SZ_64K, as a
 * fuzzed image could have an invalid tree bytenr that covers the super block
 * range and causes a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 bytenr, u32 blocksize)
{
	struct extent_buffer *ret;

	ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
	if (!ret)
		return NULL;

	ret->flags |= EXTENT_BUFFER_DUMMY;

	return ret;
}
int read_extent_from_disk(struct blk_desc *desc, struct disk_partition *part,
			  u64 physical, struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;

	ret = __btrfs_devread(desc, part, eb->data + offset, len, physical);
	if (ret < 0)
		goto out;
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}
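/*
 * Usage sketch (illustrative): metadata is accessed in eb->data by byte
 * offset, which is what the btrfs accessor helpers build on. For example,
 * copying the tree-block header (which starts at offset 0) out of a buffer:
 *
 *	struct btrfs_header hdr;
 *
 *	read_extent_buffer(eb, &hdr, 0, sizeof(hdr));
 *
 * write_extent_buffer() is the mirror operation for updates.
 */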
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}

int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}

int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}
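/*
 * Lifecycle note with a sketch (added for clarity): marking a buffer dirty
 * takes an extra reference (via extent_buffer_get()) and records the range
 * [start, start + len - 1] as EXTENT_DIRTY in the fs-wide extent cache;
 * clearing releases both again.
 *
 *	set_extent_buffer_dirty(eb);
 *	(modify eb->data and write it back)
 *	clear_extent_buffer_dirty(eb);
 */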