
// SPDX-License-Identifier: GPL-2.0+
/*
 * BTRFS filesystem implementation for U-Boot
 *
 * 2017 Marek Behun, CZ.NIC, marek.behun@nic.cz
 */

#include <linux/kernel.h>
#include <linux/bug.h>
#include <malloc.h>
#include <memalign.h>
#include "btrfs.h"
#include "ctree.h"
#include "extent-io.h"
#include "disk-io.h"
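
/* Initialize an extent IO tree: empty state tree, empty buffer cache. */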
void extent_io_tree_init(struct extent_io_tree *tree)
{
	cache_tree_init(&tree->state);
	cache_tree_init(&tree->cache);
	tree->cache_size = 0;
}
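
/*
 * Allocate a new extent_state holding a single reference; returns NULL on
 * allocation failure.
 */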
static struct extent_state *alloc_extent_state(void)
{
	struct extent_state *state;

	state = malloc(sizeof(*state));
	if (!state)
		return NULL;
	state->cache_node.objectid = 0;
	state->refs = 1;
	state->state = 0;
	state->xprivate = 0;
	return state;
}
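
/* Drop one reference; the state is freed when the last reference goes. */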
static void btrfs_free_extent_state(struct extent_state *state)
{
	state->refs--;
	BUG_ON(state->refs < 0);
	if (state->refs == 0)
		free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
	struct extent_state *es;

	es = container_of(cache, struct extent_state, cache_node);
	btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);
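
/* Release every extent_state still tracked by the tree. */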
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
	cache_tree_free_extents(&tree->state, free_extent_state_func);
}
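
/* Keep the embedded cache_node in sync with the state's [start, end] range. */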
static inline void update_extent_state(struct extent_state *state)
{
	state->cache_node.start = state->start;
	state->cache_node.size = state->end + 1 - state->start;
}

/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IOBITS set in their state
 * field are not merged.
 */
static int merge_state(struct extent_io_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct cache_extent *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = prev_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			update_extent_state(state);
			remove_cache_extent(&tree->state, &other->cache_node);
			btrfs_free_extent_state(other);
		}
	}
	other_node = next_cache_extent(&state->cache_node);
	if (other_node) {
		other = container_of(other_node, struct extent_state,
				     cache_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			update_extent_state(other);
			remove_cache_extent(&tree->state, &state->cache_node);
			btrfs_free_extent_state(state);
		}
	}
	return 0;
}

/*
 * Insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	int ret;

	BUG_ON(end < start);
	state->state |= bits;
	state->start = start;
	state->end = end;
	update_extent_state(state);
	ret = insert_cache_extent(&tree->state, &state->cache_node);
	BUG_ON(ret);
	merge_state(tree, state);
	return 0;
}

/*
 * Split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created first half ('orig' keeps the
 * second half). 'split' indicates an offset inside 'orig' where it should
 * be split.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	int ret;

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	update_extent_state(prealloc);
	orig->start = split;
	update_extent_state(orig);
	ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
	BUG_ON(ret);
	return 0;
}

/*
 * Clear some bits from a single extent state. If no bits remain, the
 * state is removed from the tree and freed; otherwise we try to merge it
 * with its neighbours. Returns the subset of 'bits' that was actually set.
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state, int bits)
{
	int ret = state->state & bits;

	state->state &= ~bits;
	if (state->state == 0) {
		remove_cache_extent(&tree->state, &state->cache_node);
		btrfs_free_extent_state(state);
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
			      unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = ~0;
		p++;
	}
	if (len) {
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		*p |= mask_to_set;
	}
}

/*
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
				unsigned long pos, unsigned long len)
{
	u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = ~0;
		p++;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		*p &= ~mask_to_clear;
	}
}

/*
 * Clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	u64 last_end;
	int err;
	int set = 0;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;
	state = container_of(node, struct extent_state, cache_node);
	if (state->start > end)
		goto out;
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, bits);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		set |= clear_state_bit(tree, prealloc, bits);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	goto search_again;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return set;

search_again:
	if (start > end)
		goto out;
	goto again;
}

/*
 * Set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct cache_extent *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

again:
	if (!prealloc) {
		prealloc = alloc_extent_state();
		if (!prealloc)
			return -ENOMEM;
	}

	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		goto out;
	}

	state = container_of(node, struct extent_state, cache_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		state->state |= bits;
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		goto search_again;
	}
	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;

		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 * | ---------- state ---------- |
	 *
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	err = split_state(tree, state, prealloc, end + 1);
	BUG_ON(err == -EEXIST);
	/* after the split, 'prealloc' is the first half: set the bit there */
	prealloc->state |= bits;
	merge_state(tree, prealloc);
	prealloc = NULL;
out:
	if (prealloc)
		btrfs_free_extent_state(prealloc);
	return err;

search_again:
	if (start > end)
		goto out;
	goto again;
}
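
/* Convenience wrappers for the EXTENT_DIRTY bit. */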
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}
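
/*
 * Find the first extent at or after 'start' that has any of 'bits' set.
 * Returns 0 and fills *start_ret/*end_ret on success, 1 if none is found.
 */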
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 1;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = search_cache_extent(&tree->state, start);
	if (!node)
		goto out;

	while (1) {
		state = container_of(node, struct extent_state, cache_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = next_cache_extent(node);
		if (!node)
			break;
	}
out:
	return ret;
}
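
/*
 * Check whether 'bits' are set in [start, end]. With 'filled' set, every
 * part of the range must have the bits; otherwise one hit is enough.
 */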
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct cache_extent *node;
	int bitset = 0;

	node = search_cache_extent(&tree->state, start);
	while (node && start <= end) {
		state = container_of(node, struct extent_state, cache_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = next_cache_extent(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	return bitset;
}
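
/*
 * Attach a caller-defined value to the extent state that starts exactly
 * at 'start'. Returns -ENOENT if no such state exists.
 */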
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->xprivate = private;
out:
	return ret;
}
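
/* Counterpart of set_state_private(): read the value back into *private. */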
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct cache_extent *node;
	struct extent_state *state;
	int ret = 0;

	node = search_cache_extent(&tree->state, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = container_of(node, struct extent_state, cache_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->xprivate;
out:
	return ret;
}
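
/*
 * Allocate an extent buffer with a cache-aligned, zeroed data area.
 * The caller holds the single initial reference.
 */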
static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
						   u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;

	eb = calloc(1, sizeof(struct extent_buffer));
	if (!eb)
		return NULL;
	eb->data = malloc_cache_aligned(blocksize);
	if (!eb->data) {
		free(eb);
		return NULL;
	}
	eb->start = bytenr;
	eb->len = blocksize;
	eb->refs = 1;
	eb->flags = 0;
	eb->cache_node.start = bytenr;
	eb->cache_node.size = blocksize;
	eb->fs_info = info;
	memset_extent_buffer(eb, 0, 0, blocksize);
	return eb;
}
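
/* Duplicate 'src' into a new dummy (uncached) extent buffer. */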
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
	struct extent_buffer *new;

	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
	if (!new)
		return NULL;

	copy_extent_buffer(new, src, 0, 0, src->len);
	new->flags |= EXTENT_BUFFER_DUMMY;

	return new;
}
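
/*
 * Really free an extent buffer: drop it from the buffer cache (unless it
 * is a dummy) and release its memory. Must only be called at refcount 0.
 */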
static void free_extent_buffer_final(struct extent_buffer *eb)
{
	BUG_ON(eb->refs);
	if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
		struct extent_io_tree *tree = &eb->fs_info->extent_cache;

		remove_cache_extent(&tree->cache, &eb->cache_node);
		BUG_ON(tree->cache_size < eb->len);
		tree->cache_size -= eb->len;
	}
	free(eb->data);
	free(eb);
}
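
/*
 * Drop one reference. When it reaches zero, warn about still-dirty
 * buffers and free dummy buffers (or any buffer when 'free_now' is set).
 */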
static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
	if (!eb || IS_ERR(eb))
		return;

	eb->refs--;
	BUG_ON(eb->refs < 0);
	if (eb->refs == 0) {
		if (eb->flags & EXTENT_DIRTY) {
			error(
"dirty eb leak (aborted trans): start %llu len %u",
			      eb->start, eb->len);
		}
		if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
			free_extent_buffer_final(eb);
	}
}

void free_extent_buffer(struct extent_buffer *eb)
{
	free_extent_buffer_internal(eb, true);
}
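
/*
 * Look up an exact match in the buffer cache and take a reference on it;
 * returns NULL if no buffer covers [bytenr, bytenr + blocksize) exactly.
 */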
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
					 u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}
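
/* Take a reference on the first cached buffer at or after 'start'. */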
struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
					       u64 start)
{
	struct extent_buffer *eb = NULL;
	struct cache_extent *cache;

	cache = search_cache_extent(&tree->cache, start);
	if (cache) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	}
	return eb;
}
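
/*
 * Return the cached buffer for [bytenr, bytenr + blocksize), allocating
 * and inserting a new one if there is no exact match. A stale buffer that
 * merely overlaps the range gets one reference dropped.
 */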
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 bytenr, u32 blocksize)
{
	struct extent_buffer *eb;
	struct extent_io_tree *tree = &fs_info->extent_cache;
	struct cache_extent *cache;

	cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
	if (cache && cache->start == bytenr &&
	    cache->size == blocksize) {
		eb = container_of(cache, struct extent_buffer, cache_node);
		eb->refs++;
	} else {
		int ret;

		if (cache) {
			eb = container_of(cache, struct extent_buffer,
					  cache_node);
			free_extent_buffer(eb);
		}
		eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
		if (!eb)
			return NULL;
		ret = insert_cache_extent(&tree->cache, &eb->cache_node);
		if (ret) {
			/* free the data area too, or it would leak */
			free(eb->data);
			free(eb);
			return NULL;
		}
		tree->cache_size += blocksize;
	}
	return eb;
}

/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block read/write using the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid injecting eb->start == SZ_64K, as
 * a fuzzed image could contain an invalid tree bytenr covering the super
 * block range, causing a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 bytenr, u32 blocksize)
{
	struct extent_buffer *ret;

	ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
	if (!ret)
		return NULL;

	ret->flags |= EXTENT_BUFFER_DUMMY;

	return ret;
}
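
/*
 * Read 'len' bytes at 'physical' from the device into the buffer's data
 * area at 'offset'. Returns 0 on success, -EIO on a short read.
 */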
int read_extent_from_disk(struct blk_desc *desc, struct disk_partition *part,
			  u64 physical, struct extent_buffer *eb,
			  unsigned long offset, unsigned long len)
{
	int ret;

	ret = __btrfs_devread(desc, part, eb->data + offset, len, physical);
	if (ret < 0)
		goto out;
	if (ret != len) {
		ret = -EIO;
		goto out;
	}
	ret = 0;
out:
	return ret;
}
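
/* Thin wrappers for accessing a buffer's data area at caller-supplied offsets. */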
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
			 unsigned long start, unsigned long len)
{
	return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
			unsigned long start, unsigned long len)
{
	memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
			 unsigned long start, unsigned long len)
{
	memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	return le_test_bit(nr, (u8 *)eb->data + start);
}
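
/*
 * Mark a buffer dirty: set EXTENT_DIRTY both on the buffer and in the
 * extent cache tree, and pin it with an extra reference.
 */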
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (!(eb->flags & EXTENT_DIRTY)) {
		eb->flags |= EXTENT_DIRTY;
		set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		extent_buffer_get(eb);
	}
	return 0;
}
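
/* Undo set_extent_buffer_dirty() and drop the extra reference it took. */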
int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
	struct extent_io_tree *tree = &eb->fs_info->extent_cache;

	if (eb->flags & EXTENT_DIRTY) {
		eb->flags &= ~EXTENT_DIRTY;
		clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
		free_extent_buffer(eb);
	}
	return 0;
}