ordered-data.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
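
/*
 * Return the first byte past the end of the ordered extent, saturating at
 * (u64)-1 if file_offset + num_bytes would overflow.
 */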
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
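
/*
 * helper to check if a given range [file_offset, file_offset + len) overlaps
 * any part of the given entry
 */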
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
				"inconsistency in ordered tree at offset %llu",
				file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}
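
/*
 * Wrapper around __btrfs_add_ordered_extent() for plain, uncompressed
 * buffered writes.
 */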
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}
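
/*
 * Wrapper around __btrfs_add_ordered_extent() for direct IO writes; the new
 * entry gets the BTRFS_ORDERED_DIRECT flag and is accounted in dio_bytes.
 */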
int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}
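
/*
 * Wrapper around __btrfs_add_ordered_extent() for compressed writes, where
 * the caller supplies the compression type to record in the ordered extent.
 */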
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO may span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete. This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file. The IO should not span ordered extents. If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}
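
/*
 * Work function used by btrfs_wait_ordered_extents(): wait for one ordered
 * extent to complete and then signal the waiter.
 */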
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}
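
/*
 * Wait for ordered extents in all roots, limited to at most @nr extents and
 * to extents whose disk byte range overlaps [range_start, range_start +
 * range_len).
 */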
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback. We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset. return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'. NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum. This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
			   u64 disk_bytenr, u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u8 blocksize_bits = inode->vfs_inode.i_sb->s_blocksize_bits;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >> blocksize_bits;
			num_sectors = ordered_sum->len >> blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);
			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}
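
/* Create the slab cache used for struct btrfs_ordered_extent allocations. */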
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}