extent_io.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_H
#define BTRFS_EXTENT_IO_H

#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/fiemap.h>
#include "ulist.h"

/*
 * flags for bio submission. The high bits indicate the compression
 * type for this bio
 */
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16

enum {
        EXTENT_BUFFER_UPTODATE,
        EXTENT_BUFFER_DIRTY,
        EXTENT_BUFFER_CORRUPT,
        /* this got triggered by readahead */
        EXTENT_BUFFER_READAHEAD,
        EXTENT_BUFFER_TREE_REF,
        EXTENT_BUFFER_STALE,
        EXTENT_BUFFER_WRITEBACK,
        /* read IO error */
        EXTENT_BUFFER_READ_ERR,
        EXTENT_BUFFER_UNMAPPED,
        EXTENT_BUFFER_IN_TREE,
        /* write IO error */
        EXTENT_BUFFER_WRITE_ERR,
};

/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK             (1 << 0)
#define PAGE_CLEAR_DIRTY        (1 << 1)
#define PAGE_SET_WRITEBACK      (1 << 2)
#define PAGE_END_WRITEBACK      (1 << 3)
#define PAGE_SET_PRIVATE2       (1 << 4)
#define PAGE_SET_ERROR          (1 << 5)
#define PAGE_LOCK               (1 << 6)
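
/*
 * Illustrative sketch (added, not part of the original header): these flags
 * are OR'ed together into the page_ops argument of helpers such as
 * extent_clear_unlock_delalloc() declared below.  The particular combination
 * and the EXTENT_* bits shown here are assumptions for demonstration only:
 *
 *      extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *                                   EXTENT_LOCKED | EXTENT_DELALLOC,
 *                                   PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *                                   PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
 */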
/*
 * page->private values. Every page that is controlled by the extent
 * map has page->private set to one.
 */
#define EXTENT_PAGE_PRIVATE 1

/*
 * The extent buffer bitmap operations are done with byte granularity instead of
 * word granularity for two reasons:
 * 1. The bitmaps must be little-endian on disk.
 * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
 *    single word in a bitmap may straddle two pages in the extent buffer.
 */
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
        ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
        (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
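
/*
 * Worked example (added for illustration, assuming BITS_PER_BYTE == 8):
 * a bitmap range covering bits [3, 11) starts in byte BIT_BYTE(3) == 0 and
 * ends in byte BIT_BYTE(10) == 1.
 *
 *      BITMAP_FIRST_BYTE_MASK(3) == 0xf8   keeps bits 3..7 of byte 0
 *      BITMAP_LAST_BYTE_MASK(11) == 0x07   keeps bits 0..2 of byte 1
 *                                          (absolute bits 8..10)
 */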
struct btrfs_root;
struct btrfs_inode;
struct btrfs_io_bio;
struct io_failure_record;
struct extent_io_tree;

typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
                                         int mirror_num,
                                         unsigned long bio_flags);

typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
                struct bio *bio, u64 bio_offset);

#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
struct extent_buffer {
        u64 start;
        unsigned long len;
        unsigned long bflags;
        struct btrfs_fs_info *fs_info;
        spinlock_t refs_lock;
        atomic_t refs;
        atomic_t io_pages;
        int read_mirror;
        struct rcu_head rcu_head;
        pid_t lock_owner;

        int blocking_writers;
        atomic_t blocking_readers;
        bool lock_recursed;
        /* >= 0 if eb belongs to a log tree, -1 otherwise */
        short log_index;

        /* protects write locks */
        rwlock_t lock;

        /* readers use write_lock_wq while they wait for the write
         * lock holders to unlock
         */
        wait_queue_head_t write_lock_wq;

        /* writers use read_lock_wq while they wait for readers
         * to unlock
         */
        wait_queue_head_t read_lock_wq;
        struct page *pages[INLINE_EXTENT_BUFFER_PAGES];

#ifdef CONFIG_BTRFS_DEBUG
        int spinning_writers;
        atomic_t spinning_readers;
        atomic_t read_locks;
        int write_locks;
        struct list_head leak_list;
#endif
};

/*
 * Structure to record how many bytes and which ranges are set/cleared
 */
struct extent_changeset {
        /* How many bytes are set/cleared in this operation */
        u64 bytes_changed;

        /* Changed ranges */
        struct ulist range_changed;
};

static inline void extent_changeset_init(struct extent_changeset *changeset)
{
        changeset->bytes_changed = 0;
        ulist_init(&changeset->range_changed);
}

static inline struct extent_changeset *extent_changeset_alloc(void)
{
        struct extent_changeset *ret;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        extent_changeset_init(ret);
        return ret;
}

static inline void extent_changeset_release(struct extent_changeset *changeset)
{
        if (!changeset)
                return;
        changeset->bytes_changed = 0;
        ulist_release(&changeset->range_changed);
}

static inline void extent_changeset_free(struct extent_changeset *changeset)
{
        if (!changeset)
                return;

        extent_changeset_release(changeset);
        kfree(changeset);
}
static inline void extent_set_compress_type(unsigned long *bio_flags,
                                            int compress_type)
{
        *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT;
}

static inline int extent_compress_type(unsigned long bio_flags)
{
        return bio_flags >> EXTENT_BIO_FLAG_SHIFT;
}
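
/*
 * Illustrative use of the bio_flags layout (added sketch): the low bits carry
 * EXTENT_BIO_COMPRESSED, the bits at and above EXTENT_BIO_FLAG_SHIFT carry
 * the compression type.  BTRFS_COMPRESS_ZLIB is assumed to come from the
 * btrfs compression definitions:
 *
 *      unsigned long bio_flags = 0;
 *      int type;
 *
 *      bio_flags |= EXTENT_BIO_COMPRESSED;
 *      extent_set_compress_type(&bio_flags, BTRFS_COMPRESS_ZLIB);
 *      ...
 *      if (bio_flags & EXTENT_BIO_COMPRESSED)
 *              type = extent_compress_type(bio_flags);
 */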
struct extent_map_tree;

typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
                                          struct page *page, size_t pg_offset,
                                          u64 start, u64 len);

int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);

int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                                unsigned long bio_flags);
int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
                      struct bio **bio, unsigned long *bio_flags,
                      unsigned int read_flags, u64 *prev_em_start);
int extent_write_full_page(struct page *page, struct writeback_control *wbc);
int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
                              int mode);
int extent_writepages(struct address_space *mapping,
                      struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
                            struct writeback_control *wbc);
void extent_readahead(struct readahead_control *rac);
int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
                  u64 start, u64 len);
void set_page_extent_mapped(struct page *page);

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                                          u64 start);
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                  u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
void free_extent_buffer(struct extent_buffer *eb);
void free_extent_buffer_stale(struct extent_buffer *eb);

#define WAIT_NONE       0
#define WAIT_COMPLETE   1
#define WAIT_PAGE_LOCK  2
int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
                             int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);

static inline int num_extent_pages(const struct extent_buffer *eb)
{
        return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
               (eb->start >> PAGE_SHIFT);
}
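
/*
 * Worked example (added for illustration, assuming 4K pages): an extent
 * buffer at start 0x4000 with len 16K covers bytes [0x4000, 0x8000), so
 * round_up(0x8000, PAGE_SIZE) >> PAGE_SHIFT == 8 and
 * 0x4000 >> PAGE_SHIFT == 4, giving num_extent_pages() == 4.
 */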
static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
{
        return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
                         unsigned long start, unsigned long len);
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
                        unsigned long start,
                        unsigned long len);
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
                                       void __user *dst, unsigned long start,
                                       unsigned long len);
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
                                         const void *src);
void write_extent_buffer(const struct extent_buffer *eb, const void *src,
                         unsigned long start, unsigned long len);
void copy_extent_buffer_full(const struct extent_buffer *dst,
                             const struct extent_buffer *src);
void copy_extent_buffer(const struct extent_buffer *dst,
                        const struct extent_buffer *src,
                        unsigned long dst_offset, unsigned long src_offset,
                        unsigned long len);
void memcpy_extent_buffer(const struct extent_buffer *dst,
                          unsigned long dst_offset, unsigned long src_offset,
                          unsigned long len);
void memmove_extent_buffer(const struct extent_buffer *dst,
                           unsigned long dst_offset, unsigned long src_offset,
                           unsigned long len);
void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
                           unsigned long len);
int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
                           unsigned long pos);
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
                              unsigned long pos, unsigned long len);
void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
                                unsigned long start, unsigned long pos,
                                unsigned long len);
void clear_extent_buffer_dirty(const struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(const struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
                                  struct page *locked_page,
                                  unsigned bits_to_clear,
                                  unsigned long page_ops);
struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size);

struct btrfs_fs_info;
struct btrfs_inode;

int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                      u64 length, u64 logical, struct page *page,
                      unsigned int pg_offset, int mirror_num);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);

/*
 * When IO fails, either with EIO or a csum verification failure, we
 * try other mirrors that might have a good copy of the data. This
 * io_failure_record is used to record state as we go through all the
 * mirrors. If another mirror has good data, the page is set up to date
 * and things continue. If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
        struct page *page;
        u64 start;
        u64 len;
        u64 logical;
        unsigned long bio_flags;
        int this_mirror;
        int failed_mirror;
        int in_validation;
};

blk_status_t btrfs_submit_read_repair(struct inode *inode,
                                      struct bio *failed_bio, u64 phy_offset,
                                      struct page *page, unsigned int pgoff,
                                      u64 start, u64 end, int failed_mirror,
                                      submit_bio_hook_t *submit_bio_hook);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
                              struct page *locked_page, u64 *start,
                              u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
                                               u64 start);

#ifdef CONFIG_BTRFS_DEBUG
void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info);
#else
#define btrfs_extent_buffer_leak_debug_check(fs_info)  do {} while (0)
#endif

#endif