/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
#define BTRFS_READ_LOCK_BLOCKING 4

/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use.  Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit.  As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");
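
/*
 * Example (editor's illustrative sketch, not part of the original header):
 * when COWing a tree block the caller already holds the lock on the source
 * buffer, so the freshly allocated copy is locked with a distinct lockdep
 * subclass.  The buffer names "buf" and "cow" are hypothetical:
 *
 *	btrfs_tree_lock(buf);			(uses BTRFS_NESTING_NORMAL)
 *	...allocate the replacement block "cow"...
 *	__btrfs_tree_lock(cow, BTRFS_NESTING_COW);
 *
 * Without BTRFS_NESTING_COW, lockdep would see two locks of the same
 * rootid+level class taken recursively and report a false deadlock.
 */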

struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
			    bool recurse);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
						  bool recurse);

static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	return __btrfs_read_lock_root_node(root, false);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}
#else
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
#endif

void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK_BLOCKING)
		btrfs_tree_read_unlock_blocking(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}
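
/*
 * Example (editor's illustrative sketch): callers that may hold either lock
 * type, such as the per-level btrfs_path locks, record which variant they
 * took and hand that token back to btrfs_tree_unlock_rw():
 *
 *	int rw = BTRFS_READ_LOCK;
 *
 *	btrfs_tree_read_lock(eb);
 *	...work that never changes the lock type...
 *	btrfs_tree_unlock_rw(eb, rw);
 *
 * Any value other than the four BTRFS_*LOCK* constants hits the BUG()
 * branch above.
 */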

struct btrfs_drew_lock {
	atomic_t readers;
	struct percpu_counter writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
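
/*
 * Example (editor's illustrative sketch): a drew lock admits any number of
 * concurrent holders of one type while excluding the other type entirely.
 * A writer-side critical section could look like:
 *
 *	struct btrfs_drew_lock lock;
 *	int ret;
 *
 *	ret = btrfs_drew_lock_init(&lock);	(initialization can fail)
 *	if (ret)
 *		return ret;
 *	btrfs_drew_write_lock(&lock);
 *	...readers are excluded here, other writers are not...
 *	btrfs_drew_write_unlock(&lock);
 *	btrfs_drew_lock_destroy(&lock);
 *
 * btrfs_drew_try_write_lock() is the non-blocking variant; it returns false
 * when readers currently hold the lock.
 */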

#endif