discard.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"

/*
 * This contains the logic to handle async discard.
 *
 * Async discard manages trimming of free space outside of transaction commit.
 * Discarding is done by managing the block_groups on an LRU list based on free
 * space recency. Two passes are used: the first prioritizes discarding extents,
 * and the second trims the bitmaps, which gives free space in bitmaps the best
 * opportunity to coalesce. The block_groups are maintained on multiple lists
 * to allow for multiple passes with different discard filter requirements. A
 * delayed work item is used to manage discarding with a timeout determined by
 * the max of the delay incurred by the iops rate limit, the byte rate limit,
 * and the max delay of BTRFS_DISCARD_MAX_DELAY_MSEC.
 *
 * Note, this only keeps track of block_groups that are explicitly for data.
 * Mixed block_groups are not supported.
 *
 * The first list is special to manage discarding of fully free block groups.
 * This is necessary because we issue a final trim for a fully free block group
 * after forgetting it. When a block group becomes unused, instead of directly
 * being added to the unused_bgs list, we add it to this first list. Then
 * from there, if it becomes fully discarded, we place it onto the unused_bgs
 * list.
 *
 * The in-memory free space cache serves as the backing state for discard.
 * Consequently this means there is no persistence. We opt to load all the
 * block groups in as not discarded, so the mount case degenerates to the
 * crashing case.
 *
 * As the free space cache uses bitmaps, there exists a tradeoff between
 * ease/efficiency for find_free_extent() and the accuracy of discard state.
 * Here we opt to let untrimmed regions merge with everything while only letting
 * trimmed regions merge with other trimmed regions. This can cause
 * overtrimming, but the coalescing benefit seems to be worth it. Additionally,
 * bitmap state is tracked as a whole. If we're able to fully trim a bitmap,
 * the trimmed flag is set on the bitmap. Otherwise, if an allocation comes in,
 * this resets the state and we will retry trimming the whole bitmap. This is a
 * tradeoff between discard state accuracy and the cost of accounting.
 */
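
/*
 * Rough lifecycle of a data block group through async discard (a sketch based
 * on the functions below, not an exhaustive state machine):
 *
 *   free space is returned -> btrfs_discard_queue_work() puts the block group
 *                             on a discard list (the unused list if it has no
 *                             used bytes)
 *   the delayed work fires -> btrfs_discard_workfn() trims a single region and
 *                             reschedules itself
 *   a pass completes       -> btrfs_finish_discard_pass() either advances the
 *                             block group to the next filter list or, if it is
 *                             unused and fully trimmed, marks it unused for
 *                             the unused_bgs path
 */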

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)

/* Target completion latency of discarding all discardable extents */
#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};
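
/*
 * Return the discard list that @block_group currently belongs to, based on
 * its discard_index.
 */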
static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}
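
/*
 * Place @block_group on its discard list; callers in this file hold
 * discard_ctl->lock. A block group that is newly added, or that was sitting
 * on the unused list, gets a fresh eligibility time and has its cursor reset
 * before being moved to the tail of its list.
 */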
static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				  struct btrfs_block_group *block_group)
{
	if (!btrfs_run_discard_work(discard_ctl))
		return;

	if (list_empty(&block_group->discard_list) ||
	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED)
			block_group->discard_index = BTRFS_DISCARD_INDEX_START;
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);
		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	}

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));
}
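
/* Locked wrapper around __add_to_discard_list(); only data block groups are queued. */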
static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	if (!btrfs_is_block_group_data_only(block_group))
		return;

	spin_lock(&discard_ctl->lock);
	__add_to_discard_list(discard_ctl, block_group);
	spin_unlock(&discard_ctl->lock);
}
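
/*
 * Queue a now fully unused block group on the special unused discard list,
 * with the shorter BTRFS_DISCARD_UNUSED_DELAY eligibility delay and a reset
 * cursor.
 */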
static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	list_del_init(&block_group->discard_list);

	block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
	block_group->discard_eligible_time = (ktime_get_ns() +
					      BTRFS_DISCARD_UNUSED_DELAY);
	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	list_add_tail(&block_group->discard_list,
		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);

	spin_unlock(&discard_ctl->lock);
}
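
/*
 * Take @block_group off whichever discard list it is on and clear its
 * eligibility time. Returns true if it was the block group the work item was
 * actively discarding, so the caller knows the running work must be dealt
 * with (see btrfs_discard_cancel_work()).
 */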
static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/**
 * find_next_block_group - find block_group that's up next for discarding
 * @discard_ctl: discard control
 * @now: current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding, checking the discard_eligible_time of each block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}

/**
 * peek_discard_list - wrap find_next_block_group()
 * @discard_ctl: discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 * @now: current time
 *
 * This wraps find_next_block_group() and sets the block_group to be in use.
 * discard_state's control flow is managed here. Variables related to
 * discard_state are reset here as needed (e.g. discard_cursor). @discard_state
 * and @discard_index are remembered as they may change while we're discarding,
 * but we want the discard to execute in the context determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
					enum btrfs_discard_state *discard_state,
					int *discard_index, u64 now)
{
	struct btrfs_block_group *block_group;

	spin_lock(&discard_ctl->lock);
again:
	block_group = find_next_block_group(discard_ctl, now);

	if (block_group && now >= block_group->discard_eligible_time) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
		    block_group->used != 0) {
			if (btrfs_is_block_group_data_only(block_group))
				__add_to_discard_list(discard_ctl, block_group);
			else
				list_del_init(&block_group->discard_list);
			goto again;
		}
		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
			block_group->discard_cursor = block_group->start;
			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
		}
		discard_ctl->block_group = block_group;
	}
	if (block_group) {
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	}

	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/**
 * btrfs_discard_check_filter - updates a block group's filters
 * @block_group: block group of interest
 * @bytes: recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size. Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/**
 * btrfs_update_discard_index - moves a block group along the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index. If it falls off the list, let it be.
 * Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}

/**
 * btrfs_discard_cancel_work - remove a block_group from the discard lists
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This removes @block_group from the discard lists. If necessary, it waits on
 * the current work and then reschedules the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/**
 * btrfs_discard_queue_work - handles queuing the block_groups
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This maintains the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	if (block_group->used == 0)
		add_to_discard_unused_list(discard_ctl, block_group);
	else
		add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}
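
/*
 * Lock-held helper for btrfs_discard_schedule_work(): pick the next block
 * group and arm the delayed work with a delay that is the max of the base
 * delay, the delay implied by the kbps limit and the previous discard, and
 * the time left until the block group becomes eligible.
 */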
static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
					  u64 now, bool override)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_run_discard_work(discard_ctl))
		return;
	if (!override && delayed_work_pending(&discard_ctl->work))
		return;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		unsigned long delay = discard_ctl->delay;
		u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);

		/*
		 * A single delayed workqueue item is responsible for
		 * discarding, so we can manage the bytes rate limit by keeping
		 * track of the previous discard.
		 */
		if (kbps_limit && discard_ctl->prev_discard) {
			u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
			u64 bps_delay = div64_u64(discard_ctl->prev_discard *
						  MSEC_PER_SEC, bps_limit);

			delay = max(delay, msecs_to_jiffies(bps_delay));
		}
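
		/*
		 * For example (illustrative numbers, not defaults): with
		 * kbps_limit = 100000 (100000 KiB/s) and a previous discard of
		 * 64MiB, bps_delay = 67108864 * 1000 / 102400000 ~= 655ms.
		 */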

		/*
		 * This timeout is to hopefully prevent immediate discarding
		 * in a recently allocated block group.
		 */
		if (now < block_group->discard_eligible_time) {
			u64 bg_timeout = block_group->discard_eligible_time - now;

			delay = max(delay, nsecs_to_jiffies(bg_timeout));
		}

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, delay);
	}
}

/**
 * btrfs_discard_schedule_work - responsible for scheduling the discard work
 * @discard_ctl: discard control
 * @override: override the current timer
 *
 * Discards are issued by a delayed workqueue item. @override is used to
 * update the current delay as the baseline delay interval is reevaluated on
 * transaction commit. This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);
	__btrfs_discard_schedule_work(discard_ctl, now, override);
	spin_unlock(&discard_ctl->lock);
}

/**
 * btrfs_finish_discard_pass - determine next step of a block_group
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * This determines the next step for a block group after it's finished going
 * through a pass on a discard list. If it is unused and fully trimmed, we can
 * mark it unused and send it to the unused_bgs path. Otherwise, pass it onto
 * the appropriate filter list or let it fall off.
 */
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
				      struct btrfs_block_group *block_group)
{
	remove_from_discard_list(discard_ctl, block_group);

	if (block_group->used == 0) {
		if (btrfs_is_free_space_trimmed(block_group))
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

/**
 * btrfs_discard_workfn - discard work function
 * @work: work
 *
 * This finds the next block_group to start discarding and then discards a
 * single region. It does this in a two-pass fashion: first extents and second
 * bitmaps. Completely discarded block groups are sent to the unused_bgs path.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;
	u64 now = ktime_get_ns();

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index, now);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;
	if (now < block_group->discard_eligible_time) {
		btrfs_discard_schedule_work(discard_ctl, false);
		return;
	}

	/* Perform discarding */
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter. In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
					       block_group->discard_cursor,
					       btrfs_block_group_end(block_group),
					       minlen, true);
		discard_ctl->discard_extent_bytes += trimmed;
	}

	discard_ctl->prev_discard = trimmed;

	/* Determine next steps for a block_group */
	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
		if (discard_state == BTRFS_DISCARD_BITMAPS) {
			btrfs_finish_discard_pass(discard_ctl, block_group);
		} else {
			block_group->discard_cursor = block_group->start;
			spin_lock(&discard_ctl->lock);
			if (block_group->discard_state !=
			    BTRFS_DISCARD_RESET_CURSOR)
				block_group->discard_state =
							BTRFS_DISCARD_BITMAPS;
			spin_unlock(&discard_ctl->lock);
		}
	}

	spin_lock(&discard_ctl->lock);
	discard_ctl->block_group = NULL;
	__btrfs_discard_schedule_work(discard_ctl, now, false);
	spin_unlock(&discard_ctl->lock);
}

/**
 * btrfs_run_discard_work - determines if async discard should be running
 * @discard_ctl: discard control
 *
 * Checks if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

/**
 * btrfs_discard_calc_delay - recalculate the base delay
 * @discard_ctl: discard control
 *
 * Recalculate the base delay, which is based on the total number of
 * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms)
 * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
 */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
{
	s32 discardable_extents;
	s64 discardable_bytes;
	u32 iops_limit;
	unsigned long delay;
	unsigned long lower_limit = BTRFS_DISCARD_MIN_DELAY_MSEC;

	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
	if (!discardable_extents)
		return;

	spin_lock(&discard_ctl->lock);

	/*
	 * The following is to fix a potential -1 discrepancy that we're not
	 * sure how to reproduce. But given that this is the only place that
	 * utilizes these numbers and this is only called from
	 * btrfs_finish_extent_commit() which is synchronized, we can correct
	 * here.
	 */
	if (discardable_extents < 0)
		atomic_add(-discardable_extents,
			   &discard_ctl->discardable_extents);

	discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes);
	if (discardable_bytes < 0)
		atomic64_add(-discardable_bytes,
			     &discard_ctl->discardable_bytes);

	if (discardable_extents <= 0) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	iops_limit = READ_ONCE(discard_ctl->iops_limit);
	if (iops_limit)
		lower_limit = max_t(unsigned long, lower_limit,
				    MSEC_PER_SEC / iops_limit);

	delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
	delay = clamp(delay, lower_limit, BTRFS_DISCARD_MAX_DELAY_MSEC);
	discard_ctl->delay = msecs_to_jiffies(delay);

	spin_unlock(&discard_ctl->lock);
}
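
/*
 * Worked example for btrfs_discard_calc_delay() above (illustrative numbers):
 * with the default iops_limit of 10, lower_limit = MSEC_PER_SEC / 10 = 100ms.
 * With 100000 discardable extents, the raw delay is
 * BTRFS_DISCARD_TARGET_MSEC / 100000 = 21600000 / 100000 = 216ms, which falls
 * inside the [100ms, 1000ms] clamp, so one region is discarded roughly every
 * 216ms.
 */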

/**
 * btrfs_discard_update_discardable - propagate discard counters
 * @block_group: block_group of interest
 * @ctl: free_space_ctl of @block_group
 *
 * This propagates deltas of counters up to the discard_ctl. It maintains a
 * current counter and a previous counter passing the delta up to the global
 * stat. Then the current counter value becomes the previous counter value.
 */
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
				      struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_discard_ctl *discard_ctl;
	s32 extents_delta;
	s64 bytes_delta;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
	    !btrfs_is_block_group_data_only(block_group))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
			ctl->discardable_extents[BTRFS_STAT_PREV];
	if (extents_delta) {
		atomic_add(extents_delta, &discard_ctl->discardable_extents);
		ctl->discardable_extents[BTRFS_STAT_PREV] =
			ctl->discardable_extents[BTRFS_STAT_CURR];
	}

	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
		      ctl->discardable_bytes[BTRFS_STAT_PREV];
	if (bytes_delta) {
		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
		ctl->discardable_bytes[BTRFS_STAT_PREV] =
			ctl->discardable_bytes[BTRFS_STAT_CURR];
	}
}

/**
 * btrfs_discard_punt_unused_bgs_list - punt unused_bgs list to discard lists
 * @fs_info: fs_info of interest
 *
 * The unused_bgs list needs to be punted to the discard lists because the
 * order of operations is changed. In the normal synchronous discard path, the
 * block groups are trimmed via a single large trim in transaction commit. This
 * is ultimately what we are trying to avoid with asynchronous discard. Thus,
 * it must be done before going down the unused_bgs path.
 */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group, *next;

	spin_lock(&fs_info->unused_bgs_lock);

	/* We enabled async discard, so punt all to the queue */
	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
				 bg_list) {
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}

	spin_unlock(&fs_info->unused_bgs_lock);
}

/**
 * btrfs_discard_purge_list - purge discard lists
 * @discard_ctl: discard control
 *
 * If we are disabling async discard, we may have intercepted block groups that
 * are completely free and ready for the unused_bgs path. As discarding will
 * now happen in transaction commit or not at all, we can safely mark the
 * corresponding block groups as unused and they will be sent on their merry
 * way to the unused_bgs list.
 */
static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group, *next;
	int i;

	spin_lock(&discard_ctl->lock);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		list_for_each_entry_safe(block_group, next,
					 &discard_ctl->discard_list[i],
					 discard_list) {
			list_del_init(&block_group->discard_list);
			spin_unlock(&discard_ctl->lock);
			if (block_group->used == 0)
				btrfs_mark_bg_unused(block_group);
			spin_lock(&discard_ctl->lock);
		}
	}

	spin_unlock(&discard_ctl->lock);
}
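
/*
 * If async discard is not enabled on this mount, tear down any existing
 * discard state; otherwise punt the unused_bgs list onto the discard lists
 * and allow the discard work to run.
 */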
void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	btrfs_discard_punt_unused_bgs_list(fs_info);

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}
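
/*
 * Set up the discard control structure at mount time: the lock, the delayed
 * work item, the per-index list heads, and the default limits and counters.
 */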
void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);

	discard_ctl->prev_discard = 0;
	atomic_set(&discard_ctl->discardable_extents, 0);
	atomic64_set(&discard_ctl->discardable_bytes, 0);
	discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
	discard_ctl->delay = BTRFS_DISCARD_MAX_DELAY_MSEC;
	discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
	discard_ctl->kbps_limit = 0;
	discard_ctl->discard_extent_bytes = 0;
	discard_ctl->discard_bitmap_bytes = 0;
	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
}

void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
	btrfs_discard_purge_list(&fs_info->discard_ctl);
}