shrinker.c

// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
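
/*
 * All mounted f2fs instances are linked on f2fs_list (via sbi->s_list)
 * under f2fs_list_lock; shrinker_run_no identifies the current scan pass
 * so that f2fs_shrink_scan() visits each superblock at most once per run.
 */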
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;
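
/* number of clean NAT entries that can be reclaimed without writeback */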
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT];
}
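
/* number of cached free nids beyond the MAX_FREE_NIDS watermark */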
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
	long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS;

	return count > 0 ? count : 0;
}
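
/* zombie extent trees plus all cached extent nodes */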
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return atomic_read(&sbi->total_zombie_tree) +
				atomic_read(&sbi->total_ext_node);
}
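
/*
 * ->count_objects callback: walk every registered superblock and report
 * how many cache objects are reclaimable in total.  The per-sb
 * umount_mutex trylock skips filesystems that are being unmounted.
 *
 * These two callbacks are wired up in fs/f2fs/super.c, roughly:
 *
 *	static struct shrinker f2fs_shrinker_info = {
 *		.scan_objects = f2fs_shrink_scan,
 *		.count_objects = f2fs_shrink_count,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 */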
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
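
/*
 * ->scan_objects callback: free up to sc->nr_to_scan objects.  Each pass
 * draws a non-zero run number; visited superblocks are stamped with it and
 * rotated to the list tail, so the walk terminates once it meets an sbi
 * already seen this run.  Reclaim order: extent cache (given at most half
 * the quota), then clean NAT entries, then free nids.
 */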
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += f2fs_try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
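
/* register this superblock on the shrinker's global list */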
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
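
/*
 * Called on unmount: drop all extent cache entries for this superblock,
 * then unlink it from f2fs_list so the shrinker can no longer find it.
 */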
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del_init(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}