shrinker.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

#include <linux/android_vendor.h>

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;

	ANDROID_OEM_DATA_ARRAY(1, 3);
};
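
/*
 * Illustrative sketch (not part of this header): one way a scan callback
 * might consult the shrink_control fields above. All "demo_*" names are
 * hypothetical and exist only for this example; <linux/atomic.h>,
 * <linux/gfp.h> and <linux/numa.h> are assumed to be available.
 */
static atomic_long_t demo_nr_cached[MAX_NUMNODES];	/* hypothetical per-node counters */

static unsigned long demo_scan_node(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	unsigned long scanned = 0, freed = 0;

	/* A filesystem-backed cache may be unable to reclaim without __GFP_FS. */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* Only touch objects on the node currently being reclaimed (sc->nid). */
	while (scanned < sc->nr_to_scan &&
	       atomic_long_read(&demo_nr_cached[sc->nid]) > 0) {
		atomic_long_dec(&demo_nr_cached[sc->nid]);
		scanned++;
		freed++;
	}

	/* Report how much work was actually done (defaults to nr_to_scan). */
	sc->nr_scanned = scanned;

	return freed;
}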
#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
 * returned when the number of freeable items cannot be determined or when
 * the shrinker should skip this cache for now (e.g., their number is below
 * the shrinkable limit). No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	long batch;	/* reclaim batch size, 0 = default */
	int seeks;	/* seeks to recreate an obj */
	unsigned flags;

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG
	/* ID in shrinker_idr */
	int id;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
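
/*
 * Illustrative sketch (not part of this header): a minimal count/scan pair
 * for a hypothetical driver-private cache, following the contract described
 * above. All "demo_cache" names are assumptions for this example;
 * <linux/list.h>, <linux/spinlock.h> and <linux/slab.h> are assumed included.
 */
struct demo_obj {
	struct list_head node;
	/* payload would live here */
};

static LIST_HEAD(demo_cache_list);
static DEFINE_SPINLOCK(demo_cache_lock);
static unsigned long demo_cache_nr;

static unsigned long demo_cache_count(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	unsigned long nr = READ_ONCE(demo_cache_nr);

	/* Report SHRINK_EMPTY rather than 0 when there is nothing at all. */
	return nr ? nr : SHRINK_EMPTY;
}

static unsigned long demo_cache_scan(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Never block in reclaim; give up rather than risk a deadlock. */
	if (!spin_trylock(&demo_cache_lock))
		return SHRINK_STOP;

	while (freed < sc->nr_to_scan && !list_empty(&demo_cache_list)) {
		struct demo_obj *obj = list_first_entry(&demo_cache_list,
							struct demo_obj, node);

		list_del(&obj->node);
		kfree(obj);
		demo_cache_nr--;
		freed++;
	}
	spin_unlock(&demo_cache_lock);

	/* Return the number of objects actually freed. */
	return freed;
}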
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)
/*
 * For now this flag only makes sense when the shrinker is also MEMCG_AWARE;
 * a non-MEMCG_AWARE shrinker should not have this flag set.
 */
#define SHRINKER_NONSLAB	(1 << 2)

extern int prealloc_shrinker(struct shrinker *shrinker);
extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
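
/*
 * Illustrative sketch (not part of this header): wiring the demo_cache
 * callbacks sketched above into a struct shrinker and registering it, e.g.
 * from a module init/exit path (<linux/init.h> assumed). The commented-out
 * flags line is only relevant if the cache really keeps per-node or
 * per-memcg state.
 */
static struct shrinker demo_cache_shrinker = {
	.count_objects	= demo_cache_count,
	.scan_objects	= demo_cache_scan,
	.seeks		= DEFAULT_SEEKS,
	.batch		= 0,	/* 0 = use the default reclaim batch size */
	/* .flags	= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, */
};

static int __init demo_cache_init(void)
{
	return register_shrinker(&demo_cache_shrinker);
}

static void __exit demo_cache_exit(void)
{
	unregister_shrinker(&demo_cache_shrinker);
}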
#endif