page_ref.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>
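
/*
 * Tracepoints for the _refcount operations defined below. They only
 * fire when CONFIG_DEBUG_PAGE_REF is enabled; otherwise
 * page_ref_tracepoint_active() compiles to false and the __page_ref_*
 * calls are optimized away.
 */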
DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

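/* Return the raw _refcount of this page (no compound_head() lookup). */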
static inline int page_ref_count(struct page *page)
{
        return atomic_read(&page->_refcount);
}

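/*
 * Return the reference count of the compound head; for a tail page this
 * is the count that actually pins the allocation.
 */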
static inline int page_count(struct page *page)
{
        return atomic_read(&compound_head(page)->_refcount);
}

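/*
 * Set the refcount with a plain atomic_set(); the caller must ensure
 * there are no concurrent users of the page.
 */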
static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_refcount, v);
        if (page_ref_tracepoint_active(page_ref_set))
                __page_ref_set(page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
        set_page_count(page, 1);
}

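/*
 * The helpers below adjust _refcount and mirror each change to the
 * corresponding tracepoint when CONFIG_DEBUG_PAGE_REF is enabled.
 */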
static inline void page_ref_add(struct page *page, int nr)
{
        atomic_add(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
        atomic_sub(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -nr);
}

static inline int page_ref_sub_return(struct page *page, int nr)
{
        int ret = atomic_sub_return(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -nr, ret);
        return ret;
}

static inline void page_ref_inc(struct page *page)
{
        atomic_inc(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
        atomic_dec(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -1);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
        int ret = atomic_sub_and_test(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -nr, ret);
        return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
        int ret = atomic_inc_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, 1, ret);
        return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
        int ret = atomic_dec_and_test(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -1, ret);
        return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
        int ret = atomic_dec_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -1, ret);
        return ret;
}

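/*
 * Add @nr to the refcount unless it is @u; returns non-zero if the
 * count was changed. With @u == 0 this is the building block for
 * speculative "take a reference unless the page is being freed" users.
 */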
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
        int ret = atomic_add_unless(&page->_refcount, nr, u);

        if (page_ref_tracepoint_active(page_ref_mod_unless))
                __page_ref_mod_unless(page, nr, ret);
        return ret;
}

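/*
 * Atomically set the refcount to zero iff it currently equals @count;
 * returns 1 on success, 0 otherwise. While frozen, speculative users
 * going through page_ref_add_unless(..., 0) cannot take a new
 * reference.
 */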
static inline int page_ref_freeze(struct page *page, int count)
{
        int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

        if (page_ref_tracepoint_active(page_ref_freeze))
                __page_ref_freeze(page, count, ret);
        return ret;
}

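/*
 * Restore the refcount of a frozen page. atomic_set_release() orders
 * all modifications made while the page was frozen before the count
 * becomes visible as non-zero again.
 */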
static inline void page_ref_unfreeze(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set_release(&page->_refcount, count);
        if (page_ref_tracepoint_active(page_ref_unfreeze))
                __page_ref_unfreeze(page, count);
}

#endif
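
/*
 * Usage sketch (hypothetical helpers, not part of this header): a
 * minimal get/put pattern plus a freeze/unfreeze pattern built on the
 * primitives above, assuming __free_pages() from <linux/gfp.h> as the
 * final-release action.
 */
#if 0	/* illustration only */
static inline void example_get(struct page *page)
{
        page_ref_inc(page);			/* take one reference */
}

static inline void example_put(struct page *page)
{
        /* drop our reference; true means we dropped the last one */
        if (page_ref_dec_and_test(page))
                __free_pages(page, 0);		/* free the order-0 page */
}

static inline bool example_exclusive_update(struct page *page)
{
        /* succeed only if we hold the sole expected reference */
        if (!page_ref_freeze(page, 1))
                return false;
        /* ... modify the page while no new references can be taken ... */
        page_ref_unfreeze(page, 1);		/* make it reachable again */
        return true;
}
#endif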