khugepaged.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
#include <linux/shmem_fs.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern int __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
				      unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
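
/*
 * With CONFIG_SHMEM, khugepaged can also collapse a PTE-mapped range of a
 * huge shmem page back into a single PMD mapping; without shmem support
 * the hook compiles away to an empty stub.
 */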
#ifdef CONFIG_SHMEM
extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
#else
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
#endif
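
/*
 * Predicates over transparent_hugepage_flags, mirroring the sysfs THP
 * settings: "always", "madvise", and whether khugepaged may enter direct
 * reclaim/compaction while collapsing pages.
 */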
#define khugepaged_enabled()					       \
	(transparent_hugepage_flags &				       \
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			       \
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define khugepaged_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
#define khugepaged_req_madv()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
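
/*
 * Called at fork(): if the parent mm was registered with khugepaged
 * (MMF_VM_HUGEPAGE set), register the child mm as well.
 */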
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		return __khugepaged_enter(mm);
	return 0;
}
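
/* Called on mm teardown: unregister the mm from khugepaged's scan list. */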
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}
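
/*
 * Called when a VMA is created or its flags change: register the mm with
 * khugepaged the first time a VMA becomes eligible for collapse (THP set
 * to "always", an eligible huge shmem mapping, or "madvise" combined with
 * VM_HUGEPAGE), unless THP is disabled for this VMA or the whole mm.
 */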
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
		if ((khugepaged_always() ||
		     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
		     (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
		    !(vm_flags & VM_NOHUGEPAGE) &&
		    !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
			if (__khugepaged_enter(vma->vm_mm))
				return -ENOMEM;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline int khugepaged_enter(struct vm_area_struct *vma,
				   unsigned long vm_flags)
{
	return 0;
}
static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
					     unsigned long vm_flags)
{
	return 0;
}
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr)
{
}
static inline void khugepaged_min_free_kbytes_update(void)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */