ptdump.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/pagewalk.h>
#include <linux/ptdump.h>
#include <linux/kasan.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline int note_kasan_page_table(struct mm_walk *walk,
					unsigned long addr)
{
	struct ptdump_state *st = walk->private;

	st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0]));

	walk->action = ACTION_CONTINUE;

	return 0;
}
#endif
/*
 * Entry callbacks for each table level. The level argument passed to
 * note_page()/effective_prot() runs from 0 (PGD) to 4 (PTE).
 */
static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pgd_t val = READ_ONCE(*pgd);

#if CONFIG_PGTABLE_LEVELS > 4 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pgd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_p4d)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 0, pgd_val(val));

	if (pgd_leaf(val))
		st->note_page(st, addr, 0, pgd_val(val));

	return 0;
}
static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	p4d_t val = READ_ONCE(*p4d);

#if CONFIG_PGTABLE_LEVELS > 3 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (p4d_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pud)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 1, p4d_val(val));

	if (p4d_leaf(val))
		st->note_page(st, addr, 1, p4d_val(val));

	return 0;
}
static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pud_t val = READ_ONCE(*pud);

#if CONFIG_PGTABLE_LEVELS > 2 && \
		(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
	if (pud_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pmd)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 2, pud_val(val));

	if (pud_leaf(val))
		st->note_page(st, addr, 2, pud_val(val));

	return 0;
}
static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pmd_t val = READ_ONCE(*pmd);

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (pmd_page(val) == virt_to_page(lm_alias(kasan_early_shadow_pte)))
		return note_kasan_page_table(walk, addr);
#endif

	if (st->effective_prot)
		st->effective_prot(st, 3, pmd_val(val));

	if (pmd_leaf(val))
		st->note_page(st, addr, 3, pmd_val(val));

	return 0;
}
static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;
	pte_t val = ptep_get(pte);

	if (st->effective_prot)
		st->effective_prot(st, 4, pte_val(val));

	st->note_page(st, addr, 4, pte_val(val));

	return 0;
}
static int ptdump_hole(unsigned long addr, unsigned long next,
		       int depth, struct mm_walk *walk)
{
	struct ptdump_state *st = walk->private;

	/* Holes are reported with a zero val at the depth the walk reached. */
	st->note_page(st, addr, depth, 0);

	return 0;
}
static const struct mm_walk_ops ptdump_ops = {
	.pgd_entry	= ptdump_pgd_entry,
	.p4d_entry	= ptdump_p4d_entry,
	.pud_entry	= ptdump_pud_entry,
	.pmd_entry	= ptdump_pmd_entry,
	.pte_entry	= ptdump_pte_entry,
	.pte_hole	= ptdump_hole,
};
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
	const struct ptdump_range *range = st->range;

	mmap_read_lock(mm);
	/* st->range is terminated by a zero-length {start == end} entry. */
	while (range->start != range->end) {
		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
		range++;
	}
	mmap_read_unlock(mm);

	/* Flush out the last page */
	st->note_page(st, 0, -1, 0);
}
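
/*
 * Hypothetical usage sketch, appended for illustration only; it is not
 * part of the kernel file. A real caller (e.g. arch ptdump code) fills in
 * a ptdump_state with a note_page() callback and the ranges to walk, then
 * hands it to ptdump_walk_pgd(). The example_* names below are assumptions
 * for this sketch, and the note_page() signature is assumed to match this
 * tree's include/linux/ptdump.h (u64 val, int level).
 */
#if 0	/* illustrative, never compiled */
static void example_note_page(struct ptdump_state *st, unsigned long addr,
			      int level, u64 val)
{
	/* Decode 'val' and report one mapping; level -1 is the final flush. */
}

static const struct ptdump_range example_ranges[] = {
	{PAGE_OFFSET, ~0UL},	/* portion of the kernel map to dump */
	{0, 0}			/* start == end terminates the walk loop */
};

static void example_walk(void)
{
	struct ptdump_state st = {
		.note_page	= example_note_page,
		.range		= example_ranges,
	};

	ptdump_walk_pgd(&st, &init_mm, NULL);	/* NULL pgd: walk mm's pgd */
}
#endif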