pgtable-generic.c

// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for p4d/pud
 * above: pmd folding is special, and typically the pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
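/*
 * Generic fallback: test and clear the young (accessed) bit, and pay
 * for a TLB flush only when the bit was actually set.
 */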
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
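/*
 * Atomically clear a PTE and flush its TLB entry, returning the old
 * value.  The flush is skipped when the old PTE was not accessible,
 * i.e. could never have been cached in the TLB (see pte_accessible()).
 */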
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
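/*
 * pmd counterpart of ptep_set_access_flags() above: update the access
 * flags of a huge pmd, flushing the mapped range only when the entry
 * actually changed.
 */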
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
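/*
 * pmd counterpart of ptep_clear_flush_young() above: the flush covers
 * the whole huge page range, and again happens only if the bit was set.
 */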
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
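/*
 * Atomically clear a huge pmd and flush the TLB for the range it maps,
 * returning the old entry.  Callers only pass transparent-huge or
 * devmap entries here, which the VM_BUG_ON below asserts.
 */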
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
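/*
 * pud counterpart of pmdp_huge_clear_flush() above, for architectures
 * that support transparent huge pages at the pud level.
 */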
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;
	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
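/*
 * Deposit a preallocated page-table page behind a huge pmd, so that a
 * later split of that pmd can reuse it without having to allocate
 * memory.  The caller must hold the pmd lock (asserted below).
 */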
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
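/*
 * Mark a huge pmd invalid so the hardware stops walking through it,
 * while its contents stay recoverable from the returned old entry.
 * Used when a huge pmd must be modified in several steps.
 */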
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and the hugepage pte formats are the same, so we can
	 * use the same function here.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */