// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
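
/*
 * Each CPU collects up to TLB_BATCH_NR virtual addresses belonging to
 * a single mm in the per-cpu batch above; flush_tlb_pending() then
 * knocks them all out of the TLB in one (cross-)call instead of
 * paying for a flush per PTE update.  get_cpu_var()/put_cpu_var()
 * pin us to the CPU (preemption disabled) while the batch is touched.
 */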

void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
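
/*
 * Lazy MMU mode brackets a run of batched PTE updates.  While
 * tb->active is set, tlb_batch_add_one() queues addresses instead of
 * flushing them one at a time; leaving the mode drains whatever is
 * still queued.  An illustrative (not verbatim) caller sequence:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		...clear or update the PTE, then tlb_batch_add()...;
 *	arch_leave_lazy_mmu_mode();
 */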

void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}
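
/*
 * Queue one virtual address for a deferred flush.  The batch is
 * drained early if the mm or the page size (hugepage_shift) changes,
 * or once it fills up to TLB_BATCH_NR entries; if batching is not
 * active the flush happens immediately.  The low bit of the queued
 * address records that the mapping was executable, so the flush side
 * knows to demap the I-TLB as well.
 */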

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}
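
/*
 * On pre-hypervisor chips the D-cache is virtually indexed, so a
 * dirty page mapped at a user address whose cache index differs from
 * its kernel alias must be flushed from the D-cache before the
 * translation goes away.  With 8KB base pages (PAGE_SHIFT == 13),
 * bit 13 is the index bit on which the two mappings can disagree,
 * hence the (1 << 13) test below.
 */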

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
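
/*
 * A PMD that pointed to a regular page table is being overwritten:
 * walk the old table and queue a page-sized flush for every valid
 * PTE underneath it.
 */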

static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
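
/*
 * Accounting and TLB maintenance for a PMD update.  Note that on
 * sparc64 an HPAGE_SIZE (8MB) huge page is backed by two
 * REAL_HPAGE_SIZE (4MB) TTEs, which is why the trans-huge case below
 * queues two flushes, one for each 4MB half.
 */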

static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}
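
/*
 * Atomically install a new pmd and return the old one.  The cmpxchg
 * retry loop ensures no intermediate pmd value is ever visible to a
 * concurrent TLB miss; the caller is responsible for whatever TLB
 * flush the change requires.
 */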

static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}
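
/*
 * Deposit/withdraw manage the page table preallocated for a huge PMD.
 * The FIFO is threaded through the pgtable pages themselves: the
 * first two PTE slots of each deposited table double as a list_head,
 * which is why withdraw clears pgtable[0] and pgtable[1] before
 * handing the table back for reuse.
 */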

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */