// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |    64K   |   2M  |    32M   |   1G  |
 * |    16K    |     2M   |  32M  |     1G   |       |
 * |    64K    |     2M   | 512M  |    16G   |       |
 * ---------------------------------------------------
 */

/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
        int order;

#ifdef CONFIG_ARM64_4K_PAGES
        order = PUD_SHIFT - PAGE_SHIFT;
#else
        order = CONT_PMD_SHIFT - PAGE_SHIFT;
#endif
        /*
         * HugeTLB CMA reservation is required for gigantic
         * huge pages which could not be allocated via the
         * page allocator. Just warn if there is any change
         * breaking this assumption.
         */
        WARN_ON(order <= MAX_ORDER);
        hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
        size_t pagesize = huge_page_size(h);

        switch (pagesize) {
#ifdef CONFIG_ARM64_4K_PAGES
        case PUD_SIZE:
#endif
        case PMD_SIZE:
        case CONT_PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }
        pr_warn("%s: unrecognized huge page size 0x%lx\n",
                __func__, pagesize);
        return false;
}
#endif
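
/*
 * pmd_huge()/pud_huge() report whether an entry is a block (section)
 * mapping rather than a pointer to a next-level table: a non-zero
 * descriptor with the table bit clear is treated as huge.
 */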
int pmd_huge(pmd_t pmd)
{
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}

int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
        return 0;
#endif
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}
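
/*
 * Given a pointer into the page tables for a huge mapping, work out
 * whether it refers to a PMD-level or a PTE-level entry and return the
 * number of contiguous entries in the set, writing the size covered by
 * each entry back through @pgsize.
 */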
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, size_t *pgsize)
{
        pgd_t *pgdp = pgd_offset(mm, addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        *pgsize = PAGE_SIZE;
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_offset(p4dp, addr);
        pmdp = pmd_offset(pudp, addr);
        if ((pte_t *)pmdp == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
        }
        return CONT_PTES;
}
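
/*
 * Map a huge page size onto the number of page-table entries that back
 * it and, via @pgsize, the size covered by each entry. Returns 0 for
 * sizes that are not supported huge page sizes.
 */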
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
        int contig_ptes = 0;

        *pgsize = size;

        switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
        case PUD_SIZE:
#endif
        case PMD_SIZE:
                contig_ptes = 1;
                break;
        case CONT_PMD_SIZE:
                *pgsize = PMD_SIZE;
                contig_ptes = CONT_PMDS;
                break;
        case CONT_PTE_SIZE:
                *pgsize = PAGE_SIZE;
                contig_ptes = CONT_PTES;
                break;
        }

        return contig_ptes;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_flush(struct mm_struct *mm,
                             unsigned long addr,
                             pte_t *ptep,
                             unsigned long pgsize,
                             unsigned long ncontig)
{
        pte_t orig_pte = huge_ptep_get(ptep);
        bool valid = pte_valid(orig_pte);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                pte_t pte = ptep_get_and_clear(mm, addr, ptep);

                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
                 * the dirty or accessed bit for any page in the set,
                 * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);

                if (pte_young(pte))
                        orig_pte = pte_mkyoung(orig_pte);
        }

        if (valid) {
                struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
                flush_tlb_range(&vma, saddr, addr);
        }
        return orig_pte;
}

/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
                        unsigned long addr,
                        pte_t *ptep,
                        unsigned long pgsize,
                        unsigned long ncontig)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
        unsigned long i, saddr = addr;

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);

        flush_tlb_range(&vma, saddr, addr);
}
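
/*
 * Install a huge pte. For a contiguous set, break the existing entries
 * (clear and flush) first, then write each entry in the set with the
 * same attributes and an incrementing pfn, so that the Contiguous bit
 * is never misprogrammed.
 */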
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        size_t pgsize;
        int i;
        int ncontig;
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;

        /*
         * Code needs to be expanded to handle huge swap and migration
         * entries. Needed for HUGETLB and MEMORY_FAILURE.
         */
        WARN_ON(!pte_present(pte));

        if (!pte_cont(pte)) {
                set_pte_at(mm, addr, ptep, pte);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        pfn = pte_pfn(pte);
        dpfn = pgsize >> PAGE_SHIFT;
        hugeprot = pte_pgprot(pte);

        clear_flush(mm, addr, ptep, pgsize, ncontig);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
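
/*
 * Set a swap/migration entry across the whole contiguous range. No
 * Break-Before-Make sequence is needed here since the entries being
 * written are not valid, so they cannot be cached in the TLB.
 */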
void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep, pte_t pte, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, ptep++)
                set_pte(ptep, pte);
}
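
/*
 * Allocate the page-table entries needed to map a huge page of the
 * given size at @addr and return a pointer to the (first) entry. A
 * PUD_SIZE mapping uses the pud itself, PMD_SIZE and CONT_PMD_SIZE use
 * pmd-level entries, and CONT_PTE_SIZE allocates a pte page.
 */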
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep = NULL;

        pgdp = pgd_offset(mm, addr);
        p4dp = p4d_offset(pgdp, addr);
        pudp = pud_alloc(mm, p4dp, addr);
        if (!pudp)
                return NULL;

        if (sz == PUD_SIZE) {
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                if (!pmdp)
                        return NULL;

                WARN_ON(addr & (sz - 1));
                /*
                 * Note that if this code were ever ported to the
                 * 32-bit arm platform then it will cause trouble in
                 * the case where CONFIG_HIGHPTE is set, since there
                 * will be no pte_unmap() to correspond with this
                 * pte_alloc_map().
                 */
                ptep = pte_alloc_map(mm, pmdp, addr);
        } else if (sz == PMD_SIZE) {
                if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
                        ptep = huge_pmd_share(mm, vma, addr, pudp);
                else
                        ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
        } else if (sz == (CONT_PMD_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
                WARN_ON(addr & (sz - 1));
                return (pte_t *)pmdp;
        }

        return ptep;
}
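
/*
 * Walk the page tables for an existing huge mapping at @addr and
 * return a pointer to the entry that maps it, or NULL if no entry of
 * the requested size is present. Non-present (swap/migration) entries
 * are returned as well, so callers can inspect them.
 */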
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;

        pgdp = pgd_offset(mm, addr);
        if (!pgd_present(READ_ONCE(*pgdp)))
                return NULL;

        p4dp = p4d_offset(pgdp, addr);
        if (!p4d_present(READ_ONCE(*p4dp)))
                return NULL;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (sz != PUD_SIZE && pud_none(pud))
                return NULL;
        /* hugepage or swap? */
        if (pud_huge(pud) || !pud_present(pud))
                return (pte_t *)pudp;
        /* table; check the next level */

        if (sz == CONT_PMD_SIZE)
                addr &= CONT_PMD_MASK;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
            pmd_none(pmd))
                return NULL;
        if (pmd_huge(pmd) || !pmd_present(pmd))
                return (pte_t *)pmdp;

        if (sz == CONT_PTE_SIZE)
                return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));

        return NULL;
}
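
/*
 * Prepare a huge pte for installation: set the Contiguous bit for the
 * CONT_PTE_SIZE and CONT_PMD_SIZE cases, and warn on any size that is
 * not a supported huge page size.
 */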
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                         struct page *page, int writable)
{
        size_t pagesize = huge_page_size(hstate_vma(vma));

        if (pagesize == CONT_PTE_SIZE) {
                entry = pte_mkcont(entry);
        } else if (pagesize == CONT_PMD_SIZE) {
                entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
        } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
                pr_warn("%s: unrecognized huge page size 0x%lx\n",
                        __func__, pagesize);
        }
        return entry;
}
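
/*
 * Clear every entry backing a huge page of size @sz. No TLB
 * maintenance is done here; callers are expected to handle any
 * flushing required.
 */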
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, unsigned long sz)
{
        int i, ncontig;
        size_t pgsize;

        ncontig = num_contig_ptes(sz, &pgsize);

        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);
}
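
/*
 * Clear a huge pte and return the original value. For a contiguous set
 * this uses the Break-Before-Make helper, so dirty/young bits set by
 * hardware on any entry in the set are preserved in the returned pte.
 */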
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep)
{
        int ncontig;
        size_t pgsize;
        pte_t orig_pte = huge_ptep_get(ptep);

        if (!pte_cont(orig_pte))
                return ptep_get_and_clear(mm, addr, ptep);

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);

        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range, write permission only needs to be
 * checked on the first pte in the set. Then, for all the contiguous
 * ptes, we need to check whether there is a discrepancy in the dirty
 * or young bits.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
        int i;

        if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
                return 1;

        for (i = 0; i < ncontig; i++) {
                pte_t orig_pte = huge_ptep_get(ptep + i);

                if (pte_dirty(pte) != pte_dirty(orig_pte))
                        return 1;

                if (pte_young(pte) != pte_young(orig_pte))
                        return 1;
        }

        return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
        int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
        pte_t orig_pte;

        if (!pte_cont(pte))
                return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        if (!__cont_access_flags_changed(ptep, pte, ncontig))
                return 0;

        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

        /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);

        if (pte_young(orig_pte))
                pte = pte_mkyoung(pte);

        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

        return 1;
}
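
/*
 * Write-protect a huge mapping. For a contiguous set, break the
 * entries first (preserving hardware dirty/young updates), clear the
 * write permission, then rewrite the whole set.
 */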
void huge_ptep_set_wrprotect(struct mm_struct *mm,
                             unsigned long addr, pte_t *ptep)
{
        unsigned long pfn, dpfn;
        pgprot_t hugeprot;
        int ncontig, i;
        size_t pgsize;
        pte_t pte;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_set_wrprotect(mm, addr, ptep);
                return;
        }

        ncontig = find_num_contig(mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;

        pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
        pte = pte_wrprotect(pte);

        hugeprot = pte_pgprot(pte);
        pfn = pte_pfn(pte);

        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
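
/*
 * Clear a huge mapping and flush it from the TLB. A non-contiguous
 * entry takes the generic single-entry path; a contiguous set is
 * cleared and flushed as a whole.
 */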
void huge_ptep_clear_flush(struct vm_area_struct *vma,
                           unsigned long addr, pte_t *ptep)
{
        size_t pgsize;
        int ncontig;

        if (!pte_cont(READ_ONCE(*ptep))) {
                ptep_clear_flush(vma, addr, ptep);
                return;
        }

        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}
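
/*
 * Register the huge page sizes from the support matrix above as
 * hstates at boot. The PUD size is only available with 4K base pages,
 * where a level-1 block mapping covers 1G.
 */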
static int __init hugetlbpage_init(void)
{
#ifdef CONFIG_ARM64_4K_PAGES
        hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
#endif
        hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

        return 0;
}
arch_initcall(hugetlbpage_init);
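
/*
 * Report whether @size is one of the huge page sizes supported by this
 * configuration, e.g. when parsing hugepagesz= on the kernel command
 * line.
 */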
bool __init arch_hugetlb_valid_size(unsigned long size)
{
        switch (size) {
#ifdef CONFIG_ARM64_4K_PAGES
        case PUD_SIZE:
#endif
        case CONT_PMD_SIZE:
        case PMD_SIZE:
        case CONT_PTE_SIZE:
                return true;
        }

        return false;
}