debug_vm_pgtable.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * This kernel test validates architecture page table helpers and
  4. * accessors, and helps verify that they remain compliant with the
  5. * expected generic MM semantics.
  6. *
  7. * Copyright (C) 2019 ARM Ltd.
  8. *
  9. * Author: Anshuman Khandual <anshuman.khandual@arm.com>
  10. */
  11. #define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
  12. #include <linux/gfp.h>
  13. #include <linux/highmem.h>
  14. #include <linux/hugetlb.h>
  15. #include <linux/kernel.h>
  16. #include <linux/kconfig.h>
  17. #include <linux/mm.h>
  18. #include <linux/mman.h>
  19. #include <linux/mm_types.h>
  20. #include <linux/module.h>
  21. #include <linux/pfn_t.h>
  22. #include <linux/printk.h>
  23. #include <linux/pgtable.h>
  24. #include <linux/random.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/swap.h>
  27. #include <linux/swapops.h>
  28. #include <linux/start_kernel.h>
  29. #include <linux/sched/mm.h>
  30. #include <linux/io.h>
  31. #include <asm/pgalloc.h>
  32. #include <asm/tlbflush.h>
  33. /*
  34. * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantic
  35. * expectations that are being validated here. All future changes here or in
  36. * the documentation need to be kept in sync.
  37. */
  38. #define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)
  39. /*
  40. * On the s390 platform, the lower 4 bits are used to identify a given page
  41. * table entry's type. These bits can affect the ability to clear entries with
  42. * pxx_clear() because of how dynamic page table folding works on s390. So,
  43. * while loading up the entries, do not change the lower 4 bits. This does not
  44. * affect any other platform. Also avoid the 62nd bit on ppc64, which is used
  45. * to mark a pte entry.
  46. */
  47. #define S390_SKIP_MASK GENMASK(3, 0)
  48. #if __BITS_PER_LONG == 64
  49. #define PPC64_SKIP_MASK GENMASK(62, 62)
  50. #else
  51. #define PPC64_SKIP_MASK 0x0
  52. #endif
  53. #define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
  54. #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
  55. #define RANDOM_NZVALUE GENMASK(7, 0)
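/*
 * RANDOM_ORVALUE gets ORed into existing entries ahead of the pxx_clear()
 * tests, while RANDOM_NZVALUE seeds the on-stack p4d/pgd values used by the
 * pxx_same() checks further below.
 */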
  56. static void __init pte_basic_tests(unsigned long pfn, int idx)
  57. {
  58. pgprot_t prot = protection_map[idx];
  59. pte_t pte = pfn_pte(pfn, prot);
  60. unsigned long val = idx, *ptr = &val;
  61. pr_debug("Validating PTE basic (%pGv)\n", ptr);
  62. /*
  63. * This test needs to be executed after the given page table entry
  64. * is created with pfn_pte() to make sure that protection_map[idx]
  65. * does not have the dirty bit enabled from the beginning. This is
  66. * important for platforms like arm64 where (!PTE_RDONLY) indicates
  67. * that the dirty bit is set.
  68. */
  69. WARN_ON(pte_dirty(pte_wrprotect(pte)));
  70. WARN_ON(!pte_same(pte, pte));
  71. WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
  72. WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
  73. WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
  74. WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
  75. WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
  76. WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
  77. WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
  78. WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
  79. }
  80. static void __init pte_advanced_tests(struct mm_struct *mm,
  81. struct vm_area_struct *vma, pte_t *ptep,
  82. unsigned long pfn, unsigned long vaddr,
  83. pgprot_t prot)
  84. {
  85. pte_t pte = pfn_pte(pfn, prot);
  86. /*
  87. * Architectures optimize set_pte_at() by avoiding a TLB flush.
  88. * This requires that set_pte_at() not be used to update an
  89. * existing pte entry. Clear the pte before calling set_pte_at().
  90. */
  91. pr_debug("Validating PTE advanced\n");
  92. pte = pfn_pte(pfn, prot);
  93. set_pte_at(mm, vaddr, ptep, pte);
  94. ptep_set_wrprotect(mm, vaddr, ptep);
  95. pte = ptep_get(ptep);
  96. WARN_ON(pte_write(pte));
  97. ptep_get_and_clear(mm, vaddr, ptep);
  98. pte = ptep_get(ptep);
  99. WARN_ON(!pte_none(pte));
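/* Check that ptep_set_access_flags() can make an existing entry writable and dirty */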
  100. pte = pfn_pte(pfn, prot);
  101. pte = pte_wrprotect(pte);
  102. pte = pte_mkclean(pte);
  103. set_pte_at(mm, vaddr, ptep, pte);
  104. pte = pte_mkwrite(pte);
  105. pte = pte_mkdirty(pte);
  106. ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
  107. pte = ptep_get(ptep);
  108. WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
  109. ptep_get_and_clear_full(mm, vaddr, ptep, 1);
  110. pte = ptep_get(ptep);
  111. WARN_ON(!pte_none(pte));
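/* Check that ptep_test_and_clear_young() clears the accessed (young) bit */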
  112. pte = pfn_pte(pfn, prot);
  113. pte = pte_mkyoung(pte);
  114. set_pte_at(mm, vaddr, ptep, pte);
  115. ptep_test_and_clear_young(vma, vaddr, ptep);
  116. pte = ptep_get(ptep);
  117. WARN_ON(pte_young(pte));
  118. ptep_get_and_clear_full(mm, vaddr, ptep, 1);
  119. }
  120. static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot)
  121. {
  122. pte_t pte = pfn_pte(pfn, prot);
  123. if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
  124. return;
  125. pr_debug("Validating PTE saved write\n");
  126. WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
  127. WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
  128. }
  129. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  130. static void __init pmd_basic_tests(unsigned long pfn, int idx)
  131. {
  132. pgprot_t prot = protection_map[idx];
  133. unsigned long val = idx, *ptr = &val;
  134. pmd_t pmd;
  135. if (!has_transparent_hugepage())
  136. return;
  137. pr_debug("Validating PMD basic (%pGv)\n", ptr);
  138. pmd = pfn_pmd(pfn, prot);
  139. /*
  140. * This test needs to be executed after the given page table entry
  141. * is created with pfn_pmd() to make sure that protection_map[idx]
  142. * does not have the dirty bit enabled from the beginning. This is
  143. * important for platforms like arm64 where (!PTE_RDONLY) indicates
  144. * that the dirty bit is set.
  145. */
  146. WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));
  147. WARN_ON(!pmd_same(pmd, pmd));
  148. WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
  149. WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
  150. WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
  151. WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
  152. WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
  153. WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
  154. WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
  155. WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
  156. /*
  157. * A huge page does not point to the next level page table
  158. * entry. Hence this must qualify as pmd_bad().
  159. */
  160. WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
  161. }
  162. static void __init pmd_advanced_tests(struct mm_struct *mm,
  163. struct vm_area_struct *vma, pmd_t *pmdp,
  164. unsigned long pfn, unsigned long vaddr,
  165. pgprot_t prot, pgtable_t pgtable)
  166. {
  167. pmd_t pmd;
  168. if (!has_transparent_hugepage())
  169. return;
  170. pr_debug("Validating PMD advanced\n");
  171. /* Align the address wrt HPAGE_PMD_SIZE */
  172. vaddr &= HPAGE_PMD_MASK;
  173. pgtable_trans_huge_deposit(mm, pmdp, pgtable);
  174. pmd = pfn_pmd(pfn, prot);
  175. set_pmd_at(mm, vaddr, pmdp, pmd);
  176. pmdp_set_wrprotect(mm, vaddr, pmdp);
  177. pmd = READ_ONCE(*pmdp);
  178. WARN_ON(pmd_write(pmd));
  179. pmdp_huge_get_and_clear(mm, vaddr, pmdp);
  180. pmd = READ_ONCE(*pmdp);
  181. WARN_ON(!pmd_none(pmd));
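/* Check that pmdp_set_access_flags() can make an existing entry writable and dirty */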
  182. pmd = pfn_pmd(pfn, prot);
  183. pmd = pmd_wrprotect(pmd);
  184. pmd = pmd_mkclean(pmd);
  185. set_pmd_at(mm, vaddr, pmdp, pmd);
  186. pmd = pmd_mkwrite(pmd);
  187. pmd = pmd_mkdirty(pmd);
  188. pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
  189. pmd = READ_ONCE(*pmdp);
  190. WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
  191. pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
  192. pmd = READ_ONCE(*pmdp);
  193. WARN_ON(!pmd_none(pmd));
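/* Check that pmdp_test_and_clear_young() clears the accessed (young) bit */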
  194. pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
  195. pmd = pmd_mkyoung(pmd);
  196. set_pmd_at(mm, vaddr, pmdp, pmd);
  197. pmdp_test_and_clear_young(vma, vaddr, pmdp);
  198. pmd = READ_ONCE(*pmdp);
  199. WARN_ON(pmd_young(pmd));
  200. /* Clear the pmd entry and withdraw the deposited page table */
  201. pmdp_huge_get_and_clear(mm, vaddr, pmdp);
  202. pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
  203. }
  204. static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot)
  205. {
  206. pmd_t pmd;
  207. if (!has_transparent_hugepage())
  208. return;
  209. pr_debug("Validating PMD leaf\n");
  210. pmd = pfn_pmd(pfn, prot);
  211. /*
  212. * PMD based THP is a leaf entry.
  213. */
  214. pmd = pmd_mkhuge(pmd);
  215. WARN_ON(!pmd_leaf(pmd));
  216. }
  217. #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
  218. static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
  219. {
  220. pmd_t pmd;
  221. if (!arch_ioremap_pmd_supported())
  222. return;
  223. pr_debug("Validating PMD huge\n");
  224. /*
  225. * The x86 implementation of pmd_set_huge() verifies that the
  226. * given PMD is not a populated non-leaf entry.
  227. */
  228. WRITE_ONCE(*pmdp, __pmd(0));
  229. WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
  230. WARN_ON(!pmd_clear_huge(pmdp));
  231. pmd = READ_ONCE(*pmdp);
  232. WARN_ON(!pmd_none(pmd));
  233. }
  234. #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
  235. static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
  236. #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
  237. static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot)
  238. {
  239. pmd_t pmd;
  240. if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
  241. return;
  242. if (!has_transparent_hugepage())
  243. return;
  244. pr_debug("Validating PMD saved write\n");
  245. pmd = pfn_pmd(pfn, prot);
  246. WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
  247. WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
  248. }
  249. #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
  250. static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx)
  251. {
  252. pgprot_t prot = protection_map[idx];
  253. unsigned long val = idx, *ptr = &val;
  254. pud_t pud;
  255. if (!has_transparent_hugepage())
  256. return;
  257. pr_debug("Validating PUD basic (%pGv)\n", ptr);
  258. pud = pfn_pud(pfn, prot);
  259. /*
  260. * This test needs to be executed after the given page table entry
  261. * is created with pfn_pud() to make sure that protection_map[idx]
  262. * does not have the dirty bit enabled from the beginning. This is
  263. * important for platforms like arm64 where (!PTE_RDONLY) indicates
  264. * that the dirty bit is set.
  265. */
  266. WARN_ON(pud_dirty(pud_wrprotect(pud)));
  267. WARN_ON(!pud_same(pud, pud));
  268. WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
  269. WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
  270. WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
  271. WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
  272. WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
  273. WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
  274. WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
  275. WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));
  276. if (mm_pmd_folded(mm))
  277. return;
  278. /*
  279. * A huge page does not point to the next level page table
  280. * entry. Hence this must qualify as pud_bad().
  281. */
  282. WARN_ON(!pud_bad(pud_mkhuge(pud)));
  283. }
  284. static void __init pud_advanced_tests(struct mm_struct *mm,
  285. struct vm_area_struct *vma, pud_t *pudp,
  286. unsigned long pfn, unsigned long vaddr,
  287. pgprot_t prot)
  288. {
  289. pud_t pud;
  290. if (!has_transparent_hugepage())
  291. return;
  292. pr_debug("Validating PUD advanced\n");
  293. /* Align the address wrt HPAGE_PUD_SIZE */
  294. vaddr &= HPAGE_PUD_MASK;
  295. pud = pfn_pud(pfn, prot);
  296. set_pud_at(mm, vaddr, pudp, pud);
  297. pudp_set_wrprotect(mm, vaddr, pudp);
  298. pud = READ_ONCE(*pudp);
  299. WARN_ON(pud_write(pud));
  300. #ifndef __PAGETABLE_PMD_FOLDED
  301. pudp_huge_get_and_clear(mm, vaddr, pudp);
  302. pud = READ_ONCE(*pudp);
  303. WARN_ON(!pud_none(pud));
  304. #endif /* __PAGETABLE_PMD_FOLDED */
  305. pud = pfn_pud(pfn, prot);
  306. pud = pud_wrprotect(pud);
  307. pud = pud_mkclean(pud);
  308. set_pud_at(mm, vaddr, pudp, pud);
  309. pud = pud_mkwrite(pud);
  310. pud = pud_mkdirty(pud);
  311. pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
  312. pud = READ_ONCE(*pudp);
  313. WARN_ON(!(pud_write(pud) && pud_dirty(pud)));
  314. #ifndef __PAGETABLE_PMD_FOLDED
  315. pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
  316. pud = READ_ONCE(*pudp);
  317. WARN_ON(!pud_none(pud));
  318. #endif /* __PAGETABLE_PMD_FOLDED */
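/* Check that pudp_test_and_clear_young() clears the accessed (young) bit */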
  319. pud = pfn_pud(pfn, prot);
  320. pud = pud_mkyoung(pud);
  321. set_pud_at(mm, vaddr, pudp, pud);
  322. pudp_test_and_clear_young(vma, vaddr, pudp);
  323. pud = READ_ONCE(*pudp);
  324. WARN_ON(pud_young(pud));
  325. pudp_huge_get_and_clear(mm, vaddr, pudp);
  326. }
  327. static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot)
  328. {
  329. pud_t pud;
  330. if (!has_transparent_hugepage())
  331. return;
  332. pr_debug("Validating PUD leaf\n");
  333. pud = pfn_pud(pfn, prot);
  334. /*
  335. * PUD based THP is a leaf entry.
  336. */
  337. pud = pud_mkhuge(pud);
  338. WARN_ON(!pud_leaf(pud));
  339. }
  340. #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
  341. static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
  342. {
  343. pud_t pud;
  344. if (!arch_ioremap_pud_supported())
  345. return;
  346. pr_debug("Validating PUD huge\n");
  347. /*
  348. * The x86 implementation of pud_set_huge() verifies that the
  349. * given PUD is not a populated non-leaf entry.
  350. */
  351. WRITE_ONCE(*pudp, __pud(0));
  352. WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
  353. WARN_ON(!pud_clear_huge(pudp));
  354. pud = READ_ONCE(*pudp);
  355. WARN_ON(!pud_none(pud));
  356. }
  357. #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
  358. static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
  359. #endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
  360. #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  361. static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
  362. static void __init pud_advanced_tests(struct mm_struct *mm,
  363. struct vm_area_struct *vma, pud_t *pudp,
  364. unsigned long pfn, unsigned long vaddr,
  365. pgprot_t prot)
  366. {
  367. }
  368. static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
  369. static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
  370. {
  371. }
  372. #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  373. #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  374. static void __init pmd_basic_tests(unsigned long pfn, int idx) { }
  375. static void __init pud_basic_tests(struct mm_struct *mm, unsigned long pfn, int idx) { }
  376. static void __init pmd_advanced_tests(struct mm_struct *mm,
  377. struct vm_area_struct *vma, pmd_t *pmdp,
  378. unsigned long pfn, unsigned long vaddr,
  379. pgprot_t prot, pgtable_t pgtable)
  380. {
  381. }
  382. static void __init pud_advanced_tests(struct mm_struct *mm,
  383. struct vm_area_struct *vma, pud_t *pudp,
  384. unsigned long pfn, unsigned long vaddr,
  385. pgprot_t prot)
  386. {
  387. }
  388. static void __init pmd_leaf_tests(unsigned long pfn, pgprot_t prot) { }
  389. static void __init pud_leaf_tests(unsigned long pfn, pgprot_t prot) { }
  390. static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
  391. {
  392. }
  393. static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
  394. {
  395. }
  396. static void __init pmd_savedwrite_tests(unsigned long pfn, pgprot_t prot) { }
  397. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  398. static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
  399. {
  400. p4d_t p4d;
  401. pr_debug("Validating P4D basic\n");
  402. memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
  403. WARN_ON(!p4d_same(p4d, p4d));
  404. }
  405. static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
  406. {
  407. pgd_t pgd;
  408. pr_debug("Validating PGD basic\n");
  409. memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
  410. WARN_ON(!pgd_same(pgd, pgd));
  411. }
  412. #ifndef __PAGETABLE_PUD_FOLDED
  413. static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
  414. {
  415. pud_t pud = READ_ONCE(*pudp);
  416. if (mm_pmd_folded(mm))
  417. return;
  418. pr_debug("Validating PUD clear\n");
  419. pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
  420. WRITE_ONCE(*pudp, pud);
  421. pud_clear(pudp);
  422. pud = READ_ONCE(*pudp);
  423. WARN_ON(!pud_none(pud));
  424. }
  425. static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
  426. pmd_t *pmdp)
  427. {
  428. pud_t pud;
  429. if (mm_pmd_folded(mm))
  430. return;
  431. pr_debug("Validating PUD populate\n");
  432. /*
  433. * This entry points to the next level page table page.
  434. * Hence this must not qualify as pud_bad().
  435. */
  436. pud_populate(mm, pudp, pmdp);
  437. pud = READ_ONCE(*pudp);
  438. WARN_ON(pud_bad(pud));
  439. }
  440. #else /* !__PAGETABLE_PUD_FOLDED */
  441. static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
  442. static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
  443. pmd_t *pmdp)
  444. {
  445. }
  446. #endif /* __PAGETABLE_PUD_FOLDED */
  447. #ifndef __PAGETABLE_P4D_FOLDED
  448. static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
  449. {
  450. p4d_t p4d = READ_ONCE(*p4dp);
  451. if (mm_pud_folded(mm))
  452. return;
  453. pr_debug("Validating P4D clear\n");
  454. p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
  455. WRITE_ONCE(*p4dp, p4d);
  456. p4d_clear(p4dp);
  457. p4d = READ_ONCE(*p4dp);
  458. WARN_ON(!p4d_none(p4d));
  459. }
  460. static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
  461. pud_t *pudp)
  462. {
  463. p4d_t p4d;
  464. if (mm_pud_folded(mm))
  465. return;
  466. pr_debug("Validating P4D populate\n");
  467. /*
  468. * This entry points to the next level page table page.
  469. * Hence this must not qualify as p4d_bad().
  470. */
  471. pud_clear(pudp);
  472. p4d_clear(p4dp);
  473. p4d_populate(mm, p4dp, pudp);
  474. p4d = READ_ONCE(*p4dp);
  475. WARN_ON(p4d_bad(p4d));
  476. }
  477. static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
  478. {
  479. pgd_t pgd = READ_ONCE(*pgdp);
  480. if (mm_p4d_folded(mm))
  481. return;
  482. pr_debug("Validating PGD clear\n");
  483. pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
  484. WRITE_ONCE(*pgdp, pgd);
  485. pgd_clear(pgdp);
  486. pgd = READ_ONCE(*pgdp);
  487. WARN_ON(!pgd_none(pgd));
  488. }
  489. static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
  490. p4d_t *p4dp)
  491. {
  492. pgd_t pgd;
  493. if (mm_p4d_folded(mm))
  494. return;
  495. pr_debug("Validating PGD populate\n");
  496. /*
  497. * This entry points to the next level page table page.
  498. * Hence this must not qualify as pgd_bad().
  499. */
  500. p4d_clear(p4dp);
  501. pgd_clear(pgdp);
  502. pgd_populate(mm, pgdp, p4dp);
  503. pgd = READ_ONCE(*pgdp);
  504. WARN_ON(pgd_bad(pgd));
  505. }
  506. #else /* !__PAGETABLE_P4D_FOLDED */
  507. static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
  508. static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
  509. static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
  510. pud_t *pudp)
  511. {
  512. }
  513. static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
  514. p4d_t *p4dp)
  515. {
  516. }
  517. #endif /* __PAGETABLE_P4D_FOLDED */
  518. static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
  519. unsigned long pfn, unsigned long vaddr,
  520. pgprot_t prot)
  521. {
  522. pte_t pte = pfn_pte(pfn, prot);
  523. pr_debug("Validating PTE clear\n");
  524. #ifndef CONFIG_RISCV
  525. pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
  526. #endif
  527. set_pte_at(mm, vaddr, ptep, pte);
  528. barrier();
  529. pte_clear(mm, vaddr, ptep);
  530. pte = ptep_get(ptep);
  531. WARN_ON(!pte_none(pte));
  532. }
  533. static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
  534. {
  535. pmd_t pmd = READ_ONCE(*pmdp);
  536. pr_debug("Validating PMD clear\n");
  537. pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
  538. WRITE_ONCE(*pmdp, pmd);
  539. pmd_clear(pmdp);
  540. pmd = READ_ONCE(*pmdp);
  541. WARN_ON(!pmd_none(pmd));
  542. }
  543. static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
  544. pgtable_t pgtable)
  545. {
  546. pmd_t pmd;
  547. pr_debug("Validating PMD populate\n");
  548. /*
  549. * This entry points to the next level page table page.
  550. * Hence this must not qualify as pmd_bad().
  551. */
  552. pmd_populate(mm, pmdp, pgtable);
  553. pmd = READ_ONCE(*pmdp);
  554. WARN_ON(pmd_bad(pmd));
  555. }
  556. static void __init pte_special_tests(unsigned long pfn, pgprot_t prot)
  557. {
  558. pte_t pte = pfn_pte(pfn, prot);
  559. if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
  560. return;
  561. pr_debug("Validating PTE special\n");
  562. WARN_ON(!pte_special(pte_mkspecial(pte)));
  563. }
  564. static void __init pte_protnone_tests(unsigned long pfn, pgprot_t prot)
  565. {
  566. pte_t pte = pfn_pte(pfn, prot);
  567. if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
  568. return;
  569. pr_debug("Validating PTE protnone\n");
  570. WARN_ON(!pte_protnone(pte));
  571. WARN_ON(!pte_present(pte));
  572. }
  573. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  574. static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot)
  575. {
  576. pmd_t pmd;
  577. if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
  578. return;
  579. if (!has_transparent_hugepage())
  580. return;
  581. pr_debug("Validating PMD protnone\n");
  582. pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
  583. WARN_ON(!pmd_protnone(pmd));
  584. WARN_ON(!pmd_present(pmd));
  585. }
  586. #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  587. static void __init pmd_protnone_tests(unsigned long pfn, pgprot_t prot) { }
  588. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  589. #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
  590. static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot)
  591. {
  592. pte_t pte = pfn_pte(pfn, prot);
  593. pr_debug("Validating PTE devmap\n");
  594. WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
  595. }
  596. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  597. static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot)
  598. {
  599. pmd_t pmd;
  600. if (!has_transparent_hugepage())
  601. return;
  602. pr_debug("Validating PMD devmap\n");
  603. pmd = pfn_pmd(pfn, prot);
  604. WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
  605. }
  606. #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
  607. static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot)
  608. {
  609. pud_t pud;
  610. if (!has_transparent_hugepage())
  611. return;
  612. pr_debug("Validating PUD devmap\n");
  613. pud = pfn_pud(pfn, prot);
  614. WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
  615. }
  616. #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  617. static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  618. #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  619. #else /* CONFIG_TRANSPARENT_HUGEPAGE */
  620. static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  621. static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  622. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  623. #else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
  624. static void __init pte_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  625. static void __init pmd_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  626. static void __init pud_devmap_tests(unsigned long pfn, pgprot_t prot) { }
  627. #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
  628. static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
  629. {
  630. pte_t pte = pfn_pte(pfn, prot);
  631. if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
  632. return;
  633. pr_debug("Validating PTE soft dirty\n");
  634. WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
  635. WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
  636. }
  637. static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
  638. {
  639. pte_t pte = pfn_pte(pfn, prot);
  640. if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
  641. return;
  642. pr_debug("Validating PTE swap soft dirty\n");
  643. WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
  644. WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
  645. }
  646. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  647. static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
  648. {
  649. pmd_t pmd;
  650. if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
  651. return;
  652. if (!has_transparent_hugepage())
  653. return;
  654. pr_debug("Validating PMD soft dirty\n");
  655. pmd = pfn_pmd(pfn, prot);
  656. WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
  657. WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
  658. }
  659. static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
  660. {
  661. pmd_t pmd;
  662. if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
  663. !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
  664. return;
  665. if (!has_transparent_hugepage())
  666. return;
  667. pr_debug("Validating PMD swap soft dirty\n");
  668. pmd = pfn_pmd(pfn, prot);
  669. WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
  670. WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
  671. }
  672. #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  673. static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
  674. static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
  675. {
  676. }
  677. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
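/*
 * Round-trip a pte through the arch swap entry conversion helpers and make
 * sure the encoded pfn survives unchanged.
 */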
  678. static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
  679. {
  680. swp_entry_t swp;
  681. pte_t pte;
  682. pr_debug("Validating PTE swap\n");
  683. pte = pfn_pte(pfn, prot);
  684. swp = __pte_to_swp_entry(pte);
  685. pte = __swp_entry_to_pte(swp);
  686. WARN_ON(pfn != pte_pfn(pte));
  687. }
  688. #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
  689. static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
  690. {
  691. swp_entry_t swp;
  692. pmd_t pmd;
  693. if (!has_transparent_hugepage())
  694. return;
  695. pr_debug("Validating PMD swap\n");
  696. pmd = pfn_pmd(pfn, prot);
  697. swp = __pmd_to_swp_entry(pmd);
  698. pmd = __swp_entry_to_pmd(swp);
  699. WARN_ON(pfn != pmd_pfn(pmd));
  700. }
  701. #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
  702. static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
  703. #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
  704. static void __init swap_migration_tests(void)
  705. {
  706. struct page *page;
  707. swp_entry_t swp;
  708. if (!IS_ENABLED(CONFIG_MIGRATION))
  709. return;
  710. pr_debug("Validating swap migration\n");
  711. /*
  712. * swap_migration_tests() requires a dedicated page, as it needs to
  713. * be locked before creating a migration entry from it. Locking the
  714. * page that actually maps kernel text ('start_kernel') would be
  715. * really problematic. Let's allocate a dedicated page explicitly for
  716. * this purpose; it will be freed afterwards.
  717. */
  718. page = alloc_page(GFP_KERNEL);
  719. if (!page) {
  720. pr_err("page allocation failed\n");
  721. return;
  722. }
  723. /*
  724. * make_migration_entry() expects the given page to be
  725. * locked, otherwise it stumbles upon a BUG_ON().
  726. */
  727. __SetPageLocked(page);
  728. swp = make_migration_entry(page, 1);
  729. WARN_ON(!is_migration_entry(swp));
  730. WARN_ON(!is_write_migration_entry(swp));
  731. make_migration_entry_read(&swp);
  732. WARN_ON(!is_migration_entry(swp));
  733. WARN_ON(is_write_migration_entry(swp));
  734. swp = make_migration_entry(page, 0);
  735. WARN_ON(!is_migration_entry(swp));
  736. WARN_ON(is_write_migration_entry(swp));
  737. __ClearPageLocked(page);
  738. __free_page(page);
  739. }
  740. #ifdef CONFIG_HUGETLB_PAGE
  741. static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot)
  742. {
  743. struct page *page;
  744. pte_t pte;
  745. pr_debug("Validating HugeTLB basic\n");
  746. /*
  747. * Accessing the page associated with the pfn is safe here,
  748. * as it was previously derived from a real kernel symbol.
  749. */
  750. page = pfn_to_page(pfn);
  751. pte = mk_huge_pte(page, prot);
  752. WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
  753. WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
  754. WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
  755. #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
  756. pte = pfn_pte(pfn, prot);
  757. WARN_ON(!pte_huge(pte_mkhuge(pte)));
  758. #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
  759. }
  760. #else /* !CONFIG_HUGETLB_PAGE */
  761. static void __init hugetlb_basic_tests(unsigned long pfn, pgprot_t prot) { }
  762. #endif /* CONFIG_HUGETLB_PAGE */
  763. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  764. static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
  765. {
  766. pmd_t pmd;
  767. if (!has_transparent_hugepage())
  768. return;
  769. pr_debug("Validating PMD based THP\n");
  770. /*
  771. * pmd_trans_huge() and pmd_present() must return true after
  772. * MMU invalidation with pmd_mkinvalid(). This behavior is an
  773. * optimization for transparent huge pages. pmd_trans_huge() must
  774. * be true if pmd_page() returns a valid THP, to avoid taking the
  775. * pmd_lock when others walk over non-transhuge pmds (i.e. there
  776. * are no THPs allocated). In particular, when splitting a THP and
  777. * removing the present bit from the pmd, pmd_trans_huge() still
  778. * needs to return true. pmd_present() should be true whenever
  779. * pmd_trans_huge() returns true.
  780. */
  781. pmd = pfn_pmd(pfn, prot);
  782. WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));
  783. #ifndef __HAVE_ARCH_PMDP_INVALIDATE
  784. WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
  785. WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
  786. #endif /* __HAVE_ARCH_PMDP_INVALIDATE */
  787. }
  788. #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
  789. static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
  790. {
  791. pud_t pud;
  792. if (!has_transparent_hugepage())
  793. return;
  794. pr_debug("Validating PUD based THP\n");
  795. pud = pfn_pud(pfn, prot);
  796. WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));
  797. /*
  798. * pud_mkinvalid() has been dropped for now. Re-enable these
  799. * tests when it comes back with a modified pud_present().
  800. *
  801. * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
  802. * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
  803. */
  804. }
  805. #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  806. static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
  807. #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
  808. #else /* !CONFIG_TRANSPARENT_HUGEPAGE */
  809. static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
  810. static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
  811. #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
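/* Return a random address within the user range [FIRST_USER_ADDRESS, TASK_SIZE) */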
  812. static unsigned long __init get_random_vaddr(void)
  813. {
  814. unsigned long random_vaddr, random_pages, total_user_pages;
  815. total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
  816. random_pages = get_random_long() % total_user_pages;
  817. random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
  818. return random_vaddr;
  819. }
  820. static int __init debug_vm_pgtable(void)
  821. {
  822. struct vm_area_struct *vma;
  823. struct mm_struct *mm;
  824. pgd_t *pgdp;
  825. p4d_t *p4dp, *saved_p4dp;
  826. pud_t *pudp, *saved_pudp;
  827. pmd_t *pmdp, *saved_pmdp, pmd;
  828. pte_t *ptep;
  829. pgtable_t saved_ptep;
  830. pgprot_t prot, protnone;
  831. phys_addr_t paddr;
  832. unsigned long vaddr, pte_aligned, pmd_aligned;
  833. unsigned long pud_aligned, p4d_aligned, pgd_aligned;
  834. spinlock_t *ptl = NULL;
  835. int idx;
  836. pr_info("Validating architecture page table helpers\n");
  837. prot = vm_get_page_prot(VMFLAGS);
  838. vaddr = get_random_vaddr();
  839. mm = mm_alloc();
  840. if (!mm) {
  841. pr_err("mm_struct allocation failed\n");
  842. return 1;
  843. }
  844. /*
  845. * __P000 (or even __S000) will help create page table entries with
  846. * PROT_NONE permission as required for pxx_protnone_tests().
  847. */
  848. protnone = __P000;
  849. vma = vm_area_alloc(mm);
  850. if (!vma) {
  851. pr_err("vma allocation failed\n");
  852. return 1;
  853. }
  854. /*
  855. * The PFN for mapping at the PTE level is determined from a standard
  856. * kernel text symbol, while the pfns for higher page table levels are
  857. * derived by masking lower bits of this real pfn. These derived pfns
  858. * might not exist on the platform, but that does not really matter, as
  859. * the pfn_pxx() helpers will still create appropriate entries for the
  860. * test. This avoids having to allocate large memory blocks for mappings
  861. * at higher page table levels.
  862. */
  863. paddr = __pa_symbol(&start_kernel);
  864. pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
  865. pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
  866. pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
  867. p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
  868. pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
  869. WARN_ON(!pfn_valid(pte_aligned));
  870. pgdp = pgd_offset(mm, vaddr);
  871. p4dp = p4d_alloc(mm, pgdp, vaddr);
  872. pudp = pud_alloc(mm, p4dp, vaddr);
  873. pmdp = pmd_alloc(mm, pudp, vaddr);
  874. /*
  875. * Allocate pgtable_t
  876. */
  877. if (pte_alloc(mm, pmdp)) {
  878. pr_err("pgtable allocation failed\n");
  879. return 1;
  880. }
  881. /*
  882. * Save all the page table page addresses, as the page table
  883. * entries will be overwritten with random or garbage values
  884. * during testing. These saved addresses will later be used to
  885. * free the page table pages.
  886. */
  887. pmd = READ_ONCE(*pmdp);
  888. saved_p4dp = p4d_offset(pgdp, 0UL);
  889. saved_pudp = pud_offset(p4dp, 0UL);
  890. saved_pmdp = pmd_offset(pudp, 0UL);
  891. saved_ptep = pmd_pgtable(pmd);
  892. /*
  893. * Iterate over protection_map[] to make sure that all the
  894. * basic page table transformation validations hold true
  895. * irrespective of the starting protection value for a
  896. * given page table entry.
  897. */
  898. for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
  899. pte_basic_tests(pte_aligned, idx);
  900. pmd_basic_tests(pmd_aligned, idx);
  901. pud_basic_tests(mm, pud_aligned, idx);
  902. }
  903. /*
  904. * Both P4D and PGD level tests are very basic and do not
  905. * involve creating page table entries from the protection
  906. * value and the given pfn. Hence just keep them out of
  907. * the above iteration for now to save some test execution
  908. * time.
  909. */
  910. p4d_basic_tests(p4d_aligned, prot);
  911. pgd_basic_tests(pgd_aligned, prot);
  912. pmd_leaf_tests(pmd_aligned, prot);
  913. pud_leaf_tests(pud_aligned, prot);
  914. pte_savedwrite_tests(pte_aligned, protnone);
  915. pmd_savedwrite_tests(pmd_aligned, protnone);
  916. pte_special_tests(pte_aligned, prot);
  917. pte_protnone_tests(pte_aligned, protnone);
  918. pmd_protnone_tests(pmd_aligned, protnone);
  919. pte_devmap_tests(pte_aligned, prot);
  920. pmd_devmap_tests(pmd_aligned, prot);
  921. pud_devmap_tests(pud_aligned, prot);
  922. pte_soft_dirty_tests(pte_aligned, prot);
  923. pmd_soft_dirty_tests(pmd_aligned, prot);
  924. pte_swap_soft_dirty_tests(pte_aligned, prot);
  925. pmd_swap_soft_dirty_tests(pmd_aligned, prot);
  926. pte_swap_tests(pte_aligned, prot);
  927. pmd_swap_tests(pmd_aligned, prot);
  928. swap_migration_tests();
  929. pmd_thp_tests(pmd_aligned, prot);
  930. pud_thp_tests(pud_aligned, prot);
  931. hugetlb_basic_tests(pte_aligned, prot);
  932. /*
  933. * Page table modifying tests. They need to hold the
  934. * proper page table lock.
  935. */
  936. ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
  937. pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
  938. pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
  939. pte_unmap_unlock(ptep, ptl);
  940. ptl = pmd_lock(mm, pmdp);
  941. pmd_clear_tests(mm, pmdp);
  942. pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
  943. pmd_huge_tests(pmdp, pmd_aligned, prot);
  944. pmd_populate_tests(mm, pmdp, saved_ptep);
  945. spin_unlock(ptl);
  946. ptl = pud_lock(mm, pudp);
  947. pud_clear_tests(mm, pudp);
  948. pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
  949. pud_huge_tests(pudp, pud_aligned, prot);
  950. pud_populate_tests(mm, pudp, saved_pmdp);
  951. spin_unlock(ptl);
  952. spin_lock(&mm->page_table_lock);
  953. p4d_clear_tests(mm, p4dp);
  954. pgd_clear_tests(mm, pgdp);
  955. p4d_populate_tests(mm, p4dp, saved_pudp);
  956. pgd_populate_tests(mm, pgdp, saved_p4dp);
  957. spin_unlock(&mm->page_table_lock);
  958. p4d_free(mm, saved_p4dp);
  959. pud_free(mm, saved_pudp);
  960. pmd_free(mm, saved_pmdp);
  961. pte_free(mm, saved_ptep);
  962. vm_area_free(vma);
  963. mm_dec_nr_puds(mm);
  964. mm_dec_nr_pmds(mm);
  965. mm_dec_nr_ptes(mm);
  966. mmdrop(mm);
  967. return 0;
  968. }
  969. late_initcall(debug_vm_pgtable);