pagewalk.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded
 * then a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, and we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole ranges and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}

static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (vma && ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (vma && is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (vma && ops->post_vma)
		ops->post_vma(walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined like below:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * A short, illustrative usage sketch follows the function body below.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		    unsigned long end, const struct mm_walk_ops *ops,
		    void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops = ops,
		.mm = mm,
		.private = private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
EXPORT_SYMBOL_GPL(walk_page_range);

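/*
 * Usage sketch for walk_page_range(), kept deliberately minimal. The names
 * count_pte_entry, count_ops and count_present_ptes are hypothetical and
 * exist only for this illustration; the mm_walk_ops fields, the locking and
 * the walk_page_range() call follow the API documented above.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;	/* caller-supplied counter */

	if (pte_present(*pte))
		(*count)++;
	return 0;				/* 0: keep walking */
}

static const struct mm_walk_ops count_ops = {
	.pte_entry = count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	/* walk_page_range() asserts that mmap_lock is held */
	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_ops, &count);
	mmap_read_unlock(mm);
	return count;
}
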
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops = ops,
		.mm = mm,
		.pgd = pgd,
		.private = private,
		.no_vma = true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	return __walk_page_range(start, end, &walk);
}

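/*
 * Usage sketch for walk_page_range_novma(): walking a range of the kernel's
 * own page tables under init_mm, roughly as a page-table dumper might. The
 * names note_kernel_pmd, kernel_pmd_ops and walk_kernel_range are
 * hypothetical; passing a NULL pgd makes the walk start from init_mm's pgd.
 */
static int note_kernel_pmd(pmd_t *pmd, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	/* the walk core takes no page table locks; the entry may change */
	if (pmd_leaf(*pmd))
		pr_debug("leaf PMD mapping at %lx\n", addr);
	return 0;
}

static const struct mm_walk_ops kernel_pmd_ops = {
	.pmd_entry = note_kernel_pmd,
};

static void walk_kernel_range(unsigned long start, unsigned long end)
{
	/* walk_page_range_novma() asserts init_mm's mmap_lock is held */
	mmap_read_lock(&init_mm);
	walk_page_range_novma(&init_mm, start, end, &kernel_pmd_ops,
			      NULL, NULL);
	mmap_read_unlock(&init_mm);
}
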
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		  void *private)
{
	struct mm_walk walk = {
		.ops = ops,
		.mm = vma->vm_mm,
		.vma = vma,
		.private = private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information. A short usage
 * sketch follows the function body below.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops = ops,
		.private = private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}

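/*
 * Usage sketch for walk_page_mapping(): counting dirty PTEs that map a given
 * page range of a file, across every VMA sharing the mapping. The names
 * mapping_dirty_pte, mapping_dirty_ops and count_file_dirty_ptes are
 * hypothetical; the locking mirrors the requirement documented above
 * (i_mmap_rwsem held, not mmap_lock).
 */
static int mapping_dirty_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte) && pte_dirty(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops mapping_dirty_ops = {
	.pte_entry = mapping_dirty_pte,
};

static unsigned long count_file_dirty_ptes(struct address_space *mapping,
					   pgoff_t first_index, pgoff_t nr)
{
	unsigned long count = 0;

	/* walk_page_mapping() asserts i_mmap_rwsem is held */
	i_mmap_lock_read(mapping);
	walk_page_mapping(mapping, first_index, nr, &mapping_dirty_ops, &count);
	i_mmap_unlock_read(mapping);
	return count;
}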