/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 * MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 * MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 * MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 * MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 * MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather does not track individual pages for
 *  delayed freeing; a platform that enables it must provide its own
 *  __tlb_remove_page_size() implementation to free the pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 *
 * An illustrative usage sketch of this API follows below.
 */
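
/*
 * Illustrative only: a rough sketch of how the core mm code drives the API
 * described above when tearing down user mappings.  The prototypes of
 * tlb_gather_mmu()/tlb_finish_mmu() are not part of this excerpt and differ
 * between kernel versions (older ones also take a start/end range), so the
 * calls below are an approximation, not a reference:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);			// start a gather for @mm
 *	tlb_start_vma(&tlb, vma);			// cache flush + VMA flags
 *	...						// for each present pte:
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);		//   record range to flush
 *	tlb_remove_page(&tlb, page);			//   queue page, may flush
 *	tlb_end_vma(&tlb, vma);				// flush + reset the range
 *	tlb_finish_mmu(&tlb);				// final flush, free pages
 */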

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
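
/*
 * Illustrative only: an architecture selecting MMU_GATHER_TABLE_FREE is
 * expected to provide __tlb_remove_table(), which performs the final free
 * once it is safe to do so.  A hypothetical page-backed variant (made up
 * for this sketch, not taken from any particular arch) might look like:
 *
 *	static inline void __tlb_remove_table(void *table)
 *	{
 *		free_page_and_swap_cache((struct page *)table);
 *	}
 */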

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif /* CONFIG_MMU_GATHER_NO_GATHER */

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch	*active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}
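
/*
 * Example of the range tracking above (illustrative, not normative): on a
 * !fullmm gather, __tlb_reset_range() leaves the empty range (TASK_SIZE, 0).
 * Clearing a PTE at 0x1000 and another at 0x5000 via
 * tlb_flush_pte_range(tlb, addr, PAGE_SIZE) then yields:
 *
 *	tlb->start == 0x1000;
 *	tlb->end   == 0x5000 + PAGE_SIZE;
 *
 * i.e. the gather always tracks one contiguous superset of everything
 * cleared since the last flush, which is the range tlb_flush() gets to see.
 */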

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
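
/*
 * Illustrative only: a hypothetical architecture override of tlb_flush()
 * could use the cleared_* tracking to pick an invalidation stride, roughly
 * along these lines (provided from the arch's own asm/tlb.h; the helper
 * my_arch_flush_tlb_range() below is made up for this sketch):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all) {
 *			flush_tlb_mm(tlb->mm);
 *		} else if (tlb->end) {
 *			unsigned long stride = tlb_get_unmap_size(tlb);
 *
 *			my_arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
 *						stride, tlb->freed_tables);
 *		}
 *	}
 */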

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
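
/*
 * Illustrative only: a stripped-down sketch of the pattern the generic zap
 * code follows when tearing down present PTEs (presence checks, locking,
 * rmap and dirty/accessed handling omitted; see mm/memory.c for the real
 * thing):
 *
 *	tlb_change_page_size(tlb, PAGE_SIZE);
 *	for (; addr < end; ptep++, addr += PAGE_SIZE) {
 *		pte_t ptent = ptep_get_and_clear_full(mm, addr, ptep,
 *						      tlb->fullmm);
 *		struct page *page = vm_normal_page(vma, addr, ptent);
 *
 *		tlb_remove_tlb_entry(tlb, ptep, addr);
 *		if (page && __tlb_remove_page(tlb, page))
 *			break;	// batch full: caller must tlb_flush_mmu()
 *	}
 */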

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page-table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it.  This
 * is definitely how x86 works, for example.  And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture that would want something that odd,
 * I think it is up to that architecture to do its own odd thing, not
 * cause pain for others:
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
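
/*
 * Illustrative only: on an architecture whose PTE pages are plain pages,
 * __pte_free_tlb() is often just the page-table destructor plus handing
 * the page to the gather.  A hypothetical example (not lifted from any
 * particular arch) might look like:
 *
 *	#define __pte_free_tlb(tlb, ptepage, address)		\
 *	do {							\
 *		pgtable_pte_page_dtor(ptepage);			\
 *		tlb_remove_table((tlb), (ptepage));		\
 *	} while (0)
 */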

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */