/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                addr = page_address(page);
        else
                addr = kmap_high(page);
        kmap_flush_tlb((unsigned long)addr);
        return addr;
}

void kunmap_high(struct page *page);

static inline void kunmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_atomic_high_prot(page, prot);
}
#define kmap_atomic(page)       kmap_atomic_prot(page, kmap_prot)
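
/*
 * Illustrative sketch of the interfaces above: example_fill_page() is a
 * hypothetical caller, not an API defined in this header.  kmap() may sleep,
 * so it is only valid in process context; short atomic paths should use
 * kmap_atomic()/kunmap_atomic() instead:
 *
 *	static void example_fill_page(struct page *page, int val)
 *	{
 *		void *kaddr = kmap(page);
 *
 *		memset(kaddr, val, PAGE_SIZE);
 *		kunmap(page);			<- kunmap() takes the page
 *	}
 */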

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned long totalhigh_pages(void)
{
        return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_inc(void)
{
        atomic_long_inc(&_totalhigh_pages);
}

static inline void totalhigh_pages_dec(void)
{
        atomic_long_dec(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
        atomic_long_add(count, &_totalhigh_pages);
}

static inline void totalhigh_pages_set(long val)
{
        atomic_long_set(&_totalhigh_pages, val);
}

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
        return virt_to_page(addr);
}

static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap_high(struct page *page)
{
}

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
        preempt_disable();
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    kmap_atomic(page)

static inline void kunmap_atomic_high(void *addr)
{
        /*
         * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
         * handles re-enabling faults + preemption
         */
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
        kunmap_flush_on_unmap(addr);
#endif
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))

#define kmap_flush_unused()     do {} while(0)

#endif /* CONFIG_HIGHMEM */
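
/*
 * The per-CPU counter below is bookkeeping for nested atomic kmaps:
 * architectures that implement kmap_atomic() with per-CPU fixmap slots use
 * __kmap_atomic_idx as a small stack index into their KM_TYPE_NR slots,
 * pushed on map and popped on unmap.
 */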

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx >= KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);

        BUG_ON(idx < 0);
#else
        __this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        kunmap_atomic_high(addr);                               \
        pagefault_enable();                                     \
        preempt_enable();                                       \
} while (0)
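
/*
 * Illustrative sketch: kunmap_atomic() takes the kernel virtual address
 * returned by kmap_atomic(), never the struct page (the BUILD_BUG_ON above
 * enforces that at compile time).  example_copy_from_page() below is a
 * hypothetical helper, not an API defined in this header:
 *
 *	static void example_copy_from_page(void *dst, struct page *page,
 *					   size_t offset, size_t len)
 *	{
 *		char *kaddr = kmap_atomic(page);
 *
 *		memcpy(dst, kaddr + offset, len);
 *		kunmap_atomic(kaddr);		<- the mapping, not the page
 *	}
 */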

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing its own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                   unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_CMA, vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif
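
/*
 * Illustrative sketch: the anonymous page-fault path is the typical caller of
 * alloc_zeroed_user_highpage_movable().  The fragment below is a simplified,
 * hypothetical shape of such a caller, not the actual mm/ code:
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	... map the already-zeroed page into the faulting VMA ...
 */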

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
static inline void tag_clear_highpage(struct page *page)
{
}
#endif

static inline void zero_user_segments(struct page *page,
                                      unsigned start1, unsigned end1,
                                      unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
                                     unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
                             unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}
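
/*
 * Illustrative sketch: zero_user() is the usual way for a filesystem to clear
 * the part of a page beyond the data it cares about, e.g. when truncating
 * within a page.  Hypothetical fragment, assuming 'page' is locked and
 * 'offset' is the in-page offset of the new end of data:
 *
 *	zero_user(page, offset, PAGE_SIZE - offset);
 */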

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
                                      unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

#endif /* _LINUX_HIGHMEM_H */