/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <linux/android_vendor.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	ANDROID_OEM_DATA(1);
};
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always in one of the three states:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) in "busy" tree (root is vmap_area_root)
	 *    3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};
/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
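/*
 * Illustrative sketch (editor's example, not upstream code): vm_map_ram()
 * provides a transient kernel mapping over pages the caller already holds,
 * and is paired with vm_unmap_ram() using the same page count. `pages`,
 * `nr` and `src` are assumed caller-supplied names:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);		(count must match the map call)
 */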
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
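/*
 * Illustrative sketch (editor's example, not upstream code): the common
 * pattern for the high-level allocators -- a zeroed, virtually contiguous
 * buffer that need not be physically contiguous, released with vfree().
 * `nr_items` and `struct foo` are assumed names:
 *
 *	struct foo *tbl = vzalloc(array_size(nr_items, sizeof(*tbl)));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 *
 * array_size() (from linux/overflow.h, included above) guards the size
 * multiplication against overflow.
 */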
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
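/*
 * Illustrative sketch (editor's example, not upstream code): vmap() builds
 * a long-lived contiguous kernel mapping over pages the caller owns, torn
 * down with vunmap(); the pages themselves are not freed unless
 * VM_MAP_PUT_PAGES was set. `pages` and `nr` are assumed caller names:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */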
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
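/*
 * Illustrative sketch (editor's example, not upstream code): exporting a
 * vmalloc'ed buffer to userspace from a driver's .mmap handler. The buffer
 * must come from vmalloc_user() (or otherwise carry VM_USERMAP) for
 * remap_vmalloc_range() to accept it; here it is assumed to have been
 * stashed in file->private_data at open time:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, file->private_data,
 *					   vma->vm_pgoff);
 *	}
 */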
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
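/*
 * Illustrative sketch (editor's example, not from any particular arch): an
 * architecture that must propagate PMD-level changes to all page tables
 * would override the mask in its headers and supply the hook:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		(walk the reference page tables and copy the changed
 *		 range into every process's page tables)
 *	}
 */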
/*
 * Low-level APIs (not for driver use!)
 */
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
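/*
 * Illustrative sketch (editor's example, not upstream code): looking up the
 * vm_struct behind a vmalloc address to learn its usable size. Unless
 * VM_NO_GUARD was set, area->size includes the trailing guard page, which
 * is why get_vm_area_size() subtracts PAGE_SIZE. `addr` is assumed to be a
 * vmalloc address held by the caller:
 *
 *	struct vm_struct *area = find_vm_area(addr);
 *
 *	if (area)
 *		pr_info("usable size: %zu\n", get_vm_area_size(area));
 */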
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
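/*
 * Illustrative sketch (editor's example, modelled on how kprobes uses this
 * flag): mark an executable allocation so that freeing it resets the direct
 * map permissions and flushes the TLB before the pages are reused.
 * module_alloc() comes from linux/moduleloader.h:
 *
 *	void *page = module_alloc(PAGE_SIZE);
 *
 *	if (!page)
 *		return NULL;
 *	set_vm_flush_reset_perms(page);
 *	...
 */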
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * Internals. Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
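/*
 * Illustrative sketch (editor's example, not upstream code): a subsystem
 * that caches vmap ranges can register for a callback when lazily-freed
 * areas are purged under address-space pressure and drop its caches there.
 * `foo_purge` and `foo_drop_cache` are assumed names; NOTIFY_OK comes from
 * linux/notifier.h:
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long action,
 *			     void *data)
 *	{
 *		foo_drop_cache();
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_purge_nb = {
 *		.notifier_call = foo_purge,
 *	};
 *
 *	...
 *	register_vmap_purge_notifier(&foo_purge_nb);
 */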
#endif /* _LINUX_VMALLOC_H */