/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)	((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
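
/*
 * Illustrative example (editor's sketch, not part of the original header):
 * a mapping that reserved huge page indices 0-1 and 5-7 would carry two
 * file_regions on its resv_map->regions list, [0, 2) and [5, 8), for a
 * total of (2 - 0) + (8 - 5) = 5 reserved huge pages.
 */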
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
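
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * walk every registered huge page size and report its geometry. h->name
 * and huge_page_order() are defined later in this header.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */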
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);

#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */

int hugetlb_reserve_pages(struct inode *inode, long from, long to,
				struct vm_area_struct *vma,
				vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
				long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);

extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
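
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * fault-path callers serialize on a hashed mutex chosen by mapping and
 * huge page index, e.g.
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */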
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
			unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
				pgd_t *pgd, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
				unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)	false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
				pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
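
/*
 * Illustrative example (editor's note, not part of the original header):
 * callers pass the base-2 log of the requested page size, so
 * hstate_sizelog(0) selects the default huge page size, while
 * hstate_sizelog(21) looks up the 2 MB hstate (1UL << 21 == 2 MB) and
 * returns NULL if no hstate of that size was registered.
 */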
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
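
/*
 * Worked example (editor's note, assuming a 4 KB base page size): a 2 MB
 * huge page has order 9, so huge_page_size() returns 4 KB << 9 = 2 MB,
 * huge_page_shift() returns 9 + 12 = 21, pages_per_huge_page() returns
 * 1 << 9 = 512, and blocks_per_huge_page() returns 2 MB / 512 = 4096
 * 512-byte sectors.
 */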
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}
/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of a huge page is only relevant
 * if its size is supported for migration: there is no reason
 * for a huge page to be movable if it is not migratable to start
 * with. The huge page must also be small enough that migrating
 * it out of the movable zone remains feasible; merely residing
 * in the movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it is not
 * feasible to migrate them out of the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
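
/*
 * Illustrative sketch (editor's note, not from the original header): a
 * caller that must allocate from one specific node can fold that
 * constraint into the hstate's default mask before allocating, e.g.
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *
 *	page = alloc_huge_page_nodemask(h, nid, NULL, gfp);
 */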
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
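
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * fault and unmap paths account a whole huge page worth of base pages at
 * a time, e.g.
 *
 *	hugetlb_count_add(pages_per_huge_page(h), mm);
 *	...
 *	hugetlb_count_sub(pages_per_huge_page(h), mm);
 */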
#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
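
/*
 * Typical start/commit sequence (editor's sketch, not from the original
 * header), as used when changing the protection of a mapped huge page;
 * huge_pte_modify() here refers to the generic huge PTE helper:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */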
void set_page_huge_active(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
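
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * take the lock covering a huge PTE, operate on the entry, then drop it.
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */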
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */