/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
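
/*
 * Worked example (illustrative; the real layout is arch-specific): with a
 * 5/27 split, a packed entry for (type, offset) would look like
 *
 *	val = (type << 27) | offset;
 *
 * giving at most 2^27 swapcache pages (512 GiB at 4 KiB pages) per swap
 * type.  See swp_entry(), swp_type() and swp_offset() in
 * include/linux/swapops.h for the real encoding helpers.
 */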

/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support.  See include/linux/hmm.h and
 * Documentation/vm/hmm.rst.  In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
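
/*
 * Worked example: with CONFIG_MEMORY_FAILURE, CONFIG_MIGRATION and
 * CONFIG_DEVICE_PRIVATE all enabled, the 2^5 = 32 type values split as
 *
 *	MAX_SWAPFILES      = 32 - 2 - 2 - 1 = 27   (types 0..26: real swap)
 *	SWP_HWPOISON       = 27
 *	SWP_MIGRATION_READ = 28, SWP_MIGRATION_WRITE = 29
 *	SWP_DEVICE_WRITE   = 30, SWP_DEVICE_READ     = 31
 *
 * so the special entries occupy the top of the type space and ordinary
 * swap devices the bottom.
 */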

/*
 * Magic header for a swap area.  The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information.  Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
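
/*
 * Layout example for PAGE_SIZE == 4096: info.bootbits ends at offset 1024,
 * the fixed info fields and padding end at offset 1536, and magic.magic
 * occupies the last 10 bytes (offsets 4086..4095).  A swapon
 * implementation can validate an area roughly like (illustrative sketch):
 *
 *	union swap_header *hdr = kmap(page);
 *	if (memcmp(hdr->magic.magic, "SWAPSPACE2", 10) == 0)
 *		... parse hdr->info ...
 */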

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
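
/*
 * Worked example with the 4 KiB layout shown above: magic.magic sits at
 * offset 4086 and info.badpages at offset 1536, so up to
 * (4086 - 1536) / 4 = 637 bad page numbers fit in the header.
 */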

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	SWP_VALID	= (1 << 13),	/* swap is valid to be operated on? */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
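
/*
 * Example readings of a first-level swap_map byte (illustrative): 0x01 is
 * one pte reference; 0x41 (SWAP_HAS_CACHE | 1) is one pte reference plus a
 * page in the swap cache; a value with COUNT_CONTINUED set means the count
 * exceeded SWAP_MAP_MAX and continues in a continuation page, see
 * add_swap_count_continuation() below.
 */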

/*
 * We use this to track usage of a cluster.  A cluster is a block of swap
 * disk space SWAPFILE_CLUSTER pages long, naturally aligned on disk.  All
 * free clusters are organized into a list; we fetch an entry from the list
 * to get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise.  The flags field determines if a
 * cluster is free.  This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
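
/*
 * Example (from the description above): a free cluster has
 * CLUSTER_FLAG_FREE set in flags and the index of the next free cluster in
 * data; an in-use cluster has flags clear and the number of allocated
 * entries in data, so data dropping to 0 means the whole cluster can be
 * returned to the free list.
 */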

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries
 * from its own cluster and swap out sequentially.  The purpose is to
 * optimize swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list.  Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock.  Changing flags requires
					 * holding both this lock and swap_lock;
					 * when both are needed, take swap_lock
					 * first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last: the array length is
					  * nr_node_ids, which is not a fixed
					  * value, so it has to be allocated
					  * dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};
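
/*
 * Lock-ordering sketch for the rule documented above (hold swap_lock
 * before si->lock when both are needed), e.g. to clear SWP_WRITEOK:
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	si->flags &= ~SWP_WRITEOK;
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */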

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
		xas_set_update(xas, workingset_update_node);		\
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec, struct list_head *head);
extern void mark_page_accessed(struct page *);
extern bool lru_cache_disabled(void);
extern void lru_cache_disable(void);
extern void lru_cache_enable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void mark_page_lazyfree_movetail(struct page *page, bool tail);
extern void swap_setup(void);

extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
						unsigned long vma_flags);

static inline void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma)
{
	return __lru_cache_add_inactive_or_unevictable(page, vma->vm_flags);
}

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
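
/*
 * Example: SWAP_ADDRESS_SPACE_SHIFT == 14 gives one address_space per
 * 2^14 pages, i.e. 64 MB of swap with 4 KiB pages.  An entry of type 0
 * with offset 0x12345 maps to swapper_spaces[0][0x12345 >> 14], the
 * fifth address_space of that device.
 */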

extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp);
extern void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively... */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
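
/*
 * Example: with total_swap_pages == 1000, vm_swap_full() becomes true once
 * fewer than 500 pages remain free (free * 2 < total), i.e. once swap is
 * more than half used.
 */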

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
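
/*
 * Typical entry lifecycle (a sketch pieced together from the declarations
 * above): get_swap_page() allocates an entry with one reference; each
 * further pte sharing the page calls swap_duplicate(); each pte teardown
 * calls swap_free(); free_swap_and_cache() drops a reference and also
 * reclaims the swap-cache page once the entry becomes unused.
 */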

int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	rcu_read_unlock();
}
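
/*
 * Usage sketch for the get/put pair, which keeps swapoff from releasing
 * the device while an entry is in use (get_swap_device() returns NULL if
 * the entry is no longer valid):
 *
 *	si = get_swap_device(entry);
 *	if (si) {
 *		... use *si and the entry ...
 *		put_swap_device(si);
 *	}
 */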

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
				gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	return find_get_page(mapping, index);
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
					swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
						unsigned long end)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(page, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */