mm/swapfile: use percpu_ref to serialize against concurrent swapoff
include/linux/swap.h (linux-2.6-block.git)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH		64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to. The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
 * for the type means the offset is limited to 27 bits on 32-bit-pgoff_t
 * architectures, i.e. at most 2^27 swapcache pages. And that assumes that
 * the architecture packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_* entry.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 2
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

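/*
 * Worked example (illustrative, not part of this header): with
 * MAX_SWAPFILES_SHIFT == 5 and a 32-bit pgoff_t, a swap entry packs as
 * type/offset = 5/27, so each swap area is limited to 2^27 pages
 * (512GB with 4K pages). The accessors live in <linux/swapops.h>:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	unsigned int type = swp_type(entry);	// top 5 bits
 *	pgoff_t offset = swp_offset(entry);	// low 27 bits
 */
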
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks. A list of swap extents maps the entire swapfile. (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

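/*
 * Worked example (illustrative): info.badpages starts at byte offset
 * 1024 + 3 * 4 + 16 + 16 + 117 * 4 = 1536, while magic.magic starts at
 * PAGE_SIZE - 10. With 4K pages that leaves (4096 - 10 - 1536) / 4 = 637
 * slots in badpages[], so at most 637 bad pages can be recorded.
 */
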
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

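/*
 * Illustrative decode of a first-level swap_map byte (a sketch; the real
 * helpers, e.g. swap_count() in mm/swapfile.c, mask the same way):
 *
 *	unsigned char ent = si->swap_map[offset];
 *	bool cached = ent & SWAP_HAS_CACHE;	// entry also in swap cache
 *	unsigned char count = ent & ~SWAP_HAS_CACHE;
 *	// count == SWAP_MAP_BAD / SWAP_MAP_SHMEM are special markers;
 *	// COUNT_CONTINUED means the count overflows into continuation
 *	// pages, see add_swap_count_continuation() below.
 */
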
/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and the swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */

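/*
 * Sketch of how the 24/8 split above is used (illustrative only; the real
 * accessors, e.g. cluster_count()/cluster_next(), live in mm/swapfile.c):
 *
 *	if (ci->flags & CLUSTER_FLAG_FREE)
 *		next_free = ci->data;	// index of the next free cluster
 *	else
 *		in_use = ci->data;	// pages allocated in this cluster
 *
 * Either way ci->lock must be held, as the comment above requires.
 */
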
/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc and the free/discard
					 * cluster lists. Other fields are only
					 * changed at swapon/swapoff, so are
					 * protected by swap_lock. Changing
					 * flags requires holding both this
					 * lock and swap_lock; when both are
					 * needed, take swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last as the length of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
		xas_set_update(xas, workingset_update_node);		\
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
			  unsigned int nr_pages);
extern void lru_note_cost_page(struct page *);
extern void lru_cache_add(struct page *);
extern void mark_page_accessed(struct page *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])

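/*
 * Example (illustrative): with SWAP_ADDRESS_SPACE_SHIFT == 14, each
 * address_space covers 2^14 slots, i.e. 64MB of 4K pages, so a larger
 * swap area is sharded over several address_spaces to spread lock
 * contention. A swap cache lookup then reads:
 *
 *	struct address_space *as = swap_address_space(entry);
 *	struct page *page = find_get_page(as, swp_offset(entry));
 */
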
static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp);
extern void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
				      struct vm_area_struct *vma,
				      unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
				struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}

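/*
 * Usage sketch for the percpu_ref based scheme above (illustrative):
 * get_swap_device() returns the swap_info_struct with si->users held, or
 * NULL if the entry is stale or the device is concurrently being swapped
 * off; each successful get must be paired with put_swap_device().
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *	if (si) {
 *		// safe to touch si->swap_map etc.; swapoff cannot
 *		// complete until the reference is dropped
 *		put_swap_device(si);
 *	}
 *
 * swapoff kills si->users and waits on si->comp until every such
 * reference has been released before tearing the device down.
 */
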
#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
	return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

#define swap_address_space(entry)		(NULL)
#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	return find_get_page(mapping, index);
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
					swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
					unsigned long end)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */