include/linux/swap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK     0x7fff
#define SWAP_FLAG_PRIO_SHIFT    0
#define SWAP_FLAG_DISCARD       0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE  0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID        (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
                                 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
                                 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64
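
/*
 * Illustrative sketch (not part of this header): how a swapon(2) caller
 * might ask for an explicit priority using the flag bits above.
 * example_swapon_prio_flags() is a made-up helper; the real decoding of
 * these bits happens in sys_swapon() in mm/swapfile.c.
 */
static inline int example_swapon_prio_flags(int prio)
{
        return SWAP_FLAG_PREFER |
               ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
}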

static inline int current_is_kswapd(void)
{
        return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT     5
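
/*
 * Illustrative sketch (not part of this header): with the 5/27 split
 * described above, a 32-bit swap value could carry the type in the top
 * five bits and the offset in the remaining 27, capping each swap area
 * at 2^27 pages (512 GiB with 4 KiB pages).  example_swp_pack() is made
 * up; the real encoding is per-architecture, via __swp_entry(),
 * swp_type() and swp_offset().
 */
static inline unsigned long example_swp_pack(unsigned int type, unsigned long offset)
{
        return ((unsigned long)type << (32 - MAX_SWAPFILES_SHIFT)) |
               (offset & ((1UL << (32 - MAX_SWAPFILES_SHIFT)) - 1));
}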

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/vm/hmm.rst. In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON            MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
        ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
        SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
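
/*
 * Worked example (illustrative): MAX_SWAPFILES_SHIFT == 5 gives 32 encodable
 * types.  With CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE all enabled, 4 + 2 + 1 of them are reserved for the
 * special entries above, leaving MAX_SWAPFILES == 25 real swap areas; types
 * 25..31 then denote the hwpoison, migration and device entries.
 */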

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
        struct {
                char reserved[PAGE_SIZE - 10];
                char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
        } magic;
        struct {
                char            bootbits[1024]; /* Space for disklabel etc. */
                __u32           version;
                __u32           last_page;
                __u32           nr_badpages;
                unsigned char   sws_uuid[16];
                unsigned char   sws_volume[16];
                __u32           padding[117];
                __u32           badpages[1];
        } info;
};
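
/*
 * Illustrative sketch (not part of this header): how a swap signature check
 * might look.  example_is_swap_signature() is made up and assumes memcmp()
 * from <linux/string.h>; the real probing of the header is done during
 * swapon in mm/swapfile.c.
 */
static inline bool example_is_swap_signature(const union swap_header *hdr)
{
        return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
               !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
}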

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
        unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
        struct rb_node rb_node;
        pgoff_t start_page;
        pgoff_t nr_pages;
        sector_t start_block;
};
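
/*
 * Illustrative sketch (not part of this header): once the extent covering a
 * swap page is known, the on-disk block is a simple linear offset from the
 * extent start.  example_extent_block() is made up; the real lookup walks
 * the extent rbtree in mm/swapfile.c.
 */
static inline sector_t example_extent_block(const struct swap_extent *se,
                                            pgoff_t swap_page)
{
        return se->start_block + (swap_page - se->start_page);
}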

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
        ((offsetof(union swap_header, magic.magic) - \
          offsetof(union swap_header, info.badpages)) / sizeof(int))
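
/*
 * Worked example (illustrative): with 4 KiB pages, magic.magic starts at
 * offset 4096 - 10 = 4086 and info.badpages at 1024 + 3*4 + 16 + 16 + 117*4
 * = 1536, so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */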

enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?   */
        SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
        SWP_BLKDEV      = (1 << 6),     /* it's a block device */
        SWP_ACTIVATED   = (1 << 7),     /* set after swap_activate success */
        SWP_FS_OPS      = (1 << 8),     /* swapfile operations go through fs */
        SWP_AREA_DISCARD = (1 << 9),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 10),   /* freed swap page-cluster discards */
        SWP_STABLE_WRITES = (1 << 11),  /* no overwrite PG_writeback pages */
        SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
                                        /* add others here before... */
        SWP_SCANNING    = (1 << 14),    /* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED 0x80    /* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX    0x3e    /* Max count */
#define SWAP_MAP_BAD    0x3f    /* Note page is bad */
#define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX   0x7f    /* Max count */
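
/*
 * Illustrative sketch (not part of this header): a swap_map byte carries a
 * reference count plus the SWAP_HAS_CACHE flag.  example_swap_count() is a
 * made-up mirror of the swap_count() helper in mm/swapfile.c.
 */
static inline unsigned char example_swap_count(unsigned char ent)
{
        return ent & ~SWAP_HAS_CACHE;   /* drop the "has cache" bit, keep the count */
}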

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long that is naturally aligned on disk. All
 * free clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
        spinlock_t lock;        /*
                                 * Protect swap_cluster_info fields
                                 * and swap_info_struct->swap_map
                                 * elements corresponding to the swap
                                 * cluster
                                 */
        unsigned int data:24;
        unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
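
/*
 * Illustrative sketch (not part of this header): testing the flags field of
 * a cluster.  example_cluster_is_free() is made up but mirrors the
 * cluster_is_free() helper in mm/swapfile.c.
 */
static inline bool example_cluster_is_free(const struct swap_cluster_info *ci)
{
        return ci->flags & CLUSTER_FLAG_FREE;
}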

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially. The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
        struct swap_cluster_info index; /* Current cluster index */
        unsigned int next;              /* Likely next allocation offset */
};

struct swap_cluster_list {
        struct swap_cluster_info head;
        struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
        struct percpu_ref users;        /* indicate and keep swap device valid. */
        unsigned long   flags;          /* SWP_USED etc: see above */
        signed short    prio;           /* swap priority of this type */
        struct plist_node list;         /* entry in swap_active_head */
        signed char     type;           /* strange name for an index */
        unsigned int    max;            /* extent of the swap_map */
        unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
        struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
        struct swap_cluster_list free_clusters; /* free clusters list */
        unsigned int lowest_bit;        /* index of first free in swap_map */
        unsigned int highest_bit;       /* index of last free in swap_map */
        unsigned int pages;             /* total of usable pages of swap */
        unsigned int inuse_pages;       /* number of those currently in use */
        unsigned int cluster_next;      /* likely index for next allocation */
        unsigned int cluster_nr;        /* countdown to next cluster search */
        unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
        struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
        struct rb_root swap_extent_root;/* root of the swap extent rbtree */
        struct block_device *bdev;      /* swap device or bdev of swap file */
        struct file *swap_file;         /* seldom referenced */
        unsigned int old_block_size;    /* seldom referenced */
        struct completion comp;         /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
        unsigned long *frontswap_map;   /* frontswap in-use, one bit per page */
        atomic_t frontswap_pages;       /* frontswap pages in-use counter */
#endif
        spinlock_t lock;                /*
                                         * protect map scan related fields like
                                         * swap_map, lowest_bit, highest_bit,
                                         * inuse_pages, cluster_next,
                                         * cluster_nr, lowest_alloc,
                                         * highest_alloc, free/discard cluster
                                         * list. Other fields are only changed
                                         * at swapon/swapoff, so are protected
                                         * by swap_lock. Changing flags needs
                                         * to hold this lock and swap_lock. If
                                         * both locks are needed, hold
                                         * swap_lock first.
                                         */
        spinlock_t cont_lock;           /*
                                         * protect swap count continuation page
                                         * list.
                                         */
        struct work_struct discard_work; /* discard worker */
        struct swap_cluster_list discard_clusters; /* discard clusters list */
        struct plist_node avail_lists[]; /*
                                          * entries in swap_avail_heads, one
                                          * entry per node.
                                          * Must be last, as the number of
                                          * entries is nr_node_ids, which is
                                          * not a fixed value, so it has to be
                                          * allocated dynamically.
                                          * And it has to be an array so that
                                          * plist_for_each_* can work.
                                          */
};

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING   5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING   3
#define SWAP_RA_PTE_CACHE_SIZE  (1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
#ifdef CONFIG_64BIT
        pte_t *ptes;
#else
        pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
        swp_entry_t entry = { .val = page_private(&folio->page) };
        return entry;
}

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do {                           \
        if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
                xas_set_update(xas, workingset_update_node);            \
} while (0)

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void lru_note_cost(struct lruvec *lruvec, bool file,
                          unsigned int nr_pages);
extern void lru_note_cost_folio(struct folio *);
extern void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
        return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
        atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
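
/*
 * Illustrative usage (sketch, not part of this header): callers that must
 * not race with pages sitting in the per-CPU LRU pagevecs bracket their
 * work with lru_cache_disable()/lru_cache_enable().
 * example_isolate_range() is made up.
 */
static inline void example_isolate_range(void)
{
        lru_cache_disable();    /* drain pagevecs and keep them disabled */
        /* ... isolate and migrate the pages of interest here ... */
        lru_cache_enable();
}
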
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
                                                  bool may_swap);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                pg_data_t *pgdat,
                                                unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
        /* Is any node_reclaim_mode bit set? */
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

extern void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
extern int swap_readpage(struct page *page, bool do_poll);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
        bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
                unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
                sector_t *);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])
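
/*
 * Worked example (illustrative): SWAP_ADDRESS_SPACE_SHIFT == 14 means each
 * address space covers 2^14 swap slots, i.e. 64 MiB of swap with 4 KiB
 * pages, which is where the "one swap address space for each 64M" above
 * comes from.
 */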
static inline unsigned long total_swapcache_pages(void)
{
        return global_node_page_state(NR_SWAPCACHE);
}

extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp);
extern void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow);
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end);
extern void free_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
                                      struct vm_area_struct *vma,
                                      unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool do_poll);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
                                struct vm_fault *vmf);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
        return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
        return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);

static inline void put_swap_device(struct swap_info_struct *si)
{
        percpu_ref_put(&si->users);
}
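
/*
 * Illustrative usage (sketch, not part of this header): get_swap_device()
 * pins the swap device so the entry cannot be swapped off underneath the
 * caller, and must be paired with put_swap_device().
 * example_peek_swap_count() is made up.
 */
static inline int example_peek_swap_count(swp_entry_t entry)
{
        struct swap_info_struct *si = get_swap_device(entry);
        int count = 0;

        if (si) {
                count = __swap_count(entry);
                put_swap_device(si);
        }
        return count;
}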

#else /* CONFIG_SWAP */

static inline int swap_readpage(struct page *page, bool do_poll)
{
        return 0;
}

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
        return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
        return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
        return NULL;
}

#define get_nr_swap_pages()                     0L
#define total_swap_pages                        0L
#define total_swapcache_pages()                 0UL
#define vm_swap_full()                          0

#define si_swapinfo(val) \
        do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
        put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
        release_pages((pages), (nr));

static inline void free_swap_cache(struct page *page)
{
}

static inline void show_swap_cache_info(void)
{
}

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
        return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
        return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
                        gfp_t gfp_mask, struct vm_fault *vmf)
{
        return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
                        struct vm_fault *vmf)
{
        return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
        return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
{
        return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        return find_get_page(mapping, index);
}

static inline int add_to_swap(struct page *page)
{
        return 0;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                        gfp_t gfp_mask, void **shadowp)
{
        return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
                                        swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
}

static inline int page_swapcount(struct page *page)
{
        return 0;
}

static inline int __swap_count(swp_entry_t entry)
{
        return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
        return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
        return 0;
}

#define reuse_swap_page(page, total_map_swapcount) \
        (page_trans_huge_mapcount(page, total_map_swapcount) == 1)

static inline int try_to_free_swap(struct page *page)
{
        return 0;
}

static inline swp_entry_t get_swap_page(struct page *page)
{
        swp_entry_t entry;
        entry.val = 0;
        return entry;
}

#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
        return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
        /* Cgroup2 doesn't have per-cgroup swappiness */
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return vm_swappiness;

        /* root ? */
        if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
                return vm_swappiness;

        return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
        return vm_swappiness;
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
        if (mem_cgroup_disabled())
                return;
        __cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif

#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_try_charge_swap(page, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct page *page,
                                             swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                                            unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct page *page)
{
        return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */