/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swap types: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

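/*
 * Worked example (illustrative, assuming the 5/27 split above and a 4 KiB
 * PAGE_SIZE): 32 - MAX_SWAPFILES_SHIFT = 27 offset bits, so each swap type
 * can address 2^27 slots, i.e. 2^27 * 4 KiB = 512 GiB of swap.
 */
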
/*
 * Use some of the swap file numbers for other purposes.  This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * PTE markers are used to persist information onto PTEs that would
 * otherwise be none ptes.  As the name "PTE" hints, they should only be
 * applied to the leaves of pgtables.
 */
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER		(MAX_SWAPFILES + SWP_HWPOISON_NUM + \
				 SWP_MIGRATION_NUM + SWP_DEVICE_NUM)

/*
 * Unaddressable device memory support.  See include/linux/hmm.h and
 * Documentation/mm/hmm.rst.  In short, we need struct pages for device
 * memory that is unaddressable (inaccessible) by the CPU, so that we can
 * migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table
 * entry to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU
 * page table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of an) anonymous page is exclusive to
 * a single process.  For SWP_MIGRATION_WRITE, that information is implicit:
 * (parts of) an anonymous page that are mapped writable are exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM)

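/*
 * Worked example (illustrative): with CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION
 * and CONFIG_MEMORY_FAILURE all enabled, the special types above consume
 * 4 + 3 + 1 + 1 = 9 of the 2^5 = 32 encodable types, leaving
 * MAX_SWAPFILES = 23 for real swap devices.
 */
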
/*
 * Magic header for a swap area.  The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information.  Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

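/*
 * Minimal userspace sketch (illustrative only, not part of the kernel API):
 * detect the swap signature by reading the last 10 bytes of the first page
 * of a prospective swap file, matching the layout of union swap_header
 * above.  Assumes a 4096-byte page size; a real tool would query it at
 * runtime.
 */
#if 0	/* example only, never compiled with the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int looks_like_swap(const char *path)
{
	char magic[10];
	int ok = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 0;
	if (lseek(fd, 4096 - sizeof(magic), SEEK_SET) >= 0 &&
	    read(fd, magic, sizeof(magic)) == (ssize_t)sizeof(magic))
		ok = !memcmp(magic, "SWAPSPACE2", 10) ||
		     !memcmp(magic, "SWAP-SPACE", 10);
	close(fd);
	return ok;
}
#endif
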
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim.
 */
struct reclaim_state {
	/* pages reclaimed outside of LRU-based reclaim */
	unsigned long reclaimed;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

/*
 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
 * reclaim
 * @pages: number of pages reclaimed
 *
 * If the current process is undergoing a reclaim operation, increment the
 * number of reclaimed pages by @pages.
 */
static inline void mm_account_reclaimed_pages(unsigned long pages)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed += pages;
}

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a block device or an IS_REG file).  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Maximum number of bad pages in the new format.
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))

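/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE): magic.magic
 * starts at PAGE_SIZE - 10 = 4086, info.badpages starts at
 * 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so MAX_SWAP_BADPAGES =
 * (4086 - 1536) / 4 = 637 entries.
 */
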
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* don't overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
	/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flags in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special values in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */

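/*
 * Worked example (illustrative): in the first swap_map byte the low six
 * bits hold the map count, so 0x41 = SWAP_HAS_CACHE | 1 means one mapping
 * plus a swapcache reference, and 0x3e (SWAP_MAP_MAX) is the point at which
 * COUNT_CONTINUED sends any further references to a continuation page.
 */
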
/*
 * We use this to track usage of a cluster.  A cluster is a block of swap disk
 * space that is SWAPFILE_CLUSTER pages long and naturally aligned on disk.
 * All free clusters are organized into a list.  We fetch an entry from the
 * list to get a free cluster.
 *
 * The flags field determines if a cluster is free.  This is
 * protected by the cluster lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * other than list, and swap_info_struct->swap_map
				 * elements corresponding to the swap cluster.
				 */
	u16 count;
	u8 flags;
	u8 order;
	struct list_head list;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */
#define CLUSTER_FLAG_FRAG 4 /* This cluster is on frag list */
#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */

/*
 * The first page in the swap file is the swap header, which is always marked
 * bad to prevent it from being allocated as an entry.  This also prevents the
 * cluster to which it belongs from being marked free.  Therefore 0 is safe to
 * use as a sentinel to indicate that next is not valid in percpu_cluster.
 */
#define SWAP_NEXT_INVALID	0

#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS		(PMD_ORDER + 1)
#else
#define SWAP_NR_ORDERS		1
#endif

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entries from
 * its own cluster and swap out sequentially.  The purpose is to optimize
 * swapout throughput.
 */
struct percpu_cluster {
	unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned long *zeromap;		/* kvmalloc'ed bitmap to track zero pages */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct list_head free_clusters; /* free clusters list */
	struct list_head full_clusters; /* full clusters list */
	struct list_head nonfull_clusters[SWAP_NR_ORDERS];
					/* list of clusters that contain at least one free slot */
	struct list_head frag_clusters[SWAP_NR_ORDERS];
					/* list of clusters that are fragmented or contended */
	unsigned int frag_cluster_nr[SWAP_NR_ORDERS];
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	struct completion comp;		/* seldom referenced */
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list.  Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock.  Changing flags needs
					 * to hold this lock and swap_lock.  If
					 * both locks need to be held, hold
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct list_head discard_clusters; /* discard clusters list */
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last as the number of
					  * array elements is nr_node_ids,
					  * which is not a fixed value, so it
					  * has to be allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};

static inline swp_entry_t page_swap_entry(struct page *page)
{
	struct folio *folio = page_folio(page);
	swp_entry_t entry = folio->swap;

	entry.val += folio_page_idx(folio, page);
	return entry;
}

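/*
 * For example (illustrative): if a swapped-out large folio has folio->swap
 * pointing at slot N, the third page of that folio maps to swap entry N + 2,
 * since folio_page_idx() returns the page's index within the folio.
 */
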
/* linux/mm/workingset.c */
bool workingset_test_recent(void *shadow, bool file, bool *workingset,
				bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file,
		unsigned int nr_io, unsigned int nr_rotated);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
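
/*
 * Illustrative usage sketch (not part of the original header): callers that
 * must not race with pages parked in per-CPU LRU batches pair
 * lru_cache_disable() with lru_cache_enable() around the critical section.
 * The wrapper and callback names below are hypothetical.
 */
static inline void example_with_lru_batching_disabled(void (*fn)(void *),
						      void *arg)
{
	lru_cache_disable();	/* drain pending batches and block new ones */
	fn(arg);
	lru_cache_enable();	/* re-enable per-CPU LRU batching */
}
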
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP		(1 << 1)
#define MEMCG_RECLAIM_PROACTIVE		(1 << 2)
#define MIN_SWAPPINESS			0
#define MAX_SWAPPINESS			200
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options,
						  int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);

extern void __meminit kswapd_run(int nid);
extern void __meminit kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

void free_swap_cache(struct folio *folio);
void free_page_and_swap_cache(struct page *);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t entry, int nr);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}

#else /* CONFIG_SWAP */
static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* Only sparc cannot include linux/pagemap.h in this file,
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}

static inline void free_swap_cache(struct folio *folio)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
	return 0;
}

static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */
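
/*
 * Illustrative usage sketch (not part of the original header): pin the swap
 * device behind @entry with get_swap_device() before dereferencing its
 * swap_info_struct, and drop the reference with put_swap_device().  The
 * helper name below is hypothetical; with !CONFIG_SWAP it simply returns
 * false because get_swap_device() returns NULL.
 */
static inline bool example_entry_uses_synchronous_io(swp_entry_t entry)
{
	struct swap_info_struct *si = get_swap_device(entry);
	bool ret;

	if (!si)
		return false;	/* entry is stale or device is swapping off */
	ret = si->flags & SWP_SYNCHRONOUS_IO;
	put_swap_device(si);
	return ret;
}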

static inline void free_swap_and_cache(swp_entry_t entry)
{
	free_swap_and_cache_nr(entry, 1);
}

static inline void swap_free(swp_entry_t entry)
{
	swap_free_nr(entry, 1);
}

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return READ_ONCE(vm_swappiness);

	/* root cgroup or memcg disabled? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return READ_ONCE(vm_swappiness);

	return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return READ_ONCE(vm_swappiness);
}
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return;
	__folio_throttle_swaprate(folio, gfp);
}
#else
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_SWAP_H */