// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C)  1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blk-cgroup.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/zswap.h>
#include <linux/plist.h>

#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include "internal.h"
#include "swap.h"

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
				  unsigned int nr_pages);
static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
			     unsigned int nr_entries);
static bool folio_swapcache_freeable(struct folio *folio);
static struct swap_cluster_info *lock_cluster_or_swap_info(
		struct swap_info_struct *si, unsigned long offset);
static void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					struct swap_cluster_info *ci);

static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;
unsigned long swapfile_maximum_size;
#ifdef CONFIG_MIGRATION
bool swap_migration_ad_supported;
#endif	/* CONFIG_MIGRATION */

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
static PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by folio_alloc_swap() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but folio_alloc_swap() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

static struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);

static struct swap_info_struct *swap_type_to_swap_info(int type)
{
	if (type >= MAX_SWAPFILES)
		return NULL;

	return READ_ONCE(swap_info[type]); /* rcu_dereference() */
}

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
}

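/*
 * Illustrative example (not part of the original source): a slot mapped
 * by one PTE and also present in the swap cache stores
 * (1 | SWAP_HAS_CACHE) in si->swap_map[offset]; swap_count() masks off
 * SWAP_HAS_CACHE and returns 1.
 */
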
/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY		0x1
/*
 * Reclaim the swap entry if there are no more mappings of the
 * corresponding page
 */
#define TTRS_UNMAPPED		0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL		0x4
/* Reclaim directly, bypass the slot cache and don't touch device lock */
#define TTRS_DIRECT		0x8

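/*
 * Example combination (as used by the callers below): reclaim a
 * cache-only slot immediately, bypassing the slot cache:
 *
 *	__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
 */
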
static bool swap_is_has_cache(struct swap_info_struct *si,
			      unsigned long offset, int nr_pages)
{
	unsigned char *map = si->swap_map + offset;
	unsigned char *map_end = map + nr_pages;

	do {
		VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
		if (*map != SWAP_HAS_CACHE)
			return false;
	} while (++map < map_end);

	return true;
}

static bool swap_is_last_map(struct swap_info_struct *si,
			     unsigned long offset, int nr_pages, bool *has_cache)
{
	unsigned char *map = si->swap_map + offset;
	unsigned char *map_end = map + nr_pages;
	unsigned char count = *map;

	if (swap_count(count) != 1)
		return false;

	while (++map < map_end) {
		if (*map != count)
			return false;
	}

	*has_cache = !!(count & SWAP_HAS_CACHE);
	return true;
}

/*
 * Returns the number of pages in the folio that backs the swap entry. If
 * positive, the folio was reclaimed. If negative, the folio was not
 * reclaimed. If 0, no folio was associated with the swap entry.
 */
static int __try_to_reclaim_swap(struct swap_info_struct *si,
				 unsigned long offset, unsigned long flags)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct address_space *address_space = swap_address_space(entry);
	struct swap_cluster_info *ci;
	struct folio *folio;
	int ret, nr_pages;
	bool need_reclaim;

	folio = filemap_get_folio(address_space, swap_cache_index(entry));
	if (IS_ERR(folio))
		return 0;

	/* offset could point to the middle of a large folio */
	entry = folio->swap;
	offset = swp_offset(entry);
	nr_pages = folio_nr_pages(folio);
	ret = -nr_pages;

	/*
	 * This function is called from scan_swap_map_slots(), which can be
	 * reached from vmscan.c while it is reclaiming folios, so a folio
	 * lock may already be held. We have to use trylock to avoid
	 * deadlock. This is a special case; in usual operations use
	 * folio_free_swap() with an explicit folio_lock().
	 */
	if (!folio_trylock(folio))
		goto out;

	need_reclaim = ((flags & TTRS_ANYWAY) ||
			((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
			((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
	if (!need_reclaim || !folio_swapcache_freeable(folio))
		goto out_unlock;

	/*
	 * It's safe to delete the folio from swap cache only if the folio's
	 * swap_map is HAS_CACHE only, which means the slots have no page table
	 * reference or pending writeback, and can't be allocated to others.
	 */
	ci = lock_cluster_or_swap_info(si, offset);
	need_reclaim = swap_is_has_cache(si, offset, nr_pages);
	unlock_cluster_or_swap_info(si, ci);
	if (!need_reclaim)
		goto out_unlock;

	if (!(flags & TTRS_DIRECT)) {
		/* Free through slot cache */
		delete_from_swap_cache(folio);
		folio_set_dirty(folio);
		ret = nr_pages;
		goto out_unlock;
	}

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);
	folio_ref_sub(folio, nr_pages);
	folio_set_dirty(folio);

	spin_lock(&si->lock);
	/* Only single page folios can be backed by zswap */
	if (nr_pages == 1)
		zswap_invalidate(entry);
	swap_entry_range_free(si, entry, nr_pages);
	spin_unlock(&si->lock);
	ret = nr_pages;
out_unlock:
	folio_unlock(folio);
out:
	folio_put(folio);
	return ret;
}

static inline struct swap_extent *first_se(struct swap_info_struct *sis)
{
	struct rb_node *rb = rb_first(&sis->swap_extent_root);
	return rb_entry(rb, struct swap_extent, rb_node);
}

static inline struct swap_extent *next_se(struct swap_extent *se)
{
	struct rb_node *rb = rb_next(&se->rb_node);
	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = first_se(si);
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL);
		if (err)
			return err;
		cond_resched();
	}

	for (se = next_se(se); se; se = next_se(se)) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

static struct swap_extent *
offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
{
	struct swap_extent *se;
	struct rb_node *rb;

	rb = sis->swap_extent_root.rb_node;
	while (rb) {
		se = rb_entry(rb, struct swap_extent, rb_node);
		if (offset < se->start_page)
			rb = rb->rb_left;
		else if (offset >= se->start_page + se->nr_pages)
			rb = rb->rb_right;
		else
			return se;
	}
	/* It *must* be present */
	BUG();
}

sector_t swap_folio_sector(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_extent *se;
	sector_t sector;
	pgoff_t offset;

	offset = swp_offset(folio->swap);
	se = offset_to_swap_extent(sis, offset);
	sector = se->start_block + (offset - se->start_page);
	return sector << (PAGE_SHIFT - 9);
}

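/*
 * Worked example (illustrative, assuming 4 KiB pages so that
 * PAGE_SHIFT - 9 == 3): for an extent with start_page == 16 and
 * start_block == 1024, swap offset 20 maps to block
 * 1024 + (20 - 16) == 1028, i.e. 512-byte sector 1028 << 3 == 8224.
 */
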
/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = offset_to_swap_extent(si, start_page);

	while (nr_pages) {
		pgoff_t offset = start_page - se->start_page;
		sector_t start_block = se->start_block + offset;
		sector_t nr_blocks = se->nr_pages - offset;

		if (nr_blocks > nr_pages)
			nr_blocks = nr_pages;
		start_page += nr_blocks;
		nr_pages -= nr_blocks;

		start_block <<= PAGE_SHIFT - 9;
		nr_blocks <<= PAGE_SHIFT - 9;
		if (blkdev_issue_discard(si->bdev, start_block,
					nr_blocks, GFP_NOIO))
			break;

		se = next_se(se);
	}
}

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER	HPAGE_PMD_NR

#define swap_entry_order(order)	(order)
#else
#define SWAPFILE_CLUSTER	256

/*
 * Define swap_entry_order() as constant to let the compiler optimize
 * out some code if !CONFIG_THP_SWAP
 */
#define swap_entry_order(order)	0
#endif
#define LATENCY_LIMIT		256

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}

static inline unsigned int cluster_index(struct swap_info_struct *si,
					 struct swap_cluster_info *ci)
{
	return ci - si->cluster_info;
}

static inline unsigned int cluster_offset(struct swap_info_struct *si,
					  struct swap_cluster_info *ci)
{
	return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}

static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
						     unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = si->cluster_info;
	if (ci) {
		ci += offset / SWAPFILE_CLUSTER;
		spin_lock(&ci->lock);
	}
	return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
	if (ci)
		spin_unlock(&ci->lock);
}

/*
 * Determine the locking method in use for this device. Return
 * swap_cluster_info if SSD-style cluster-based locking is in place.
 */
static inline struct swap_cluster_info *lock_cluster_or_swap_info(
		struct swap_info_struct *si, unsigned long offset)
{
	struct swap_cluster_info *ci;

	/* Try to use fine-grained SSD-style locking if available: */
	ci = lock_cluster(si, offset);
	/* Otherwise, fall back to traditional, coarse locking: */
	if (!ci)
		spin_lock(&si->lock);

	return ci;
}

static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					       struct swap_cluster_info *ci)
{
	if (ci)
		unlock_cluster(ci);
	else
		spin_unlock(&si->lock);
}

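/*
 * Usage sketch (illustrative): callers pair the two helpers and hand
 * back the returned pointer, which records which lock was taken:
 *
 *	ci = lock_cluster_or_swap_info(si, offset);
 *	... inspect or update si->swap_map[offset] ...
 *	unlock_cluster_or_swap_info(si, ci);
 */
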
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		struct swap_cluster_info *ci)
{
	unsigned int idx = cluster_index(si, ci);
	/*
	 * If scan_swap_map_slots() can't find a free cluster, it will check
	 * si->swap_map directly. To make sure the discarding cluster isn't
	 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
	 * They will be cleared after discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
	list_move_tail(&ci->list, &si->discard_clusters);
	ci->flags = 0;
	schedule_work(&si->discard_work);
}

static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
	lockdep_assert_held(&si->lock);
	lockdep_assert_held(&ci->lock);

	if (ci->flags)
		list_move_tail(&ci->list, &si->free_clusters);
	else
		list_add_tail(&ci->list, &si->free_clusters);
	ci->flags = CLUSTER_FLAG_FREE;
	ci->order = 0;
}

/*
 * Actually do the discards. After a cluster discard is finished, the
 * cluster will be added to the free cluster list. The caller should hold
 * si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *ci;
	unsigned int idx;

	while (!list_empty(&si->discard_clusters)) {
		ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
		list_del(&ci->list);
		idx = cluster_index(si, ci);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		spin_lock(&ci->lock);
		__free_cluster(si, ci);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
		spin_unlock(&ci->lock);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}

static void swap_users_ref_free(struct percpu_ref *ref)
{
	struct swap_info_struct *si;

	si = container_of(ref, struct swap_info_struct, users);
	complete(&si->comp);
}

static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
	VM_BUG_ON(ci->count != 0);
	lockdep_assert_held(&si->lock);
	lockdep_assert_held(&ci->lock);

	if (ci->flags & CLUSTER_FLAG_FRAG)
		si->frag_cluster_nr[ci->order]--;

	/*
	 * If the swap is discardable, schedule a discard of the cluster
	 * instead of freeing it immediately. The cluster will be freed
	 * after discard.
	 */
	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
		swap_cluster_schedule_discard(si, ci);
		return;
	}

	__free_cluster(si, ci);
}

/*
 * The cluster corresponding to page_nr will be used. The cluster will not be
 * added to the free cluster list and its usage counter will be increased by 1.
 * Only used for initialization.
 */
static void inc_cluster_info_page(struct swap_info_struct *si,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;

	if (!cluster_info)
		return;

	ci = cluster_info + idx;
	ci->count++;

	VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
	VM_BUG_ON(ci->flags);
}

/*
 * The cluster ci decreases @nr_pages usage. If the usage counter becomes 0,
 * which means no page in the cluster is in use, we can optionally discard
 * the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *si,
				  struct swap_cluster_info *ci, int nr_pages)
{
	if (!si->cluster_info)
		return;

	VM_BUG_ON(ci->count < nr_pages);
	VM_BUG_ON(cluster_is_free(ci));
	lockdep_assert_held(&si->lock);
	lockdep_assert_held(&ci->lock);
	ci->count -= nr_pages;

	if (!ci->count) {
		free_cluster(si, ci);
		return;
	}

	if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
		VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
		if (ci->flags & CLUSTER_FLAG_FRAG)
			si->frag_cluster_nr[ci->order]--;
		list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
		ci->flags = CLUSTER_FLAG_NONFULL;
	}
}

static bool cluster_reclaim_range(struct swap_info_struct *si,
				  struct swap_cluster_info *ci,
				  unsigned long start, unsigned long end)
{
	unsigned char *map = si->swap_map;
	unsigned long offset;

	spin_unlock(&ci->lock);
	spin_unlock(&si->lock);

	for (offset = start; offset < end; offset++) {
		switch (READ_ONCE(map[offset])) {
		case 0:
			continue;
		case SWAP_HAS_CACHE:
			if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0)
				continue;
			goto out;
		default:
			goto out;
		}
	}
out:
	spin_lock(&si->lock);
	spin_lock(&ci->lock);

	/*
	 * Recheck the range no matter whether reclaim succeeded or not, the
	 * slot could have been freed while we were not holding the lock.
	 */
	for (offset = start; offset < end; offset++)
		if (READ_ONCE(map[offset]))
			return false;

	return true;
}

static bool cluster_scan_range(struct swap_info_struct *si,
			       struct swap_cluster_info *ci,
			       unsigned long start, unsigned int nr_pages)
{
	unsigned long offset, end = start + nr_pages;
	unsigned char *map = si->swap_map;
	bool need_reclaim = false;

	for (offset = start; offset < end; offset++) {
		switch (READ_ONCE(map[offset])) {
		case 0:
			continue;
		case SWAP_HAS_CACHE:
			if (!vm_swap_full())
				return false;
			need_reclaim = true;
			continue;
		default:
			return false;
		}
	}

	if (need_reclaim)
		return cluster_reclaim_range(si, ci, start, end);

	return true;
}

static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
				unsigned int start, unsigned char usage,
				unsigned int order)
{
	unsigned int nr_pages = 1 << order;

	if (cluster_is_free(ci)) {
		if (nr_pages < SWAPFILE_CLUSTER) {
			list_move_tail(&ci->list, &si->nonfull_clusters[order]);
			ci->flags = CLUSTER_FLAG_NONFULL;
		}
		ci->order = order;
	}

	memset(si->swap_map + start, usage, nr_pages);
	swap_range_alloc(si, start, nr_pages);
	ci->count += nr_pages;

	if (ci->count == SWAPFILE_CLUSTER) {
		VM_BUG_ON(!(ci->flags &
			  (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
		if (ci->flags & CLUSTER_FLAG_FRAG)
			si->frag_cluster_nr[ci->order]--;
		list_move_tail(&ci->list, &si->full_clusters);
		ci->flags = CLUSTER_FLAG_FULL;
	}
}

static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
					    unsigned int *foundp, unsigned int order,
					    unsigned char usage)
{
	unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1);
	unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
	unsigned int nr_pages = 1 << order;
	struct swap_cluster_info *ci;

	if (end < nr_pages)
		return SWAP_NEXT_INVALID;
	end -= nr_pages;

	ci = lock_cluster(si, offset);
	if (ci->count + nr_pages > SWAPFILE_CLUSTER) {
		offset = SWAP_NEXT_INVALID;
		goto done;
	}

	while (offset <= end) {
		if (cluster_scan_range(si, ci, offset, nr_pages)) {
			cluster_alloc_range(si, ci, offset, usage, order);
			*foundp = offset;
			if (ci->count == SWAPFILE_CLUSTER) {
				offset = SWAP_NEXT_INVALID;
				goto done;
			}
			offset += nr_pages;
			break;
		}
		offset += nr_pages;
	}
	if (offset > end)
		offset = SWAP_NEXT_INVALID;
done:
	unlock_cluster(ci);
	return offset;
}

static void swap_reclaim_full_clusters(struct swap_info_struct *si)
{
	long to_scan = 1;
	unsigned long offset, end;
	struct swap_cluster_info *ci;
	unsigned char *map = si->swap_map;
	int nr_reclaim, total_reclaimed = 0;

	if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER)
		to_scan = si->inuse_pages / SWAPFILE_CLUSTER;

	while (!list_empty(&si->full_clusters)) {
		ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list);
		list_move_tail(&ci->list, &si->full_clusters);
		offset = cluster_offset(si, ci);
		end = min(si->max, offset + SWAPFILE_CLUSTER);
		to_scan--;

		while (offset < end) {
			if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
				spin_unlock(&si->lock);
				nr_reclaim = __try_to_reclaim_swap(si, offset,
								   TTRS_ANYWAY | TTRS_DIRECT);
				spin_lock(&si->lock);
				if (nr_reclaim > 0) {
					offset += nr_reclaim;
					total_reclaimed += nr_reclaim;
					continue;
				} else if (nr_reclaim < 0) {
					offset += -nr_reclaim;
					continue;
				}
			}
			offset++;
		}
		if (to_scan <= 0 || total_reclaimed)
			break;
	}
}

/*
 * Try to get swap entries with the specified order from the current CPU's
 * swap entry pool (a cluster). This might involve allocating a new cluster
 * for the current CPU too.
 */
static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
					      unsigned char usage)
{
	struct percpu_cluster *cluster;
	struct swap_cluster_info *ci;
	unsigned int offset, found = 0;

new_cluster:
	lockdep_assert_held(&si->lock);
	cluster = this_cpu_ptr(si->percpu_cluster);
	offset = cluster->next[order];
	if (offset) {
		offset = alloc_swap_scan_cluster(si, offset, &found, order, usage);
		if (found)
			goto done;
	}

	if (!list_empty(&si->free_clusters)) {
		ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
		offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
		VM_BUG_ON(!found);
		goto done;
	}

	if (order < PMD_ORDER) {
		unsigned int frags = 0;

		while (!list_empty(&si->nonfull_clusters[order])) {
			ci = list_first_entry(&si->nonfull_clusters[order],
					      struct swap_cluster_info, list);
			list_move_tail(&ci->list, &si->frag_clusters[order]);
			ci->flags = CLUSTER_FLAG_FRAG;
			si->frag_cluster_nr[order]++;
			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
							 &found, order, usage);
			frags++;
			if (found)
				break;
		}

		if (!found) {
			/*
			 * Nonfull clusters are moved to the frag tail if we
			 * reached here, count them too, don't over scan the
			 * frag list.
			 */
			while (frags < si->frag_cluster_nr[order]) {
				ci = list_first_entry(&si->frag_clusters[order],
						      struct swap_cluster_info, list);
				/*
				 * Rotate the frag list to iterate, they were all
				 * failing high order allocation or moved here due to
				 * per-CPU usage, this helps keep usable clusters ahead.
				 */
				list_move_tail(&ci->list, &si->frag_clusters[order]);
				offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
								 &found, order, usage);
				frags++;
				if (found)
					break;
			}
		}
	}

	if (found)
		goto done;

	if (!list_empty(&si->discard_clusters)) {
		/*
		 * We don't have free clusters but have some clusters in
		 * discarding, do discard now and reclaim them, then
		 * reread cluster_next_cpu since we dropped si->lock.
		 */
		swap_do_scheduled_discard(si);
		goto new_cluster;
	}

	if (order)
		goto done;

	/* Order 0 stealing from higher order */
	for (int o = 1; o < SWAP_NR_ORDERS; o++) {
		/*
		 * Clusters here have at least one usable slot and can't fail
		 * order 0 allocation, but reclaim may drop si->lock and race
		 * with another user.
		 */
		while (!list_empty(&si->frag_clusters[o])) {
			ci = list_first_entry(&si->frag_clusters[o],
					      struct swap_cluster_info, list);
			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
							 &found, 0, usage);
			if (found)
				goto done;
		}

		while (!list_empty(&si->nonfull_clusters[o])) {
			ci = list_first_entry(&si->nonfull_clusters[o],
					      struct swap_cluster_info, list);
			offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
							 &found, 0, usage);
			if (found)
				goto done;
		}
	}

done:
	/* Try reclaim from full clusters if the device is nearly full */
	if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) {
		swap_reclaim_full_clusters(si);
		if (!found && !order && si->pages != si->inuse_pages)
			goto new_cluster;
	}

	cluster->next[order] = offset;
	return found;
}

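/*
 * Summary of the fallback order above, derived from the code for
 * readability: per-CPU next hint -> free clusters -> nonfull clusters
 * of the same order -> fragmented clusters -> clusters pending
 * discard -> (order 0 only) stealing from higher-order lists.
 */
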
static void __del_from_avail_list(struct swap_info_struct *si)
{
	int nid;

	assert_spin_locked(&si->lock);
	for_each_node(nid)
		plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
}

static void del_from_avail_list(struct swap_info_struct *si)
{
	spin_lock(&swap_avail_lock);
	__del_from_avail_list(si);
	spin_unlock(&swap_avail_lock);
}

static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
			     unsigned int nr_entries)
{
	unsigned int end = offset + nr_entries - 1;

	if (offset == si->lowest_bit)
		si->lowest_bit += nr_entries;
	if (end == si->highest_bit)
		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
	WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		del_from_avail_list(si);
	}
}

static void add_to_avail_list(struct swap_info_struct *si)
{
	int nid;

	spin_lock(&swap_avail_lock);
	for_each_node(nid)
		plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
	spin_unlock(&swap_avail_lock);
}

static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
			    unsigned int nr_entries)
{
	unsigned long begin = offset;
	unsigned long end = offset + nr_entries - 1;
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
	unsigned int i;

	/*
	 * Use atomic clear_bit operations only on zeromap instead of non-atomic
	 * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes.
	 */
	for (i = 0; i < nr_entries; i++)
		clear_bit(offset + i, si->zeromap);

	if (offset < si->lowest_bit)
		si->lowest_bit = offset;
	if (end > si->highest_bit) {
		bool was_full = !si->highest_bit;

		WRITE_ONCE(si->highest_bit, end);
		if (was_full && (si->flags & SWP_WRITEOK))
			add_to_avail_list(si);
	}
	if (si->flags & SWP_BLKDEV)
		swap_slot_free_notify =
			si->bdev->bd_disk->fops->swap_slot_free_notify;
	else
		swap_slot_free_notify = NULL;
	while (offset <= end) {
		arch_swap_invalidate_page(si->type, offset);
		if (swap_slot_free_notify)
			swap_slot_free_notify(si->bdev, offset);
		offset++;
	}
	clear_shadow_from_swap_cache(si->type, begin, end);

	/*
	 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
	 * only after the above cleanups are done.
	 */
	smp_wmb();
	atomic_long_add(nr_entries, &nr_swap_pages);
	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
}

static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
{
	unsigned long prev;

	if (!(si->flags & SWP_SOLIDSTATE)) {
		si->cluster_next = next;
		return;
	}

	prev = this_cpu_read(*si->cluster_next_cpu);
	/*
	 * Cross the swap address space size aligned trunk, choose
	 * another trunk randomly to avoid lock contention on swap
	 * address space if possible.
	 */
	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
		/* No free swap slots available */
		if (si->highest_bit <= si->lowest_bit)
			return;
		next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
		next = max_t(unsigned int, next, si->lowest_bit);
	}
	this_cpu_write(*si->cluster_next_cpu, next);
}

static bool swap_offset_available_and_locked(struct swap_info_struct *si,
					     unsigned long offset)
{
	if (data_race(!si->swap_map[offset])) {
		spin_lock(&si->lock);
		return true;
	}

	if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
		spin_lock(&si->lock);
		return true;
	}

	return false;
}

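/*
 * On a true return, si->lock is held and the caller (the scan loops in
 * scan_swap_map_slots() below) jumps back to its "checks:" label to
 * revalidate the slot, since the lockless reads above are only hints.
 */
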
static int cluster_alloc_swap(struct swap_info_struct *si,
			      unsigned char usage, int nr,
			      swp_entry_t slots[], int order)
{
	int n_ret = 0;

	VM_BUG_ON(!si->cluster_info);

	while (n_ret < nr) {
		unsigned long offset = cluster_alloc_swap_entry(si, order, usage);

		if (!offset)
			break;
		slots[n_ret++] = swp_entry(si->type, offset);
	}

	return n_ret;
}

static int scan_swap_map_slots(struct swap_info_struct *si,
			       unsigned char usage, int nr,
			       swp_entry_t slots[], int order)
{
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	unsigned int nr_pages = 1 << order;
	int n_ret = 0;
	bool scanned_many = false;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	if (order > 0) {
		/*
		 * Should not even be attempting large allocations when huge
		 * page swap is disabled. Warn and fail the allocation.
		 */
		if (!IS_ENABLED(CONFIG_THP_SWAP) ||
		    nr_pages > SWAPFILE_CLUSTER) {
			VM_WARN_ON_ONCE(1);
			return 0;
		}

		/*
		 * Swapfile is not block device or not using clusters so unable
		 * to allocate large entries.
		 */
		if (!(si->flags & SWP_BLKDEV) || !si->cluster_info)
			return 0;
	}

	if (si->cluster_info)
		return cluster_alloc_swap(si, usage, nr, slots, order);

	si->flags += SWP_SCANNING;

	/* For HDD, sequential access is more important. */
	scan_base = si->cluster_next;
	offset = scan_base;

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= READ_ONCE(si->highest_bit); offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;

		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed > 0)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset]) {
		if (!n_ret)
			goto scan;
		else
			goto done;
	}
	memset(si->swap_map + offset, usage, nr_pages);

	swap_range_alloc(si, offset, nr_pages);
	slots[n_ret++] = swp_entry(si->type, offset);

	/* got enough slots or reach max slots? */
	if ((n_ret == nr) || (offset >= si->highest_bit))
		goto done;

	/* search for next available slot */

	/* time to take a break? */
	if (unlikely(--latency_ration < 0)) {
		if (n_ret)
			goto done;
		spin_unlock(&si->lock);
		cond_resched();
		spin_lock(&si->lock);
		latency_ration = LATENCY_LIMIT;
	}

	if (si->cluster_nr && !si->swap_map[++offset]) {
		/* non-ssd case, still more slots in cluster? */
		--si->cluster_nr;
		goto checks;
	}

	/*
	 * Even if there's no free clusters available (fragmented),
	 * try to scan a little more quickly with lock held unless we
	 * have scanned too many slots already.
	 */
	if (!scanned_many) {
		unsigned long scan_limit;

		if (offset < scan_base)
			scan_limit = scan_base;
		else
			scan_limit = si->highest_bit;
		for (; offset <= scan_limit && --latency_ration > 0;
		     offset++) {
			if (!si->swap_map[offset])
				goto checks;
		}
	}

done:
	if (order == 0)
		set_cluster_next(si, offset + 1);
	si->flags -= SWP_SCANNING;
	return n_ret;

scan:
	VM_WARN_ON(order > 0);
	spin_unlock(&si->lock);
	while (++offset <= READ_ONCE(si->highest_bit)) {
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
		if (swap_offset_available_and_locked(si, offset))
			goto checks;
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
		if (swap_offset_available_and_locked(si, offset))
			goto checks;
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return n_ret;
}

int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
{
	int order = swap_entry_order(entry_order);
	unsigned long size = 1 << order;
	struct swap_info_struct *si, *next;
	long avail_pgs;
	int n_ret = 0;
	int node;

	spin_lock(&swap_avail_lock);

	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
	if (avail_pgs <= 0) {
		spin_unlock(&swap_avail_lock);
		goto noswap;
	}

	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);

	atomic_long_sub(n_goal * size, &nr_swap_pages);

start_over:
	node = numa_node_id();
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_lists[node])) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			__del_from_avail_list(si);
			spin_unlock(&si->lock);
			goto nextsi;
		}
		n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
					    n_goal, swp_entries, order);
		spin_unlock(&si->lock);
		if (n_ret || size > 1)
			goto check_out;
		cond_resched();

		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * if we got here, it's likely that si was almost full before,
		 * and since scan_swap_map_slots() can drop the si->lock,
		 * multiple callers probably all tried to get a page from the
		 * same si and it filled up before we could get one; or, the si
		 * filled up between us dropping swap_avail_lock and taking
		 * si->lock. Since we dropped the swap_avail_lock, the
		 * swap_avail_head list may have been modified; so if next is
		 * still in the swap_avail_head list then try it, otherwise
		 * start over if we have not gotten any slots.
		 */
		if (plist_node_empty(&next->avail_lists[node]))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

check_out:
	if (n_ret < n_goal)
		atomic_long_add((long)(n_goal - n_ret) * size,
				&nr_swap_pages);
noswap:
	return n_ret;
}

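/*
 * Usage sketch (illustrative, simplified from the swap slot allocation
 * paths that call this): ask for one entry of a given order and check
 * how many entries were actually returned:
 *
 *	swp_entry_t entry;
 *
 *	if (get_swap_pages(1, &entry, folio_order(folio)))
 *		... entry now holds a slot marked SWAP_HAS_CACHE ...
 */
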
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *si;
	unsigned long offset;

	if (!entry.val)
		goto out;
	si = swp_swap_info(entry);
	if (!si)
		goto bad_nofile;
	if (data_race(!(si->flags & SWP_USED)))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= si->max)
		goto bad_offset;
	if (data_race(!si->swap_map[swp_offset(entry)]))
		goto bad_free;
	return si;

bad_free:
	pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
	goto out;
bad_offset:
	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
	return NULL;
}

static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
					struct swap_info_struct *q)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);

	if (p != q) {
		if (q != NULL)
			spin_unlock(&q->lock);
		if (p != NULL)
			spin_lock(&p->lock);
	}
	return p;
}

static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
					      unsigned long offset,
					      unsigned char usage)
{
	unsigned char count;
	unsigned char has_cache;

	count = si->swap_map[offset];

	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(si, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	if (usage)
		WRITE_ONCE(si->swap_map[offset], usage);
	else
		WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);

	return usage;
}

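/*
 * Example transitions (illustrative, following the logic above): a slot
 * holding (2 | SWAP_HAS_CACHE) freed with usage == 1 becomes
 * (1 | SWAP_HAS_CACHE); freed with usage == SWAP_HAS_CACHE it becomes
 * plain 2. When the result is 0, SWAP_HAS_CACHE is stored instead and
 * 0 is returned, so the caller can free the slot through the slot
 * cache (see __swap_entry_free() below).
 */
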
/*
 * When we get a swap entry, if there aren't some other ways to
 * prevent swapoff, such as the folio in swap cache is locked, RCU
 * reader side is locked, etc., the swap entry may become invalid
 * because of swapoff. Then, we need to enclose all swap related
 * functions with get_swap_device() and put_swap_device(), unless the
 * swap functions call get/put_swap_device() by themselves.
 *
 * RCU reader side lock (including any spinlock) is sufficient to
 * prevent swapoff, because synchronize_rcu() is called in swapoff()
 * before freeing data structures.
 *
 * Check whether swap entry is valid in the swap device. If so,
 * return pointer to swap_info_struct, and keep the swap entry valid
 * via preventing the swap device from being swapoff, until
 * put_swap_device() is called. Otherwise return NULL.
 *
 * Notice that swapoff or swapoff+swapon can still happen before the
 * percpu_ref_tryget_live() in get_swap_device() or after the
 * percpu_ref_put() in put_swap_device() if there isn't any other way
 * to prevent swapoff. The caller must be prepared for that. For
 * example, the following situation is possible.
 *
 *   CPU1				CPU2
 *   do_swap_page()
 *     ...				swapoff+swapon
 *     __read_swap_cache_async()
 *       swapcache_prepare()
 *         __swap_duplicate()
 *           // check swap_map
 *     // verify PTE not changed
 *
 * In __swap_duplicate(), the swap_map needs to be checked before
 * changing partly because the specified swap entry may be for another
 * swap device which has been swapped off. And in do_swap_page(), after
 * the page is read from the swap device, the PTE is verified not
 * changed with the page table locked to check whether the swap device
 * has been swapped off or swapped off and on again.
 */
struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	struct swap_info_struct *si;
	unsigned long offset;

	if (!entry.val)
		goto out;
	si = swp_swap_info(entry);
	if (!si)
		goto bad_nofile;
	if (!percpu_ref_tryget_live(&si->users))
		goto out;
	/*
	 * Guarantee the si->users are checked before accessing other
	 * fields of swap_info_struct.
	 *
	 * Paired with the spin_unlock() after setup_swap_info() in
	 * enable_swap_info().
	 */
	smp_rmb();
	offset = swp_offset(entry);
	if (offset >= si->max)
		goto put_out;

	return si;
bad_nofile:
	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
	return NULL;
put_out:
	pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
	percpu_ref_put(&si->users);
	return NULL;
}

static unsigned char __swap_entry_free(struct swap_info_struct *si,
				       swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char usage;

	ci = lock_cluster_or_swap_info(si, offset);
	usage = __swap_entry_free_locked(si, offset, 1);
	unlock_cluster_or_swap_info(si, ci);
	if (!usage)
		free_swap_slot(entry);

	return usage;
}

static bool __swap_entries_free(struct swap_info_struct *si,
				swp_entry_t entry, int nr)
{
	unsigned long offset = swp_offset(entry);
	unsigned int type = swp_type(entry);
	struct swap_cluster_info *ci;
	bool has_cache = false;
	unsigned char count;
	int i;

	if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
		goto fallback;
	/* cross into another cluster */
	if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
		goto fallback;

	ci = lock_cluster_or_swap_info(si, offset);
	if (!swap_is_last_map(si, offset, nr, &has_cache)) {
		unlock_cluster_or_swap_info(si, ci);
		goto fallback;
	}
	for (i = 0; i < nr; i++)
		WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
	unlock_cluster_or_swap_info(si, ci);

	if (!has_cache) {
		for (i = 0; i < nr; i++)
			zswap_invalidate(swp_entry(si->type, offset + i));
		spin_lock(&si->lock);
		swap_entry_range_free(si, entry, nr);
		spin_unlock(&si->lock);
	}
	return has_cache;

fallback:
	for (i = 0; i < nr; i++) {
		if (data_race(si->swap_map[offset + i])) {
			count = __swap_entry_free(si, swp_entry(type, offset + i));
			if (count == SWAP_HAS_CACHE)
				has_cache = true;
		} else {
			WARN_ON_ONCE(1);
		}
	}
	return has_cache;
}

1545
650975d2
KS
1546/*
1547 * Drop the last HAS_CACHE flag of swap entries, caller have to
1548 * ensure all entries belong to the same cgroup.
1549 */
b85508d7 1550static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry,
650975d2 1551 unsigned int nr_pages)
7c00bafe 1552{
7c00bafe 1553 unsigned long offset = swp_offset(entry);
b85508d7 1554 unsigned char *map = si->swap_map + offset;
650975d2
KS
1555 unsigned char *map_end = map + nr_pages;
1556 struct swap_cluster_info *ci;
7c00bafe 1557
b85508d7 1558 ci = lock_cluster(si, offset);
650975d2
KS
1559 do {
1560 VM_BUG_ON(*map != SWAP_HAS_CACHE);
1561 *map = 0;
1562 } while (++map < map_end);
b85508d7 1563 dec_cluster_info_page(si, ci, nr_pages);
235b6217
HY
1564 unlock_cluster(ci);
1565
650975d2 1566 mem_cgroup_uncharge_swap(entry, nr_pages);
b85508d7 1567 swap_range_free(si, offset, nr_pages);
1da177e4
LT
1568}
1569
static void cluster_swap_free_nr(struct swap_info_struct *si,
				 unsigned long offset, int nr_pages,
				 unsigned char usage)
{
	struct swap_cluster_info *ci;
	DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 };
	int i, nr;

	ci = lock_cluster_or_swap_info(si, offset);
	while (nr_pages) {
		nr = min(BITS_PER_LONG, nr_pages);
		for (i = 0; i < nr; i++) {
			if (!__swap_entry_free_locked(si, offset + i, usage))
				bitmap_set(to_free, i, 1);
		}
		if (!bitmap_empty(to_free, BITS_PER_LONG)) {
			unlock_cluster_or_swap_info(si, ci);
			for_each_set_bit(i, to_free, BITS_PER_LONG)
				free_swap_slot(swp_entry(si->type, offset + i));
			if (nr == nr_pages)
				return;
			bitmap_clear(to_free, 0, BITS_PER_LONG);
			ci = lock_cluster_or_swap_info(si, offset);
		}
		offset += nr;
		nr_pages -= nr;
	}
	unlock_cluster_or_swap_info(si, ci);
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free_nr(swp_entry_t entry, int nr_pages)
{
	int nr;
	struct swap_info_struct *sis;
	unsigned long offset = swp_offset(entry);

	sis = _swap_info_get(entry);
	if (!sis)
		return;

	while (nr_pages) {
		nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
		cluster_swap_free_nr(sis, offset, nr, 1);
		offset += nr;
		nr_pages -= nr;
	}
}

/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
	unsigned long offset = swp_offset(entry);
	struct swap_cluster_info *ci;
	struct swap_info_struct *si;
	int size = 1 << swap_entry_order(folio_order(folio));

	si = _swap_info_get(entry);
	if (!si)
		return;

	ci = lock_cluster_or_swap_info(si, offset);
	if (size > 1 && swap_is_has_cache(si, offset, size)) {
		unlock_cluster_or_swap_info(si, ci);
		spin_lock(&si->lock);
		swap_entry_range_free(si, entry, size);
		spin_unlock(&si->lock);
		return;
	}
	for (int i = 0; i < size; i++, entry.val++) {
		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
			unlock_cluster_or_swap_info(si, ci);
			free_swap_slot(entry);
			if (i == size - 1)
				return;
			lock_cluster_or_swap_info(si, offset);
		}
	}
	unlock_cluster_or_swap_info(si, ci);
}

static int swp_entry_cmp(const void *ent1, const void *ent2)
{
	const swp_entry_t *e1 = ent1, *e2 = ent2;

	return (int)swp_type(*e1) - (int)swp_type(*e2);
}

void swapcache_free_entries(swp_entry_t *entries, int n)
{
	struct swap_info_struct *p, *prev;
	int i;

	if (n <= 0)
		return;

	prev = NULL;
	p = NULL;

	/*
	 * Sort swap entries by swap device, so each lock is only taken once.
	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
	 * so low that it isn't necessary to optimize further.
	 */
	if (nr_swapfiles > 1)
		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
	for (i = 0; i < n; ++i) {
		p = swap_info_get_cont(entries[i], prev);
		if (p)
			swap_entry_range_free(p, entries[i], 1);
		prev = p;
	}
	if (p)
		spin_unlock(&p->lock);
}

int __swap_count(swp_entry_t entry)
{
	struct swap_info_struct *si = swp_swap_info(entry);
	pgoff_t offset = swp_offset(entry);

	return swap_count(si->swap_map[offset]);
}

14d01ee9
MWO
1699/*
1700 * How many references to @entry are currently swapped out?
1701 * This does not give an exact answer when swap count is continued,
1702 * but does include the high COUNT_CONTINUED flag to allow for that.
1703 */
3ecdeb0f 1704int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
322b8afe 1705{
322b8afe
HY
1706 pgoff_t offset = swp_offset(entry);
1707 struct swap_cluster_info *ci;
14d01ee9 1708 int count;
322b8afe
HY
1709
1710 ci = lock_cluster_or_swap_info(si, offset);
1711 count = swap_count(si->swap_map[offset]);
1712 unlock_cluster_or_swap_info(si, ci);
1713 return count;
1714}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns an exact answer.
 */
int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	si = _swap_info_get(entry);
	if (!si)
		return 0;

	offset = swp_offset(entry);

	ci = lock_cluster_or_swap_info(si, offset);

	count = swap_count(si->swap_map[offset]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	page = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_local_page(page);
		tmp_count = map[offset];
		kunmap_local(map);

		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	unlock_cluster_or_swap_info(si, ci);
	return count;
}
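
/*
 * Worked example for swp_swapcount() (illustrative, assuming the usual
 * limits SWAP_MAP_MAX == 0x3e and SWAP_CONT_MAX == 0x7f): a swap_map
 * byte of (COUNT_CONTINUED | SWAP_MAP_MAX) plus a first continuation
 * byte of 5 decodes to 0x3e + 5 * (0x3e + 1) = 377 references.
 */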

static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
					 swp_entry_t entry, int order)
{
	struct swap_cluster_info *ci;
	unsigned char *map = si->swap_map;
	unsigned int nr_pages = 1 << order;
	unsigned long roffset = swp_offset(entry);
	unsigned long offset = round_down(roffset, nr_pages);
	int i;
	bool ret = false;

	ci = lock_cluster_or_swap_info(si, offset);
	if (!ci || nr_pages == 1) {
		if (swap_count(map[roffset]))
			ret = true;
		goto unlock_out;
	}
	for (i = 0; i < nr_pages; i++) {
		if (swap_count(map[offset + i])) {
			ret = true;
			break;
		}
	}
unlock_out:
	unlock_cluster_or_swap_info(si, ci);
	return ret;
}

static bool folio_swapped(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct swap_info_struct *si = _swap_info_get(entry);

	if (!si)
		return false;

	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
		return swap_swapcount(si, entry) != 0;

	return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
}

static bool folio_swapcache_freeable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (!folio_test_swapcache(folio))
		return false;
	if (folio_test_writeback(folio))
		return false;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to folio_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a folio which has already been recorded in the
	 * image as a clean swapcache folio, and then reuse its swap for
	 * another page of the image. On waking from hibernation, the
	 * original folio might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
		return false;

	return true;
}

/**
 * folio_free_swap() - Free the swap space used for this folio.
 * @folio: The folio to remove.
 *
 * If swap is getting full, or if there are no more mappings of this folio,
 * then call folio_free_swap to free its swap space.
 *
 * Return: true if we were able to release the swap space.
 */
bool folio_free_swap(struct folio *folio)
{
	if (!folio_swapcache_freeable(folio))
		return false;
	if (folio_swapped(folio))
		return false;

	delete_from_swap_cache(folio);
	folio_set_dirty(folio);
	return true;
}

/**
 * free_swap_and_cache_nr() - Release reference on range of swap entries and
 *                            reclaim their cache if no more references remain.
 * @entry: First entry of range.
 * @nr: Number of entries in range.
 *
 * For each swap entry in the contiguous range, release a reference. If any swap
 * entries become free, try to reclaim their underlying folios, if present. The
 * offset range is defined by [entry.offset, entry.offset + nr).
 */
void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
	const unsigned long start_offset = swp_offset(entry);
	const unsigned long end_offset = start_offset + nr;
	struct swap_info_struct *si;
	bool any_only_cache = false;
	unsigned long offset;

	if (non_swap_entry(entry))
		return;

	si = get_swap_device(entry);
	if (!si)
		return;

	if (WARN_ON(end_offset > si->max))
		goto out;

	/*
	 * First free all entries in the range.
	 */
	any_only_cache = __swap_entries_free(si, entry, nr);

	/*
	 * Short-circuit the below loop if none of the entries had their
	 * reference drop to zero.
	 */
	if (!any_only_cache)
		goto out;

	/*
	 * Now go back over the range trying to reclaim the swap cache. This is
	 * more efficient for large folios because we will only try to reclaim
	 * the swap once per folio in the common case. If we do
	 * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
	 * latter will get a reference and lock the folio for every individual
	 * page but will only succeed once the swap slot for every subpage is
	 * zero.
	 */
	for (offset = start_offset; offset < end_offset; offset += nr) {
		nr = 1;
		if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
			/*
			 * Folios are always naturally aligned in swap so
			 * advance forward to the next boundary. Zero means no
			 * folio was found for the swap entry, so advance by 1
			 * in this case. Negative value means folio was found
			 * but could not be reclaimed. Here we can still advance
			 * to the next boundary.
			 */
			nr = __try_to_reclaim_swap(si, offset,
						   TTRS_UNMAPPED | TTRS_FULL);
			if (nr == 0)
				nr = 1;
			else if (nr < 0)
				nr = -nr;
			nr = ALIGN(offset + 1, nr) - offset;
		}
	}

out:
	put_swap_device(si);
}
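
/*
 * Example of the reclaim stride above (illustrative): if offset 5 falls
 * inside a 16-page swapcache folio and reclaim succeeds,
 * __try_to_reclaim_swap() returns 16, so ALIGN(5 + 1, 16) - 5 == 11 and
 * the next iteration resumes at offset 16, the first boundary past that
 * folio.
 */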

#ifdef CONFIG_HIBERNATION

swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si = swap_type_to_swap_info(type);
	swp_entry_t entry = {0};

	if (!si)
		goto fail;

	/* This is called for allocating swap entry, not cache */
	spin_lock(&si->lock);
	if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
		atomic_long_dec(&nr_swap_pages);
	spin_unlock(&si->lock);
fail:
	return entry;
}

/*
 * Find the swap type that corresponds to the given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset)
{
	int type;

	if (!device)
		return -1;

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (device == sis->bdev->bd_dev) {
			struct swap_extent *se = first_se(sis);

			if (se->start_block == offset) {
				spin_unlock(&swap_lock);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	return -ENODEV;
}

int find_first_swap(dev_t *device)
{
	int type;

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;
		*device = sis->bdev->bd_dev;
		spin_unlock(&swap_lock);
		return type;
	}
	spin_unlock(&swap_lock);
	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to the given offset on the swapdev
 * corresponding to the given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct swap_info_struct *si = swap_type_to_swap_info(type);
	struct swap_extent *se;

	if (!si || !(si->flags & SWP_WRITEOK))
		return 0;
	se = offset_to_swap_extent(si, offset);
	return se->start_block + (offset - se->start_page);
}
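
/*
 * E.g. for an extent with start_page == 100 and start_block == 9000,
 * offset 120 maps to block 9000 + (120 - 100) == 9020.
 */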

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_flags(pte), swp_pte);
}

/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct folio *folio)
{
	struct page *page;
	struct folio *swapcache;
	spinlock_t *ptl;
	pte_t *pte, new_pte, old_pte;
	bool hwpoisoned = false;
	int ret = 1;

	swapcache = folio;
	folio = ksm_might_need_to_copy(folio, vma, addr);
	if (unlikely(!folio))
		return -ENOMEM;
	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
		hwpoisoned = true;
		folio = swapcache;
	}

	page = folio_file_page(folio, swp_offset(entry));
	if (PageHWPoison(page))
		hwpoisoned = true;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
					      swp_entry_to_pte(entry)))) {
		ret = 0;
		goto out;
	}

	old_pte = ptep_get(pte);

	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
		swp_entry_t swp_entry;

		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
		if (hwpoisoned) {
			swp_entry = make_hwpoison_entry(page);
		} else {
			swp_entry = make_poisoned_swp_entry();
		}
		new_pte = swp_entry_to_pte(swp_entry);
		ret = 0;
		goto setpte;
	}

	/*
	 * Some architectures may have to restore extra metadata to the page
	 * when reading from swap. This metadata may be indexed by swap entry
	 * so this must be called before swap_free().
	 */
	arch_swap_restore(folio_swap(entry, folio), folio);

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	folio_get(folio);
	if (folio == swapcache) {
		rmap_t rmap_flags = RMAP_NONE;

		/*
		 * See do_swap_page(): writeback would be problematic.
		 * However, we do a folio_wait_writeback() just before this
		 * call and have the folio locked.
		 */
		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
		if (pte_swp_exclusive(old_pte))
			rmap_flags |= RMAP_EXCLUSIVE;
		/*
		 * We currently only expect small !anon folios, which are either
		 * fully exclusive or fully shared. If we ever get large folios
		 * here, we have to be careful.
		 */
		if (!folio_test_anon(folio)) {
			VM_WARN_ON_ONCE(folio_test_large(folio));
			VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
			folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
		} else {
			folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
		}
	} else { /* ksm created a completely new copy */
		folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(folio, vma);
	}
	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
	if (pte_swp_soft_dirty(old_pte))
		new_pte = pte_mksoft_dirty(new_pte);
	if (pte_swp_uffd_wp(old_pte))
		new_pte = pte_mkuffd_wp(new_pte);
setpte:
	set_pte_at(vma->vm_mm, addr, pte, new_pte);
	swap_free(entry);
out:
	if (pte)
		pte_unmap_unlock(pte, ptl);
	if (folio != swapcache) {
		folio_unlock(folio);
		folio_put(folio);
	}
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned int type)
{
	pte_t *pte = NULL;
	struct swap_info_struct *si;

	si = swap_info[type];
	do {
		struct folio *folio;
		unsigned long offset;
		unsigned char swp_count;
		swp_entry_t entry;
		int ret;
		pte_t ptent;

		if (!pte++) {
			pte = pte_offset_map(pmd, addr);
			if (!pte)
				break;
		}

		ptent = ptep_get_lockless(pte);

		if (!is_swap_pte(ptent))
			continue;

		entry = pte_to_swp_entry(ptent);
		if (swp_type(entry) != type)
			continue;

		offset = swp_offset(entry);
		pte_unmap(pte);
		pte = NULL;

		folio = swap_cache_get_folio(entry, vma, addr);
		if (!folio) {
			struct vm_fault vmf = {
				.vma = vma,
				.address = addr,
				.real_address = addr,
				.pmd = pmd,
			};

			folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
						&vmf);
		}
		if (!folio) {
			swp_count = READ_ONCE(si->swap_map[offset]);
			if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
				continue;
			return -ENOMEM;
		}

		folio_lock(folio);
		folio_wait_writeback(folio);
		ret = unuse_pte(vma, pmd, addr, entry, folio);
		if (ret < 0) {
			folio_unlock(folio);
			folio_put(folio);
			return ret;
		}

		folio_free_swap(folio);
		folio_unlock(folio);
		folio_put(folio);
	} while (addr += PAGE_SIZE, addr != end);

	if (pte)
		pte_unmap(pte);
	return 0;
}

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned int type)
{
	pmd_t *pmd;
	unsigned long next;
	int ret;

	pmd = pmd_offset(pud, addr);
	do {
		cond_resched();
		next = pmd_addr_end(addr, end);
		ret = unuse_pte_range(vma, pmd, addr, next, type);
		if (ret)
			return ret;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned int type)
{
	pud_t *pud;
	unsigned long next;
	int ret;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		ret = unuse_pmd_range(vma, pud, addr, next, type);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned int type)
{
	p4d_t *p4d;
	unsigned long next;
	int ret;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		ret = unuse_pud_range(vma, p4d, addr, next, type);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
{
	pgd_t *pgd;
	unsigned long addr, end, next;
	int ret;

	addr = vma->vm_start;
	end = vma->vm_end;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		ret = unuse_p4d_range(vma, pgd, addr, next, type);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

static int unuse_mm(struct mm_struct *mm, unsigned int type)
{
	struct vm_area_struct *vma;
	int ret = 0;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (vma->anon_vma) {
			ret = unuse_vma(vma, type);
			if (ret)
				break;
		}

		cond_resched();
	}
	mmap_read_unlock(mm);
	return ret;
}

/*
 * Scan swap_map from current position to next entry still in use.
 * Return 0 if there are no inuse entries after prev till end of
 * the map.
 */
static unsigned int find_next_to_unuse(struct swap_info_struct *si,
				       unsigned int prev)
{
	unsigned int i;
	unsigned char count;

	/*
	 * No need for swap_lock here: we're just looking
	 * for whether an entry is in use, not modifying it; false
	 * hits are okay, and sys_swapoff() has already prevented new
	 * allocations from this area (while holding swap_lock).
	 */
	for (i = prev + 1; i < si->max; i++) {
		count = READ_ONCE(si->swap_map[i]);
		if (count && swap_count(count) != SWAP_MAP_BAD)
			break;
		if ((i % LATENCY_LIMIT) == 0)
			cond_resched();
	}

	if (i == si->max)
		i = 0;

	return i;
}

static int try_to_unuse(unsigned int type)
{
	struct mm_struct *prev_mm;
	struct mm_struct *mm;
	struct list_head *p;
	int retval = 0;
	struct swap_info_struct *si = swap_info[type];
	struct folio *folio;
	swp_entry_t entry;
	unsigned int i;

	if (!READ_ONCE(si->inuse_pages))
		goto success;

retry:
	retval = shmem_unuse(type);
	if (retval)
		return retval;

	prev_mm = &init_mm;
	mmget(prev_mm);

	spin_lock(&mmlist_lock);
	p = &init_mm.mmlist;
	while (READ_ONCE(si->inuse_pages) &&
	       !signal_pending(current) &&
	       (p = p->next) != &init_mm.mmlist) {

		mm = list_entry(p, struct mm_struct, mmlist);
		if (!mmget_not_zero(mm))
			continue;
		spin_unlock(&mmlist_lock);
		mmput(prev_mm);
		prev_mm = mm;
		retval = unuse_mm(mm, type);
		if (retval) {
			mmput(prev_mm);
			return retval;
		}

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
		spin_lock(&mmlist_lock);
	}
	spin_unlock(&mmlist_lock);

	mmput(prev_mm);

	i = 0;
	while (READ_ONCE(si->inuse_pages) &&
	       !signal_pending(current) &&
	       (i = find_next_to_unuse(si, i)) != 0) {

		entry = swp_entry(type, i);
		folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
		if (IS_ERR(folio))
			continue;

		/*
		 * It is conceivable that a racing task removed this folio from
		 * swap cache just before we acquired the page lock. The folio
		 * might even be back in swap cache on another swap area. But
		 * that is okay, folio_free_swap() only removes stale folios.
		 */
		folio_lock(folio);
		folio_wait_writeback(folio);
		folio_free_swap(folio);
		folio_unlock(folio);
		folio_put(folio);
	}

	/*
	 * Let's check again to see if there are still swap entries in the map.
	 * If so, we need to retry the unuse logic again.
	 * Under global memory pressure, swap entries can be reinserted back
	 * into process space after the mmlist loop above passes over them.
	 *
	 * Limit the number of retries? No: when mmget_not_zero()
	 * above fails, that mm is likely to be freeing swap from
	 * exit_mmap(), which proceeds at its own independent pace;
	 * and even shmem_writepage() could have been preempted after
	 * folio_alloc_swap(), temporarily hiding that swap. It's easy
	 * and robust (though cpu-intensive) just to keep retrying.
	 */
	if (READ_ONCE(si->inuse_pages)) {
		if (!signal_pending(current))
			goto retry;
		return -EINTR;
	}

success:
	/*
	 * Make sure that further cleanups after try_to_unuse() returns happen
	 * after swap_range_free() reduces si->inuse_pages to 0.
	 */
	smp_mb();
	return 0;
}

/*
 * After a successful try_to_unuse, if no swap is now in use, we know
 * we can empty the mmlist. swap_lock must be held on entry and exit.
 * Note that mmlist_lock nests inside swap_lock, and an mm must be
 * added to the mmlist just after page_duplicate - before would be racy.
 */
static void drain_mmlist(void)
{
	struct list_head *p, *next;
	unsigned int type;

	for (type = 0; type < nr_swapfiles; type++)
		if (swap_info[type]->inuse_pages)
			return;
	spin_lock(&mmlist_lock);
	list_for_each_safe(p, next, &init_mm.mmlist)
		list_del_init(p);
	spin_unlock(&mmlist_lock);
}

/*
 * Free all of a swapdev's extent information
 */
static void destroy_swap_extents(struct swap_info_struct *sis)
{
	while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
		struct rb_node *rb = sis->swap_extent_root.rb_node;
		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);

		rb_erase(rb, &sis->swap_extent_root);
		kfree(se);
	}

	if (sis->flags & SWP_ACTIVATED) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		sis->flags &= ~SWP_ACTIVATED;
		if (mapping->a_ops->swap_deactivate)
			mapping->a_ops->swap_deactivate(swap_file);
	}
}

/*
 * Add a block range (and the corresponding page range) into this swapdev's
 * extent tree.
 *
 * This function rather assumes that it is called in ascending page order.
 */
int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block)
{
	struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
	struct swap_extent *se;
	struct swap_extent *new_se;

	/*
	 * place the new node at the rightmost since the
	 * function is called in ascending page order.
	 */
	while (*link) {
		parent = *link;
		link = &parent->rb_right;
	}

	if (parent) {
		se = rb_entry(parent, struct swap_extent, rb_node);
		BUG_ON(se->start_page + se->nr_pages != start_page);
		if (se->start_block + se->nr_pages == start_block) {
			/* Merge it */
			se->nr_pages += nr_pages;
			return 0;
		}
	}

	/* No merge, insert a new extent. */
	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
	if (new_se == NULL)
		return -ENOMEM;
	new_se->start_page = start_page;
	new_se->nr_pages = nr_pages;
	new_se->start_block = start_block;

	rb_link_node(&new_se->rb_node, parent, link);
	rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
	return 1;
}
EXPORT_SYMBOL_GPL(add_swap_extent);
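
/*
 * E.g. add_swap_extent(sis, 0, 100, 5000) followed by
 * add_swap_extent(sis, 100, 50, 5100) merges into a single 150-page
 * extent, because the second range continues the first one on disk.
 */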

/*
 * A `swap extent' is a simple thing which maps a contiguous range of pages
 * onto a contiguous range of disk blocks. A rbtree of swap extents is
 * built at swapon time and is then used at swap_writepage/swap_read_folio
 * time for locating where on disk a page belongs.
 *
 * If the swapfile is an S_ISBLK block device, a single extent is installed.
 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
 * swap files identically.
 *
 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
 * swapfiles are handled *identically* after swapon time.
 *
 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
 * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray
 * blocks are found which do not fall within the PAGE_SIZE alignment
 * requirements, they are simply tossed out - we will never use those blocks
 * for swapping.
 *
 * For all swap devices we set S_SWAPFILE across the life of the swapon. This
 * prevents users from writing to the swap device, which will corrupt memory.
 *
 * The amount of disk space which a single swap extent represents varies.
 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
 * extents in the rbtree. - akpm.
 */
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
	struct file *swap_file = sis->swap_file;
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	int ret;

	if (S_ISBLK(inode->i_mode)) {
		ret = add_swap_extent(sis, 0, sis->max, 0);
		*span = sis->pages;
		return ret;
	}

	if (mapping->a_ops->swap_activate) {
		ret = mapping->a_ops->swap_activate(sis, swap_file, span);
		if (ret < 0)
			return ret;
		sis->flags |= SWP_ACTIVATED;
		if ((sis->flags & SWP_FS_OPS) &&
		    sio_pool_init() != 0) {
			destroy_swap_extents(sis);
			return -ENOMEM;
		}
		return ret;
	}

	return generic_swapfile_activate(sis, swap_file, span);
}

static int swap_node(struct swap_info_struct *si)
{
	struct block_device *bdev;

	if (si->bdev)
		bdev = si->bdev;
	else
		bdev = si->swap_file->f_inode->i_sb->s_bdev;

	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
}

static void setup_swap_info(struct swap_info_struct *si, int prio,
			    unsigned char *swap_map,
			    struct swap_cluster_info *cluster_info,
			    unsigned long *zeromap)
{
	int i;

	if (prio >= 0)
		si->prio = prio;
	else
		si->prio = --least_priority;
	/*
	 * the plist prio is negated because plist ordering is
	 * low-to-high, while swap ordering is high-to-low
	 */
	si->list.prio = -si->prio;
	for_each_node(i) {
		if (si->prio >= 0)
			si->avail_lists[i].prio = -si->prio;
		else {
			if (swap_node(si) == i)
				si->avail_lists[i].prio = 1;
			else
				si->avail_lists[i].prio = -si->prio;
		}
	}
	si->swap_map = swap_map;
	si->cluster_info = cluster_info;
	si->zeromap = zeromap;
}
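
/*
 * E.g. devices swapped on with priorities 10 and 5 get plist prios
 * -10 and -5, so plist's low-to-high ordering visits the priority-10
 * device first.
 */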

static void _enable_swap_info(struct swap_info_struct *si)
{
	si->flags |= SWP_WRITEOK;
	atomic_long_add(si->pages, &nr_swap_pages);
	total_swap_pages += si->pages;

	assert_spin_locked(&swap_lock);
	/*
	 * both lists are plists, and thus priority ordered.
	 * swap_active_head needs to be priority ordered for swapoff(),
	 * which on removal of any swap_info_struct with an auto-assigned
	 * (i.e. negative) priority increments the auto-assigned priority
	 * of any lower-priority swap_info_structs.
	 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
	 * which allocates swap pages from the highest available priority
	 * swap_info_struct.
	 */
	plist_add(&si->list, &swap_active_head);

	/* add to available list iff swap device is not full */
	if (si->highest_bit)
		add_to_avail_list(si);
}

static void enable_swap_info(struct swap_info_struct *si, int prio,
			     unsigned char *swap_map,
			     struct swap_cluster_info *cluster_info,
			     unsigned long *zeromap)
{
	spin_lock(&swap_lock);
	spin_lock(&si->lock);
	setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
	spin_unlock(&si->lock);
	spin_unlock(&swap_lock);
	/*
	 * Finished initializing swap device, now it's safe to reference it.
	 */
	percpu_ref_resurrect(&si->users);
	spin_lock(&swap_lock);
	spin_lock(&si->lock);
	_enable_swap_info(si);
	spin_unlock(&si->lock);
	spin_unlock(&swap_lock);
}

static void reinsert_swap_info(struct swap_info_struct *si)
{
	spin_lock(&swap_lock);
	spin_lock(&si->lock);
	setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
	_enable_swap_info(si);
	spin_unlock(&si->lock);
	spin_unlock(&swap_lock);
}

static bool __has_usable_swap(void)
{
	return !plist_head_empty(&swap_active_head);
}

bool has_usable_swap(void)
{
	bool ret;

	spin_lock(&swap_lock);
	ret = __has_usable_swap();
	spin_unlock(&swap_lock);
	return ret;
}

SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
	struct swap_info_struct *p = NULL;
	unsigned char *swap_map;
	unsigned long *zeromap;
	struct swap_cluster_info *cluster_info;
	struct file *swap_file, *victim;
	struct address_space *mapping;
	struct inode *inode;
	struct filename *pathname;
	int err, found = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(!current->mm);

	pathname = getname(specialfile);
	if (IS_ERR(pathname))
		return PTR_ERR(pathname);

	victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
	err = PTR_ERR(victim);
	if (IS_ERR(victim))
		goto out;

	mapping = victim->f_mapping;
	spin_lock(&swap_lock);
	plist_for_each_entry(p, &swap_active_head, list) {
		if (p->flags & SWP_WRITEOK) {
			if (p->swap_file->f_mapping == mapping) {
				found = 1;
				break;
			}
		}
	}
	if (!found) {
		err = -EINVAL;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	if (!security_vm_enough_memory_mm(current->mm, p->pages))
		vm_unacct_memory(p->pages);
	else {
		err = -ENOMEM;
		spin_unlock(&swap_lock);
		goto out_dput;
	}
	spin_lock(&p->lock);
	del_from_avail_list(p);
	if (p->prio < 0) {
		struct swap_info_struct *si = p;
		int nid;

		plist_for_each_entry_continue(si, &swap_active_head, list) {
			si->prio++;
			si->list.prio--;
			for_each_node(nid) {
				if (si->avail_lists[nid].prio != 1)
					si->avail_lists[nid].prio--;
			}
		}
		least_priority++;
	}
	plist_del(&p->list, &swap_active_head);
	atomic_long_sub(p->pages, &nr_swap_pages);
	total_swap_pages -= p->pages;
	p->flags &= ~SWP_WRITEOK;
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);

	disable_swap_slots_cache_lock();

	set_current_oom_origin();
	err = try_to_unuse(p->type);
	clear_current_oom_origin();

	if (err) {
		/* re-insert swap space back into swap_list */
		reinsert_swap_info(p);
		reenable_swap_slots_cache_unlock();
		goto out_dput;
	}

	reenable_swap_slots_cache_unlock();

	/*
	 * Wait for swap operations protected by get/put_swap_device()
	 * to complete. Because of synchronize_rcu() here, all swap
	 * operations protected by RCU reader side lock (including any
	 * spinlock) will be waited too. This makes it easy to
	 * prevent folio_test_swapcache() and the following swap cache
	 * operations from racing with swapoff.
	 */
	percpu_ref_kill(&p->users);
	synchronize_rcu();
	wait_for_completion(&p->comp);

	flush_work(&p->discard_work);

	destroy_swap_extents(p);
	if (p->flags & SWP_CONTINUED)
		free_swap_count_continuations(p);

	if (!p->bdev || !bdev_nonrot(p->bdev))
		atomic_dec(&nr_rotate_swap);

	mutex_lock(&swapon_mutex);
	spin_lock(&swap_lock);
	spin_lock(&p->lock);
	drain_mmlist();

	/* wait for anyone still in scan_swap_map_slots */
	p->highest_bit = 0;		/* cuts scans short */
	while (p->flags >= SWP_SCANNING) {
		spin_unlock(&p->lock);
		spin_unlock(&swap_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&swap_lock);
		spin_lock(&p->lock);
	}

	swap_file = p->swap_file;
	p->swap_file = NULL;
	p->max = 0;
	swap_map = p->swap_map;
	p->swap_map = NULL;
	zeromap = p->zeromap;
	p->zeromap = NULL;
	cluster_info = p->cluster_info;
	p->cluster_info = NULL;
	spin_unlock(&p->lock);
	spin_unlock(&swap_lock);
	arch_swap_invalidate_area(p->type);
	zswap_swapoff(p->type);
	mutex_unlock(&swapon_mutex);
	free_percpu(p->percpu_cluster);
	p->percpu_cluster = NULL;
	free_percpu(p->cluster_next_cpu);
	p->cluster_next_cpu = NULL;
	vfree(swap_map);
	kvfree(zeromap);
	kvfree(cluster_info);
	/* Destroy swap account information */
	swap_cgroup_swapoff(p->type);
	exit_swap_address_space(p->type);

	inode = mapping->host;

	inode_lock(inode);
	inode->i_flags &= ~S_SWAPFILE;
	inode_unlock(inode);
	filp_close(swap_file, NULL);

	/*
	 * Clear the SWP_USED flag after all resources are freed so that swapon
	 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
	 * not hold p->lock after we cleared its SWP_WRITEOK.
	 */
	spin_lock(&swap_lock);
	p->flags = 0;
	spin_unlock(&swap_lock);

	err = 0;
	atomic_inc(&proc_poll_event);
	wake_up_interruptible(&proc_poll_wait);

out_dput:
	filp_close(victim, NULL);
out:
	putname(pathname);
	return err;
}

#ifdef CONFIG_PROC_FS
static __poll_t swaps_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	poll_wait(file, &proc_poll_wait, wait);

	if (seq->poll_event != atomic_read(&proc_poll_event)) {
		seq->poll_event = atomic_read(&proc_poll_event);
		return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
	}

	return EPOLLIN | EPOLLRDNORM;
}

/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
	struct swap_info_struct *si;
	int type;
	loff_t l = *pos;

	mutex_lock(&swapon_mutex);

	if (!l)
		return SEQ_START_TOKEN;

	for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
		if (!(si->flags & SWP_USED) || !si->swap_map)
			continue;
		if (!--l)
			return si;
	}

	return NULL;
}

static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
	struct swap_info_struct *si = v;
	int type;

	if (v == SEQ_START_TOKEN)
		type = 0;
	else
		type = si->type + 1;

	++(*pos);
	for (; (si = swap_type_to_swap_info(type)); type++) {
		if (!(si->flags & SWP_USED) || !si->swap_map)
			continue;
		return si;
	}

	return NULL;
}

static void swap_stop(struct seq_file *swap, void *v)
{
	mutex_unlock(&swapon_mutex);
}

static int swap_show(struct seq_file *swap, void *v)
{
	struct swap_info_struct *si = v;
	struct file *file;
	int len;
	unsigned long bytes, inuse;

	if (si == SEQ_START_TOKEN) {
		seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
		return 0;
	}

	bytes = K(si->pages);
	inuse = K(READ_ONCE(si->inuse_pages));

	file = si->swap_file;
	len = seq_file_path(swap, file, " \t\n\\");
	seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
			len < 40 ? 40 - len : 1, " ",
			S_ISBLK(file_inode(file)->i_mode) ?
				"partition" : "file\t",
			bytes, bytes < 10000000 ? "\t" : "",
			inuse, inuse < 10000000 ? "\t" : "",
			si->prio);
	return 0;
}

static const struct seq_operations swaps_op = {
	.start =	swap_start,
	.next =		swap_next,
	.stop =		swap_stop,
	.show =		swap_show
};

static int swaps_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &swaps_op);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->poll_event = atomic_read(&proc_poll_event);
	return 0;
}

static const struct proc_ops swaps_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= swaps_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
	.proc_poll	= swaps_poll,
};

static int __init procswaps_init(void)
{
	proc_create("swaps", 0, NULL, &swaps_proc_ops);
	return 0;
}
__initcall(procswaps_init);
#endif /* CONFIG_PROC_FS */

#ifdef MAX_SWAPFILES_CHECK
static int __init max_swapfiles_check(void)
{
	MAX_SWAPFILES_CHECK();
	return 0;
}
late_initcall(max_swapfiles_check);
#endif

static struct swap_info_struct *alloc_swap_info(void)
{
	struct swap_info_struct *p;
	struct swap_info_struct *defer = NULL;
	unsigned int type;
	int i;

	p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (percpu_ref_init(&p->users, swap_users_ref_free,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
		kvfree(p);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		if (!(swap_info[type]->flags & SWP_USED))
			break;
	}
	if (type >= MAX_SWAPFILES) {
		spin_unlock(&swap_lock);
		percpu_ref_exit(&p->users);
		kvfree(p);
		return ERR_PTR(-EPERM);
	}
	if (type >= nr_swapfiles) {
		p->type = type;
		/*
		 * Publish the swap_info_struct after initializing it.
		 * Note that kvzalloc() above zeroes all its fields.
		 */
		smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
		nr_swapfiles++;
	} else {
		defer = p;
		p = swap_info[type];
		/*
		 * Do not memset this entry: a racing procfs swap_next()
		 * would be relying on p->type to remain valid.
		 */
	}
	p->swap_extent_root = RB_ROOT;
	plist_node_init(&p->list, 0);
	for_each_node(i)
		plist_node_init(&p->avail_lists[i], 0);
	p->flags = SWP_USED;
	spin_unlock(&swap_lock);
	if (defer) {
		percpu_ref_exit(&defer->users);
		kvfree(defer);
	}
	spin_lock_init(&p->lock);
	spin_lock_init(&p->cont_lock);
	init_completion(&p->comp);

	return p;
}

static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
{
	if (S_ISBLK(inode->i_mode)) {
		si->bdev = I_BDEV(inode);
		/*
		 * Zoned block devices contain zones that have a sequential
		 * write only restriction. Hence zoned block devices are not
		 * suitable for swapping. Disallow them here.
		 */
		if (bdev_is_zoned(si->bdev))
			return -EINVAL;
		si->flags |= SWP_BLKDEV;
	} else if (S_ISREG(inode->i_mode)) {
		si->bdev = inode->i_sb->s_bdev;
	}

	return 0;
}

/*
 * Find out how many pages are allowed for a single swap device. There
 * are two limiting factors:
 * 1) the number of bits for the swap offset in the swp_entry_t type, and
 * 2) the number of bits in the swap pte, as defined by the different
 * architectures.
 *
 * In order to find the largest possible bit mask, a swap entry with
 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
 * decoded to a swp_entry_t again, and finally the swap offset is
 * extracted.
 *
 * This will mask all the bits from the initial ~0UL mask that can't
 * be encoded in either the swp_entry_t or the architecture definition
 * of a swap pte.
 */
unsigned long generic_max_swapfile_size(void)
{
	return swp_offset(pte_to_swp_entry(
			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
}

/* Can be overridden by an architecture for additional checks. */
__weak unsigned long arch_max_swapfile_size(void)
{
	return generic_max_swapfile_size();
}
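
/*
 * E.g. on an architecture whose swap pte can encode only a 32-bit swap
 * offset, the round trip through swp_entry_to_pte() and
 * pte_to_swp_entry() masks ~0UL down to 0xffffffff, giving a limit of
 * 2^32 pages (illustrative figure; the real mask is architecture-specific).
 */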

static unsigned long read_swap_header(struct swap_info_struct *si,
					union swap_header *swap_header,
					struct inode *inode)
{
	int i;
	unsigned long maxpages;
	unsigned long swapfilepages;
	unsigned long last_page;

	if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
		pr_err("Unable to find swap-space signature\n");
		return 0;
	}

	/* swap partition endianness hack... */
	if (swab32(swap_header->info.version) == 1) {
		swab32s(&swap_header->info.version);
		swab32s(&swap_header->info.last_page);
		swab32s(&swap_header->info.nr_badpages);
		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
			return 0;
		for (i = 0; i < swap_header->info.nr_badpages; i++)
			swab32s(&swap_header->info.badpages[i]);
	}
	/* Check the swap header's sub-version */
	if (swap_header->info.version != 1) {
		pr_warn("Unable to handle swap header version %d\n",
			swap_header->info.version);
		return 0;
	}

	si->lowest_bit = 1;
	si->cluster_next = 1;
	si->cluster_nr = 0;

	maxpages = swapfile_maximum_size;
	last_page = swap_header->info.last_page;
	if (!last_page) {
		pr_warn("Empty swap-file\n");
		return 0;
	}
	if (last_page > maxpages) {
		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
			K(maxpages), K(last_page));
	}
	if (maxpages > last_page) {
		maxpages = last_page + 1;
		/* p->max is an unsigned int: don't overflow it */
		if ((unsigned int)maxpages == 0)
			maxpages = UINT_MAX;
	}
	si->highest_bit = maxpages - 1;

	if (!maxpages)
		return 0;
	swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
	if (swapfilepages && maxpages > swapfilepages) {
		pr_warn("Swap area shorter than signature indicates\n");
		return 0;
	}
	if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
		return 0;
	if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
		return 0;

	return maxpages;
}
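
/*
 * E.g. a header whose last_page exceeds swapfile_maximum_size is
 * clamped to maxpages; the "Truncating oversized swap area" warning
 * above reports how much of the area will actually be used.
 */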

#define SWAP_CLUSTER_INFO_COLS						\
	DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
#define SWAP_CLUSTER_SPACE_COLS						\
	DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
#define SWAP_CLUSTER_COLS						\
	max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)

static int setup_swap_map_and_extents(struct swap_info_struct *si,
					union swap_header *swap_header,
					unsigned char *swap_map,
					unsigned long maxpages,
					sector_t *span)
{
	unsigned int nr_good_pages;
	unsigned long i;
	int nr_extents;

	nr_good_pages = maxpages - 1;	/* omit header page */

	for (i = 0; i < swap_header->info.nr_badpages; i++) {
		unsigned int page_nr = swap_header->info.badpages[i];
		if (page_nr == 0 || page_nr > swap_header->info.last_page)
			return -EINVAL;
		if (page_nr < maxpages) {
			swap_map[page_nr] = SWAP_MAP_BAD;
			nr_good_pages--;
		}
	}

	if (nr_good_pages) {
		swap_map[0] = SWAP_MAP_BAD;
		si->max = maxpages;
		si->pages = nr_good_pages;
		nr_extents = setup_swap_extents(si, span);
		if (nr_extents < 0)
			return nr_extents;
		nr_good_pages = si->pages;
	}
	if (!nr_good_pages) {
		pr_warn("Empty swap-file\n");
		return -EINVAL;
	}

	return nr_extents;
}
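
/*
 * E.g. a header listing bad pages {5, 12} marks swap_map[5] and
 * swap_map[12] as SWAP_MAP_BAD and drops nr_good_pages by two;
 * slot 0 is always reserved for the header page itself.
 */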

static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
						union swap_header *swap_header,
						unsigned long maxpages)
{
	unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
	unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
	struct swap_cluster_info *cluster_info;
	unsigned long i, j, k, idx;
	int cpu, err = -ENOMEM;

	cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
	if (!cluster_info)
		goto err;

	for (i = 0; i < nr_clusters; i++)
		spin_lock_init(&cluster_info[i].lock);

	si->cluster_next_cpu = alloc_percpu(unsigned int);
	if (!si->cluster_next_cpu)
		goto err_free;

	/* Random start position to help with wear leveling */
	for_each_possible_cpu(cpu)
		per_cpu(*si->cluster_next_cpu, cpu) =
			get_random_u32_inclusive(1, si->highest_bit);

	si->percpu_cluster = alloc_percpu(struct percpu_cluster);
	if (!si->percpu_cluster)
		goto err_free;

	for_each_possible_cpu(cpu) {
		struct percpu_cluster *cluster;

		cluster = per_cpu_ptr(si->percpu_cluster, cpu);
		for (i = 0; i < SWAP_NR_ORDERS; i++)
			cluster->next[i] = SWAP_NEXT_INVALID;
	}

	/*
	 * Mark unusable pages as unavailable. The clusters aren't
	 * marked free yet, so no list operations are involved yet.
	 *
	 * See setup_swap_map_and_extents(): header page, bad pages,
	 * and the EOF part of the last cluster.
	 */
	inc_cluster_info_page(si, cluster_info, 0);
	for (i = 0; i < swap_header->info.nr_badpages; i++)
		inc_cluster_info_page(si, cluster_info,
				      swap_header->info.badpages[i]);
	for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
		inc_cluster_info_page(si, cluster_info, i);

	INIT_LIST_HEAD(&si->free_clusters);
	INIT_LIST_HEAD(&si->full_clusters);
	INIT_LIST_HEAD(&si->discard_clusters);

	for (i = 0; i < SWAP_NR_ORDERS; i++) {
		INIT_LIST_HEAD(&si->nonfull_clusters[i]);
		INIT_LIST_HEAD(&si->frag_clusters[i]);
		si->frag_cluster_nr[i] = 0;
	}

	/*
	 * Reduce false cache line sharing between cluster_info and
	 * sharing same address space.
	 */
	for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
		j = (k + col) % SWAP_CLUSTER_COLS;
		for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
			struct swap_cluster_info *ci;
			idx = i * SWAP_CLUSTER_COLS + j;
			ci = cluster_info + idx;
			if (idx >= nr_clusters)
				continue;
			if (ci->count) {
				ci->flags = CLUSTER_FLAG_NONFULL;
				list_add_tail(&ci->list, &si->nonfull_clusters[0]);
				continue;
			}
			ci->flags = CLUSTER_FLAG_FREE;
			list_add_tail(&ci->list, &si->free_clusters);
		}
	}

	return cluster_info;

err_free:
	kvfree(cluster_info);
err:
	return ERR_PTR(err);
}
3343
53cbb243
CEB
3344SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3345{
b85508d7 3346 struct swap_info_struct *si;
91a27b2a 3347 struct filename *name;
53cbb243
CEB
3348 struct file *swap_file = NULL;
3349 struct address_space *mapping;
51cc3a66 3350 struct dentry *dentry;
40531542 3351 int prio;
53cbb243
CEB
3352 int error;
3353 union swap_header *swap_header;
915d4d7b 3354 int nr_extents;
53cbb243
CEB
3355 sector_t span;
3356 unsigned long maxpages;
53cbb243 3357 unsigned char *swap_map = NULL;
0ca0c24e 3358 unsigned long *zeromap = NULL;
2a8f9449 3359 struct swap_cluster_info *cluster_info = NULL;
97b76796 3360 struct folio *folio = NULL;
53cbb243 3361 struct inode *inode = NULL;
7cbf3192 3362 bool inced_nr_rotate_swap = false;
53cbb243 3363
d15cab97
HD
3364 if (swap_flags & ~SWAP_FLAGS_VALID)
3365 return -EINVAL;
3366
53cbb243
CEB
3367 if (!capable(CAP_SYS_ADMIN))
3368 return -EPERM;
3369
a2468cc9
AL
3370 if (!swap_avail_heads)
3371 return -ENOMEM;
3372
b85508d7
BS
3373 si = alloc_swap_info();
3374 if (IS_ERR(si))
3375 return PTR_ERR(si);
53cbb243 3376
b85508d7 3377 INIT_WORK(&si->discard_work, swap_discard_work);
815c2c54 3378
1da177e4 3379 name = getname(specialfile);
1da177e4 3380 if (IS_ERR(name)) {
7de7fb6b 3381 error = PTR_ERR(name);
1da177e4 3382 name = NULL;
bd69010b 3383 goto bad_swap;
1da177e4 3384 }
51d908b3 3385 swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
1da177e4 3386 if (IS_ERR(swap_file)) {
7de7fb6b 3387 error = PTR_ERR(swap_file);
1da177e4 3388 swap_file = NULL;
bd69010b 3389 goto bad_swap;
1da177e4
LT
3390 }
3391
b85508d7 3392 si->swap_file = swap_file;
1da177e4 3393 mapping = swap_file->f_mapping;
51cc3a66 3394 dentry = swap_file->f_path.dentry;
2130781e 3395 inode = mapping->host;
6f179af8 3396
b85508d7 3397 error = claim_swapfile(si, inode);
4d0e1e10 3398 if (unlikely(error))
1da177e4 3399 goto bad_swap;
1da177e4 3400
d795a90e 3401 inode_lock(inode);
51cc3a66
HD
3402 if (d_unlinked(dentry) || cant_mount(dentry)) {
3403 error = -ENOENT;
3404 goto bad_swap_unlock_inode;
3405 }
d795a90e
NA
3406 if (IS_SWAPFILE(inode)) {
3407 error = -EBUSY;
3408 goto bad_swap_unlock_inode;
3409 }
3410
1da177e4
LT
3411 /*
3412 * Read the swap header.
3413 */
7e0a1265 3414 if (!mapping->a_ops->read_folio) {
1da177e4 3415 error = -EINVAL;
d795a90e 3416 goto bad_swap_unlock_inode;
1da177e4 3417 }
97b76796
MWO
3418 folio = read_mapping_folio(mapping, 0, swap_file);
3419 if (IS_ERR(folio)) {
3420 error = PTR_ERR(folio);
d795a90e 3421 goto bad_swap_unlock_inode;
1da177e4 3422 }
97b76796 3423 swap_header = kmap_local_folio(folio, 0);
1da177e4 3424
b85508d7 3425 maxpages = read_swap_header(si, swap_header, inode);
ca8bd38b 3426 if (unlikely(!maxpages)) {
1da177e4 3427 error = -EINVAL;
d795a90e 3428 goto bad_swap_unlock_inode;
1da177e4 3429 }
886bb7e9 3430
81e33971 3431 /* OK, set up the swap map and apply the bad block list */
803d0c83 3432 swap_map = vzalloc(maxpages);
81e33971
HD
3433 if (!swap_map) {
3434 error = -ENOMEM;
d795a90e 3435 goto bad_swap_unlock_inode;
81e33971 3436 }
f0571429 3437
b843786b
JW
3438 error = swap_cgroup_swapon(si->type, maxpages);
3439 if (error)
3440 goto bad_swap_unlock_inode;
3441
3442 nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
3443 maxpages, &span);
3444 if (unlikely(nr_extents < 0)) {
3445 error = nr_extents;
3446 goto bad_swap_unlock_inode;
3447 }
3448
0ca0c24e
UA
3449 /*
3450 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3451	 * be above MAX_PAGE_ORDER in case of a large swap file.
3452 */
3453 zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3454 GFP_KERNEL | __GFP_ZERO);
3455 if (!zeromap) {
3456 error = -ENOMEM;
3457 goto bad_swap_unlock_inode;
3458 }
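	/*
	 * Editorial worked example (not from the original source): a
	 * 1 TiB swap file holds 2^28 4 KiB pages, so the zeromap needs
	 * 2^28 bits = 32 MiB, far beyond the 4 MiB an order-10
	 * (default MAX_PAGE_ORDER) page allocation can supply, and
	 * kvmalloc_array() may then fall back to vmalloc().
	 */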
3459
b85508d7
BS
3460 if (si->bdev && bdev_stable_writes(si->bdev))
3461 si->flags |= SWP_STABLE_WRITES;
f0571429 3462
b85508d7
BS
3463 if (si->bdev && bdev_synchronous(si->bdev))
3464 si->flags |= SWP_SYNCHRONOUS_IO;
539a6fea 3465
b85508d7 3466 if (si->bdev && bdev_nonrot(si->bdev)) {
b85508d7 3467 si->flags |= SWP_SOLIDSTATE;
2a8f9449 3468
b843786b
JW
3469 cluster_info = setup_clusters(si, swap_header, maxpages);
3470 if (IS_ERR(cluster_info)) {
3471 error = PTR_ERR(cluster_info);
3472 cluster_info = NULL;
d795a90e 3473 goto bad_swap_unlock_inode;
2a8f9449 3474 }
7cbf3192 3475 } else {
81a0298b 3476 atomic_inc(&nr_rotate_swap);
7cbf3192
OS
3477 inced_nr_rotate_swap = true;
3478 }
1da177e4 3479
70200574 3480 if ((swap_flags & SWAP_FLAG_DISCARD) &&
b85508d7 3481 si->bdev && bdev_max_discard_sectors(si->bdev)) {
2a8f9449
SL
3482 /*
3483 * When discard is enabled for swap with no particular
3484 * policy flagged, we set all swap discard flags here in
3485 * order to sustain backward compatibility with older
3486 * swapon(8) releases.
3487 */
b85508d7 3488 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
2a8f9449 3489 SWP_PAGE_DISCARD);
dcf6b7dd 3490
2a8f9449
SL
3491 /*
3492 * By flagging sys_swapon, a sysadmin can tell us to
3493 * either do single-time area discards only, or to just
3494 * perform discards for released swap page-clusters.
3495	 * Now it's time to adjust si->flags accordingly.
3496 */
3497 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
b85508d7 3498 si->flags &= ~SWP_PAGE_DISCARD;
2a8f9449 3499 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
b85508d7 3500 si->flags &= ~SWP_AREA_DISCARD;
2a8f9449
SL
3501
3502 /* issue a swapon-time discard if it's still required */
b85508d7
BS
3503 if (si->flags & SWP_AREA_DISCARD) {
3504 int err = discard_swap(si);
2a8f9449
SL
3505 if (unlikely(err))
3506 pr_err("swapon: discard_swap(%p): %d\n",
b85508d7 3507 si, err);
dcf6b7dd 3508 }
20137a49 3509 }
6a6ba831 3510
b85508d7 3511 error = init_swap_address_space(si->type, maxpages);
4b3ef9da 3512 if (error)
d795a90e 3513 goto bad_swap_unlock_inode;
4b3ef9da 3514
b85508d7 3515 error = zswap_swapon(si->type, maxpages);
bb29fd77
CZ
3516 if (error)
3517 goto free_swap_address_space;
3518
dc617f29
DW
3519 /*
3520 * Flush any pending IO and dirty mappings before we start using this
3521 * swap device.
3522 */
3523 inode->i_flags |= S_SWAPFILE;
3524 error = inode_drain_writes(inode);
3525 if (error) {
3526 inode->i_flags &= ~S_SWAPFILE;
bb29fd77 3527 goto free_swap_zswap;
dc617f29
DW
3528 }
3529
fc0abb14 3530 mutex_lock(&swapon_mutex);
40531542 3531 prio = -1;
78ecba08 3532 if (swap_flags & SWAP_FLAG_PREFER)
40531542 3533 prio =
78ecba08 3534 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
0ca0c24e 3535 enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
c69dbfb8 3536
42c06a0e 3537 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
b85508d7 3538 K(si->pages), name->name, si->prio, nr_extents,
00cde042 3539 K((unsigned long long)span),
b85508d7
BS
3540 (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3541 (si->flags & SWP_DISCARDABLE) ? "D" : "",
3542 (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3543 (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
c69dbfb8 3544
fc0abb14 3545 mutex_unlock(&swapon_mutex);
66d7dd51
KS
3546 atomic_inc(&proc_poll_event);
3547 wake_up_interruptible(&proc_poll_wait);
3548
1da177e4
LT
3549 error = 0;
3550 goto out;
bb29fd77 3551free_swap_zswap:
b85508d7 3552 zswap_swapoff(si->type);
822bca52 3553free_swap_address_space:
b85508d7 3554 exit_swap_address_space(si->type);
d795a90e
NA
3555bad_swap_unlock_inode:
3556 inode_unlock(inode);
1da177e4 3557bad_swap:
b85508d7
BS
3558 free_percpu(si->percpu_cluster);
3559 si->percpu_cluster = NULL;
3560 free_percpu(si->cluster_next_cpu);
3561 si->cluster_next_cpu = NULL;
d795a90e 3562 inode = NULL;
b85508d7
BS
3563 destroy_swap_extents(si);
3564 swap_cgroup_swapoff(si->type);
5d337b91 3565 spin_lock(&swap_lock);
b85508d7
BS
3566 si->swap_file = NULL;
3567 si->flags = 0;
5d337b91 3568 spin_unlock(&swap_lock);
1da177e4 3569 vfree(swap_map);
0ca0c24e 3570 kvfree(zeromap);
8606a1a9 3571 kvfree(cluster_info);
7cbf3192
OS
3572 if (inced_nr_rotate_swap)
3573 atomic_dec(&nr_rotate_swap);
d795a90e 3574 if (swap_file)
1da177e4
LT
3575 filp_close(swap_file, NULL);
3576out:
97b76796
MWO
3577 if (!IS_ERR_OR_NULL(folio))
3578 folio_release_kmap(folio, swap_header);
1da177e4
LT
3579 if (name)
3580 putname(name);
1638045c 3581 if (inode)
5955102c 3582 inode_unlock(inode);
039939a6
TC
3583 if (!error)
3584 enable_swap_slots_cache();
1da177e4
LT
3585 return error;
3586}
3587
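/*
 * Editorial usage sketch, not part of swapfile.c: how a privileged
 * userspace caller might encode a priority and a discard policy into
 * swap_flags for swapon(2).  Assumes the SWAP_FLAG_* macros from
 * glibc's <sys/swap.h>; "/dev/sdb1" is a placeholder device.
 */
#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	int prio = 5;
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK) |
		    SWAP_FLAG_DISCARD;	/* no particular discard policy */

	if (swapon("/dev/sdb1", flags) != 0)
		perror("swapon");	/* needs CAP_SYS_ADMIN */
	return 0;
}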
3588void si_swapinfo(struct sysinfo *val)
3589{
efa90a98 3590 unsigned int type;
1da177e4
LT
3591 unsigned long nr_to_be_unused = 0;
3592
5d337b91 3593 spin_lock(&swap_lock);
efa90a98
HD
3594 for (type = 0; type < nr_swapfiles; type++) {
3595 struct swap_info_struct *si = swap_info[type];
3596
3597 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
c8945306 3598 nr_to_be_unused += READ_ONCE(si->inuse_pages);
1da177e4 3599 }
ec8acf20 3600 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
1da177e4 3601 val->totalswap = total_swap_pages + nr_to_be_unused;
5d337b91 3602 spin_unlock(&swap_lock);
1da177e4
LT
3603}
3604
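/*
 * Editorial usage sketch, not part of swapfile.c: si_swapinfo() is what
 * ultimately fills the swap fields that userspace reads back through
 * sysinfo(2).
 */
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo info;

	if (sysinfo(&info) != 0) {
		perror("sysinfo");
		return 1;
	}
	printf("swap free: %lu KiB of %lu KiB\n",
	       info.freeswap * info.mem_unit / 1024,
	       info.totalswap * info.mem_unit / 1024);
	return 0;
}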
3605/*
9f101bef 3606 * Verify that nr swap entries are valid and increment their swap map counts.
1da177e4 3607 *
355cfa73
KH
3608	 * Return values:
3609 * - success -> 0
3610 * - swp_entry is invalid -> EINVAL
3611 * - swp_entry is migration entry -> EINVAL
3612 * - swap-cache reference is requested but there is already one. -> EEXIST
3613 * - swap-cache reference is requested but the entry is not used. -> ENOENT
570a335b 3614 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
1da177e4 3615 */
9f101bef 3616static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
1da177e4 3617{
b85508d7 3618 struct swap_info_struct *si;
235b6217 3619 struct swap_cluster_info *ci;
c10d38cc 3620 unsigned long offset;
8d69aaee
HD
3621 unsigned char count;
3622 unsigned char has_cache;
9f101bef 3623 int err, i;
1da177e4 3624
b85508d7 3625 si = swp_swap_info(entry);
235b6217 3626
eb085574 3627 offset = swp_offset(entry);
9f101bef
BS
3628 VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3629 VM_WARN_ON(usage == 1 && nr > 1);
b85508d7 3630 ci = lock_cluster_or_swap_info(si, offset);
355cfa73 3631
253d553b 3632 err = 0;
9f101bef 3633 for (i = 0; i < nr; i++) {
b85508d7 3634 count = si->swap_map[offset + i];
355cfa73 3635
9f101bef
BS
3636 /*
3637 * swapin_readahead() doesn't check if a swap entry is valid, so the
3638 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3639 */
3640 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3641 err = -ENOENT;
3642 goto unlock_out;
3643 }
355cfa73 3644
9f101bef
BS
3645 has_cache = count & SWAP_HAS_CACHE;
3646 count &= ~SWAP_HAS_CACHE;
3647
3648 if (!count && !has_cache) {
253d553b 3649 err = -ENOENT;
9f101bef
BS
3650 } else if (usage == SWAP_HAS_CACHE) {
3651 if (has_cache)
3652 err = -EEXIST;
3653 } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3654 err = -EINVAL;
3655 }
3656
3657 if (err)
3658 goto unlock_out;
3659 }
355cfa73 3660
9f101bef 3661 for (i = 0; i < nr; i++) {
b85508d7 3662 count = si->swap_map[offset + i];
9f101bef
BS
3663 has_cache = count & SWAP_HAS_CACHE;
3664 count &= ~SWAP_HAS_CACHE;
253d553b 3665
9f101bef
BS
3666 if (usage == SWAP_HAS_CACHE)
3667 has_cache = SWAP_HAS_CACHE;
3668 else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
570a335b 3669 count += usage;
b85508d7 3670 else if (swap_count_continued(si, offset + i, count))
570a335b 3671 count = COUNT_CONTINUED;
9f101bef
BS
3672 else {
3673 /*
3674	 * No need to roll back changes: if usage == 1, then nr
3675	 * must be 1, so there are no prior iterations to undo.
3676 */
570a335b 3677 err = -ENOMEM;
9f101bef
BS
3678 goto unlock_out;
3679 }
253d553b 3680
b85508d7 3681 WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
9f101bef 3682 }
253d553b 3683
355cfa73 3684unlock_out:
b85508d7 3685 unlock_cluster_or_swap_info(si, ci);
253d553b 3686 return err;
1da177e4 3687}
253d553b 3688
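/*
 * Editorial decoder sketch, not kernel code, for one swap_map byte as
 * manipulated above: the low bits hold the map count, SWAP_HAS_CACHE
 * marks a swap cache reference, and COUNT_CONTINUED says the count
 * overflows into a continuation page.  The EX_* constants mirror the
 * kernel's <linux/swap.h> definitions and are repeated here only so
 * the sketch is self-contained.
 */
#include <stdio.h>

#define EX_SWAP_HAS_CACHE	0x40
#define EX_COUNT_CONTINUED	0x80

int main(void)
{
	unsigned char map = 0x43;	/* example: count 3, cached */
	unsigned char count = map & ~EX_SWAP_HAS_CACHE;

	printf("count=%u cached=%s continued=%s\n",
	       count & ~EX_COUNT_CONTINUED,
	       (map & EX_SWAP_HAS_CACHE) ? "yes" : "no",
	       (count & EX_COUNT_CONTINUED) ? "yes" : "no");
	return 0;
}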
aaa46865
HD
3689/*
3690 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3691 * (in which case its reference count is never incremented).
3692 */
65018076 3693void swap_shmem_alloc(swp_entry_t entry, int nr)
aaa46865 3694{
65018076 3695 __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
aaa46865
HD
3696}
3697
355cfa73 3698/*
08259d58
HD
3699 * Increase reference count of swap entry by 1.
3700 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3701 * but could not be atomically allocated. Returns 0, just as if it succeeded,
3702 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3703 * might occur if a page table entry has been corrupted.
355cfa73 3704 */
570a335b 3705int swap_duplicate(swp_entry_t entry)
355cfa73 3706{
570a335b
HD
3707 int err = 0;
3708
9f101bef 3709 while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
570a335b
HD
3710 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3711 return err;
355cfa73 3712}
1da177e4 3713
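/*
 * Editorial caller sketch (a hypothetical helper, not a quote of any
 * real call site): the retry pattern implied above when a swap entry
 * is duplicated under a spinlock.  The GFP_ATOMIC continuation
 * allocation inside swap_duplicate() can fail, so the caller drops the
 * lock, allocates with GFP_KERNEL, and tries once more.
 */
static int duplicate_entry_locked(swp_entry_t entry, spinlock_t *lock)
{
	int err = swap_duplicate(entry);

	if (err == -ENOMEM) {
		spin_unlock(lock);
		err = add_swap_count_continuation(entry, GFP_KERNEL);
		spin_lock(lock);
		if (!err)
			err = swap_duplicate(entry);
	}
	return err;
}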
cb4b86ba 3714/*
9f101bef 3715 * @entry: first of the nr contiguous swap entries for which
355cfa73 3716 * swap cache is being allocated.
9f101bef 3717 * Called when allocating swap cache for existing swap entries.
355cfa73 3718 * Returns 0 on success, or an error code otherwise.
3eeba135 3719 * -EEXIST means a swap cache reference already exists.
355cfa73 3720 * Note: the return code differs from swap_duplicate().
cb4b86ba 3721 */
9f101bef 3722int swapcache_prepare(swp_entry_t entry, int nr)
cb4b86ba 3723{
9f101bef 3724 return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
cb4b86ba
KH
3725}
3726
9f101bef 3727void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
13ddaf26 3728{
13ddaf26 3729 unsigned long offset = swp_offset(entry);
13ddaf26 3730
9f101bef 3731 cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
13ddaf26
KS
3732}
3733
0bcac06f
MK
3734struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3735{
c10d38cc 3736 return swap_type_to_swap_info(swp_type(entry));
0bcac06f
MK
3737}
3738
f981c595 3739/*
2f52578f 3740 * out-of-line methods to avoid include hell.
f981c595 3741 */
2f52578f 3742struct address_space *swapcache_mapping(struct folio *folio)
f981c595 3743{
69fe7d67 3744 return swp_swap_info(folio->swap)->swap_file->f_mapping;
f981c595 3745}
2f52578f 3746EXPORT_SYMBOL_GPL(swapcache_mapping);
f981c595 3747
05b0c7ed 3748pgoff_t __folio_swap_cache_index(struct folio *folio)
f981c595 3749{
7aad25b4 3750 return swap_cache_index(folio->swap);
f981c595 3751}
05b0c7ed 3752EXPORT_SYMBOL_GPL(__folio_swap_cache_index);
f981c595 3753
570a335b
HD
3754/*
3755 * add_swap_count_continuation - called when a swap count is duplicated
3756 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3757 * page of the original vmalloc'ed swap_map, to hold the continuation count
3758 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
3759 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3760 *
3761 * These continuation pages are seldom referenced: the common paths all work
3762 * on the original swap_map, only referring to a continuation page when the
3763 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3764 *
3765 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3766 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3767 * can be called after dropping locks.
3768 */
3769int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3770{
3771 struct swap_info_struct *si;
235b6217 3772 struct swap_cluster_info *ci;
570a335b
HD
3773 struct page *head;
3774 struct page *page;
3775 struct page *list_page;
3776 pgoff_t offset;
3777 unsigned char count;
eb085574 3778 int ret = 0;
570a335b
HD
3779
3780 /*
3781 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3782 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3783 */
3784 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3785
eb085574 3786 si = get_swap_device(entry);
570a335b
HD
3787 if (!si) {
3788 /*
3789 * An acceptable race has occurred since the failing
eb085574 3790 * __swap_duplicate(): the swap device may have been swapped off.
570a335b
HD
3791 */
3792 goto outer;
3793 }
eb085574 3794 spin_lock(&si->lock);
570a335b
HD
3795
3796 offset = swp_offset(entry);
235b6217
HY
3797
3798 ci = lock_cluster(si, offset);
3799
d8aa24e0 3800 count = swap_count(si->swap_map[offset]);
570a335b
HD
3801
3802 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3803 /*
3804 * The higher the swap count, the more likely it is that tasks
3805 * will race to add swap count continuation: we need to avoid
3806 * over-provisioning.
3807 */
3808 goto out;
3809 }
3810
3811 if (!page) {
eb085574
HY
3812 ret = -ENOMEM;
3813 goto out;
570a335b
HD
3814 }
3815
570a335b
HD
3816 head = vmalloc_to_page(si->swap_map + offset);
3817 offset &= ~PAGE_MASK;
3818
2628bd6f 3819 spin_lock(&si->cont_lock);
570a335b
HD
3820 /*
3821 * Page allocation does not initialize the page's lru field,
3822 * but it does always reset its private field.
3823 */
3824 if (!page_private(head)) {
3825 BUG_ON(count & COUNT_CONTINUED);
3826 INIT_LIST_HEAD(&head->lru);
3827 set_page_private(head, SWP_CONTINUED);
3828 si->flags |= SWP_CONTINUED;
3829 }
3830
3831 list_for_each_entry(list_page, &head->lru, lru) {
3832 unsigned char *map;
3833
3834 /*
3835 * If the previous map said no continuation, but we've found
3836 * a continuation page, free our allocation and use this one.
3837 */
3838 if (!(count & COUNT_CONTINUED))
2628bd6f 3839 goto out_unlock_cont;
570a335b 3840
829c3151 3841 map = kmap_local_page(list_page) + offset;
570a335b 3842 count = *map;
829c3151 3843 kunmap_local(map);
570a335b
HD
3844
3845 /*
3846 * If this continuation count now has some space in it,
3847 * free our allocation and use this one.
3848 */
3849 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2628bd6f 3850 goto out_unlock_cont;
570a335b
HD
3851 }
3852
3853 list_add_tail(&page->lru, &head->lru);
3854 page = NULL; /* now it's attached, don't free it */
2628bd6f
HY
3855out_unlock_cont:
3856 spin_unlock(&si->cont_lock);
570a335b 3857out:
235b6217 3858 unlock_cluster(ci);
ec8acf20 3859 spin_unlock(&si->lock);
eb085574 3860 put_swap_device(si);
570a335b
HD
3861outer:
3862 if (page)
3863 __free_page(page);
eb085574 3864 return ret;
570a335b
HD
3865}
3866
3867/*
3868 * swap_count_continued - when the original swap_map count is incremented
3869 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3870 * into, carry if so, or else fail until a new continuation page is allocated;
3871 * when the original swap_map count is decremented from 0 with continuation,
3872 * borrow from the continuation and report whether it still holds more.
235b6217
HY
3873 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3874 * lock.
570a335b
HD
3875 */
3876static bool swap_count_continued(struct swap_info_struct *si,
3877 pgoff_t offset, unsigned char count)
3878{
3879 struct page *head;
3880 struct page *page;
3881 unsigned char *map;
2628bd6f 3882 bool ret;
570a335b
HD
3883
3884 head = vmalloc_to_page(si->swap_map + offset);
3885 if (page_private(head) != SWP_CONTINUED) {
3886 BUG_ON(count & COUNT_CONTINUED);
3887 return false; /* need to add count continuation */
3888 }
3889
2628bd6f 3890 spin_lock(&si->cont_lock);
570a335b 3891 offset &= ~PAGE_MASK;
213516ac 3892 page = list_next_entry(head, lru);
829c3151 3893 map = kmap_local_page(page) + offset;
570a335b
HD
3894
3895 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
3896 goto init_map; /* jump over SWAP_CONT_MAX checks */
3897
3898 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3899 /*
3900 * Think of how you add 1 to 999
3901 */
3902 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
829c3151 3903 kunmap_local(map);
213516ac 3904 page = list_next_entry(page, lru);
570a335b 3905 BUG_ON(page == head);
829c3151 3906 map = kmap_local_page(page) + offset;
570a335b
HD
3907 }
3908 if (*map == SWAP_CONT_MAX) {
829c3151 3909 kunmap_local(map);
213516ac 3910 page = list_next_entry(page, lru);
2628bd6f
HY
3911 if (page == head) {
3912 ret = false; /* add count continuation */
3913 goto out;
3914 }
829c3151 3915 map = kmap_local_page(page) + offset;
570a335b
HD
3916init_map: *map = 0; /* we didn't zero the page */
3917 }
3918 *map += 1;
829c3151 3919 kunmap_local(map);
213516ac 3920 while ((page = list_prev_entry(page, lru)) != head) {
829c3151 3921 map = kmap_local_page(page) + offset;
570a335b 3922 *map = COUNT_CONTINUED;
829c3151 3923 kunmap_local(map);
570a335b 3924 }
2628bd6f 3925 ret = true; /* incremented */
570a335b
HD
3926
3927 } else { /* decrementing */
3928 /*
3929 * Think of how you subtract 1 from 1000
3930 */
3931 BUG_ON(count != COUNT_CONTINUED);
3932 while (*map == COUNT_CONTINUED) {
829c3151 3933 kunmap_local(map);
213516ac 3934 page = list_next_entry(page, lru);
570a335b 3935 BUG_ON(page == head);
829c3151 3936 map = kmap_local_page(page) + offset;
570a335b
HD
3937 }
3938 BUG_ON(*map == 0);
3939 *map -= 1;
3940 if (*map == 0)
3941 count = 0;
829c3151 3942 kunmap_local(map);
213516ac 3943 while ((page = list_prev_entry(page, lru)) != head) {
829c3151 3944 map = kmap_local_page(page) + offset;
570a335b
HD
3945 *map = SWAP_CONT_MAX | count;
3946 count = COUNT_CONTINUED;
829c3151 3947 kunmap_local(map);
570a335b 3948 }
2628bd6f 3949 ret = count == COUNT_CONTINUED;
570a335b 3950 }
2628bd6f
HY
3951out:
3952 spin_unlock(&si->cont_lock);
3953 return ret;
570a335b
HD
3954}
3955
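/*
 * Editorial model, not kernel code, of the carry arithmetic in
 * swap_count_continued(): each continuation page contributes one more
 * "digit" to a count, so incrementing past a full digit carries into
 * the next page, exactly like adding 1 to 999.  DIGIT_MAX stands in
 * for SWAP_CONT_MAX and the array for the page list.
 */
#include <stdbool.h>
#include <stdio.h>

#define DIGIT_MAX 0x7f			/* models SWAP_CONT_MAX */
#define NDIGITS   4			/* models continuation pages */

static bool increment(unsigned char digit[NDIGITS])
{
	for (int i = 0; i < NDIGITS; i++) {
		if (digit[i] < DIGIT_MAX) {
			digit[i]++;		/* room in this digit */
			return true;
		}
		digit[i] = 0;			/* carry to the next page */
	}
	return false;	/* all digits full: a new page would be needed */
}

int main(void)
{
	unsigned char d[NDIGITS] = { DIGIT_MAX, DIGIT_MAX, 0, 0 };

	increment(d);		/* carries twice: 7f,7f,00 -> 00,00,01 */
	printf("%#x %#x %#x %#x\n", d[0], d[1], d[2], d[3]);
	return 0;
}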
3956/*
3957 * free_swap_count_continuations - swapoff free all the continuation pages
3958 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3959 */
3960static void free_swap_count_continuations(struct swap_info_struct *si)
3961{
3962 pgoff_t offset;
3963
3964 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3965 struct page *head;
3966 head = vmalloc_to_page(si->swap_map + offset);
3967 if (page_private(head)) {
0d576d20
GT
3968 struct page *page, *next;
3969
3970 list_for_each_entry_safe(page, next, &head->lru, lru) {
3971 list_del(&page->lru);
570a335b
HD
3972 __free_page(page);
3973 }
3974 }
3975 }
3976}
a2468cc9 3977
2cf85583 3978#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3e4fb13a 3979void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
2cf85583
TH
3980{
3981 struct swap_info_struct *si, *next;
3e4fb13a 3982 int nid = folio_nid(folio);
6caa6a07 3983
3e4fb13a 3984 if (!(gfp & __GFP_IO))
2cf85583
TH
3985 return;
3986
80e75021
KW
3987 if (!__has_usable_swap())
3988 return;
3989
2cf85583
TH
3990 if (!blk_cgroup_congested())
3991 return;
3992
3993 /*
3994 * We've already scheduled a throttle, avoid taking the global swap
3995 * lock.
3996 */
f05837ed 3997 if (current->throttle_disk)
2cf85583
TH
3998 return;
3999
4000 spin_lock(&swap_avail_lock);
6caa6a07
JW
4001 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
4002 avail_lists[nid]) {
2cf85583 4003 if (si->bdev) {
de185b56 4004 blkcg_schedule_throttle(si->bdev->bd_disk, true);
2cf85583
TH
4005 break;
4006 }
4007 }
4008 spin_unlock(&swap_avail_lock);
4009}
4010#endif
4011
a2468cc9
AL
4012static int __init swapfile_init(void)
4013{
4014 int nid;
4015
4016 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
4017 GFP_KERNEL);
4018 if (!swap_avail_heads) {
4019 pr_emerg("Not enough memory for swap heads, swap is disabled\n");
4020 return -ENOMEM;
4021 }
4022
4023 for_each_node(nid)
4024 plist_head_init(&swap_avail_heads[nid]);
4025
be45a490
PX
4026 swapfile_maximum_size = arch_max_swapfile_size();
4027
5154e607
PX
4028#ifdef CONFIG_MIGRATION
4029 if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
4030 swap_migration_ad_supported = true;
4031#endif /* CONFIG_MIGRATION */
4032
a2468cc9
AL
4033 return 0;
4034}
4035subsys_initcall(swapfile_init);