// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
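
/*
 * Illustrative example of the packing above, assuming 4K pages
 * (PAGE_SHIFT == 12): SWAP_RA_WIN_SHIFT is then 6, so bits 0-5 of the
 * packed value hold the hit count, bits 6-11 hold the readahead window
 * size, and the remaining high bits hold the page-aligned address:
 *
 *	SWAP_RA_VAL(0x1000, 2, 3) == 0x1000 | (2 << 6) | 3 == 0x1083
 *
 * SWAP_RA_ADDR(), SWAP_RA_WIN() and SWAP_RA_HITS() recover 0x1000, 2
 * and 3 respectively from that value.
 */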
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}
/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 *
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have dirty bit cleared but the folio's
	 * SwapBacked flag is still set because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}
/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * because the caller has a reference on the folio.
 */
237 void delete_from_swap_cache(struct folio *folio)
239 swp_entry_t entry = folio->swap;
240 struct address_space *address_space = swap_address_space(entry);
242 xa_lock_irq(&address_space->i_pages);
243 __delete_from_swap_cache(folio, entry, NULL);
244 xa_unlock_irq(&address_space->i_pages);
246 put_swap_folio(folio, entry);
247 folio_ref_sub(folio, folio_nr_pages(folio));
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	lru_add_drain();
	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}
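
/*
 * Note on the loop above: when ENCODED_PAGE_BIT_NR_PAGES_NEXT is set
 * on pages[i], the next array slot carries a reference count rather
 * than a page pointer, which is why the loop consumes two slots at
 * once via pages[++i]; otherwise a single reference is dropped per
 * folio.
 */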
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return folio;
}
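
/*
 * A hit on a folio that still had PG_readahead set is what feeds the
 * readahead statistics: per-VMA state in swap_readahead_info when VMA
 * readahead is in use, or the global swapin_readahead_hits counter
 * for cluster readahead.
 */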
/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() on failure.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new folio to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */
	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	folio->swap = entry;
got_folio:
	put_swap_device(si);
	return folio;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);
	return folio;
}
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
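
/*
 * Worked example for the heuristic above: with 4 readahead hits since
 * the last fault, pages = 4 + 2 = 6, rounded up to the next power of
 * two, 8.  With no hits and a fault at an offset not adjacent to the
 * previous one, the window collapses to a single page.  Either way the
 * result is clamped to max_pages and never drops below half of the
 * previous window.
 */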
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, NULL);
	}
	return folio;
}
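
/*
 * Illustrative example of the alignment above: with a window of 8
 * pages, mask == 7, so a fault at swap offset 0x1234 reads offsets
 * 0x1230 (0x1234 & ~7) through 0x1237 (0x1234 | 7) - the faulting
 * entry always falls inside the aligned cluster being brought in.
 */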
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
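
/*
 * Illustrative sizing example, assuming 4K pages and the usual
 * SWAP_ADDRESS_SPACE_SHIFT of 14: each address_space then covers
 * 1 << 14 == 16384 swap slots, i.e. 64MB of swap, so a 1GiB swap
 * device (262144 slots) gets DIV_ROUND_UP(262144, 16384) == 16
 * address_spaces, spreading xarray lock contention across the device.
 */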
void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}
static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}
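
/*
 * Worked example for the placement above: with win == 8 and a fault
 * that does not extend a sequential run, left = faddr - 3 pages, so
 * the window covers 3 pages before the fault and 4 after it.  A fault
 * one page above the previous one instead puts the whole window ahead
 * of faddr.  The result is then clipped to the VMA and to the PMD
 * containing the fault, so the PTE walk in swap_vma_readahead() never
 * crosses a page-table boundary.
 */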
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, NULL);
	}
	return folio;
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical,
 * disk-offset based) or vma-based (i.e. based on virtual addresses
 * around the faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	if (!folio)
		return NULL;
	return folio_file_page(folio, swp_offset(entry));
}
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif