// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

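/*
 * Per-VMA readahead state is packed into one unsigned long in
 * vma->swap_readahead_info: the page-aligned fault address, the current
 * readahead window and the recent hit count.
 */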
#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)       data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)   data_race(swap_cache_info.x += (nr))

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

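/*
 * Return the workingset shadow entry stored at @entry's slot in the swap
 * cache, or NULL if the slot holds a page or nothing.
 */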
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = thp_nr_pages(page);
        void *old;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        do {
                unsigned long nr_shadows = 0;

                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                nr_shadows++;
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
                ADD_CACHE_INFO(add_total, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        if (shadow)
                address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        __mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is MADV_FREE page. The page's pte could have
         * the dirty bit cleared while the page's SwapBacked bit is still set,
         * because clearing the dirty bit and the SwapBacked bit is not
         * protected by any lock. For such a page, unmap will not set the dirty
         * bit, so page reclaim will not write the page out. This can cause
         * data corruption when the page is swapped in later. Always setting
         * the dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, thp_nr_pages(page));
}

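/*
 * Drop the workingset shadow entries for swap slots [begin, end] of the
 * given swap type, walking one swap address space at a time.
 */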
void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                unsigned long nr_shadows = 0;
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                        nr_shadows++;
                }
                address_space->nrexceptional -= nr_shadows;
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

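/*
 * VMA-based readahead is used unless it has been disabled via sysfs or any
 * rotational swap device is in use.
 */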
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;
        struct swap_info_struct *si;

        si = get_swap_device(entry);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        put_swap_device(si);

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct page *page = pagecache_get_page(mapping, index,
                                                FGP_ENTRY | FGP_HEAD, 0);

        if (!page)
                return page;
        if (!xa_is_value(page))
                return find_subpage(page, index);
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(page);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(swp), swp_offset(swp));
        put_swap_device(si);
        return page;
}

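/*
 * Look up @entry in the swap cache, allocating and inserting a new page if
 * none is cached yet.  Returns the page with an elevated refcount, or NULL
 * if the entry is no longer in use or the allocation failed.  The caller
 * must initiate the read when *new_page_allocated is set.
 */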
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                page = find_get_page(swap_address_space(entry),
                                     swp_offset(entry));
                put_swap_device(si);
                if (page)
                        return page;

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                page = alloc_page_vma(gfp_mask, vma, addr);
                if (!page)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                put_page(page);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                cond_resched();
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __SetPageLocked(page);
        __SetPageSwapBacked(page);

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }

        if (mem_cgroup_charge(page, NULL, gfp_mask)) {
                delete_from_swap_cache(page);
                goto fail_unlock;
        }

        if (shadow)
                workingset_refault(page, shadow);

        /* Caller will initiate read into locked page */
        lru_cache_add(page);
        *new_page_allocated = true;
        return page;

fail_unlock:
        unlock_page(page);
        put_page(page);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

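/*
 * Compute the next readahead window from the previous window and the
 * number of readahead hits seen since the last fault.
 */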
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

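/*
 * Cluster readahead keeps its state (previous offset, last window, hit
 * count) in globals, so the window is shared by all faulting tasks.
 */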
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Test swap type to make sure the dereference is safe */
        if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
                struct inode *inode = si->swap_file->f_mapping->host;
                if (inode_read_congested(inode))
                        goto skip;
        }

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

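/*
 * Split the swap cache into one address space per SWAP_ADDRESS_SPACE_PAGES
 * worth of slots, so a large swap device is not serialised on a single
 * xarray lock.
 */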
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

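/* Tear down the swap address spaces of a swap type at swapoff time */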
void exit_swap_address_space(unsigned int type)
{
        kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

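/*
 * Clamp the readahead window to the VMA and to the PMD covering the
 * faulting address, so the PTE walk never crosses a page table boundary.
 */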
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

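/*
 * Choose the VMA readahead window for this fault and snapshot the page
 * table entries that fall inside it.
 */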
static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if ((unlikely(non_swap_entry(entry)))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e. physical disk offset based) or VMA-based (i.e. virtual addresses
 * around the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
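/*
 * /sys/kernel/mm/swap/vma_ra_enabled: runtime switch between VMA-based and
 * cluster-based swap readahead.
 */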
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif