// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
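
/*
 * Illustrative layout of the swap_readahead_info word built by
 * SWAP_RA_VAL(), assuming PAGE_SHIFT == 12 (so SWAP_RA_WIN_SHIFT == 6):
 *
 *	bits  0..5	readahead hits		(SWAP_RA_HITS_MASK)
 *	bits  6..11	readahead window	(SWAP_RA_WIN_MASK)
 *	bits 12..	last fault address, page aligned (PAGE_MASK)
 *
 * e.g. SWAP_RA_VAL(0x7f0000, 8, 3) == 0x7f0000 | (8 << 6) | 3.
 */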

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

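/*
 * Return the shadow (workingset) entry stored at @entry's slot in the
 * swap cache, or NULL if the slot holds a folio or is empty.
 */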
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE page: its
	 * pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

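/*
 * Erase all shadow entries for swap device @type in the offset range
 * [begin, end].  The range may span several swap address spaces, so the
 * walk below advances one SWAP_ADDRESS_SPACE_PAGES-sized chunk at a time.
 */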
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * - Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}

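/*
 * VMA readahead is used unless it has been disabled via sysfs or a
 * rotating (non-SSD) swap device is in use (nr_rotate_swap != 0).
 */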
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() (-ENOENT if nothing was found).
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

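/*
 * Look up @entry in the swap cache, or allocate a new folio, charge it
 * and insert it at that slot.  swapcache_prepare() makes the swap slot's
 * SWAP_HAS_CACHE flag act as the lock resolving races between callers:
 * losers either find the folio in the cache on a later pass or loop
 * until the winner has inserted it.  On return, *@new_page_allocated
 * tells the caller whether it must start the actual read itself.
 */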
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx,
		bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache. Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		if (!IS_ERR(folio)) {
			page = folio_file_page(folio, swp_offset(entry));
			goto got_page;
		}

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE. That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new page to read into from swap. Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
						mpol, ilx, numa_node_id());
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	page = &folio->page;
got_page:
	put_swap_device(si);
	return page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma,
		unsigned long addr, struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct page *page;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_readpage(page, false, plug);
	return page;
}

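/*
 * Size the next readahead window from the recent hit count.  A worked
 * example: hits == 5 gives pages = 5 + 2 = 7, rounded up to the next
 * power of two = 8, then capped at max_pages and floored at half the
 * previous window so the window never shrinks too fast.
 */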
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

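/*
 * Non-VMA readahead keeps its state (previous fault offset, global hit
 * counter, last window size) in file-scope statics rather than per VMA.
 */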
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_readpage(page, false, NULL);
	zswap_page_swapin(page);
	return page;
}

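/*
 * One address_space is set up per SWAP_ADDRESS_SPACE_PAGES worth of swap
 * slots, so lookups on a large swap device are spread over several
 * xarrays (and their locks) rather than a single one.
 */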
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};

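/*
 * Compute the VMA readahead window for the fault described by @vmf:
 * pick a window size with __swapin_nr_pages(), extend it forwards or
 * backwards for sequential access (or centre it on the faulting page
 * otherwise), then clamp it to the VMA and to the PMD containing the
 * fault address.
 */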
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
				       struct mempolicy *mpol, pgoff_t targ_ilx,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct page *page;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	pgoff_t ilx;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
	ilx = targ_ilx - ra_info.offset;

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					       &page_allocated, false);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
				       &page_allocated, false);
	if (unlikely(page_allocated))
		swap_readpage(page, false, NULL);
	zswap_page_swapin(page);
	return page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on
 * configuration, it reads ahead either cluster-based (i.e. around the
 * physical disk offset) or vma-based (i.e. around the faulting virtual
 * address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct page *page;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	page = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);
	return page;
}

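/*
 * The knob below is exposed as /sys/kernel/mm/swap/vma_ra_enabled
 * (the "swap" kobject is created under mm_kobj at boot).
 */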
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif