/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

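/*
 * For orientation: these address_space_operations make swap-cache pages
 * look enough like file-cache pages that vmscan can treat both uniformly.
 * Writeback of a dirty swap-cache page goes through ->writepage ==
 * swap_writepage(), which submits block I/O directly to the swap device.
 */
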
struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

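/*
 * Illustrative sketch (the real definition lives in <linux/swap.h>, not
 * in this file): swap_address_space() picks the address_space for an
 * entry roughly as
 *
 *      &swapper_spaces[swp_type(entry)][swp_offset(entry)
 *                                       >> SWAP_ADDRESS_SPACE_SHIFT]
 *
 * so each swap device gets one address_space per fixed-size chunk of
 * offsets, spreading tree_lock contention across several locks.
 */
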
unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;

        rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
                /*
                 * The corresponding entries in nr_swapper_spaces and
                 * swapper_spaces will be reused only after at least
                 * one grace period.  So it is impossible for them
                 * to belong to different usages.
                 */
                nr = nr_swapper_spaces[i];
                spaces = rcu_dereference(swapper_spaces[i]);
                if (!nr || !spaces)
                        continue;
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
        }
        rcu_read_unlock();
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

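/*
 * Worked example of the kB conversion above: with 4KB pages, PAGE_SHIFT
 * is 12, so "x << (PAGE_SHIFT - 10)" is x << 2, i.e. the page count
 * multiplied by 4 to give kilobytes.
 */
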
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        get_page(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                  swp_offset(entry), page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_node_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * calls add_to_swap_cache(), so add_to_swap_cache()
                 * doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                put_page(page);
        }

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

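/*
 * The preload + insert pattern used above, in generic form (an
 * illustrative sketch, not code from this file):
 *
 *      err = radix_tree_maybe_preload(gfp_mask);       // may sleep
 *      if (!err) {
 *              spin_lock_irq(&tree_lock);
 *              err = radix_tree_insert(&tree, index, item);
 *              spin_unlock_irq(&tree_lock);
 *              radix_tree_preload_end();       // reenables preemption
 *      }
 *
 * Nodes must be preallocated before taking the spinlock because node
 * allocation can sleep; the preload keeps per-cpu spare nodes, which is
 * why preemption stays disabled until radix_tree_preload_end().
 */
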
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, swp_offset(entry));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to split the page onto if it is transparent huge
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        if (mem_cgroup_try_charge_swap(page, entry)) {
                swapcache_free(entry);
                return 0;
        }

        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

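/*
 * Typical caller, for context (it lives in mm/vmscan.c, not in this
 * file): shrink_page_list() calls add_to_swap(page, page_list) for a
 * dirty anonymous page and, on success, writes the page out via
 * pageout(), which reaches swap_writepage() through the swap_aops above.
 */
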
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

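/*
 * Note: the TestClearPageReadahead() accounting above is the feedback
 * half of the readahead heuristic — every speculatively read page that
 * actually gets faulted in bumps swapin_readahead_hits, which
 * swapin_nr_pages() below uses to grow or shrink the next window.
 */
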
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, swp_offset(entry));
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

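/*
 * Loop outcomes above, summarised:
 *  - found_page != NULL: the page is already in the swap cache.
 *  - swapcache_prepare() == -EEXIST: another task holds SWAP_HAS_CACHE
 *    and is still inserting the page; cond_resched() and retry.
 *  - swapcache_prepare() fails otherwise: the entry was freed; give up.
 *  - __add_to_swap_cache() == -ENOMEM: radix-tree node allocation
 *    failed; drop SWAP_HAS_CACHE and give up.
 */
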
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage);

        return retpage;
}

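/*
 * Minimal usage sketch (error handling elided; this mirrors roughly what
 * do_swap_page() does with the result):
 *
 *      struct page *page;
 *
 *      page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 *                                   vma, address);
 *      if (page) {
 *              lock_page(page);        // waits for swap_readpage() I/O,
 *                                      // which unlocks on completion
 *              ...                     // map the page, then
 *              unlock_page(page);
 *              put_page(page);
 *      }
 */
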
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

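/*
 * Worked example of the heuristic above: with page_cluster == 3,
 * max_pages == 8.  If 5 readahead hits were recorded since the last
 * fault, pages = 5 + 2 = 7, rounded up to the next power of two, 8,
 * which is within max_pages.  With no hits and a non-adjacent offset,
 * pages = 1 (no readahead) — unless the previous window was larger, in
 * which case the window shrinks by at most half per fault ("don't
 * shrink readahead too fast").
 */
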
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}

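/*
 * Worked example of the window arithmetic: suppose swapin_nr_pages()
 * returns 8, so mask == 7.  For a fault at offset 21, start_offset ==
 * (21 & ~7) == 16 and end_offset == (21 | 7) == 23, so offsets 16..23
 * are read, with 21 being the target and the other seven pages marked
 * PageReadahead.  If start_offset were 0 it would be bumped to 1,
 * because offset 0 holds the swap header.
 */
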
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = vzalloc(sizeof(struct address_space) * nr);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
                spin_lock_init(&space->tree_lock);
        }
        nr_swapper_spaces[type] = nr;
        rcu_assign_pointer(swapper_spaces[type], spaces);

        return 0;
}

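/*
 * Example sizing (assuming SWAP_ADDRESS_SPACE_PAGES == 1 << 14 as
 * defined in <linux/swap.h>, i.e. 64MB worth of 4KB pages): a 1GB swap
 * device has nr_pages == 262144, so nr == DIV_ROUND_UP(262144, 16384)
 * == 16 address_spaces, each covering one 64MB chunk of the device.
 */
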
void exit_swap_address_space(unsigned int type)
{
        struct address_space *spaces;

        spaces = swapper_spaces[type];
        nr_swapper_spaces[type] = 0;
        rcu_assign_pointer(swapper_spaces[type], NULL);
        synchronize_rcu();
        kvfree(spaces);
}